| code (string, 3-1.01M chars) | repo_name (string, 5-116 chars) | path (string, 3-311 chars) | language (string, 30 classes) | license (string, 15 classes) | size (int64, 3-1.01M) |
|---|---|---|---|---|---|
/*
* Copyright (c) 2007-2008 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Module Name : usbdrv.c */
/* */
/* Abstract */
/* This module contains network interface up/down related functions.*/
/* */
/* NOTES */
/* Platform dependent. */
/* */
/************************************************************************/
/* src/usbdrv.c */
#define ZM_PIBSS_MODE 0
#define ZM_AP_MODE 0
#define ZM_CHANNEL 11
#define ZM_WEP_MODE 0
#define ZM_SHARE_AUTH 0
#define ZM_DISABLE_XMIT 0
#include "usbdrv.h"
#include "oal_dt.h"
#include "80211core/pub_zfi.h"
#include "linux/netlink.h"
#include "linux/rtnetlink.h"
#include "linux/slab.h"
#include <net/iw_handler.h>
#ifdef ZM_HOSTAPD_SUPPORT
#include "athr_common.h"
#endif
extern void zfDumpDescriptor(zdev_t* dev, u16_t type);
//extern void zfiWlanQueryMacAddress(zdev_t* dev, u8_t* addr);
// ISR handler
irqreturn_t usbdrv_intr(int, void *, struct pt_regs *);
// Network Device interface related function
int usbdrv_open(struct net_device *);
int usbdrv_close(struct net_device *);
int usbdrv_change_mtu(struct net_device *, int);
int usbdrv_set_mac(struct net_device *, void *);
int usbdrv_xmit_frame(struct sk_buff *, struct net_device *);
void usbdrv_set_multi(struct net_device *);
struct net_device_stats *usbdrv_get_stats(struct net_device *);
//wireless extension helper functions
int usbdrv_ioctl_setessid(struct net_device *dev, struct iw_point *erq);
int usbdrv_ioctl_getessid(struct net_device *dev, struct iw_point *erq);
int usbdrv_ioctl_setrts(struct net_device *dev, struct iw_param *rrq);
/* Wireless Extension Handler functions */
int usbdrvwext_giwmode(struct net_device *dev, struct iw_request_info* info,
__u32 *mode, char *extra);
int zfLnxPrivateIoctl(struct usbdrv_private *macp, struct zdap_ioctl *zdreq);
void zfLnx10msTimer(struct net_device* dev);
int zfUnregisterWdsDev(struct net_device* parentDev, u16_t wdsId);
int zfRegisterWdsDev(struct net_device* parentDev, u16_t wdsId);
int zfWdsOpen(struct net_device *dev);
int zfWdsClose(struct net_device *dev);
int zfLnxVapOpen(struct net_device *dev);
int zfLnxVapClose(struct net_device *dev);
int zfLnxVapXmitFrame(struct sk_buff *skb, struct net_device *dev);
int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId);
int usbdrv_wpa_ioctl(struct net_device *dev, struct athr_wlan_param *zdparm);
extern u16_t zfLnxGetVapId(zdev_t* dev);
extern u16_t zfLnxCheckTxBufferCnt(zdev_t *dev);
extern UsbTxQ_t *zfLnxGetUsbTxBuffer(zdev_t *dev);
extern u16_t zfLnxAuthNotify(zdev_t* dev, u16_t* macAddr);
extern u16_t zfLnxAsocNotify(zdev_t* dev, u16_t* macAddr, u8_t* body, u16_t bodySize, u16_t port);
extern u16_t zfLnxDisAsocNotify(zdev_t* dev, u8_t* macAddr, u16_t port);
extern u16_t zfLnxApConnectNotify(zdev_t* dev, u8_t* macAddr, u16_t port);
extern void zfLnxConnectNotify(zdev_t* dev, u16_t status, u16_t* bssid);
extern void zfLnxScanNotify(zdev_t* dev, struct zsScanResult* result);
extern void zfLnxStatisticsNotify(zdev_t* dev, struct zsStastics* result);
extern void zfLnxMicFailureNotify(zdev_t* dev, u16_t* addr, u16_t status);
extern void zfLnxApMicFailureNotify(zdev_t* dev, u8_t* addr, zbuf_t* buf);
extern void zfLnxIbssPartnerNotify(zdev_t* dev, u16_t status, struct zsPartnerNotifyEvent *event);
extern void zfLnxMacAddressNotify(zdev_t* dev, u8_t* addr);
extern void zfLnxSendCompleteIndication(zdev_t* dev, zbuf_t* buf);
extern void zfLnxRecvEth(zdev_t* dev, zbuf_t* buf, u16_t port);
extern void zfLnxRestoreBufData(zdev_t* dev, zbuf_t* buf);
#ifdef ZM_ENABLE_CENC
extern u16_t zfLnxCencAsocNotify(zdev_t* dev, u16_t* macAddr, u8_t* body, u16_t bodySize, u16_t port);
#endif //ZM_ENABLE_CENC
extern void zfLnxWatchDogNotify(zdev_t* dev);
extern void zfLnxRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo);
extern u8_t zfLnxCreateThread(zdev_t *dev);
/******************************************************************************
* P U B L I C D A T A
*******************************************************************************
*/
/* Definition of Wireless Extension */
/* wireless extension helper functions */
extern int usbdrv_ioctl_setessid(struct net_device *dev, struct iw_point *erq);
extern int usbdrv_ioctl_setrts(struct net_device *dev, struct iw_param *rrq);
/* Wireless Extension Handler functions */
extern int usbdrvwext_giwname(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrq, char *extra);
extern int usbdrvwext_siwfreq(struct net_device *dev, struct iw_request_info *info,
struct iw_freq *freq, char *extra);
extern int usbdrvwext_giwfreq(struct net_device *dev, struct iw_request_info *info,
struct iw_freq *freq, char *extra);
extern int usbdrvwext_siwmode(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrq, char *extra);
extern int usbdrvwext_giwmode(struct net_device *dev, struct iw_request_info *info,
__u32 *mode, char *extra);
extern int usbdrvwext_siwsens(struct net_device *dev, struct iw_request_info *info,
struct iw_param *sens, char *extra);
extern int usbdrvwext_giwsens(struct net_device *dev, struct iw_request_info *info,
struct iw_param *sens, char *extra);
extern int usbdrvwext_giwrange(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *extra);
extern int usbdrvwext_siwap(struct net_device *dev, struct iw_request_info *info,
struct sockaddr *MacAddr, char *extra);
extern int usbdrvwext_giwap(struct net_device *dev, struct iw_request_info *info,
struct sockaddr *MacAddr, char *extra);
extern int usbdrvwext_iwaplist(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *extra);
extern int usbdrvwext_siwscan(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *extra);
extern int usbdrvwext_giwscan(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *extra);
extern int usbdrvwext_siwessid(struct net_device *dev, struct iw_request_info *info,
struct iw_point *essid, char *extra);
extern int usbdrvwext_giwessid(struct net_device *dev, struct iw_request_info *info,
struct iw_point *essid, char *extra);
extern int usbdrvwext_siwnickn(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *nickname);
extern int usbdrvwext_giwnickn(struct net_device *dev, struct iw_request_info *info,
struct iw_point *data, char *nickname);
extern int usbdrvwext_siwrate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frq, char *extra);
extern int usbdrvwext_giwrate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frq, char *extra);
extern int usbdrvwext_siwrts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rts, char *extra);
extern int usbdrvwext_giwrts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rts, char *extra);
extern int usbdrvwext_siwfrag(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frag, char *extra);
extern int usbdrvwext_giwfrag(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frag, char *extra);
extern int usbdrvwext_siwtxpow(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rrq, char *extra);
extern int usbdrvwext_giwtxpow(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rrq, char *extra);
extern int usbdrvwext_siwretry(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rrq, char *extra);
extern int usbdrvwext_giwretry(struct net_device *dev, struct iw_request_info *info,
struct iw_param *rrq, char *extra);
extern int usbdrvwext_siwencode(struct net_device *dev, struct iw_request_info *info,
struct iw_point *erq, char *key);
extern int usbdrvwext_giwencode(struct net_device *dev, struct iw_request_info *info,
struct iw_point *erq, char *key);
extern int usbdrvwext_siwpower(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frq, char *extra);
extern int usbdrvwext_giwpower(struct net_device *dev, struct iw_request_info *info,
struct iw_param *frq, char *extra);
extern int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/*
* Structures to export the Wireless Handlers
*/
struct iw_priv_args usbdrv_private_args[] = {
// { SIOCIWFIRSTPRIV + 0x0, 0, 0, "list_bss" },
// { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
{ SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_auth" }, /* 0 - open, 1 - shared key */
{ SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_CHAR | 12, "get_auth" },
// { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble" }, /* 0 - long, 1 - short */
// { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_CHAR | 6, "get_preamble" },
// { SIOCIWFIRSTPRIV + 0x6, 0, 0, "cnt" },
// { SIOCIWFIRSTPRIV + 0x7, 0, 0, "regs" },
// { SIOCIWFIRSTPRIV + 0x8, 0, 0, "probe" },
// { SIOCIWFIRSTPRIV + 0x9, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dbg_flag" },
// { SIOCIWFIRSTPRIV + 0xA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "connect" },
// { SIOCIWFIRSTPRIV + 0xB, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_mac_mode" },
// { SIOCIWFIRSTPRIV + 0xC, 0, IW_PRIV_TYPE_CHAR | 12, "get_mac_mode" },
};
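/*
 * Illustrative usage only (not part of the original source; interface name
 * assumed): private ioctl entries like the ones above are normally exercised
 * from userspace with the wireless-tools "iwpriv" utility, e.g.
 *
 *   iwpriv wlan0 set_auth 1      (1 = shared key, 0 = open system)
 *   iwpriv wlan0 get_auth
 */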
static iw_handler usbdrvwext_handler[] = {
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) usbdrvwext_giwname, /* SIOCGIWNAME */
(iw_handler) NULL, /* SIOCSIWNWID */
(iw_handler) NULL, /* SIOCGIWNWID */
(iw_handler) usbdrvwext_siwfreq, /* SIOCSIWFREQ */
(iw_handler) usbdrvwext_giwfreq, /* SIOCGIWFREQ */
(iw_handler) usbdrvwext_siwmode, /* SIOCSIWMODE */
(iw_handler) usbdrvwext_giwmode, /* SIOCGIWMODE */
(iw_handler) usbdrvwext_siwsens, /* SIOCSIWSENS */
(iw_handler) usbdrvwext_giwsens, /* SIOCGIWSENS */
(iw_handler) NULL, /* not used */ /* SIOCSIWRANGE */
(iw_handler) usbdrvwext_giwrange, /* SIOCGIWRANGE */
(iw_handler) NULL, /* not used */ /* SIOCSIWPRIV */
(iw_handler) NULL, /* kernel code */ /* SIOCGIWPRIV */
(iw_handler) NULL, /* not used */ /* SIOCSIWSTATS */
(iw_handler) NULL, /* kernel code */ /* SIOCGIWSTATS */
(iw_handler) NULL, /* SIOCSIWSPY */
(iw_handler) NULL, /* SIOCGIWSPY */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) usbdrvwext_siwap, /* SIOCSIWAP */
(iw_handler) usbdrvwext_giwap, /* SIOCGIWAP */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) usbdrvwext_iwaplist, /* SIOCGIWAPLIST */
(iw_handler) usbdrvwext_siwscan, /* SIOCSIWSCAN */
(iw_handler) usbdrvwext_giwscan, /* SIOCGIWSCAN */
(iw_handler) usbdrvwext_siwessid, /* SIOCSIWESSID */
(iw_handler) usbdrvwext_giwessid, /* SIOCGIWESSID */
(iw_handler) usbdrvwext_siwnickn, /* SIOCSIWNICKN */
(iw_handler) usbdrvwext_giwnickn, /* SIOCGIWNICKN */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) usbdrvwext_siwrate, /* SIOCSIWRATE */
(iw_handler) usbdrvwext_giwrate, /* SIOCGIWRATE */
(iw_handler) usbdrvwext_siwrts, /* SIOCSIWRTS */
(iw_handler) usbdrvwext_giwrts, /* SIOCGIWRTS */
(iw_handler) usbdrvwext_siwfrag, /* SIOCSIWFRAG */
(iw_handler) usbdrvwext_giwfrag, /* SIOCGIWFRAG */
(iw_handler) usbdrvwext_siwtxpow, /* SIOCSIWTXPOW */
(iw_handler) usbdrvwext_giwtxpow, /* SIOCGIWTXPOW */
(iw_handler) usbdrvwext_siwretry, /* SIOCSIWRETRY */
(iw_handler) usbdrvwext_giwretry, /* SIOCGIWRETRY */
(iw_handler) usbdrvwext_siwencode, /* SIOCSIWENCODE */
(iw_handler) usbdrvwext_giwencode, /* SIOCGIWENCODE */
(iw_handler) usbdrvwext_siwpower, /* SIOCSIWPOWER */
(iw_handler) usbdrvwext_giwpower, /* SIOCGIWPOWER */
};
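/*
 * Note (descriptive comment, not from the original source): standard wireless
 * extension handlers are dispatched by index, i.e. entry N serves ioctl
 * (SIOCIWFIRST + N), which is why unsupported commands keep a NULL
 * placeholder to preserve the ordering.
 */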
static const iw_handler usbdrv_private_handler[] =
{
//(iw_handler) usbdrvwext_setparam, /* SIOCWFIRSTPRIV+0 */
//(iw_handler) usbdrvwext_getparam, /* SIOCWFIRSTPRIV+1 */
//(iw_handler) usbdrvwext_setkey, /* SIOCWFIRSTPRIV+2 */
//(iw_handler) usbdrvwext_setwmmparams, /* SIOCWFIRSTPRIV+3 */
//(iw_handler) usbdrvwext_delkey, /* SIOCWFIRSTPRIV+4 */
//(iw_handler) usbdrvwext_getwmmparams, /* SIOCWFIRSTPRIV+5 */
//(iw_handler) usbdrvwext_setmlme, /* SIOCWFIRSTPRIV+6 */
//(iw_handler) usbdrvwext_getchaninfo, /* SIOCWFIRSTPRIV+7 */
//(iw_handler) usbdrvwext_setoptie, /* SIOCWFIRSTPRIV+8 */
//(iw_handler) usbdrvwext_getoptie, /* SIOCWFIRSTPRIV+9 */
//(iw_handler) usbdrvwext_addmac, /* SIOCWFIRSTPRIV+10 */
//(iw_handler) usbdrvwext_getscanresults, /* SIOCWFIRSTPRIV+11 */
//(iw_handler) usbdrvwext_delmac, /* SIOCWFIRSTPRIV+12 */
//(iw_handler) usbdrvwext_getchanlist, /* SIOCWFIRSTPRIV+13 */
//(iw_handler) usbdrvwext_setchanlist, /* SIOCWFIRSTPRIV+14 */
//(iw_handler) NULL, /* SIOCWFIRSTPRIV+15 */
//(iw_handler) usbdrvwext_chanswitch, /* SIOCWFIRSTPRIV+16 */
//(iw_handler) usbdrvwext_setmode, /* SIOCWFIRSTPRIV+17 */
//(iw_handler) usbdrvwext_getmode, /* SIOCWFIRSTPRIV+18 */
NULL, /* SIOCIWFIRSTPRIV */
};
static struct iw_handler_def p80211wext_handler_def = {
.num_standard = sizeof(usbdrvwext_handler) / sizeof(iw_handler),
.num_private = sizeof(usbdrv_private_handler)/sizeof(iw_handler),
.num_private_args = sizeof(usbdrv_private_args)/sizeof(struct iw_priv_args),
.standard = usbdrvwext_handler,
.private = (iw_handler *) usbdrv_private_handler,
.private_args = (struct iw_priv_args *) usbdrv_private_args
};
/* WDS */
/* struct zsWdsStruct wds[ZM_WDS_PORT_NUMBER]; */
/* void zfInitWdsStruct(void); */
/* VAP */
struct zsVapStruct vap[ZM_VAP_PORT_NUMBER];
void zfLnxInitVapStruct(void);
/**
* usbdrv_intr - interrupt handler
* @irq: the IRQ number
* @dev_inst: the net_device struct
* @regs: registers (unused)
*
* This routine is the ISR for the usbdrv board. It services
* the RX & TX queues & starts the RU if it has stopped due
* to no resources.
*/
irqreturn_t usbdrv_intr(int irq, void *dev_inst, struct pt_regs *regs)
{
struct net_device *dev;
struct usbdrv_private *macp;
dev = dev_inst;
macp = dev->ml_priv;
/* Read register error, card may be unplugged */
if (0)//(intr_status == -1)
return IRQ_NONE;
/* the device is closed, don't continue or else bad things may happen. */
if (!netif_running(dev))
return IRQ_NONE;
if (macp->driver_isolated)
return IRQ_NONE;
#if (WLAN_HOSTIF == WLAN_PCI)
//zfiIsrPci(dev);
#endif
return IRQ_HANDLED;
}
int usbdrv_open(struct net_device *dev)
{
struct usbdrv_private *macp = dev->ml_priv;
int rc = 0;
u16_t size;
void* mem;
//unsigned char addr[6];
struct zsCbFuncTbl cbFuncTbl;
printk("Enter open()\n");
/*
* #ifndef CONFIG_SMP
* read_lock(&(macp->isolate_lock));
* #endif
*/
if (macp->driver_isolated) {
rc = -EBUSY;
goto exit;
}
size = zfiGlobalDataSize(dev);
mem = kmalloc(size, GFP_KERNEL);
if (mem == NULL)
{
rc = -EBUSY;
goto exit;
}
macp->wd = mem;
memset(&cbFuncTbl, 0, sizeof(struct zsCbFuncTbl));
cbFuncTbl.zfcbAuthNotify = zfLnxAuthNotify;
cbFuncTbl.zfcbAsocNotify = zfLnxAsocNotify;
cbFuncTbl.zfcbDisAsocNotify = zfLnxDisAsocNotify;
cbFuncTbl.zfcbApConnectNotify = zfLnxApConnectNotify;
cbFuncTbl.zfcbConnectNotify = zfLnxConnectNotify;
cbFuncTbl.zfcbScanNotify = zfLnxScanNotify;
cbFuncTbl.zfcbMicFailureNotify = zfLnxMicFailureNotify;
cbFuncTbl.zfcbApMicFailureNotify = zfLnxApMicFailureNotify;
cbFuncTbl.zfcbIbssPartnerNotify = zfLnxIbssPartnerNotify;
cbFuncTbl.zfcbMacAddressNotify = zfLnxMacAddressNotify;
cbFuncTbl.zfcbSendCompleteIndication = zfLnxSendCompleteIndication;
cbFuncTbl.zfcbRecvEth = zfLnxRecvEth;
cbFuncTbl.zfcbRecv80211 = zfLnxRecv80211;
cbFuncTbl.zfcbRestoreBufData = zfLnxRestoreBufData;
#ifdef ZM_ENABLE_CENC
cbFuncTbl.zfcbCencAsocNotify = zfLnxCencAsocNotify;
#endif //ZM_ENABLE_CENC
cbFuncTbl.zfcbHwWatchDogNotify = zfLnxWatchDogNotify;
zfiWlanOpen(dev, &cbFuncTbl);
/* zfwMacAddressNotify() will be called to setup dev->dev_addr[] */
zfLnxCreateThread(dev);
mod_timer(&(macp->hbTimer10ms), jiffies + (1*HZ)/100); /* 10 ms */
netif_carrier_on(dev);
netif_start_queue(dev);
#if ZM_AP_MODE == 1
zfiWlanSetWlanMode(dev, ZM_MODE_AP);
zfiWlanSetBasicRate(dev, 0xf, 0, 0);
zfiWlanSetSSID(dev, "OTUS_CWY", 8);
zfiWlanSetDtimCount(dev, 3);
#if ZM_WEP_MODE == 1
{
u8_t key[16] = {0x12, 0x34, 0x56, 0x78, 0x90};
struct zsKeyInfo keyInfo;
keyInfo.keyLength = 5;
keyInfo.keyIndex = 0;
keyInfo.flag = 0;
keyInfo.key = key;
zfiWlanSetKey(dev, keyInfo);
zfiWlanSetEncryMode(dev, ZM_WEP64);
}
#if ZM_SHARE_AUTH == 1
zfiWlanSetAuthenticationMode(dev, 1);
#endif /* #if ZM_SHARE_AUTH == 1 */
#endif /* #if ZM_WEP_MODE == 1 */
#elif ZM_PIBSS_MODE == 1
zfiWlanSetWlanMode(dev, ZM_MODE_PSEUDO);
#else
zfiWlanSetWlanMode(dev, ZM_MODE_INFRASTRUCTURE);
#endif
/* zfiWlanSetChannel(dev, ZM_CHANNEL, FALSE); */
zfiWlanSetFrequency(dev, 2462000, FALSE);
zfiWlanSetRtsThreshold(dev, 32767);
zfiWlanSetFragThreshold(dev, 0);
zfiWlanEnable(dev);
#ifdef ZM_ENABLE_CENC
macp->netlink_sk = netlink_kernel_create(NETLINK_USERSOCK, 1, NULL, THIS_MODULE);
if (macp->netlink_sk == NULL)
{
printk(KERN_ERR "Can't create NETLINK socket\n");
}
#endif
macp->DeviceOpened = 1;
exit:
//#ifndef CONFIG_SMP
// read_unlock(&(macp->isolate_lock));
//#endif
//zfRegisterWdsDev(dev, 0);
//zfLnxRegisterVapDev(dev, 0);
return rc;
}
/**
* usbdrv_get_stats - get driver statistics
* @dev: adapter's net_device struct
*
* This routine is called when the OS wants the adapter's stats returned.
* It returns the address of the net_device_stats structure for the device.
* If the statistics are currently being updated, then they might be incorrect
* for a short while. However, since this cannot actually cause damage, no
* locking is used.
*/
struct net_device_stats * usbdrv_get_stats(struct net_device *dev)
{
struct usbdrv_private *macp = dev->ml_priv;
macp->drv_stats.net_stats.tx_errors =
macp->drv_stats.net_stats.tx_carrier_errors +
macp->drv_stats.net_stats.tx_aborted_errors;
macp->drv_stats.net_stats.rx_errors =
macp->drv_stats.net_stats.rx_crc_errors +
macp->drv_stats.net_stats.rx_frame_errors +
macp->drv_stats.net_stats.rx_length_errors;
return &(macp->drv_stats.net_stats);
}
/**
* usbdrv_set_mac - set the MAC address
* @dev: adapter's net_device struct
* @addr: the new address
*
* This routine sets the ethernet address of the board
* Returns:
* 0 - if successful
* -1 - otherwise
*/
int usbdrv_set_mac(struct net_device *dev, void *addr)
{
struct usbdrv_private *macp;
int rc = -1;
macp = dev->ml_priv;
read_lock(&(macp->isolate_lock));
if (macp->driver_isolated) {
goto exit;
}
rc = 0;
exit:
read_unlock(&(macp->isolate_lock));
return rc;
}
void
usbdrv_isolate_driver(struct usbdrv_private *macp)
{
#ifndef CONFIG_SMP
write_lock_irq(&(macp->isolate_lock));
#endif
macp->driver_isolated = TRUE;
#ifndef CONFIG_SMP
write_unlock_irq(&(macp->isolate_lock));
#endif
if (netif_running(macp->device))
{
netif_carrier_off(macp->device);
netif_stop_queue(macp->device);
}
}
#define VLAN_SIZE 4
int usbdrv_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
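/*
 * Illustrative bound (assuming the usual ETH_DATA_LEN of 1500): the check
 * above accepts MTUs in the range 68..1504, i.e. a standard Ethernet payload
 * plus a 4-byte VLAN tag.
 */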
void zfLnxUnlinkAllUrbs(struct usbdrv_private *macp);
int usbdrv_close(struct net_device *dev)
{
extern void zfHpLedCtrl(struct net_device *dev, u16_t ledId, u8_t mode);
struct usbdrv_private *macp = dev->ml_priv;
printk(KERN_DEBUG "usbdrv_close\n");
netif_carrier_off(macp->device);
del_timer_sync(&macp->hbTimer10ms);
printk(KERN_DEBUG "usbdrv_netif_carrier_off\n");
usbdrv_isolate_driver(macp);
printk(KERN_DEBUG "usbdrv_isolate_driver\n");
netif_carrier_off(macp->device);
#ifdef ZM_ENABLE_CENC
/* CENC */
if (macp->netlink_sk != NULL)
{
// sock_release(macp->netlink_sk);
printk(KERN_ERR "usbdrv close netlink socket\n");
}
#endif //ZM_ENABLE_CENC
#if (WLAN_HOSTIF == WLAN_PCI)
//free_irq(dev->irq, dev);
#endif
/* Turn off LED */
zfHpLedCtrl(dev, 0, 0);
zfHpLedCtrl(dev, 1, 0);
/* Delay for a while */
mdelay(10);
/* clear WPA/RSN IE */
macp->supIe[1] = 0;
/* set the isolate flag to false, so usbdrv_open can be called */
macp->driver_isolated = FALSE;
zfiWlanClose(dev);
kfree(macp->wd);
zfLnxUnlinkAllUrbs(macp);
return 0;
}
int usbdrv_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
int notify_stop = FALSE;
struct usbdrv_private *macp = dev->ml_priv;
#if ZM_DISABLE_XMIT
dev_kfree_skb_irq(skb);
#else
zfiTxSendEth(dev, skb, 0);
#endif
macp->drv_stats.net_stats.tx_bytes += skb->len;
macp->drv_stats.net_stats.tx_packets++;
//dev_kfree_skb_irq(skb);
if (notify_stop) {
netif_carrier_off(dev);
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
void usbdrv_set_multi(struct net_device *dev)
{
if (!(dev->flags & IFF_UP))
return;
return;
}
/**
* usbdrv_clear_structs - free resources
* @dev: adapter's net_device struct
*
* Free all device specific structs, unmap i/o address, etc.
*/
void usbdrv_clear_structs(struct net_device *dev)
{
struct usbdrv_private *macp = dev->ml_priv;
#if (WLAN_HOSTIF == WLAN_PCI)
iounmap(macp->regp);
pci_release_regions(macp->pdev);
pci_disable_device(macp->pdev);
pci_set_drvdata(macp->pdev, NULL);
#endif
kfree(macp);
kfree(dev);
}
void usbdrv_remove1(struct pci_dev *pcid)
{
struct net_device *dev;
struct usbdrv_private *macp;
dev = (struct net_device *)pci_get_drvdata(pcid);
if (!dev)
return;
macp = dev->ml_priv;
unregister_netdev(dev);
usbdrv_clear_structs(dev);
}
void zfLnx10msTimer(struct net_device* dev)
{
struct usbdrv_private *macp = dev->ml_priv;
mod_timer(&(macp->hbTimer10ms), jiffies + (1*HZ)/100); //10 ms
zfiHeartBeat(dev);
return;
}
void zfLnxInitVapStruct(void)
{
u16_t i;
for (i = 0; i < ZM_VAP_PORT_NUMBER; i++)
{
vap[i].dev = NULL;
vap[i].openFlag = 0;
}
}
int zfLnxVapOpen(struct net_device *dev)
{
u16_t vapId;
vapId = zfLnxGetVapId(dev);
if (vap[vapId].openFlag == 0)
{
vap[vapId].openFlag = 1;
printk("zfLnxVapOpen : device name=%s, vap ID=%d\n", dev->name, vapId);
zfiWlanSetSSID(dev, "vap1", 4);
zfiWlanEnable(dev);
netif_start_queue(dev);
}
else
{
printk("VAP opened error : vap ID=%d\n", vapId);
}
return 0;
}
int zfLnxVapClose(struct net_device *dev)
{
u16_t vapId;
vapId = zfLnxGetVapId(dev);
if (vapId != 0xffff)
{
if (vap[vapId].openFlag == 1)
{
printk("zfLnxVapClose: device name=%s, vap ID=%d\n", dev->name, vapId);
netif_stop_queue(dev);
vap[vapId].openFlag = 0;
}
else
{
printk("VAP port was not opened : vap ID=%d\n", vapId);
}
}
return 0;
}
int zfLnxVapXmitFrame(struct sk_buff *skb, struct net_device *dev)
{
int notify_stop = FALSE;
struct usbdrv_private *macp = dev->ml_priv;
u16_t vapId;
vapId = zfLnxGetVapId(dev);
//printk("zfLnxVapXmitFrame: vap ID=%d\n", vapId);
//printk("zfLnxVapXmitFrame(), skb=%lxh\n", (u32_t)skb);
if (vapId >= ZM_VAP_PORT_NUMBER)
{
dev_kfree_skb_irq(skb);
return NETDEV_TX_OK;
}
if (vap[vapId].openFlag == 0)
{
dev_kfree_skb_irq(skb);
return NETDEV_TX_OK;
}
zfiTxSendEth(dev, skb, 0x1);
macp->drv_stats.net_stats.tx_bytes += skb->len;
macp->drv_stats.net_stats.tx_packets++;
//dev_kfree_skb_irq(skb);
if (notify_stop) {
netif_carrier_off(dev);
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
static const struct net_device_ops vap_netdev_ops = {
.ndo_open = zfLnxVapOpen,
.ndo_stop = zfLnxVapClose,
.ndo_start_xmit = zfLnxVapXmitFrame,
.ndo_get_stats = usbdrv_get_stats,
.ndo_change_mtu = usbdrv_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef ZM_HOSTAPD_SUPPORT
.ndo_do_ioctl = usbdrv_ioctl,
#else
.ndo_do_ioctl = NULL,
#endif
};
int zfLnxRegisterVapDev(struct net_device* parentDev, u16_t vapId)
{
/* Allocate net device structure */
vap[vapId].dev = alloc_etherdev(0);
printk("Register vap dev=%p\n", vap[vapId].dev);
if(vap[vapId].dev == NULL) {
printk("alloc_etherdev fail\n");
return -ENOMEM;
}
/* Setup the default settings */
ether_setup(vap[vapId].dev);
/* MAC address */
memcpy(vap[vapId].dev->dev_addr, parentDev->dev_addr, ETH_ALEN);
vap[vapId].dev->irq = parentDev->irq;
vap[vapId].dev->base_addr = parentDev->base_addr;
vap[vapId].dev->mem_start = parentDev->mem_start;
vap[vapId].dev->mem_end = parentDev->mem_end;
vap[vapId].dev->ml_priv = parentDev->ml_priv;
//dev->hard_start_xmit = &zd1212_wds_xmit_frame;
vap[vapId].dev->netdev_ops = &vap_netdev_ops;
vap[vapId].dev->destructor = free_netdev;
vap[vapId].dev->tx_queue_len = 0;
vap[vapId].dev->dev_addr[0] = parentDev->dev_addr[0];
vap[vapId].dev->dev_addr[1] = parentDev->dev_addr[1];
vap[vapId].dev->dev_addr[2] = parentDev->dev_addr[2];
vap[vapId].dev->dev_addr[3] = parentDev->dev_addr[3];
vap[vapId].dev->dev_addr[4] = parentDev->dev_addr[4];
vap[vapId].dev->dev_addr[5] = parentDev->dev_addr[5] + (vapId+1);
/* Stop the network queue first */
netif_stop_queue(vap[vapId].dev);
sprintf(vap[vapId].dev->name, "vap%d", vapId);
printk("Register VAP dev success : %s\n", vap[vapId].dev->name);
if(register_netdevice(vap[vapId].dev) != 0) {
printk("register VAP device fail\n");
vap[vapId].dev = NULL;
return -EINVAL;
}
return 0;
}
int zfLnxUnregisterVapDev(struct net_device* parentDev, u16_t vapId)
{
int ret = 0;
printk("Unregister VAP dev : %s\n", vap[vapId].dev->name);
if(vap[vapId].dev != NULL) {
printk("Unregister vap dev=%p\n", vap[vapId].dev);
//
//unregister_netdevice(wds[wdsId].dev);
unregister_netdev(vap[vapId].dev);
printk("VAP unregister_netdevice\n");
vap[vapId].dev = NULL;
}
else {
printk("unregister VAP device: %d fail\n", vapId);
ret = -EINVAL;
}
return ret;
}
# define SUBMIT_URB(u,f) usb_submit_urb(u,f)
# define USB_ALLOC_URB(u,f) usb_alloc_urb(u,f)
//extern void zfiWlanQueryMacAddress(zdev_t* dev, u8_t* addr);
extern int usbdrv_open(struct net_device *dev);
extern int usbdrv_close(struct net_device *dev);
extern int usbdrv_xmit_frame(struct sk_buff *skb, struct net_device *dev);
extern int usbdrv_change_mtu(struct net_device *dev, int new_mtu);
extern void usbdrv_set_multi(struct net_device *dev);
extern int usbdrv_set_mac(struct net_device *dev, void *addr);
extern struct net_device_stats * usbdrv_get_stats(struct net_device *dev);
extern int usbdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
extern UsbTxQ_t *zfLnxGetUsbTxBuffer(struct net_device *dev);
int zfLnxAllocAllUrbs(struct usbdrv_private *macp)
{
struct usb_interface *interface = macp->interface;
struct usb_host_interface *iface_desc = &interface->altsetting[0];
struct usb_endpoint_descriptor *endpoint;
int i;
/* descriptor matches, let's find the endpoints needed */
/* check out the endpoints */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i)
{
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(endpoint))
{
/* we found a bulk in endpoint */
printk(KERN_ERR "bulk in: wMaxPacketSize = %x\n", le16_to_cpu(endpoint->wMaxPacketSize));
}
if (usb_endpoint_is_bulk_out(endpoint))
{
/* we found a bulk out endpoint */
printk(KERN_ERR "bulk out: wMaxPacketSize = %x\n", le16_to_cpu(endpoint->wMaxPacketSize));
}
if (usb_endpoint_is_int_in(endpoint))
{
/* we found an interrupt in endpoint */
printk(KERN_ERR "interrupt in: wMaxPacketSize = %x\n", le16_to_cpu(endpoint->wMaxPacketSize));
printk(KERN_ERR "interrupt in: int_interval = %d\n", endpoint->bInterval);
}
if (usb_endpoint_is_int_out(endpoint))
{
/* we found an interrupt out endpoint */
printk(KERN_ERR "interrupt out: wMaxPacketSize = %x\n", le16_to_cpu(endpoint->wMaxPacketSize));
printk(KERN_ERR "interrupt out: int_interval = %d\n", endpoint->bInterval);
}
}
/* Allocate all Tx URBs */
for (i = 0; i < ZM_MAX_TX_URB_NUM; i++)
{
macp->WlanTxDataUrb[i] = USB_ALLOC_URB(0, GFP_KERNEL);
if (macp->WlanTxDataUrb[i] == 0)
{
int j;
/* Free all urbs */
for (j = 0; j < i; j++)
{
usb_free_urb(macp->WlanTxDataUrb[j]);
}
return 0;
}
}
/* Allocate all Rx URBs */
for (i = 0; i < ZM_MAX_RX_URB_NUM; i++)
{
macp->WlanRxDataUrb[i] = USB_ALLOC_URB(0, GFP_KERNEL);
if (macp->WlanRxDataUrb[i] == 0)
{
int j;
/* Free all urbs */
for (j = 0; j < i; j++)
{
usb_free_urb(macp->WlanRxDataUrb[j]);
}
for (j = 0; j < ZM_MAX_TX_URB_NUM; j++)
{
usb_free_urb(macp->WlanTxDataUrb[j]);
}
return 0;
}
}
/* Allocate Register Read/Write USB */
macp->RegOutUrb = USB_ALLOC_URB(0, GFP_KERNEL);
macp->RegInUrb = USB_ALLOC_URB(0, GFP_KERNEL);
return 1;
}
void zfLnxFreeAllUrbs(struct usbdrv_private *macp)
{
int i;
/* Free all Tx URBs */
for (i = 0; i < ZM_MAX_TX_URB_NUM; i++)
{
if (macp->WlanTxDataUrb[i] != NULL)
{
usb_free_urb(macp->WlanTxDataUrb[i]);
}
}
/* Free all Rx URBs */
for (i = 0; i < ZM_MAX_RX_URB_NUM; i++)
{
if (macp->WlanRxDataUrb[i] != NULL)
{
usb_free_urb(macp->WlanRxDataUrb[i]);
}
}
/* Free USB Register Read/Write URB */
usb_free_urb(macp->RegOutUrb);
usb_free_urb(macp->RegInUrb);
}
void zfLnxUnlinkAllUrbs(struct usbdrv_private *macp)
{
int i;
/* Unlink all Tx URBs */
for (i = 0; i < ZM_MAX_TX_URB_NUM; i++)
{
if (macp->WlanTxDataUrb[i] != NULL)
{
usb_unlink_urb(macp->WlanTxDataUrb[i]);
}
}
/* Unlink all Rx URBs */
for (i = 0; i < ZM_MAX_RX_URB_NUM; i++)
{
if (macp->WlanRxDataUrb[i] != NULL)
{
usb_unlink_urb(macp->WlanRxDataUrb[i]);
}
}
/* Unlink USB Register Read/Write URB */
usb_unlink_urb(macp->RegOutUrb);
usb_unlink_urb(macp->RegInUrb);
}
static const struct net_device_ops otus_netdev_ops = {
.ndo_open = usbdrv_open,
.ndo_stop = usbdrv_close,
.ndo_start_xmit = usbdrv_xmit_frame,
.ndo_change_mtu = usbdrv_change_mtu,
.ndo_get_stats = usbdrv_get_stats,
.ndo_set_multicast_list = usbdrv_set_multi,
.ndo_set_mac_address = usbdrv_set_mac,
.ndo_do_ioctl = usbdrv_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
u8_t zfLnxInitSetup(struct net_device *dev, struct usbdrv_private *macp)
{
//unsigned char addr[6];
//init_MUTEX(&macp->ps_sem);
//init_MUTEX(&macp->reg_sem);
//init_MUTEX(&macp->bcn_sem);
//init_MUTEX(&macp->config_sem);
spin_lock_init(&(macp->cs_lock));
dev->wireless_handlers = (struct iw_handler_def *)&p80211wext_handler_def;
dev->netdev_ops = &otus_netdev_ops;
dev->flags |= IFF_MULTICAST;
dev->dev_addr[0] = 0x00;
dev->dev_addr[1] = 0x03;
dev->dev_addr[2] = 0x7f;
dev->dev_addr[3] = 0x11;
dev->dev_addr[4] = 0x22;
dev->dev_addr[5] = 0x33;
/* Initialize Heart Beat timer */
init_timer(&macp->hbTimer10ms);
macp->hbTimer10ms.data = (unsigned long)dev;
macp->hbTimer10ms.function = (void *)&zfLnx10msTimer;
/* Initialize WDS and VAP data structure */
//zfInitWdsStruct();
zfLnxInitVapStruct();
return 1;
}
u8_t zfLnxClearStructs(struct net_device *dev)
{
u16_t ii;
u16_t TxQCnt;
TxQCnt = zfLnxCheckTxBufferCnt(dev);
printk(KERN_ERR "TxQCnt: %d\n", TxQCnt);
for (ii = 0; ii < TxQCnt; ii++) {
UsbTxQ_t *TxQ = zfLnxGetUsbTxBuffer(dev);
printk(KERN_ERR "dev_kfree_skb_any\n");
/* Free buffer */
dev_kfree_skb_any(TxQ->buf);
}
return 0;
}
| wkritzinger/asuswrt-merlin | release/src-rt-7.x.main/src/linux/linux-2.6.36/drivers/staging/otus/usbdrv.c | C | gpl-2.0 | 35,420 |
<?php
/**
* AllControllersTest file
*
* PHP 5
*
* CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
* Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
*
* Licensed under The MIT License
* Redistributions of files must retain the above copyright notice.
*
* @copyright Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
* @link http://cakephp.org CakePHP(tm) Project
* @package Cake.Test.Case
* @since CakePHP(tm) v 2.0
* @license MIT License (http://www.opensource.org/licenses/mit-license.php)
*/
/**
* AllControllersTest class
*
* This test group will run Controller related tests.
*
* @package Cake.Test.Case
*/
class AllControllersTest extends PHPUnit_Framework_TestSuite {
/**
* suite method, defines tests for this suite.
*
* @return void
*/
public static function suite() {
$suite = new CakeTestSuite('All Controller related class tests');
$suite->addTestFile(CORE_TEST_CASES . DS . 'Controller' . DS . 'ControllerTest.php');
$suite->addTestFile(CORE_TEST_CASES . DS . 'Controller' . DS . 'ScaffoldTest.php');
$suite->addTestFile(CORE_TEST_CASES . DS . 'Controller' . DS . 'PagesControllerTest.php');
$suite->addTestFile(CORE_TEST_CASES . DS . 'Controller' . DS . 'ComponentTest.php');
$suite->addTestFile(CORE_TEST_CASES . DS . 'Controller' . DS . 'ControllerMergeVarsTest.php');
return $suite;
}
}
| anoochit/wcbookstore | webstore/lib/Cake/Test/Case/AllControllerTest.php | PHP | gpl-3.0 | 1,463 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Composition;
using System.Diagnostics;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.Host;
using Microsoft.CodeAnalysis.Host.Mef;
using Microsoft.CodeAnalysis.LanguageServices;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.SemanticModelWorkspaceService
{
[ExportWorkspaceServiceFactory(typeof(ISemanticModelService), ServiceLayer.Default), Shared]
internal class SemanticModelWorkspaceServiceFactory : IWorkspaceServiceFactory
{
public IWorkspaceService CreateService(HostWorkspaceServices workspaceServices)
{
return new SemanticModelService();
}
private class SemanticModelService : ISemanticModelService
{
private static readonly ConditionalWeakTable<Workspace, ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>>> s_map =
new ConditionalWeakTable<Workspace, ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>>>();
private static readonly ConditionalWeakTable<Compilation, ConditionalWeakTable<SyntaxNode, WeakReference<SemanticModel>>> s_semanticModelMap =
new ConditionalWeakTable<Compilation, ConditionalWeakTable<SyntaxNode, WeakReference<SemanticModel>>>();
private readonly ReaderWriterLockSlim _gate = new ReaderWriterLockSlim(LockRecursionPolicy.NoRecursion);
public async Task<SemanticModel> GetSemanticModelForNodeAsync(Document document, SyntaxNode node, CancellationToken cancellationToken = default(CancellationToken))
{
var syntaxFactsService = document.Project.LanguageServices.GetService<ISyntaxFactsService>();
var semanticFactsService = document.Project.LanguageServices.GetService<ISemanticFactsService>();
if (syntaxFactsService == null || semanticFactsService == null || node == null)
{
// this only works if we can track the member
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
if (IsPrimaryBranch(document) && !document.IsOpen())
{
// for ones in primary branch, we only support opened documents (mostly to help typing scenario)
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
var versionMap = GetVersionMapFromBranchOrPrimary(document.Project.Solution.Workspace, document.Project.Solution.BranchId);
var projectId = document.Project.Id;
var version = await document.Project.GetDependentSemanticVersionAsync(cancellationToken).ConfigureAwait(false);
CompilationSet compilationSet;
using (_gate.DisposableRead())
{
versionMap.TryGetValue(projectId, out compilationSet);
}
// this is first time
if (compilationSet == null)
{
// update the cache
await AddVersionCacheAsync(document.Project, version, cancellationToken).ConfigureAwait(false);
// get the base one
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// we have a compilation set; check whether it is something we can use
if (version.Equals(compilationSet.Version))
{
Compilation oldCompilation;
if (!compilationSet.Compilation.TryGetValue(out oldCompilation))
{
await AddVersionCacheAsync(document.Project, version, cancellationToken).ConfigureAwait(false);
// get the base one
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// first check whether the set has this document
SyntaxTree oldTree;
if (!compilationSet.Trees.TryGetValue(document.Id, out oldTree))
{
// noop.
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// Yes, we have compilation we might be able to re-use
var root = await document.GetSyntaxRootAsync(cancellationToken).ConfigureAwait(false);
if (root.SyntaxTree == oldTree)
{
// the one we have and the one in the document is same one. but tree in other file might
// have changed (no top level change). in that case, just use one from the document.
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// let's track member that we can re-use
var member = syntaxFactsService.GetContainingMemberDeclaration(root, node.SpanStart);
if (!syntaxFactsService.IsMethodLevelMember(member))
{
// oops, given node is not something we can support
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// check whether we already have speculative semantic model for this
var cachedModel = GetCachedSemanticModel(oldCompilation, member);
if (cachedModel != null)
{
// Yes!
return cachedModel;
}
// alright, we have member id. find same member from old compilation
var memberId = syntaxFactsService.GetMethodLevelMemberId(root, member);
var oldRoot = await oldTree.GetRootAsync(cancellationToken).ConfigureAwait(false);
var oldMember = syntaxFactsService.GetMethodLevelMember(oldRoot, memberId);
if (oldMember == null)
{
// oops, something went wrong. we can't find old member.
//
// due to how we do versioning (filestamp based versioning), there is always a possibility that
// sources get changed without proper version changes in some rare situations,
// so in those rare cases which we can't control until we move to content based versioning,
// just bail out and use full semantic model
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
var oldModel = oldCompilation.GetSemanticModel(oldTree);
SemanticModel model;
if (!semanticFactsService.TryGetSpeculativeSemanticModel(oldModel, oldMember, member, out model))
{
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
// cache the new speculative semantic model for the given node
Contract.ThrowIfNull(model);
return CacheSemanticModel(oldCompilation, member, model);
}
// oops, it looks like we can't use cached one.
// update the cache
await UpdateVersionCacheAsync(document.Project, version, compilationSet, cancellationToken).ConfigureAwait(false);
// get the base one
return await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
}
private bool IsPrimaryBranch(Document document)
{
return document.Project.Solution.BranchId == document.Project.Solution.Workspace.PrimaryBranchId;
}
private Task AddVersionCacheAsync(Project project, VersionStamp version, CancellationToken cancellationToken)
{
return UpdateVersionCacheAsync(project, version, primarySet: null, cancellationToken: cancellationToken);
}
private async Task UpdateVersionCacheAsync(Project project, VersionStamp version, CompilationSet primarySet, CancellationToken cancellationToken)
{
var versionMap = GetVersionMapFromBranch(project.Solution.Workspace, project.Solution.BranchId);
CompilationSet compilationSet;
Compilation compilation;
if (!AlreadyHasLatestCompilationSet(versionMap, project.Id, version, out compilationSet) ||
!compilationSet.Compilation.TryGetValue(out compilation))
{
var newSet = await CompilationSet.CreateAsync(project, compilationSet ?? primarySet, cancellationToken).ConfigureAwait(false);
using (_gate.DisposableWrite())
{
// we still don't have it or if someone has beaten us, check what we have is newer
if (!versionMap.TryGetValue(project.Id, out compilationSet) || version != compilationSet.Version)
{
versionMap[project.Id] = newSet;
}
}
}
}
private bool AlreadyHasLatestCompilationSet(
Dictionary<ProjectId, CompilationSet> versionMap, ProjectId projectId, VersionStamp version, out CompilationSet compilationSet)
{
using (_gate.DisposableRead())
{
// we still don't have it or if someone has beaten us, check what we have is newer
return versionMap.TryGetValue(projectId, out compilationSet) && version == compilationSet.Version;
}
}
private static readonly ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>>.CreateValueCallback s_createVersionMap =
_ => new Dictionary<ProjectId, CompilationSet>();
private static readonly ConditionalWeakTable<Compilation, ConditionalWeakTable<SyntaxNode, WeakReference<SemanticModel>>>.CreateValueCallback s_createNodeMap =
_ => new ConditionalWeakTable<SyntaxNode, WeakReference<SemanticModel>>();
private static SemanticModel GetCachedSemanticModel(
ConditionalWeakTable<SyntaxNode, WeakReference<SemanticModel>> nodeMap, SyntaxNode newMember)
{
SemanticModel model;
WeakReference<SemanticModel> cached;
if (!nodeMap.TryGetValue(newMember, out cached) || !cached.TryGetTarget(out model))
{
return null;
}
return model;
}
private static SemanticModel GetCachedSemanticModel(Compilation oldCompilation, SyntaxNode newMember)
{
var nodeMap = s_semanticModelMap.GetValue(oldCompilation, s_createNodeMap);
// see whether we have cached one
return GetCachedSemanticModel(nodeMap, newMember);
}
private static SemanticModel CacheSemanticModel(Compilation oldCompilation, SyntaxNode newMember, SemanticModel speculativeSemanticModel)
{
var nodeMap = s_semanticModelMap.GetValue(oldCompilation, s_createNodeMap);
// check whether somebody already have put one for me
var model = GetCachedSemanticModel(nodeMap, newMember);
if (model != null)
{
return model;
}
// noop. put one
var weakReference = new WeakReference<SemanticModel>(speculativeSemanticModel);
var cached = nodeMap.GetValue(newMember, _ => weakReference);
SemanticModel cachedModel;
if (cached.TryGetTarget(out cachedModel))
{
return cachedModel;
}
// oops. somebody has beaten me, but the model has gone.
// set me as new target
cached.SetTarget(speculativeSemanticModel);
return speculativeSemanticModel;
}
private Dictionary<ProjectId, CompilationSet> GetVersionMapFromBranchOrPrimary(Workspace workspace, BranchId branchId)
{
var branchMap = GetBranchMap(workspace);
// check whether we already have one
Dictionary<ProjectId, CompilationSet> versionMap;
if (branchMap.TryGetValue(branchId, out versionMap))
{
return versionMap;
}
// check primary branch
if (branchMap.TryGetValue(workspace.PrimaryBranchId, out versionMap))
{
return versionMap;
}
// okay, create one
return branchMap.GetValue(branchId, s_createVersionMap);
}
private Dictionary<ProjectId, CompilationSet> GetVersionMapFromBranch(Workspace workspace, BranchId branchId)
{
var branchMap = GetBranchMap(workspace);
return branchMap.GetValue(branchId, s_createVersionMap);
}
private ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>> GetBranchMap(Workspace workspace)
{
ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>> branchMap;
if (!s_map.TryGetValue(workspace, out branchMap))
{
var newBranchMap = new ConditionalWeakTable<BranchId, Dictionary<ProjectId, CompilationSet>>();
branchMap = s_map.GetValue(workspace, _ => newBranchMap);
if (branchMap == newBranchMap)
{
// it is first time we see this workspace. subscribe to it
workspace.DocumentClosed += OnDocumentClosed;
workspace.WorkspaceChanged += OnWorkspaceChanged;
}
}
return branchMap;
}
private void OnDocumentClosed(object sender, DocumentEventArgs e)
{
ClearVersionMap(e.Document.Project.Solution.Workspace, e.Document.Id);
}
private void OnWorkspaceChanged(object sender, WorkspaceChangeEventArgs e)
{
switch (e.Kind)
{
case WorkspaceChangeKind.SolutionAdded:
case WorkspaceChangeKind.SolutionChanged:
case WorkspaceChangeKind.SolutionRemoved:
case WorkspaceChangeKind.SolutionCleared:
case WorkspaceChangeKind.SolutionReloaded:
ClearVersionMap(e.NewSolution.Workspace, e.NewSolution.ProjectIds);
break;
case WorkspaceChangeKind.ProjectAdded:
case WorkspaceChangeKind.ProjectRemoved:
case WorkspaceChangeKind.ProjectChanged:
case WorkspaceChangeKind.ProjectReloaded:
ClearVersionMap(e.NewSolution.Workspace, e.ProjectId);
break;
case WorkspaceChangeKind.DocumentRemoved:
ClearVersionMap(e.NewSolution.Workspace, e.DocumentId);
break;
case WorkspaceChangeKind.DocumentAdded:
case WorkspaceChangeKind.DocumentReloaded:
case WorkspaceChangeKind.DocumentChanged:
case WorkspaceChangeKind.AdditionalDocumentAdded:
case WorkspaceChangeKind.AdditionalDocumentRemoved:
case WorkspaceChangeKind.AdditionalDocumentChanged:
case WorkspaceChangeKind.AdditionalDocumentReloaded:
break;
default:
Contract.Fail("Unknown event");
break;
}
}
private void ClearVersionMap(Workspace workspace, DocumentId documentId)
{
if (workspace.GetOpenDocumentIds(documentId.ProjectId).Any())
{
return;
}
var versionMap = GetVersionMapFromBranch(workspace, workspace.PrimaryBranchId);
using (_gate.DisposableWrite())
{
versionMap.Remove(documentId.ProjectId);
}
}
private void ClearVersionMap(Workspace workspace, ProjectId projectId)
{
var versionMap = GetVersionMapFromBranch(workspace, workspace.PrimaryBranchId);
using (_gate.DisposableWrite())
{
versionMap.Remove(projectId);
}
}
private void ClearVersionMap(Workspace workspace, IReadOnlyList<ProjectId> projectIds)
{
var versionMap = GetVersionMapFromBranch(workspace, workspace.PrimaryBranchId);
using (_gate.DisposableWrite())
{
using (var pooledObject = SharedPools.Default<HashSet<ProjectId>>().GetPooledObject())
{
var set = pooledObject.Object;
set.UnionWith(versionMap.Keys);
set.ExceptWith(projectIds);
foreach (var projectId in set)
{
versionMap.Remove(projectId);
}
}
}
}
private class CompilationSet
{
private const int RebuildThreshold = 3;
public readonly VersionStamp Version;
public readonly ValueSource<Compilation> Compilation;
public readonly ImmutableDictionary<DocumentId, SyntaxTree> Trees;
public static async Task<CompilationSet> CreateAsync(Project project, CompilationSet oldCompilationSet, CancellationToken cancellationToken)
{
var compilation = await project.GetCompilationAsync(cancellationToken).ConfigureAwait(false);
var version = await project.GetDependentSemanticVersionAsync(cancellationToken).ConfigureAwait(false);
var map = GetTreeMap(project, compilation, oldCompilationSet, cancellationToken);
ValidateTreeMap(map, project, compilation);
return new CompilationSet(version, GetCompilation(project, compilation), map);
}
private CompilationSet(VersionStamp version, ValueSource<Compilation> compilation, ImmutableDictionary<DocumentId, SyntaxTree> map)
{
this.Version = version;
this.Compilation = compilation;
this.Trees = map;
}
private static ImmutableDictionary<DocumentId, SyntaxTree> GetTreeMap(Project project, Compilation compilation, CompilationSet oldCompilationSet, CancellationToken cancellationToken)
{
// enumerable count should take a quick path since ImmutableArray implements ICollection
var newTreeCount = compilation.SyntaxTrees.Count();
// TODO: all this could go away if this is maintained by project itself and one can just get the map from it.
if (oldCompilationSet == null || Math.Abs(oldCompilationSet.Trees.Count - newTreeCount) > RebuildThreshold)
{
return ImmutableDictionary.CreateRange(GetNewTreeMap(project, compilation));
}
var map = AddOrUpdateNewTreeToOldMap(project, compilation, oldCompilationSet, cancellationToken);
// check simple case. most of typing case should hit this.
// number of items in the map is same as number of new trees and old compilation doesn't have
// more trees than current one
if (map.Count == newTreeCount && oldCompilationSet.Trees.Count <= newTreeCount)
{
return map;
}
// a bit more expensive case where there is a document in oldCompilationSet that doesn't exist in new compilation
return RemoveOldTreeFromMap(compilation, oldCompilationSet.Trees, map, cancellationToken);
}
private static ImmutableDictionary<DocumentId, SyntaxTree> RemoveOldTreeFromMap(
Compilation newCompilation,
ImmutableDictionary<DocumentId, SyntaxTree> oldMap, ImmutableDictionary<DocumentId, SyntaxTree> map,
CancellationToken cancellationToken)
{
foreach (var oldIdAndTree in oldMap)
{
cancellationToken.ThrowIfCancellationRequested();
// check whether new compilation still has the tree
if (newCompilation.ContainsSyntaxTree(oldIdAndTree.Value))
{
continue;
}
var documentId = oldIdAndTree.Key;
// check whether the tree has been updated
SyntaxTree currentTree;
if (!map.TryGetValue(documentId, out currentTree) ||
currentTree != oldIdAndTree.Value)
{
continue;
}
// this has been removed
map = map.Remove(documentId);
}
return map;
}
private static ImmutableDictionary<DocumentId, SyntaxTree> AddOrUpdateNewTreeToOldMap(
Project newProject, Compilation newCompilation, CompilationSet oldSet, CancellationToken cancellationToken)
{
Compilation oldCompilation;
if (!oldSet.Compilation.TryGetValue(out oldCompilation))
{
return ImmutableDictionary.CreateRange(GetNewTreeMap(newProject, newCompilation));
}
var map = oldSet.Trees;
foreach (var newTree in newCompilation.SyntaxTrees)
{
cancellationToken.ThrowIfCancellationRequested();
if (oldCompilation.ContainsSyntaxTree(newTree))
{
continue;
}
var documentId = newProject.GetDocumentId(newTree);
// GetDocumentId will return null for #load'ed trees.
// TODO: Remove this check and add logic to fetch the #load'ed tree's
// Document once https://github.com/dotnet/roslyn/issues/5260 is fixed.
if (documentId == null)
{
Debug.Assert(newProject.Solution.Workspace.Kind == "Interactive");
continue;
}
map = map.SetItem(documentId, newTree);
}
return map;
}
private static IEnumerable<KeyValuePair<DocumentId, SyntaxTree>> GetNewTreeMap(Project project, Compilation compilation)
{
foreach (var tree in compilation.SyntaxTrees)
{
var documentId = project.GetDocumentId(tree);
if (documentId != null)
{
yield return KeyValuePair.Create(documentId, tree);
}
}
}
private static ValueSource<Compilation> GetCompilation(Project project, Compilation compilation)
{
var cache = project.Solution.Workspace.Services.GetService<IProjectCacheHostService>();
if (cache != null && project.Solution.BranchId == project.Solution.Workspace.PrimaryBranchId)
{
return new WeakConstantValueSource<Compilation>(cache.CacheObjectIfCachingEnabledForKey(project.Id, project, compilation));
}
return new ConstantValueSource<Compilation>(compilation);
}
[Conditional("DEBUG")]
private static void ValidateTreeMap(ImmutableDictionary<DocumentId, SyntaxTree> actual, Project project, Compilation compilation)
{
var expected = ImmutableDictionary.CreateRange(GetNewTreeMap(project, compilation));
Contract.Requires(actual.SetEquals(expected));
}
}
}
}
}
| MatthieuMEZIL/roslyn | src/Workspaces/Core/Portable/SemanticModelWorkspaceService/SemanticModelWorkspaceServiceFactory.cs | C# | apache-2.0 | 26,002 |
#ifndef __GLX_glxint_h__
#define __GLX_glxint_h__
/*
* SGI FREE SOFTWARE LICENSE B (Version 2.0, Sept. 18, 2008)
* Copyright (C) 1991-2000 Silicon Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice including the dates of first publication and
* either this permission notice or a reference to
* http://oss.sgi.com/projects/FreeB/
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* SILICON GRAPHICS, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of Silicon Graphics, Inc.
* shall not be used in advertising or otherwise to promote the sale, use or
* other dealings in this Software without prior written authorization from
* Silicon Graphics, Inc.
*/
#include <X11/X.h>
#include <X11/Xdefs.h>
#include "GL/gl.h"
typedef struct __GLXvisualConfigRec __GLXvisualConfig;
typedef struct __GLXFBConfigRec __GLXFBConfig;
struct __GLXvisualConfigRec {
VisualID vid;
int class;
Bool rgba;
int redSize, greenSize, blueSize, alphaSize;
unsigned long redMask, greenMask, blueMask, alphaMask;
int accumRedSize, accumGreenSize, accumBlueSize, accumAlphaSize;
Bool doubleBuffer;
Bool stereo;
int bufferSize;
int depthSize;
int stencilSize;
int auxBuffers;
int level;
/* Start of Extended Visual Properties */
int visualRating; /* visual_rating extension */
int transparentPixel; /* visual_info extension */
/* colors are floats scaled to ints */
int transparentRed, transparentGreen, transparentBlue, transparentAlpha;
int transparentIndex;
int multiSampleSize;
int nMultiSampleBuffers;
int visualSelectGroup;
};
#define __GLX_MIN_CONFIG_PROPS 18
#define __GLX_MAX_CONFIG_PROPS 500
#define __GLX_EXT_CONFIG_PROPS 10
/*
** Since we send all non-core visual properties as token, value pairs,
** we require 2 words across the wire. In order to maintain backwards
** compatibility, we need to send the total number of words that the
** VisualConfigs are sent back in so old libraries can simply "ignore"
** the new properties.
*/
#define __GLX_TOTAL_CONFIG (__GLX_MIN_CONFIG_PROPS + \
2 * __GLX_EXT_CONFIG_PROPS)
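/*
 * Worked example (derived from the values above): 18 core properties plus
 * 10 extended properties sent as (token, value) pairs gives
 * 18 + 2 * 10 = 38 words per visual config on the wire.
 */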
struct __GLXFBConfigRec {
int visualType;
int transparentType;
/* colors are floats scaled to ints */
int transparentRed, transparentGreen, transparentBlue, transparentAlpha;
int transparentIndex;
int visualCaveat;
int associatedVisualId;
int screen;
int drawableType;
int renderType;
int maxPbufferWidth, maxPbufferHeight, maxPbufferPixels;
int optimalPbufferWidth, optimalPbufferHeight; /* for SGIX_pbuffer */
int visualSelectGroup; /* visuals grouped by select priority */
unsigned int id;
GLboolean rgbMode;
GLboolean colorIndexMode;
GLboolean doubleBufferMode;
GLboolean stereoMode;
GLboolean haveAccumBuffer;
GLboolean haveDepthBuffer;
GLboolean haveStencilBuffer;
/* The number of bits present in various buffers */
GLint accumRedBits, accumGreenBits, accumBlueBits, accumAlphaBits;
GLint depthBits;
GLint stencilBits;
GLint indexBits;
GLint redBits, greenBits, blueBits, alphaBits;
GLuint redMask, greenMask, blueMask, alphaMask;
GLuint multiSampleSize; /* Number of samples per pixel (0 if no ms) */
GLuint nMultiSampleBuffers; /* Number of available ms buffers */
GLint maxAuxBuffers;
/* frame buffer level */
GLint level;
/* color ranges (for SGI_color_range) */
GLboolean extendedRange;
GLdouble minRed, maxRed;
GLdouble minGreen, maxGreen;
GLdouble minBlue, maxBlue;
GLdouble minAlpha, maxAlpha;
};
#define __GLX_TOTAL_FBCONFIG_PROPS 35
#endif /* !__GLX_glxint_h__ */
| execunix/vinos | xsrc/external/mit/glproto/dist/glxint.h | C | apache-2.0 | 4,706 |
#region License, Terms and Author(s)
//
// ELMAH - Error Logging Modules and Handlers for ASP.NET
// Copyright (c) 2004-9 Atif Aziz. All rights reserved.
//
// Author(s):
//
// Atif Aziz, http://www.raboof.com
// James Driscoll, mailto:jamesdriscoll@btinternet.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#endregion
[assembly: Elmah.Scc("$Id$")]
namespace Elmah
{
#region Imports
using System;
using System.Configuration;
using System.Data.Common;
using System.IO;
using System.Runtime.CompilerServices;
using IDictionary = System.Collections.IDictionary;
#endregion
/// <summary>
/// Helper class for resolving connection strings.
/// </summary>
internal class ConnectionStringHelper
{
/// <summary>
/// Gets the connection string from the given configuration
/// dictionary.
/// </summary>
public static string GetConnectionString(IDictionary config)
{
Debug.Assert(config != null);
#if !NET_1_1 && !NET_1_0
//
// First look for a connection string name that can be
// subsequently indexed into the <connectionStrings> section of
// the configuration to get the actual connection string.
//
string connectionStringName = (string)config["connectionStringName"] ?? string.Empty;
if (connectionStringName.Length > 0)
{
ConnectionStringSettings settings = ConfigurationManager.ConnectionStrings[connectionStringName];
if (settings == null)
return string.Empty;
return settings.ConnectionString ?? string.Empty;
}
#endif
//
// Connection string name not found so see if a connection
// string was given directly.
//
string connectionString = Mask.NullString((string)config["connectionString"]);
if (connectionString.Length > 0)
return connectionString;
//
// As a last resort, check for another setting called
            // connectionStringAppKey. This specifies the key in
// <appSettings> that contains the actual connection string to
// be used.
//
string connectionStringAppKey = Mask.NullString((string)config["connectionStringAppKey"]);
if (connectionStringAppKey.Length == 0)
return string.Empty;
return Configuration.AppSettings[connectionStringAppKey];
}
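        //
        // Illustrative configurations (the values are hypothetical, shown
        // only to summarize the resolution order implemented above):
        //
        //   config["connectionStringName"]   = "ElmahDb"          -> looked up in <connectionStrings>
        //   config["connectionString"]       = "Data Source=...;" -> used verbatim
        //   config["connectionStringAppKey"] = "ElmahConnection"  -> looked up in <appSettings>
        //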
#if NET_1_1 || NET_1_0
/// <summary>
/// Extracts the Data Source file path from a connection string
/// </summary>
/// <param name="connectionString">The connection string</param>
/// <returns>File path to the Data Source element of a connection string</returns>
public static string GetDataSourceFilePath(string connectionString)
{
Debug.AssertStringNotEmpty(connectionString);
string result = string.Empty;
string loweredConnectionString = connectionString.ToLower();
int dataSourcePosition = loweredConnectionString.IndexOf("data source");
if (dataSourcePosition >= 0)
{
int equalsPosition = loweredConnectionString.IndexOf('=', dataSourcePosition);
if (equalsPosition >= 0)
{
int semiColonPosition = loweredConnectionString.IndexOf(';', equalsPosition);
if (semiColonPosition < equalsPosition)
result = connectionString.Substring(equalsPosition + 1);
else
result = connectionString.Substring(equalsPosition + 1, semiColonPosition - equalsPosition - 1);
result = result.Trim();
char firstChar = result[0];
char lastChar = result[result.Length - 1];
if (firstChar == lastChar && (firstChar == '\'' || firstChar == '\"') && result.Length > 1)
{
result = result.Substring(1, result.Length - 2);
}
}
}
return result;
}
#else
/// <summary>
        /// Extracts the Data Source file path from a connection string.
        /// Both ~/ and |DataDirectory| are resolved.
/// </summary>
public static string GetDataSourceFilePath(string connectionString)
{
Debug.AssertStringNotEmpty(connectionString);
DbConnectionStringBuilder builder = new DbConnectionStringBuilder();
return GetDataSourceFilePath(builder, connectionString);
}
/// <summary>
/// Gets the connection string from the given configuration,
/// resolving ~/ and DataDirectory if necessary.
/// </summary>
public static string GetConnectionString(IDictionary config, bool resolveDataSource)
{
string connectionString = GetConnectionString(config);
return resolveDataSource ? GetResolvedConnectionString(connectionString) : connectionString;
}
/// <summary>
/// Converts the supplied connection string so that the Data Source
/// specification contains the full path and not ~/ or DataDirectory.
/// </summary>
public static string GetResolvedConnectionString(string connectionString)
{
Debug.AssertStringNotEmpty(connectionString);
DbConnectionStringBuilder builder = new DbConnectionStringBuilder();
builder["Data Source"] = GetDataSourceFilePath(builder, connectionString);
return builder.ToString();
}
[MethodImpl(MethodImplOptions.NoInlining)]
private static string MapPath(string path)
{
return System.Web.Hosting.HostingEnvironment.MapPath(path);
}
private static string GetDataSourceFilePath(DbConnectionStringBuilder builder, string connectionString)
{
builder.ConnectionString = connectionString;
if (!builder.ContainsKey("Data Source"))
throw new ArgumentException("A 'Data Source' parameter was expected in the supplied connection string, but it was not found.");
string dataSource = builder["Data Source"].ToString();
return ResolveDataSourceFilePath(dataSource);
}
private static readonly char[] _dirSeparators = new char[] { Path.DirectorySeparatorChar };
private static string ResolveDataSourceFilePath(string path)
{
const string dataDirectoryMacroString = "|DataDirectory|";
//
// Check to see if it starts with a ~/ and if so map it and return it.
//
if (path.StartsWith("~/"))
return MapPath(path);
//
// Else see if it uses the DataDirectory macro/substitution
// string, and if so perform the appropriate substitution.
//
if (!path.StartsWith(dataDirectoryMacroString, StringComparison.OrdinalIgnoreCase))
return path;
//
// Look-up the data directory from the current AppDomain.
// See "Working with local databases" for more:
// http://blogs.msdn.com/smartclientdata/archive/2005/08/26/456886.aspx
//
string baseDirectory = AppDomain.CurrentDomain.GetData("DataDirectory") as string;
//
// If not, try the current AppDomain's base directory.
//
if (string.IsNullOrEmpty(baseDirectory))
baseDirectory = AppDomain.CurrentDomain.BaseDirectory;
//
// Piece the file path back together, taking leading and
// trailing backslashes into account to avoid duplication.
//
return Mask.NullString(baseDirectory).TrimEnd(_dirSeparators)
+ Path.DirectorySeparatorChar
+ path.Substring(dataDirectoryMacroString.Length).TrimStart(_dirSeparators);
}
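        //
        // For illustration (hypothetical paths): "~/App_Data/errors.db" is
        // mapped through HostingEnvironment.MapPath, while
        // "|DataDirectory|errors.db" is rewritten against the AppDomain's
        // "DataDirectory" value (or its BaseDirectory as a fallback), per the
        // logic above.
        //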
#endif
private ConnectionStringHelper() {}
}
}
|
kyleleeli/elmah.1x
|
src/Elmah/ConnectionStringHelper.cs
|
C#
|
apache-2.0
| 9,063
|
# encoding: utf-8
module Holidays
# This file is generated by the Ruby Holidays gem.
#
# Definitions loaded: data/pl.yaml
#
# To use the definitions in this file, load it right after you load the
  # Holidays gem:
#
# require 'holidays'
# require 'holidays/pl'
#
# All the definitions are available at https://github.com/alexdunae/holidays
module PL # :nodoc:
def self.defined_regions
[:pl]
end
def self.holidays_by_month
{
0 => [{:function => lambda { |year| Holidays.easter(year)-52 }, :function_id => "easter(year)-52", :type => :informal, :name => "Tłusty Czwartek", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-47 }, :function_id => "easter(year)-47", :type => :informal, :name => "Ostatki", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-46 }, :function_id => "easter(year)-46", :type => :informal, :name => "Środa Popielcowa", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-7 }, :function_id => "easter(year)-7", :type => :informal, :name => "Niedziela Palmowa", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-3 }, :function_id => "easter(year)-3", :type => :informal, :name => "Wielki Czwartek", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-2 }, :function_id => "easter(year)-2", :type => :informal, :name => "Wielki Piątek", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)-1 }, :function_id => "easter(year)-1", :type => :informal, :name => "Wielka Sobota", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year) }, :function_id => "easter(year)", :name => "Niedziela Wielkanocna", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)+1 }, :function_id => "easter(year)+1", :name => "Poniedziałek Wielkanocny (Lany Poniedziałek)", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)+49 }, :function_id => "easter(year)+49", :name => "Zesłanie Ducha Świętego (Zielone Świątki)", :regions => [:pl]},
{:function => lambda { |year| Holidays.easter(year)+60 }, :function_id => "easter(year)+60", :name => "Uroczystość Najświętszego Ciała i Krwi Pańskiej (Boże Ciało)", :regions => [:pl]}],
1 => [{:mday => 1, :name => "Nowy Rok", :regions => [:pl]},
{:function => lambda { |year| Holidays.pl_trzech_kroli(year) }, :function_id => "pl_trzech_kroli(year)", :name => "Objawienie Pańskie (święto Trzech Króli)", :regions => [:pl]},
{:function => lambda { |year| Holidays.pl_trzech_kroli_informal(year) }, :function_id => "pl_trzech_kroli_informal(year)", :type => :informal, :name => "Objawienie Pańskie (święto Trzech Króli)", :regions => [:pl]},
{:mday => 21, :type => :informal, :name => "Dzień Babci", :regions => [:pl]},
{:mday => 22, :type => :informal, :name => "Dzień Dziadka", :regions => [:pl]}],
2 => [{:mday => 2, :type => :informal, :name => "Ofiarowanie Pańskie (Matki Boskiej Gromnicznej)", :regions => [:pl]},
{:mday => 14, :type => :informal, :name => "Dzień Zakochanych (Walentynki)", :regions => [:pl]}],
3 => [{:mday => 8, :type => :informal, :name => "Dzień Kobiet", :regions => [:pl]},
{:mday => 10, :type => :informal, :name => "Dzień Mężczyzn", :regions => [:pl]}],
4 => [{:mday => 1, :type => :informal, :name => "Prima Aprilis", :regions => [:pl]},
{:mday => 22, :type => :informal, :name => "Międzynarodowy Dzień Ziemi", :regions => [:pl]}],
5 => [{:mday => 1, :name => "Święto Państwowe (Święto Pracy)", :regions => [:pl]},
{:mday => 2, :type => :informal, :name => "Dzień Flagi Rzeczpospolitej Polskiej", :regions => [:pl]},
{:mday => 3, :name => "Święto Narodowe Trzeciego Maja", :regions => [:pl]}],
6 => [{:mday => 23, :type => :informal, :name => "Dzień Ojca", :regions => [:pl]}],
8 => [{:mday => 15, :name => "Wniebowzięcie Najświętszej Maryi Panny", :regions => [:pl]},
{:mday => 15, :name => "Święto Wojska Polskiego", :regions => [:pl]}],
9 => [{:mday => 30, :type => :informal, :name => "Dzień Chłopaka", :regions => [:pl]}],
10 => [{:mday => 14, :type => :informal, :name => "Dzień Nauczyciela (Dzień Edukacji Narodowej)", :regions => [:pl]}],
11 => [{:mday => 1, :name => "Wszystkich Świętych", :regions => [:pl]},
{:mday => 2, :type => :informal, :name => "Dzień Zaduszny", :regions => [:pl]},
{:mday => 11, :name => "Narodowe Święto Niepodległości", :regions => [:pl]},
{:mday => 29, :type => :informal, :name => "Andrzejki", :regions => [:pl]}],
12 => [{:mday => 4, :type => :informal, :name => "Barbórka (Dzień Górnika, Naftowca i Gazownika)", :regions => [:pl]},
{:mday => 6, :type => :informal, :name => "Mikołajki", :regions => [:pl]},
{:mday => 24, :type => :informal, :name => "Wigilia Bożego Narodzenia", :regions => [:pl]},
{:mday => 25, :name => "pierwszy dzień Bożego Narodzenia", :regions => [:pl]},
{:mday => 26, :name => "drugi dzień Bożego Narodzenia", :regions => [:pl]},
{:mday => 31, :type => :informal, :name => "Sylwester", :regions => [:pl]}]
}
end
end
# Poland: January 6 is holiday since 2011
def self.pl_trzech_kroli(year)
year >= 2011 ? 6 : nil
end
# Poland: January 6 wasn't holiday before 2011
def self.pl_trzech_kroli_informal(year)
year < 2011 ? 6 : nil
end
end
Holidays.merge_defs(Holidays::PL.defined_regions, Holidays::PL.holidays_by_month)
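# Example usage (a sketch; assumes the Holidays.on API described in the gem's
# README and that this file has been required as shown in the header comment):
#
#   require 'holidays'
#   require 'holidays/pl'
#   Holidays.on(Date.civil(2013, 11, 11), :pl)
#   # => an array containing "Narodowe Święto Niepodległości"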
|
jak78/holidays
|
lib/holidays/pl.rb
|
Ruby
|
mit
| 5,785
|
#run_test.sh
run_test()
{
prog=$1
mkdir -p ./dbenv
rm -f -r dbenv/*
echo "Auto commit btree transaction tests:" >&2
$prog -s b -m t -t a -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "Transaction btree tests:" >&2
$prog -s b -m t -t e -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "CDS btree tests:" >&2
$prog -s b -m c -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "DS btree tests:" >&2
$prog -s b -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "Auto commit hash transaction tests:" >&2
$prog -s h -m t -t a -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "Transaction hash tests:" >&2
$prog -s h -m t -t e -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "CDS hash tests:" >&2
$prog -s h -m c -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
echo "DS hash tests:" >&2
$prog -s h -T 200 -k 50 -l 100 -c 33554432 -n $total && mv gmon.out gmon${i}.out
i=`expr $i + 1`
}
if test $# -ne 1 ; then
echo "Usage: sh run_test.sh number-of-run-in-a-loop"
exit 1
fi
total=$1
i=0
os=`uname -s`
if test $os = "CYGWIN_NT-5.1" ; then
run_test "../build_windows/Win32/Debug/test.exe"
else
run_test "../build_unix/test_dbstl"
fi
ii=0
profiles=""
while [ $ii -lt $i ]; do
profiles="${profiles} gmon${ii}.out"
ii=`expr $ii + 1`
done
echo "Generating profiling report..." >&2
gprof $1/.libs/test_dbstl $profiles > gprof.out
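# Note (illustrative): gprof sums all profile data files given on one command
# line, so the per-scenario gmon0.out .. gmonN.out files collected above are
# merged into the single report written to gprof.out.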
|
mxrrow/zaicoin
|
src/deps/db/test_stl/run_base_profile.sh
|
Shell
|
mit
| 1,654
|
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 2002-2009 Oracle. All rights reserved.
*
* $Id$
*/
package com.sleepycat.persist;
import com.sleepycat.db.DatabaseException;
import com.sleepycat.db.LockMode;
import com.sleepycat.util.keyrange.RangeCursor;
/**
* The cursor for a SubIndex treats Dup and NoDup operations specially because
* the SubIndex never has duplicates -- the keys are primary keys. So a
* next/prevDup operation always returns null, and a next/prevNoDup operation
* actually does next/prev.
*
* @author Mark Hayes
*/
class SubIndexCursor<V> extends BasicCursor<V> {
SubIndexCursor(RangeCursor cursor, ValueAdapter<V> adapter) {
super(cursor, adapter, false/*updateAllowed*/);
}
@Override
public EntityCursor<V> dup()
throws DatabaseException {
return new SubIndexCursor<V>(cursor.dup(true), adapter);
}
@Override
public V nextDup(LockMode lockMode) {
checkInitialized();
return null;
}
@Override
public V nextNoDup(LockMode lockMode)
throws DatabaseException {
return returnValue(cursor.getNext(key, pkey, data, lockMode));
}
@Override
public V prevDup(LockMode lockMode) {
checkInitialized();
return null;
}
@Override
public V prevNoDup(LockMode lockMode)
throws DatabaseException {
return returnValue(cursor.getPrev(key, pkey, data, lockMode));
}
}
|
mxrrow/zaicoin
|
src/deps/db/java/src/com/sleepycat/persist/SubIndexCursor.java
|
Java
|
mit
| 1,484
|
/*! jQuery UI - v1.11.1 - 2014-08-13
* http://jqueryui.com
* Copyright 2014 jQuery Foundation and other contributors; Licensed MIT */
(function(t){"function"==typeof define&&define.amd?define(["jquery","./effect"],t):t(jQuery)})(function(t){return t.effects.effect.explode=function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),g||p.hide(),i()}var o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=t.effects.setMode(p,e.mode||"hide"),g="show"===f,m=p.show().css("visibility","hidden").offset(),v=Math.ceil(p.outerWidth()/d),_=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*_,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*v,l=a-(d-1)/2,p.clone().appendTo("body").wrap("<div></div>").css({position:"absolute",visibility:"visible",left:-a*v,top:-o*_}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:v,height:_,left:r+(g?l*v:0),top:h+(g?c*_:0),opacity:g?0:1}).animate({left:r+(g?0:l*v),top:h+(g?0:c*_),opacity:g?1:0},e.duration||500,e.easing,s)}});
|
saintwilbur/bandwagon
|
yolo-octo-bear.bak/bower_components/jquery-ui/ui/minified/effect-explode.min.js
|
JavaScript
|
mit
| 1,077
|
/*
* Copyright 2012-2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigure.orm.jpa;
import java.util.Map;
import javax.persistence.EntityManagerFactory;
import javax.sql.DataSource;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.GenericBeanDefinition;
import org.springframework.boot.autoconfigure.jdbc.DataSourceInitializedEvent;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.ImportBeanDefinitionRegistrar;
import org.springframework.core.type.AnnotationMetadata;
/**
* {@link BeanPostProcessor} used to fire {@link DataSourceInitializedEvent}s. Should only
* be registered via the inner {@link Registrar} class.
*
* @author Dave Syer
* @since 1.1.0
*/
class DataSourceInitializedPublisher implements BeanPostProcessor {
@Autowired
private ApplicationContext applicationContext;
private DataSource dataSource;
private JpaProperties properties;
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName)
throws BeansException {
return bean;
}
@Override
public Object postProcessAfterInitialization(Object bean, String beanName)
throws BeansException {
if (bean instanceof DataSource) {
// Normally this will be the right DataSource
this.dataSource = (DataSource) bean;
}
if (bean instanceof JpaProperties) {
this.properties = (JpaProperties) bean;
}
if (bean instanceof EntityManagerFactory && this.dataSource != null
&& isInitializingDatabase()) {
this.applicationContext
.publishEvent(new DataSourceInitializedEvent(this.dataSource));
}
return bean;
}
private boolean isInitializingDatabase() {
if (this.properties == null) {
return true; // better safe than sorry
}
Map<String, String> hibernate = this.properties
.getHibernateProperties(this.dataSource);
if (hibernate.containsKey("hibernate.hbm2ddl.auto")) {
return true;
}
return false;
}
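	// For illustration: with an application property such as
	//   spring.jpa.hibernate.ddl-auto=create-drop
	// JpaProperties exposes a "hibernate.hbm2ddl.auto" entry, so the check
	// above returns true and a DataSourceInitializedEvent is published once
	// an EntityManagerFactory bean has been initialized.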
/**
* {@link ImportBeanDefinitionRegistrar} to register the
* {@link DataSourceInitializedPublisher} without causing early bean instantiation
* issues.
*/
static class Registrar implements ImportBeanDefinitionRegistrar {
private static final String BEAN_NAME = "dataSourceInitializedPublisher";
@Override
public void registerBeanDefinitions(AnnotationMetadata importingClassMetadata,
BeanDefinitionRegistry registry) {
if (!registry.containsBeanDefinition(BEAN_NAME)) {
GenericBeanDefinition beanDefinition = new GenericBeanDefinition();
beanDefinition.setBeanClass(DataSourceInitializedPublisher.class);
beanDefinition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
// We don't need this one to be post processed otherwise it can cause a
// cascade of bean instantiation that we would rather avoid.
beanDefinition.setSynthetic(true);
registry.registerBeanDefinition(BEAN_NAME, beanDefinition);
}
}
}
}
|
rokn/Count_Words_2015
|
testing/spring-boot-master/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/orm/jpa/DataSourceInitializedPublisher.java
|
Java
|
mit
| 3,794
|
/*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
*/
/*
* This is the SDIO bus specific hooks for iwm.
* It also is the module's entry point.
*
* Interesting code paths:
* iwm_sdio_probe() (Called by an SDIO bus scan)
* -> iwm_if_alloc() (netdev.c)
* -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy)
* -> wiphy_new()
* -> wiphy_register()
* -> alloc_netdev_mq()
* -> register_netdev()
*
* iwm_sdio_remove()
* -> iwm_if_free() (netdev.c)
* -> unregister_netdev()
* -> iwm_wdev_free() (cfg80211.c)
* -> wiphy_unregister()
* -> wiphy_free()
*
* iwm_sdio_isr() (called in process context from the SDIO core code)
* -> queue_work(.., isr_worker)
* -- [async] --> iwm_sdio_isr_worker()
* -> iwm_rx_handle()
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "iwm.h"
#include "debug.h"
#include "bus.h"
#include "sdio.h"
static void iwm_sdio_isr_worker(struct work_struct *work)
{
struct iwm_sdio_priv *hw;
struct iwm_priv *iwm;
struct iwm_rx_info *rx_info;
struct sk_buff *skb;
u8 *rx_buf;
unsigned long rx_size;
hw = container_of(work, struct iwm_sdio_priv, isr_worker);
iwm = hw_to_iwm(hw);
while (!skb_queue_empty(&iwm->rx_list)) {
skb = skb_dequeue(&iwm->rx_list);
rx_info = skb_to_rx_info(skb);
rx_size = rx_info->rx_size;
rx_buf = skb->data;
IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size);
if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0)
IWM_WARN(iwm, "RX error\n");
kfree_skb(skb);
}
}
static void iwm_sdio_isr(struct sdio_func *func)
{
struct iwm_priv *iwm;
struct iwm_sdio_priv *hw;
struct iwm_rx_info *rx_info;
struct sk_buff *skb;
unsigned long buf_size, read_size;
int ret;
u8 val;
hw = sdio_get_drvdata(func);
iwm = hw_to_iwm(hw);
buf_size = hw->blk_size;
/* We're checking the status */
val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
if (val == 0 || ret < 0) {
IWM_ERR(iwm, "Wrong INTR_STATUS\n");
return;
}
/* See if we have free buffers */
if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
IWM_ERR(iwm, "No buffer for more Rx frames\n");
return;
}
/* We first read the transaction size */
read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
read_size = read_size << 8;
if (ret < 0) {
IWM_ERR(iwm, "Couldn't read the xfer size\n");
return;
}
/* We need to clear the INT register */
sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't clear the INT register\n");
return;
}
while (buf_size < read_size)
buf_size <<= 1;
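	/*
	 * For example (illustrative numbers): with blk_size == 256 and a
	 * reported read_size of 600, buf_size doubles 256 -> 512 -> 1024, so
	 * the skb allocated below covers the transfer with a power-of-two
	 * multiple of the block size.
	 */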
skb = dev_alloc_skb(buf_size);
if (!skb) {
IWM_ERR(iwm, "Couldn't alloc RX skb\n");
return;
}
rx_info = skb_to_rx_info(skb);
rx_info->rx_size = read_size;
rx_info->rx_buf_size = buf_size;
/* Now we can read the actual buffer */
ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
IWM_SDIO_DATA_ADDR, read_size);
/* The skb is put on a driver's specific Rx SKB list */
skb_queue_tail(&iwm->rx_list, skb);
/* We can now schedule the actual worker */
queue_work(hw->isr_wq, &hw->isr_worker);
}
static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw)
{
struct iwm_priv *iwm = hw_to_iwm(hw);
flush_workqueue(hw->isr_wq);
skb_queue_purge(&iwm->rx_list);
}
/* Bus ops */
static int if_sdio_enable(struct iwm_priv *iwm)
{
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
int ret;
sdio_claim_host(hw->func);
ret = sdio_enable_func(hw->func);
if (ret) {
IWM_ERR(iwm, "Couldn't enable the device: is TOP driver "
"loaded and functional?\n");
goto release_host;
}
iwm_reset(iwm);
ret = sdio_claim_irq(hw->func, iwm_sdio_isr);
if (ret) {
IWM_ERR(iwm, "Failed to claim irq: %d\n", ret);
goto release_host;
}
sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
if (ret < 0) {
IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret);
goto release_irq;
}
sdio_release_host(hw->func);
IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n");
return 0;
release_irq:
sdio_release_irq(hw->func);
release_host:
sdio_release_host(hw->func);
return ret;
}
static int if_sdio_disable(struct iwm_priv *iwm)
{
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
int ret;
sdio_claim_host(hw->func);
sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
if (ret < 0)
IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);
sdio_release_irq(hw->func);
sdio_disable_func(hw->func);
sdio_release_host(hw->func);
iwm_sdio_rx_free(hw);
iwm_reset(iwm);
IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
return 0;
}
static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
{
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
int aligned_count = ALIGN(count, hw->blk_size);
int ret;
if ((unsigned long)buf & 0x3) {
IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf);
		/* TODO: Is this a hardware limitation? use get_unaligned */
return -EINVAL;
}
sdio_claim_host(hw->func);
ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf,
aligned_count);
sdio_release_host(hw->func);
return ret;
}
/* debugfs hooks */
static int iwm_debugfs_sdio_open(struct inode *inode, struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
}
static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct iwm_priv *iwm = filp->private_data;
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
char *buf;
u8 cccr;
int buf_len = 4096, ret;
size_t len = 0;
if (*ppos != 0)
return 0;
if (count < sizeof(buf))
return -ENOSPC;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
sdio_claim_host(hw->func);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORTx\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_CAPS: 0x%x\n", cccr);
cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret);
if (ret) {
IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n");
goto err;
}
len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr);
ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
err:
sdio_release_host(hw->func);
kfree(buf);
return ret;
}
static const struct file_operations iwm_debugfs_sdio_fops = {
.owner = THIS_MODULE,
.open = iwm_debugfs_sdio_open,
.read = iwm_debugfs_sdio_read,
};
static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
{
int result;
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
hw->cccr_dentry = debugfs_create_file("cccr", 0200,
parent_dir, iwm,
&iwm_debugfs_sdio_fops);
result = PTR_ERR(hw->cccr_dentry);
if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
return result;
}
return 0;
}
static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
{
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
debugfs_remove(hw->cccr_dentry);
}
static struct iwm_if_ops if_sdio_ops = {
.enable = if_sdio_enable,
.disable = if_sdio_disable,
.send_chunk = if_sdio_send_chunk,
.debugfs_init = if_sdio_debugfs_init,
.debugfs_exit = if_sdio_debugfs_exit,
.umac_name = "iwmc3200wifi-umac-sdio.bin",
.calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
.lmac_name = "iwmc3200wifi-lmac-sdio.bin",
};
MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
static int iwm_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct iwm_priv *iwm;
struct iwm_sdio_priv *hw;
struct device *dev = &func->dev;
int ret;
/* check if TOP has already initialized the card */
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret) {
dev_err(dev, "wait for TOP to enable the device\n");
sdio_release_host(func);
return ret;
}
ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE);
sdio_disable_func(func);
sdio_release_host(func);
if (ret < 0) {
dev_err(dev, "Failed to set block size: %d\n", ret);
return ret;
}
iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops);
if (IS_ERR(iwm)) {
dev_err(dev, "allocate SDIO interface failed\n");
return PTR_ERR(iwm);
}
hw = iwm_private(iwm);
hw->iwm = iwm;
ret = iwm_debugfs_init(iwm);
if (ret < 0) {
IWM_ERR(iwm, "Debugfs registration failed\n");
goto if_free;
}
sdio_set_drvdata(func, hw);
hw->func = func;
hw->blk_size = IWM_SDIO_BLK_SIZE;
hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio");
if (!hw->isr_wq) {
ret = -ENOMEM;
goto debugfs_exit;
}
INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
ret = iwm_if_add(iwm);
if (ret) {
dev_err(dev, "add SDIO interface failed\n");
goto destroy_wq;
}
dev_info(dev, "IWM SDIO probe\n");
return 0;
destroy_wq:
destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
if_free:
iwm_if_free(iwm);
return ret;
}
static void iwm_sdio_remove(struct sdio_func *func)
{
struct iwm_sdio_priv *hw = sdio_get_drvdata(func);
struct iwm_priv *iwm = hw_to_iwm(hw);
struct device *dev = &func->dev;
iwm_if_remove(iwm);
destroy_workqueue(hw->isr_wq);
iwm_debugfs_exit(iwm);
iwm_if_free(iwm);
sdio_set_drvdata(func, NULL);
dev_info(dev, "IWM SDIO remove\n");
return;
}
static const struct sdio_device_id iwm_sdio_ids[] = {
/* Global/AGN SKU */
{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
/* BGN SKU */
{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
static struct sdio_driver iwm_sdio_driver = {
.name = "iwm_sdio",
.id_table = iwm_sdio_ids,
.probe = iwm_sdio_probe,
.remove = iwm_sdio_remove,
};
static int __init iwm_sdio_init_module(void)
{
return sdio_register_driver(&iwm_sdio_driver);
}
static void __exit iwm_sdio_exit_module(void)
{
sdio_unregister_driver(&iwm_sdio_driver);
}
module_init(iwm_sdio_init_module);
module_exit(iwm_sdio_exit_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR);
|
hwlzc/2.6.34
|
drivers/net/wireless/iwmc3200wifi/sdio.c
|
C
|
gpl-2.0
| 13,178
|
! { dg-do compile }
! Tests fix for PR25058 in which references to dummy
! parameters before the entry would be missed.
!
! Contributed by Joost VandeVondele <jv244@cam.ac.uk>
!
MODULE M1
CONTAINS
FUNCTION F1(I) RESULT(RF1)
INTEGER :: I,K,RE1,RF1
RE1=K ! { dg-error "before the ENTRY statement" }
RETURN
ENTRY E1(K) RESULT(RE1)
RE1=-I
RETURN
END FUNCTION F1
END MODULE M1
END
! { dg-final { cleanup-modules "M1" } }
|
SanDisk-Open-Source/SSD_Dashboard
|
uefi/gcc/gcc-4.6.3/gcc/testsuite/gfortran.dg/entry_dummy_ref_2.f90
|
FORTRAN
|
gpl-2.0
| 425
|
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os #@UnusedImport
import sys #@UnusedImport
import sre_compile
from nsiqcppstyle_util import * #@UnusedWildImport
class RuleManager :
def __init__(self, runtimePath) :
self.availRuleNames = []
basePath = os.path.join(runtimePath, "rules")
ruleFiles = os.listdir(basePath)
rulePattern = sre_compile.compile("^(.*)\.py$")
for eachRuleFile in ruleFiles :
if os.path.isfile(os.path.join(basePath, eachRuleFile)) :
ruleMatch = rulePattern.match(eachRuleFile)
if ruleMatch != None and eachRuleFile.find("__init__") == -1 :
ruleName = ruleMatch.group(1)
self.availRuleNames.append(ruleName)
self.availRuleCount = len(self.availRuleNames)
self.availRuleModules = {}
self.loadedRule = []
self.rules = []
self.preprocessRules = []
self.functionNameRules = []
self.functionScopeRules = []
self.typeNameRules = []
self.typeScopeRules = []
self.lineRules = []
self.fileEndRules = []
self.fileStartRules = []
self.projectRules = []
self.rollBackImporter = None
# self.LoadAllRules()
def LoadRules(self, checkingRuleNames, printRule = True):
"""
        Load rules. Previously loaded rules are reset before the new ones are loaded.
"""
self.ResetRules()
self.ResetRegisteredRules()
if self.rollBackImporter != None :
self.rollBackImporter.uninstall()
self.rollBackImporter = RollbackImporter()
if printRule :
print "======================================================================================"
for ruleName in checkingRuleNames :
count = self.availRuleNames.count(ruleName)
if count == 0 :
print "%s does not exist or incompatible." % ruleName
continue
else :
if printRule :
print " - ", ruleName, "is applied."
ruleModule = __import__("rules."+ruleName)
self.loadedRule.append(ruleModule)
if len(self.loadedRule) == 0 :
print " No Rule is specified. Please configure rules in filefilter.txt."
if printRule :
print "======================================================================================"
def ResetRules(self):
self.loadedRule = []
############################################################################
# Rule Runner
############################################################################
def RunPreprocessRule(self, lexer, contextStack):
""" Run rules which runs in the preprecessor blocks """
for preprocessRule in self.preprocessRules :
data = lexer.Backup()
preprocessRule(lexer, contextStack)
lexer.Restore(data)
def RunFunctionNameRule(self, lexer, functionFullName, decl, contextStack, functionContext) :
""" Run rules which runs on the function name """
for eachFunctionNameRule in self.functionNameRules :
data = lexer.Backup()
eachFunctionNameRule(lexer, functionFullName, decl, contextStack, functionContext)
lexer.Restore(data)
def RunFunctionScopeRule(self, lexer, contextStack):
""" Run rules which runs in the function blocks """
for eachFunctionScopeRule in self.functionScopeRules :
data = lexer.Backup()
eachFunctionScopeRule(lexer, contextStack)
lexer.Restore(data)
def RunTypeNameRule(self, lexer, typeName, typeFullName, decl, contextStack, typeContext):
""" Run rules which runs on the type names """
for typeNameRule in self.typeNameRules :
data = lexer.Backup()
typeNameRule(lexer, typeName, typeFullName, decl, contextStack, typeContext)
lexer.Restore(data)
def RunTypeScopeRule(self, lexer, contextStack):
""" Run rules which runs in the type blocks """
for typeScopeRule in self.typeScopeRules :
data = lexer.Backup()
typeScopeRule(lexer, contextStack)
lexer.Restore(data)
def RunRule(self, lexer, contextStack):
""" Run rules which runs in any tokens """
for rule in self.rules :
data = lexer.Backup()
rule(lexer, contextStack)
lexer.Restore(data)
def RunLineRule(self, lexer, line, lineno):
""" Run rules which runs in each lines. """
for lineRule in self.lineRules :
data = lexer.Backup()
lineRule(lexer, line, lineno)
lexer.Restore(data)
def RunFileEndRule(self, lexer, filename, dirname):
""" Run rules which runs at the end of files. """
for fileEndRule in self.fileEndRules :
data = lexer.Backup()
fileEndRule(lexer, filename, dirname)
lexer.Restore(data)
def RunFileStartRule(self, lexer, filename, dirname):
""" Run rules which runs at the start of files. """
for fileStartRule in self.fileStartRules :
data = lexer.Backup()
fileStartRule(lexer, filename, dirname)
lexer.Restore(data)
def RunProjectRules(self, targetName):
""" Run rules which runs once a project. """
for projectRule in self.projectRules :
projectRule(targetName)
############################################################################
# Rule Resister Methods
############################################################################
def ResetRegisteredRules(self):
""" Reset all registered rules. """
del self.functionNameRules[:]
del self.functionScopeRules[:]
del self.lineRules[:]
del self.rules[:]
del self.typeNameRules[:]
del self.typeScopeRules[:]
del self.fileStartRules[:]
del self.fileEndRules[:]
del self.projectRules[:]
del self.preprocessRules[:]
def AddPreprocessRule(self, preprocessRule):
""" Add rule which runs in preprocess statements """
self.preprocessRules.append(preprocessRule)
def AddFunctionScopeRule(self, functionScopeRule):
""" Add rule which runs in function scope """
self.functionScopeRules.append(functionScopeRule)
def AddFunctionNameRule(self, functionRule):
""" Add rule on the function name place"""
self.functionNameRules.append(functionRule)
def AddLineRule(self, lineRule):
""" Add rule on the each line """
self.lineRules.append(lineRule)
def AddRule(self, rule):
""" Add rule on any token """
self.rules.append(rule)
def AddTypeNameRule(self, typeNameRule):
""" Add rule on any type (class / struct / union / namesapce / enum) """
self.typeNameRules.append(typeNameRule)
def AddTypeScopeRule(self, typeScopeRule):
""" Add rule on the any type definition scope """
self.typeScopeRules.append(typeScopeRule)
def AddFileEndRule(self, fileEndRule):
"""
Add rule on the file end
Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
lexer is the lexer used to analyze the source. it points the end token of source.
filename is the filename analyzed.
dirname is the file directory.
"""
self.fileEndRules.append(fileEndRule)
def AddFileStartRule(self, fileStartRule):
"""
Add rule on the file start
Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
lexer is the lexer used to analyze the source. it points the start token of source.
filename is the filename analyzed.
dirname is the file directory.
"""
self.fileStartRules.append(fileStartRule)
def AddProjectRules(self, projectRule):
"""
Add rule on the project
Added Rule should be function with following prototype "def RunRule(targetName)"
targetName is the analysis target directory.
"""
self.projectRules.append(projectRule)
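# Illustrative rule module (a sketch, not part of this file): a checker placed
# under rules/ would typically define a function matching one of the
# prototypes documented above and register it at import time, e.g.
#
#   def RunRule(lexer, line, lineno) :
#       if len(line) > 120 :
#           pass # a real rule would report a violation here
#   ruleManager.AddLineRule(RunRule)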
class RollbackImporter:
def __init__(self):
"Creates an instance and installs as the global importer"
self.previousModules = sys.modules.copy()
self.realImport = __builtins__["__import__"]
__builtins__["__import__"] = self._import
self.newModules = {}
def _import(self, name, globals=None, locals=None, fromlist=[]):
result = apply(self.realImport, (name, globals, locals, fromlist))
if name.find("rules") != -1 :
self.newModules[name] = 1
return result
def uninstall(self):
for modname in self.newModules.keys():
if modname.find("rules") != -1 :
if not self.previousModules.has_key(modname):
# Force reload when modname next imported
del(sys.modules[modname])
__builtins__["__import__"] = self.realImport
ruleManager = RuleManager(GetRuntimePath())
|
gleicher27/Tardigrade
|
moose/framework/contrib/nsiqcppstyle/nsiqcppstyle_rulemanager.py
|
Python
|
lgpl-2.1
| 10,691
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
import subprocess
import io
import os
import re
import locale
import tempfile
import warnings
from luigi import six
class FileWrapper(object):
"""
    Wrap `file` in a "real" object so stuff can be added to it after creation.
"""
def __init__(self, file_object):
self._subpipe = file_object
def __getattr__(self, name):
# forward calls to 'write', 'close' and other methods not defined below
return getattr(self._subpipe, name)
def __enter__(self, *args, **kwargs):
# instead of returning whatever is returned by __enter__ on the subpipe
# this returns self, so whatever custom injected methods are still available
# this might cause problems with custom file_objects, but seems to work
# fine with standard python `file` objects which is the only default use
return self
def __exit__(self, *args, **kwargs):
return self._subpipe.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self._subpipe)
class InputPipeProcessWrapper(object):
def __init__(self, command, input_pipe=None):
"""
Initializes a InputPipeProcessWrapper instance.
:param command: a subprocess.Popen instance with stdin=input_pipe and
stdout=subprocess.PIPE.
Alternatively, just its args argument as a convenience.
"""
self._command = command
self._input_pipe = input_pipe
self._original_input = True
if input_pipe is not None:
try:
input_pipe.fileno()
except AttributeError:
# subprocess require a fileno to work, if not present we copy to disk first
self._original_input = False
f = tempfile.NamedTemporaryFile('wb', prefix='luigi-process_tmp', delete=False)
self._tmp_file = f.name
f.write(input_pipe.read())
input_pipe.close()
f.close()
self._input_pipe = FileWrapper(io.BufferedReader(io.FileIO(self._tmp_file, 'r')))
self._process = command if isinstance(command, subprocess.Popen) else self.create_subprocess(command)
# we want to keep a circular reference to avoid garbage collection
# when the object is used in, e.g., pipe.read()
self._process._selfref = self
def create_subprocess(self, command):
"""
http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/2009-07-02-python-sigpipe.html
"""
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
return subprocess.Popen(command,
stdin=self._input_pipe,
stdout=subprocess.PIPE,
preexec_fn=subprocess_setup,
close_fds=True)
def _finish(self):
# Need to close this before input_pipe to get all SIGPIPE messages correctly
self._process.stdout.close()
if not self._original_input and os.path.exists(self._tmp_file):
os.remove(self._tmp_file)
if self._input_pipe is not None:
self._input_pipe.close()
self._process.wait() # deadlock?
if self._process.returncode not in (0, 141, 128 - 141):
            # 141 == 128 + 13 == 128 + SIGPIPE - normally processes exit with 128 + {received SIG}
# 128 - 141 == -13 == -SIGPIPE, sometimes python receives -13 for some subprocesses
raise RuntimeError('Error reading from pipe. Subcommand exited with non-zero exit status %s.' % self._process.returncode)
def close(self):
self._finish()
def __del__(self):
self._finish()
def __enter__(self):
return self
def _abort(self):
"""
Call _finish, but eat the exception (if any).
"""
try:
self._finish()
except KeyboardInterrupt:
raise
except BaseException:
pass
def __exit__(self, type, value, traceback):
if type:
self._abort()
else:
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdout, name)
except AttributeError:
return getattr(self._input_pipe, name)
def __iter__(self):
for line in self._process.stdout:
yield line
self._finish()
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
class OutputPipeProcessWrapper(object):
WRITES_BEFORE_FLUSH = 10000
def __init__(self, command, output_pipe=None):
self.closed = False
self._command = command
self._output_pipe = output_pipe
self._process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=output_pipe,
close_fds=True)
self._flushcount = 0
def write(self, *args, **kwargs):
self._process.stdin.write(*args, **kwargs)
self._flushcount += 1
if self._flushcount == self.WRITES_BEFORE_FLUSH:
self._process.stdin.flush()
self._flushcount = 0
def writeLine(self, line):
assert '\n' not in line
self.write(line + '\n')
def _finish(self):
"""
Closes and waits for subprocess to exit.
"""
if self._process.returncode is None:
self._process.stdin.flush()
self._process.stdin.close()
self._process.wait()
self.closed = True
def __del__(self):
if not self.closed:
self.abort()
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
self.abort()
def __enter__(self):
return self
def close(self):
self._finish()
if self._process.returncode == 0:
if self._output_pipe is not None:
self._output_pipe.close()
else:
raise RuntimeError('Error when executing command %s' % self._command)
def abort(self):
self._finish()
def __getattr__(self, name):
if name == '_process':
raise AttributeError(name)
try:
return getattr(self._process.stdin, name)
except AttributeError:
return getattr(self._output_pipe, name)
def readable(self):
return False
def writable(self):
return True
def seekable(self):
return False
class BaseWrapper(object):
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(BaseWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
def __exit__(self, *args):
self._stream.__exit__(*args)
def __iter__(self):
try:
for line in self._stream:
yield line
finally:
self.close()
class NewlineWrapper(BaseWrapper):
def __init__(self, stream, newline=None):
if newline is None:
self.newline = newline
else:
self.newline = newline.encode('ascii')
if self.newline not in (b'', b'\r\n', b'\n', b'\r', None):
raise ValueError("newline need to be one of {b'', b'\r\n', b'\n', b'\r', None}")
super(NewlineWrapper, self).__init__(stream)
def read(self, n=-1):
b = self._stream.read(n)
if self.newline == b'':
return b
        if self.newline is None:
            newline = b'\n'
        else:
            newline = self.newline
        return re.sub(b'(\n|\r\n|\r)', newline, b)
def writelines(self, lines):
if self.newline is None or self.newline == '':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.writelines(
(re.sub(b'(\n|\r\n|\r)', newline, line) for line in lines)
)
def write(self, b):
if self.newline is None or self.newline == '':
newline = os.linesep.encode('ascii')
else:
newline = self.newline
self._stream.write(re.sub(b'(\n|\r\n|\r)', newline, b))
class MixedUnicodeBytesWrapper(BaseWrapper):
"""
"""
def __init__(self, stream, encoding=None):
if encoding is None:
encoding = locale.getpreferredencoding()
self.encoding = encoding
super(MixedUnicodeBytesWrapper, self).__init__(stream)
def write(self, b):
self._stream.write(self._convert(b))
def writelines(self, lines):
self._stream.writelines((self._convert(line) for line in lines))
def _convert(self, b):
if isinstance(b, six.text_type):
b = b.encode(self.encoding)
warnings.warn('Writing unicode to byte stream', stacklevel=2)
return b
class Format(object):
"""
Interface for format specifications.
"""
@classmethod
def pipe_reader(cls, input_pipe):
raise NotImplementedError()
@classmethod
def pipe_writer(cls, output_pipe):
raise NotImplementedError()
def __rshift__(a, b):
return ChainFormat(a, b)
class ChainFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
try:
self.input = args[0].input
except AttributeError:
pass
try:
self.output = args[-1].output
except AttributeError:
pass
if not kwargs.get('check_consistency', True):
return
for x in range(len(args) - 1):
try:
if args[x].output != args[x + 1].input:
raise TypeError(
                        'The format chaining is not valid, %s expects %s '
                        'but %s provides %s' % (
args[x].__class__.__name__,
args[x].input,
args[x + 1].__class__.__name__,
args[x + 1].output,
)
)
except AttributeError:
pass
def pipe_reader(self, input_pipe):
for x in reversed(self.args):
input_pipe = x.pipe_reader(input_pipe)
return input_pipe
def pipe_writer(self, output_pipe):
for x in reversed(self.args):
output_pipe = x.pipe_writer(output_pipe)
return output_pipe
class TextWrapper(io.TextIOWrapper):
def __exit__(self, *args):
# io.TextIOWrapper close the file on __exit__, let the underlying file decide
if not self.closed and self.writable():
super(TextWrapper, self).flush()
self._stream.__exit__(*args)
def __del__(self, *args):
# io.TextIOWrapper close the file on __del__, let the underlying file decide
if not self.closed and self.writable():
super(TextWrapper, self).flush()
try:
self._stream.__del__(*args)
except AttributeError:
pass
def __init__(self, stream, *args, **kwargs):
self._stream = stream
try:
super(TextWrapper, self).__init__(stream, *args, **kwargs)
except TypeError:
pass
def __getattr__(self, name):
if name == '_stream':
raise AttributeError(name)
return getattr(self._stream, name)
def __enter__(self):
self._stream.__enter__()
return self
class NopFormat(Format):
def pipe_reader(self, input_pipe):
return input_pipe
def pipe_writer(self, output_pipe):
return output_pipe
class WrappedFormat(Format):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def pipe_reader(self, input_pipe):
return self.wrapper_cls(input_pipe, *self.args, **self.kwargs)
def pipe_writer(self, output_pipe):
return self.wrapper_cls(output_pipe, *self.args, **self.kwargs)
class TextFormat(WrappedFormat):
input = 'unicode'
output = 'bytes'
wrapper_cls = TextWrapper
class MixedUnicodeBytesFormat(WrappedFormat):
output = 'bytes'
wrapper_cls = MixedUnicodeBytesWrapper
class NewlineFormat(WrappedFormat):
input = 'bytes'
output = 'bytes'
wrapper_cls = NewlineWrapper
class GzipFormat(Format):
input = 'bytes'
output = 'bytes'
def __init__(self, compression_level=None):
self.compression_level = compression_level
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['gunzip'], input_pipe)
def pipe_writer(self, output_pipe):
args = ['gzip']
if self.compression_level is not None:
args.append('-' + str(int(self.compression_level)))
return OutputPipeProcessWrapper(args, output_pipe)
class Bzip2Format(Format):
input = 'bytes'
output = 'bytes'
def pipe_reader(self, input_pipe):
return InputPipeProcessWrapper(['bzcat'], input_pipe)
def pipe_writer(self, output_pipe):
return OutputPipeProcessWrapper(['bzip2'], output_pipe)
Text = TextFormat()
UTF8 = TextFormat(encoding='utf8')
Nop = NopFormat()
SysNewLine = NewlineFormat()
Gzip = GzipFormat()
Bzip2 = Bzip2Format()
MixedUnicodeBytes = MixedUnicodeBytesFormat()
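# Example (a sketch; assumes a local `gzip` binary and a hypothetical output
# path): formats compose with ">>" as long as adjacent output/input types
# match, and the result wraps a plain file object for reading or writing.
#
#   fmt = UTF8 >> Gzip            # TextFormat emits bytes, GzipFormat takes bytes
#   with open('out.gz', 'wb') as f:
#       w = fmt.pipe_writer(f)
#       w.write(u'hello\n')
#       w.close()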
def get_default_format():
if six.PY3:
return Text
elif os.linesep == '\n':
return Nop
else:
return SysNewLine
|
ViaSat/luigi
|
luigi/format.py
|
Python
|
apache-2.0
| 14,652
|
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.cache.filter.terms;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.query.QueryParseContext;
/**
*/
public class TermsLookup {
private final FieldMapper fieldMapper;
private final String index;
private final String type;
private final String id;
private final String routing;
private final String path;
@Nullable
private final QueryParseContext queryParseContext;
public TermsLookup(FieldMapper fieldMapper, String index, String type, String id, String routing, String path, @Nullable QueryParseContext queryParseContext) {
this.fieldMapper = fieldMapper;
this.index = index;
this.type = type;
this.id = id;
this.routing = routing;
this.path = path;
this.queryParseContext = queryParseContext;
}
public FieldMapper getFieldMapper() {
return fieldMapper;
}
public String getIndex() {
return index;
}
public String getType() {
return type;
}
public String getId() {
return id;
}
public String getRouting() {
return this.routing;
}
public String getPath() {
return path;
}
@Nullable
public QueryParseContext getQueryParseContext() {
return queryParseContext;
}
public String toString() {
return fieldMapper.names().fullName() + ":" + index + "/" + type + "/" + id + "/" + path;
}
}
|
corochoone/elasticsearch
|
src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java
|
Java
|
apache-2.0
| 2,322
|
/**
* Copyright 2011, Big Switch Networks, Inc.
* Originally created by David Erickson, Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
**/
package net.floodlightcontroller.routing;
import java.util.HashMap;
import net.floodlightcontroller.routing.Link;
import org.projectfloodlight.openflow.types.DatapathId;
public class BroadcastTree {
protected HashMap<DatapathId, Link> links;
protected HashMap<DatapathId, Integer> costs;
public BroadcastTree() {
links = new HashMap<DatapathId, Link>();
costs = new HashMap<DatapathId, Integer>();
}
public BroadcastTree(HashMap<DatapathId, Link> links, HashMap<DatapathId, Integer> costs) {
this.links = links;
this.costs = costs;
}
public Link getTreeLink(DatapathId node) {
return links.get(node);
}
public int getCost(DatapathId node) {
if (costs.get(node) == null) return -1;
return (costs.get(node));
}
public HashMap<DatapathId, Link> getLinks() {
return links;
}
public void addTreeLink(DatapathId myNode, Link link) {
links.put(myNode, link);
}
public String toString() {
StringBuffer sb = new StringBuffer();
for(DatapathId n: links.keySet()) {
sb.append("[" + n.toString() + ": cost=" + costs.get(n) + ", " + links.get(n) + "]");
}
return sb.toString();
}
public HashMap<DatapathId, Integer> getCosts() {
return costs;
}
}
|
zsavvas/MPTCP-aware-SDN
|
src/main/java/net/floodlightcontroller/routing/BroadcastTree.java
|
Java
|
apache-2.0
| 2,036
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.serializers;
import org.apache.cassandra.utils.ByteBufferUtil;
import java.nio.ByteBuffer;
import java.text.SimpleDateFormat;
import java.text.ParseException;
import java.util.Date;
import java.util.regex.Pattern;
import org.apache.commons.lang3.time.DateUtils;
public class TimestampSerializer implements TypeSerializer<Date>
{
//NOTE: This list is used below and if you change the order
// you need to update the default format and json formats in the code below.
private static final String[] dateStringPatterns = new String[] {
"yyyy-MM-dd HH:mm",
"yyyy-MM-dd HH:mm:ss",
"yyyy-MM-dd HH:mm z",
"yyyy-MM-dd HH:mm zz",
"yyyy-MM-dd HH:mm zzz",
"yyyy-MM-dd HH:mmX",
"yyyy-MM-dd HH:mmXX", // DEFAULT_FORMAT
"yyyy-MM-dd HH:mmXXX",
"yyyy-MM-dd HH:mm:ss",
"yyyy-MM-dd HH:mm:ss z",
"yyyy-MM-dd HH:mm:ss zz",
"yyyy-MM-dd HH:mm:ss zzz",
"yyyy-MM-dd HH:mm:ssX",
"yyyy-MM-dd HH:mm:ssXX",
"yyyy-MM-dd HH:mm:ssXXX",
"yyyy-MM-dd HH:mm:ss.SSS", // TO_JSON_FORMAT
"yyyy-MM-dd HH:mm:ss.SSS z",
"yyyy-MM-dd HH:mm:ss.SSS zz",
"yyyy-MM-dd HH:mm:ss.SSS zzz",
"yyyy-MM-dd HH:mm:ss.SSSX",
"yyyy-MM-dd HH:mm:ss.SSSXX",
"yyyy-MM-dd HH:mm:ss.SSSXXX",
"yyyy-MM-dd'T'HH:mm",
"yyyy-MM-dd'T'HH:mm z",
"yyyy-MM-dd'T'HH:mm zz",
"yyyy-MM-dd'T'HH:mm zzz",
"yyyy-MM-dd'T'HH:mmX",
"yyyy-MM-dd'T'HH:mmXX",
"yyyy-MM-dd'T'HH:mmXXX",
"yyyy-MM-dd'T'HH:mm:ss",
"yyyy-MM-dd'T'HH:mm:ss z",
"yyyy-MM-dd'T'HH:mm:ss zz",
"yyyy-MM-dd'T'HH:mm:ss zzz",
"yyyy-MM-dd'T'HH:mm:ssX",
"yyyy-MM-dd'T'HH:mm:ssXX",
"yyyy-MM-dd'T'HH:mm:ssXXX",
"yyyy-MM-dd'T'HH:mm:ss.SSS",
"yyyy-MM-dd'T'HH:mm:ss.SSS z",
"yyyy-MM-dd'T'HH:mm:ss.SSS zz",
"yyyy-MM-dd'T'HH:mm:ss.SSS zzz",
"yyyy-MM-dd'T'HH:mm:ss.SSSX",
"yyyy-MM-dd'T'HH:mm:ss.SSSXX",
"yyyy-MM-dd'T'HH:mm:ss.SSSXXX",
"yyyy-MM-dd",
"yyyy-MM-dd z",
"yyyy-MM-dd zz",
"yyyy-MM-dd zzz",
"yyyy-MM-ddX",
"yyyy-MM-ddXX",
"yyyy-MM-ddXXX"
};
private static final String DEFAULT_FORMAT = dateStringPatterns[6];
private static final Pattern timestampPattern = Pattern.compile("^-?\\d+$");
private static final ThreadLocal<SimpleDateFormat> FORMATTER = new ThreadLocal<SimpleDateFormat>()
{
protected SimpleDateFormat initialValue()
{
return new SimpleDateFormat(DEFAULT_FORMAT);
}
};
public static final SimpleDateFormat TO_JSON_FORMAT = new SimpleDateFormat(dateStringPatterns[15]);
public static final TimestampSerializer instance = new TimestampSerializer();
public Date deserialize(ByteBuffer bytes)
{
return bytes.remaining() == 0 ? null : new Date(ByteBufferUtil.toLong(bytes));
}
public ByteBuffer serialize(Date value)
{
return value == null ? ByteBufferUtil.EMPTY_BYTE_BUFFER : ByteBufferUtil.bytes(value.getTime());
}
public static long dateStringToTimestamp(String source) throws MarshalException
{
if (source.equalsIgnoreCase("now"))
return System.currentTimeMillis();
// Milliseconds since epoch?
if (timestampPattern.matcher(source).matches())
{
try
{
return Long.parseLong(source);
}
catch (NumberFormatException e)
{
throw new MarshalException(String.format("Unable to make long (for date) from: '%s'", source), e);
}
}
// Last chance, attempt to parse as date-time string
try
{
return DateUtils.parseDateStrictly(source, dateStringPatterns).getTime();
}
catch (ParseException e1)
{
throw new MarshalException(String.format("Unable to coerce '%s' to a formatted date (long)", source), e1);
}
}
public void validate(ByteBuffer bytes) throws MarshalException
{
if (bytes.remaining() != 8 && bytes.remaining() != 0)
throw new MarshalException(String.format("Expected 8 or 0 byte long for date (%d)", bytes.remaining()));
}
public String toString(Date value)
{
return value == null ? "" : FORMATTER.get().format(value);
}
public Class<Date> getType()
{
return Date.class;
}
}
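The serializer above accepts either raw milliseconds since the epoch or any of the listed patterns. A self-contained sketch of that two-step strategy using only java.text, with a deliberately short pattern list and SimpleDateFormat standing in for commons-lang's DateUtils.parseDateStrictly (so, unlike the original, a match on a leading prefix of the string is accepted).
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.regex.Pattern;

public class DateParseSketch {
    private static final Pattern EPOCH = Pattern.compile("^-?\\d+$");
    private static final String[] PATTERNS = { "yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd" };

    static long toTimestamp(String source) {
        // Raw milliseconds since the epoch take precedence over any text pattern.
        if (EPOCH.matcher(source).matches()) {
            return Long.parseLong(source);
        }
        // Otherwise try each pattern in order, without lenient calendar rollover.
        for (String pattern : PATTERNS) {
            SimpleDateFormat format = new SimpleDateFormat(pattern);
            format.setLenient(false);
            try {
                return format.parse(source).getTime();
            } catch (ParseException ignored) {
                // fall through and try the next pattern
            }
        }
        throw new IllegalArgumentException("Unable to coerce '" + source + "' to a date");
    }

    public static void main(String[] args) {
        System.out.println(toTimestamp("1438387200000"));
        System.out.println(new Date(toTimestamp("2015-08-01 00:00:00")));
    }
}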
|
mourao666/cassandra-sim
|
src/java/org/apache/cassandra/serializers/TimestampSerializer.java
|
Java
|
apache-2.0
| 5,562
|
cask 'qcma' do
version '0.4.1,-1'
sha256 'fc286229be41cbeb83fdb8800231f67d8f2f0d51c2fca07f09c7f6e9d4eecca7'
# github.com/codestation was verified as official when first introduced to the cask
url "https://github.com/codestation/qcma/releases/download/v#{version.before_comma}/Qcma_#{version.before_comma}#{version.after_comma}.dmg"
appcast 'https://github.com/codestation/qcma/releases.atom'
name 'Qcma'
homepage 'https://codestation.github.io/qcma/'
app 'Qcma.app'
end
|
troyxmccall/homebrew-cask
|
Casks/qcma.rb
|
Ruby
|
bsd-2-clause
| 488
|
/*
* Copyright (C) 1999 Lars Knoll (knoll@kde.org)
* (C) 1999 Antti Koivisto (koivisto@kde.org)
* (C) 2000 Dirk Mueller (mueller@kde.org)
* Copyright (C) 2003, 2006, 2010, 2011 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include "config.h"
#include "Font.h"
#include "FloatRect.h"
#include "FontCache.h"
#include "FontTranscoder.h"
#if PLATFORM(QT) && HAVE(QRAWFONT)
#include "ContextShadow.h"
#include "GraphicsContext.h"
#endif
#include "IntPoint.h"
#include "GlyphBuffer.h"
#include "TextRun.h"
#include "WidthIterator.h"
#include <wtf/MathExtras.h>
#include <wtf/UnusedParam.h>
using namespace WTF;
using namespace Unicode;
namespace WebCore {
Font::CodePath Font::s_codePath = Auto;
// ============================================================================================
// Font Implementation (Cross-Platform Portion)
// ============================================================================================
Font::Font()
: m_letterSpacing(0)
, m_wordSpacing(0)
, m_isPlatformFont(false)
, m_needsTranscoding(false)
{
}
Font::Font(const FontDescription& fd, short letterSpacing, short wordSpacing)
: m_fontDescription(fd)
, m_letterSpacing(letterSpacing)
, m_wordSpacing(wordSpacing)
, m_isPlatformFont(false)
, m_needsTranscoding(fontTranscoder().needsTranscoding(fd))
{
}
Font::Font(const FontPlatformData& fontData, bool isPrinterFont, FontSmoothingMode fontSmoothingMode)
: m_fontList(FontFallbackList::create())
, m_letterSpacing(0)
, m_wordSpacing(0)
, m_isPlatformFont(true)
{
m_fontDescription.setUsePrinterFont(isPrinterFont);
m_fontDescription.setFontSmoothing(fontSmoothingMode);
m_needsTranscoding = fontTranscoder().needsTranscoding(fontDescription());
m_fontList->setPlatformFont(fontData);
}
Font::Font(const Font& other)
: m_fontDescription(other.m_fontDescription)
, m_fontList(other.m_fontList)
, m_letterSpacing(other.m_letterSpacing)
, m_wordSpacing(other.m_wordSpacing)
, m_isPlatformFont(other.m_isPlatformFont)
, m_needsTranscoding(fontTranscoder().needsTranscoding(other.m_fontDescription))
{
}
Font& Font::operator=(const Font& other)
{
m_fontDescription = other.m_fontDescription;
m_fontList = other.m_fontList;
m_letterSpacing = other.m_letterSpacing;
m_wordSpacing = other.m_wordSpacing;
m_isPlatformFont = other.m_isPlatformFont;
m_needsTranscoding = other.m_needsTranscoding;
return *this;
}
bool Font::operator==(const Font& other) const
{
// Our FontData don't have to be checked, since checking the font description will be fine.
// FIXME: This does not work if the font was made with the FontPlatformData constructor.
if (loadingCustomFonts() || other.loadingCustomFonts())
return false;
FontSelector* first = m_fontList ? m_fontList->fontSelector() : 0;
FontSelector* second = other.m_fontList ? other.m_fontList->fontSelector() : 0;
return first == second
&& m_fontDescription == other.m_fontDescription
&& m_letterSpacing == other.m_letterSpacing
&& m_wordSpacing == other.m_wordSpacing
&& (m_fontList ? m_fontList->generation() : 0) == (other.m_fontList ? other.m_fontList->generation() : 0);
}
void Font::update(PassRefPtr<FontSelector> fontSelector) const
{
// FIXME: It is pretty crazy that we are willing to just poke into a RefPtr, but it ends up
// being reasonably safe (because inherited fonts in the render tree pick up the new
// style anyway. Other copies are transient, e.g., the state in the GraphicsContext, and
// won't stick around long enough to get you in trouble). Still, this is pretty disgusting,
// and could eventually be rectified by using RefPtrs for Fonts themselves.
if (!m_fontList)
m_fontList = FontFallbackList::create();
m_fontList->invalidate(fontSelector);
}
void Font::drawText(GraphicsContext* context, const TextRun& run, const FloatPoint& point, int from, int to) const
{
// Don't draw anything while we are using custom fonts that are in the process of loading.
if (loadingCustomFonts())
return;
to = (to == -1 ? run.length() : to);
#if ENABLE(SVG_FONTS)
if (primaryFont()->isSVGFont()) {
drawTextUsingSVGFont(context, run, point, from, to);
return;
}
#endif
CodePath codePathToUse = codePath(run);
#if PLATFORM(QT) && HAVE(QRAWFONT)
if (context->textDrawingMode() & TextModeStroke || context->contextShadow()->m_type != ContextShadow::NoShadow)
codePathToUse = Complex;
#endif
if (codePathToUse != Complex)
return drawSimpleText(context, run, point, from, to);
return drawComplexText(context, run, point, from, to);
}
void Font::drawEmphasisMarks(GraphicsContext* context, const TextRun& run, const AtomicString& mark, const FloatPoint& point, int from, int to) const
{
if (loadingCustomFonts())
return;
if (to < 0)
to = run.length();
#if ENABLE(SVG_FONTS)
// FIXME: Implement for SVG fonts.
if (primaryFont()->isSVGFont())
return;
#endif
if (codePath(run) != Complex)
drawEmphasisMarksForSimpleText(context, run, mark, point, from, to);
else
drawEmphasisMarksForComplexText(context, run, mark, point, from, to);
}
float Font::width(const TextRun& run, HashSet<const SimpleFontData*>* fallbackFonts, GlyphOverflow* glyphOverflow) const
{
#if ENABLE(SVG_FONTS)
if (primaryFont()->isSVGFont())
return floatWidthUsingSVGFont(run);
#endif
CodePath codePathToUse = codePath(run);
if (codePathToUse != Complex) {
// If the complex text implementation cannot return fallback fonts, avoid
// returning them for simple text as well.
static bool returnFallbackFonts = canReturnFallbackFontsForComplexText();
return floatWidthForSimpleText(run, 0, returnFallbackFonts ? fallbackFonts : 0, codePathToUse == SimpleWithGlyphOverflow || (glyphOverflow && glyphOverflow->computeBounds) ? glyphOverflow : 0);
}
return floatWidthForComplexText(run, fallbackFonts, glyphOverflow);
}
float Font::width(const TextRun& run, int extraCharsAvailable, int& charsConsumed, String& glyphName) const
{
#if !ENABLE(SVG_FONTS)
UNUSED_PARAM(extraCharsAvailable);
#else
if (primaryFont()->isSVGFont())
return floatWidthUsingSVGFont(run, extraCharsAvailable, charsConsumed, glyphName);
#endif
charsConsumed = run.length();
glyphName = "";
if (codePath(run) != Complex)
return floatWidthForSimpleText(run, 0);
return floatWidthForComplexText(run);
}
FloatRect Font::selectionRectForText(const TextRun& run, const FloatPoint& point, int h, int from, int to) const
{
#if ENABLE(SVG_FONTS)
if (primaryFont()->isSVGFont())
return selectionRectForTextUsingSVGFont(run, point, h, from, to);
#endif
to = (to == -1 ? run.length() : to);
if (codePath(run) != Complex)
return selectionRectForSimpleText(run, point, h, from, to);
return selectionRectForComplexText(run, point, h, from, to);
}
int Font::offsetForPosition(const TextRun& run, float x, bool includePartialGlyphs) const
{
#if ENABLE(SVG_FONTS)
if (primaryFont()->isSVGFont())
return offsetForPositionForTextUsingSVGFont(run, x, includePartialGlyphs);
#endif
if (codePath(run) != Complex)
return offsetForPositionForSimpleText(run, x, includePartialGlyphs);
return offsetForPositionForComplexText(run, x, includePartialGlyphs);
}
#if ENABLE(SVG_FONTS)
bool Font::isSVGFont() const
{
return primaryFont()->isSVGFont();
}
#endif
String Font::normalizeSpaces(const UChar* characters, unsigned length)
{
UChar* buffer;
String normalized = String::createUninitialized(length, buffer);
for (unsigned i = 0; i < length; ++i)
buffer[i] = normalizeSpaces(characters[i]);
return normalized;
}
static bool shouldUseFontSmoothing = true;
void Font::setShouldUseSmoothing(bool shouldUseSmoothing)
{
ASSERT(isMainThread());
shouldUseFontSmoothing = shouldUseSmoothing;
}
bool Font::shouldUseSmoothing()
{
return shouldUseFontSmoothing;
}
void Font::setCodePath(CodePath p)
{
s_codePath = p;
}
Font::CodePath Font::codePath()
{
return s_codePath;
}
Font::CodePath Font::codePath(const TextRun& run) const
{
if (s_codePath != Auto)
return s_codePath;
#if PLATFORM(QT) && !HAVE(QRAWFONT)
if (run.expansion() || run.rtl() || isSmallCaps() || wordSpacing() || letterSpacing())
return Complex;
#endif
CodePath result = Simple;
// Start from 0 since drawing and highlighting also measure the characters before run->from
// FIXME: Should use a UnicodeSet in ports where ICU is used. Note that we
// can't simply use UnicodeCharacter Property/class because some characters
// are not 'combining', but still need to go to the complex path.
// Alternatively, we may as well consider binary search over a sorted
// list of ranges.
for (int i = 0; i < run.length(); i++) {
const UChar c = run[i];
if (c < 0x2E5) // U+02E5 through U+02E9 (Modifier Letters : Tone letters)
continue;
if (c <= 0x2E9)
return Complex;
if (c < 0x300) // U+0300 through U+036F Combining diacritical marks
continue;
if (c <= 0x36F)
return Complex;
if (c < 0x0591 || c == 0x05BE) // U+0591 through U+05CF excluding U+05BE Hebrew combining marks, Hebrew punctuation Paseq, Sof Pasuq and Nun Hafukha
continue;
if (c <= 0x05CF)
return Complex;
// U+0600 through U+109F Arabic, Syriac, Thaana, NKo, Samaritan, Mandaic,
// Devanagari, Bengali, Gurmukhi, Gujarati, Oriya, Tamil, Telugu, Kannada,
// Malayalam, Sinhala, Thai, Lao, Tibetan, Myanmar
if (c < 0x0600)
continue;
if (c <= 0x109F)
return Complex;
// U+1100 through U+11FF Hangul Jamo (only Ancient Korean should be left here if you precompose;
// Modern Korean will be precomposed as a result of step A)
if (c < 0x1100)
continue;
if (c <= 0x11FF)
return Complex;
if (c < 0x135D) // U+135D through U+135F Ethiopic combining marks
continue;
if (c <= 0x135F)
return Complex;
if (c < 0x1700) // U+1700 through U+18AF Tagalog, Hanunoo, Buhid, Tagbanwa, Khmer, Mongolian
continue;
if (c <= 0x18AF)
return Complex;
if (c < 0x1900) // U+1900 through U+194F Limbu (Unicode 4.0)
continue;
if (c <= 0x194F)
return Complex;
if (c < 0x1980) // U+1980 through U+19DF New Tai Lue
continue;
if (c <= 0x19DF)
return Complex;
if (c < 0x1A00) // U+1A00 through U+1CFF Buginese, Tai Tham, Balinese, Batak, Lepcha, Vedic
continue;
if (c <= 0x1CFF)
return Complex;
if (c < 0x1DC0) // U+1DC0 through U+1DFF Combining Diacritical Marks Supplement
continue;
if (c <= 0x1DFF)
return Complex;
// U+1E00 through U+2000 characters with diacritics and stacked diacritics
if (c <= 0x2000) {
result = SimpleWithGlyphOverflow;
continue;
}
if (c < 0x20D0) // U+20D0 through U+20FF Combining marks for symbols
continue;
if (c <= 0x20FF)
return Complex;
if (c < 0x2CEF) // U+2CEF through U+2CF1 Combining marks for Coptic
continue;
if (c <= 0x2CF1)
return Complex;
if (c < 0x302A) // U+302A through U+302F Ideographic and Hangul Tone marks
continue;
if (c <= 0x302F)
return Complex;
if (c < 0xA67C) // U+A67C through U+A67D Combining marks for old Cyrillic
continue;
if (c <= 0xA67D)
return Complex;
if (c < 0xA6F0) // U+A6F0 through U+A6F1 Combining mark for Bamum
continue;
if (c <= 0xA6F1)
return Complex;
// U+A800 through U+ABFF Nagri, Phags-pa, Saurashtra, Devanagari Extended,
// Hangul Jamo Ext. A, Javanese, Myanmar Extended A, Tai Viet, Meetei Mayek,
if (c < 0xA800)
continue;
if (c <= 0xABFF)
return Complex;
if (c < 0xD7B0) // U+D7B0 through U+D7FF Hangul Jamo Ext. B
continue;
if (c <= 0xD7FF)
return Complex;
if (c < 0xFE20) // U+FE20 through U+FE2F Combining half marks
continue;
if (c <= 0xFE2F)
return Complex;
// FIXME: Make this loop UTF-16-aware and check for Brahmi (U+11000 block)
// Kaithi (U+11080 block) and other complex scripts in plane 1 or higher.
}
if (typesettingFeatures())
return Complex;
return result;
}
bool Font::isCJKIdeograph(UChar32 c)
{
// The basic CJK Unified Ideographs block.
if (c >= 0x4E00 && c <= 0x9FFF)
return true;
// CJK Unified Ideographs Extension A.
if (c >= 0x3400 && c <= 0x4DBF)
return true;
// CJK Radicals Supplement.
if (c >= 0x2E80 && c <= 0x2EFF)
return true;
// Kangxi Radicals.
if (c >= 0x2F00 && c <= 0x2FDF)
return true;
// CJK Strokes.
if (c >= 0x31C0 && c <= 0x31EF)
return true;
// CJK Compatibility Ideographs.
if (c >= 0xF900 && c <= 0xFAFF)
return true;
// CJK Unified Ideographs Extension B.
if (c >= 0x20000 && c <= 0x2A6DF)
return true;
// CJK Unified Ideographs Extension C.
if (c >= 0x2A700 && c <= 0x2B73F)
return true;
// CJK Unified Ideographs Extension D.
if (c >= 0x2B740 && c <= 0x2B81F)
return true;
// CJK Compatibility Ideographs Supplement.
if (c >= 0x2F800 && c <= 0x2FA1F)
return true;
return false;
}
bool Font::isCJKIdeographOrSymbol(UChar32 c)
{
// 0x2C7 Caron, Mandarin Chinese 3rd Tone
// 0x2CA Modifier Letter Acute Accent, Mandarin Chinese 2nd Tone
// 0x2CB Modifier Letter Grave Accent, Mandarin Chinese 4th Tone
// 0x2D9 Dot Above, Mandarin Chinese 5th Tone
if ((c == 0x2C7) || (c == 0x2CA) || (c == 0x2CB) || (c == 0x2D9))
return true;
// Ideographic Description Characters.
if (c >= 0x2FF0 && c <= 0x2FFF)
return true;
// CJK Symbols and Punctuation.
if (c >= 0x3000 && c <= 0x303F)
return true;
// Hiragana
if (c >= 0x3040 && c <= 0x309F)
return true;
// Katakana
if (c >= 0x30A0 && c <= 0x30FF)
return true;
// Bopomofo
if (c >= 0x3100 && c <= 0x312F)
return true;
// Bopomofo Extended
if (c >= 0x31A0 && c <= 0x31BF)
return true;
// Enclosed CJK Letters and Months.
if (c >= 0x3200 && c <= 0x32FF)
return true;
// CJK Compatibility.
if (c >= 0x3300 && c <= 0x33FF)
return true;
// CJK Compatibility Forms.
if (c >= 0xFE30 && c <= 0xFE4F)
return true;
// Halfwidth and Fullwidth Forms
// Usually only used in CJK
if (c >= 0xFF00 && c <= 0xFFEF)
return true;
// Emoji.
if (c >= 0x1F200 && c <= 0x1F6FF)
return true;
return isCJKIdeograph(c);
}
unsigned Font::expansionOpportunityCount(const UChar* characters, size_t length, TextDirection direction, bool& isAfterExpansion)
{
static bool expandAroundIdeographs = canExpandAroundIdeographsInComplexText();
unsigned count = 0;
if (direction == LTR) {
for (size_t i = 0; i < length; ++i) {
UChar32 character = characters[i];
if (treatAsSpace(character)) {
count++;
isAfterExpansion = true;
continue;
}
if (U16_IS_LEAD(character) && i + 1 < length && U16_IS_TRAIL(characters[i + 1])) {
character = U16_GET_SUPPLEMENTARY(character, characters[i + 1]);
i++;
}
if (expandAroundIdeographs && isCJKIdeographOrSymbol(character)) {
if (!isAfterExpansion)
count++;
count++;
isAfterExpansion = true;
continue;
}
isAfterExpansion = false;
}
} else {
for (size_t i = length; i > 0; --i) {
UChar32 character = characters[i - 1];
if (treatAsSpace(character)) {
count++;
isAfterExpansion = true;
continue;
}
if (U16_IS_TRAIL(character) && i > 1 && U16_IS_LEAD(characters[i - 2])) {
character = U16_GET_SUPPLEMENTARY(characters[i - 2], character);
i--;
}
if (expandAroundIdeographs && isCJKIdeographOrSymbol(character)) {
if (!isAfterExpansion)
count++;
count++;
isAfterExpansion = true;
continue;
}
isAfterExpansion = false;
}
}
return count;
}
bool Font::canReceiveTextEmphasis(UChar32 c)
{
CharCategory category = Unicode::category(c);
if (category & (Separator_Space | Separator_Line | Separator_Paragraph | Other_NotAssigned | Other_Control | Other_Format))
return false;
// Additional word-separator characters listed in CSS Text Level 3 Editor's Draft 3 November 2010.
if (c == ethiopicWordspace || c == aegeanWordSeparatorLine || c == aegeanWordSeparatorDot
|| c == ugariticWordDivider || c == tibetanMarkIntersyllabicTsheg || c == tibetanMarkDelimiterTshegBstar)
return false;
return true;
}
}
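Font::codePath above makes a single pass over the text and escalates from the fast simple layout path to the complex shaper as soon as a code point falls into a range that needs combining-mark or contextual handling. A self-contained sketch of that dispatch idea in Java, using only two illustrative ranges rather than WebCore's full table; the class name is a placeholder.
public class CodePathSketch {
    enum CodePath { SIMPLE, COMPLEX }

    // Escalate to COMPLEX on the first code point that needs combining-mark
    // or contextual shaping support; the two ranges below are illustrative only.
    static CodePath codePath(String text) {
        for (int i = 0; i < text.length(); ) {
            int c = text.codePointAt(i);
            if (c >= 0x0300 && c <= 0x036F)   // combining diacritical marks
                return CodePath.COMPLEX;
            if (c >= 0x0600 && c <= 0x06FF)   // Arabic, which joins contextually
                return CodePath.COMPLEX;
            i += Character.charCount(c);
        }
        return CodePath.SIMPLE;
    }

    public static void main(String[] args) {
        System.out.println(codePath("hello"));        // SIMPLE
        System.out.println(codePath("e\u0301toile")); // COMPLEX (combining acute accent)
    }
}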
|
underwriteme/phantomjs
|
src/qt/src/3rdparty/webkit/Source/WebCore/platform/graphics/Font.cpp
|
C++
|
bsd-3-clause
| 18,633
|
<?php
namespace Herrera\Json\Exception;
/**
* Used for JSON specific errors.
*
* @author Kevin Herrera <kevin@herrera.io>
*/
class JsonException extends Exception
{
/**
* The recognized JSON error codes.
*
* @var array
*/
private static $codes = array(
JSON_ERROR_CTRL_CHAR => 'Control character error, possibly incorrectly encoded.',
JSON_ERROR_DEPTH => 'The maximum stack depth has been exceeded.',
JSON_ERROR_NONE => 'No error has occurred.',
JSON_ERROR_STATE_MISMATCH => 'Invalid or malformed JSON.',
JSON_ERROR_SYNTAX => 'Syntax error.',
JSON_ERROR_UTF8 => 'Malformed UTF-8 characters, possibly incorrectly encoded.'
);
/**
* The individual JSON error messages.
*
* @var array
*/
private $errors = array();
/**
* Sets the main error message, and the other error messages.
*
* @param string $message The main error message.
* @param array $errors The other error messages.
*/
public function __construct($message, array $errors = array())
{
$this->errors = $errors;
parent::__construct($message);
}
/**
* Creates a new exception using the JSON error code.
*
* @param integer $code The code.
*
* @return JsonException The exception.
*/
public static function createUsingCode($code)
{
$message = 'Unknown error.';
if (isset(self::$codes[$code])) {
$message = self::$codes[$code];
}
return new static($message);
}
/**
* Returns the other error messages.
*
* @return array The messages.
*/
public function getErrors()
{
return $this->errors;
}
}
|
Urbannet/cleanclean
|
yii2/vendor/herrera-io/json/src/lib/Herrera/Json/Exception/JsonException.php
|
PHP
|
bsd-3-clause
| 1,760
|
<div>
Use the default Maven settings (<tt>$HOME/.m2/settings.xml</tt>) as set on the build node.
</div>
|
github-api-test-org/jenkins
|
core/src/main/resources/jenkins/mvn/DefaultSettingsProvider/help.html
|
HTML
|
mit
| 99
|
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.persistence.influxdb08.internal;
import java.text.DateFormat;
import java.util.Date;
import org.openhab.core.persistence.HistoricItem;
import org.openhab.core.types.State;
/**
* This is a Java bean used to return historic items from Influxdb.
*
* @author Theo Weiss - Initial Contribution
* @since 1.5.0
*
*/
public class InfluxdbItem implements HistoricItem {
final private String name;
final private State state;
final private Date timestamp;
public InfluxdbItem(String name, State state, Date timestamp) {
this.name = name;
this.state = state;
this.timestamp = timestamp;
}
public String getName() {
return name;
}
public State getState() {
return state;
}
public Date getTimestamp() {
return timestamp;
}
@Override
public String toString() {
return DateFormat.getDateTimeInstance().format(timestamp) + ": " + name + " -> "
+ state.toString();
}
}
|
theoweiss/openhab
|
bundles/persistence/org.openhab.persistence.influxdb08/java/org/openhab/persistence/influxdb08/internal/InfluxdbItem.java
|
Java
|
epl-1.0
| 1,274
|
/*
* Driver core for serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/*
* This is used to lock changes in serial line configuration.
*/
static DEFINE_MUTEX(port_mutex);
/*
* lockdep: port->lock is initialized in two places, but we
* want only one lock-class:
*/
static struct lock_class_key port_lock_key;
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
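/* e.g. (8 - 4) * 8 = 32 on LP64 targets, 0 where long and int are both 32-bit */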
#ifdef CONFIG_SERIAL_CORE_CONSOLE
#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line)
#else
#define uart_console(port) (0)
#endif
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void __uart_wait_until_sent(struct uart_port *port, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
/*
* This routine is used by the interrupt handler to schedule processing in
* the software interrupt portion of the driver.
*/
void uart_write_wakeup(struct uart_port *port)
{
struct uart_state *state = port->state;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
BUG_ON(!state);
tasklet_schedule(&state->tlet);
}
static void uart_stop(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
port->ops->stop_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void __uart_start(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
if (port->ops->wake_peer)
port->ops->wake_peer(port);
if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
!tty->stopped && !tty->hw_stopped)
port->ops->start_tx(port);
}
static void uart_start(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
__uart_start(tty);
spin_unlock_irqrestore(&port->lock, flags);
}
static void uart_tasklet_action(unsigned long data)
{
struct uart_state *state = (struct uart_state *)data;
tty_wakeup(state->port.tty);
}
static inline void
uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
{
unsigned long flags;
unsigned int old;
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
if (old != port->mctrl)
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
}
#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
#define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear)
/*
* Startup the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long page;
int retval = 0;
if (port->flags & ASYNC_INITIALIZED)
return 0;
/*
* Set the TTY IO error marker - we will only clear this
* once we have successfully opened the port. Also set
* up the tty->alt_speed kludge
*/
set_bit(TTY_IO_ERROR, &tty->flags);
if (uport->type == PORT_UNKNOWN)
return 0;
/*
* Initialise and allocate the transmit and temporary
* buffer.
*/
if (!state->xmit.buf) {
/* This is protected by the per port mutex */
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
state->xmit.buf = (unsigned char *) page;
uart_circ_clear(&state->xmit);
}
retval = uport->ops->startup(uport);
if (retval == 0) {
if (uart_console(uport) && uport->cons->cflag) {
tty->termios->c_cflag = uport->cons->cflag;
uport->cons->cflag = 0;
}
/*
* Initialise the hardware port settings.
*/
uart_change_speed(tty, state, NULL);
if (init_hw) {
/*
* Setup the RTS and DTR signals once the
* port is open and ready to respond.
*/
if (tty->termios->c_cflag & CBAUD)
uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
}
if (port->flags & ASYNC_CTS_FLOW) {
spin_lock_irq(&uport->lock);
if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS))
tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
set_bit(ASYNCB_INITIALIZED, &port->flags);
clear_bit(TTY_IO_ERROR, &tty->flags);
}
if (retval && capable(CAP_SYS_ADMIN))
retval = 0;
return retval;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on. Calls to
* uart_shutdown are serialised by the per-port semaphore.
*/
static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
/*
* Set the TTY IO error marker
*/
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) {
/*
* Turn off DTR and RTS early.
*/
if (!tty || (tty->termios->c_cflag & HUPCL))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free
* the irq here so the queue might never be woken up. Note
* that we won't end up waiting on delta_msr_wait again since
* any outstanding file descriptors should be pointing at
* hung_up_tty_fops now.
*/
wake_up_interruptible(&port->delta_msr_wait);
/*
* Free the IRQ and disable the port.
*/
uport->ops->shutdown(uport);
/*
* Ensure that the IRQ handler isn't running on another CPU.
*/
synchronize_irq(uport->irq);
}
/*
* kill off our tasklet
*/
tasklet_kill(&state->tlet);
/*
* Free the transmit buffer page.
*/
if (state->xmit.buf) {
free_page((unsigned long)state->xmit.buf);
state->xmit.buf = NULL;
}
}
/**
* uart_update_timeout - update per-port FIFO timeout.
* @port: uart_port structure describing the port
* @cflag: termios cflag value
* @baud: speed of the port
*
* Set the port FIFO timeout value. The @cflag value should
* reflect the actual hardware settings.
*/
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
unsigned int bits;
/* byte size and parity */
switch (cflag & CSIZE) {
case CS5:
bits = 7;
break;
case CS6:
bits = 8;
break;
case CS7:
bits = 9;
break;
default:
bits = 10;
break; /* CS8 */
}
if (cflag & CSTOPB)
bits++;
if (cflag & PARENB)
bits++;
/*
* The total number of bits to be transmitted in the fifo.
*/
bits = bits * port->fifosize;
/*
* Figure the timeout to send the above number of bits.
* Add .02 seconds of slop
*/
port->timeout = (HZ * bits) / baud + HZ/50;
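/*
 * Worked example (illustrative numbers only): 8 data bits plus parity and
 * an extra stop bit count as 12 bits above; with a 16-byte FIFO at 9600
 * baud this gives HZ * 192 / 9600 + HZ / 50 = HZ / 25, i.e. about 40ms.
 */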
}
EXPORT_SYMBOL(uart_update_timeout);
/**
* uart_get_baud_rate - return baud rate for a particular port
* @port: uart_port structure describing the port in question.
* @termios: desired termios settings.
* @old: old termios (or NULL)
* @min: minimum acceptable baud rate
* @max: maximum acceptable baud rate
*
* Decode the termios structure into a numeric baud rate,
* taking account of the magic 38400 baud rate (with spd_*
* flags), and mapping the %B0 rate to 9600 baud.
*
* If the new baud rate is invalid, try the old termios setting.
* If it's still invalid, we try 9600 baud.
*
* Update the @termios structure to reflect the baud rate
* we're actually going to be using. Don't do this for the case
* where B0 is requested ("hang up").
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
struct ktermios *old, unsigned int min, unsigned int max)
{
unsigned int try, baud, altbaud = 38400;
int hung_up = 0;
upf_t flags = port->flags & UPF_SPD_MASK;
if (flags == UPF_SPD_HI)
altbaud = 57600;
else if (flags == UPF_SPD_VHI)
altbaud = 115200;
else if (flags == UPF_SPD_SHI)
altbaud = 230400;
else if (flags == UPF_SPD_WARP)
altbaud = 460800;
for (try = 0; try < 2; try++) {
baud = tty_termios_baud_rate(termios);
/*
* The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
* Die! Die! Die!
*/
if (baud == 38400)
baud = altbaud;
/*
* Special case: B0 rate.
*/
if (baud == 0) {
hung_up = 1;
baud = 9600;
}
if (baud >= min && baud <= max)
return baud;
/*
* Oops, the quotient was zero. Try again with
* the old baud rate if possible.
*/
termios->c_cflag &= ~CBAUD;
if (old) {
baud = tty_termios_baud_rate(old);
if (!hung_up)
tty_termios_encode_baud_rate(termios,
baud, baud);
old = NULL;
continue;
}
/*
* As a last resort, if the range cannot be met then clip to
* the nearest chip supported rate.
*/
if (!hung_up) {
if (baud <= min)
tty_termios_encode_baud_rate(termios,
min + 1, min + 1);
else
tty_termios_encode_baud_rate(termios,
max - 1, max - 1);
}
}
/* Should never happen */
WARN_ON(1);
return 0;
}
EXPORT_SYMBOL(uart_get_baud_rate);
/**
* uart_get_divisor - return uart clock divisor
* @port: uart_port structure describing the port.
* @baud: desired baud rate
*
* Calculate the uart clock divisor for the port.
*/
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
{
unsigned int quot;
/*
* Old custom speed handling.
*/
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
quot = (port->uartclk + (8 * baud)) / (16 * baud);
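/*
 * e.g. a standard 1.8432MHz UART clock at 9600 baud:
 * (1843200 + 8 * 9600) / (16 * 9600) = 12, the classic 16550 divisor.
 */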
return quot;
}
EXPORT_SYMBOL(uart_get_divisor);
/* FIXME: Consistent locking policy */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios)
{
struct tty_port *port = &state->port;
struct uart_port *uport = state->uart_port;
struct ktermios *termios;
/*
* If we have no tty, termios, or the port does not exist,
* then we can't set the parameters for this port.
*/
if (!tty || !tty->termios || uport->type == PORT_UNKNOWN)
return;
termios = tty->termios;
/*
* Set flags based on termios cflag
*/
if (termios->c_cflag & CRTSCTS)
set_bit(ASYNCB_CTS_FLOW, &port->flags);
else
clear_bit(ASYNCB_CTS_FLOW, &port->flags);
if (termios->c_cflag & CLOCAL)
clear_bit(ASYNCB_CHECK_CD, &port->flags);
else
set_bit(ASYNCB_CHECK_CD, &port->flags);
uport->ops->set_termios(uport, termios, old_termios);
}
static inline int __uart_put_char(struct uart_port *port,
struct circ_buf *circ, unsigned char c)
{
unsigned long flags;
int ret = 0;
if (!circ->buf)
return 0;
spin_lock_irqsave(&port->lock, flags);
if (uart_circ_chars_free(circ) != 0) {
circ->buf[circ->head] = c;
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
ret = 1;
}
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
static int uart_put_char(struct tty_struct *tty, unsigned char ch)
{
struct uart_state *state = tty->driver_data;
return __uart_put_char(state->uart_port, &state->xmit, ch);
}
static void uart_flush_chars(struct tty_struct *tty)
{
uart_start(tty);
}
static int uart_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
struct circ_buf *circ;
unsigned long flags;
int c, ret = 0;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
if (!state) {
WARN_ON(1);
return -EL3HLT;
}
port = state->uart_port;
circ = &state->xmit;
if (!circ->buf)
return 0;
spin_lock_irqsave(&port->lock, flags);
while (1) {
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0)
break;
memcpy(circ->buf + circ->head, buf, c);
circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
buf += c;
count -= c;
ret += c;
}
spin_unlock_irqrestore(&port->lock, flags);
uart_start(tty);
return ret;
}
static int uart_write_room(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
unsigned long flags;
int ret;
spin_lock_irqsave(&state->uart_port->lock, flags);
ret = uart_circ_chars_free(&state->xmit);
spin_unlock_irqrestore(&state->uart_port->lock, flags);
return ret;
}
static int uart_chars_in_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
unsigned long flags;
int ret;
spin_lock_irqsave(&state->uart_port->lock, flags);
ret = uart_circ_chars_pending(&state->xmit);
spin_unlock_irqrestore(&state->uart_port->lock, flags);
return ret;
}
static void uart_flush_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
unsigned long flags;
/*
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
if (!state) {
WARN_ON(1);
return;
}
port = state->uart_port;
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
spin_lock_irqsave(&port->lock, flags);
uart_circ_clear(&state->xmit);
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
spin_unlock_irqrestore(&port->lock, flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void uart_send_xchar(struct tty_struct *tty, char ch)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
unsigned long flags;
if (port->ops->send_xchar)
port->ops->send_xchar(port, ch);
else {
port->x_char = ch;
if (ch) {
spin_lock_irqsave(&port->lock, flags);
port->ops->start_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
}
}
static void uart_throttle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
if (I_IXOFF(tty))
uart_send_xchar(tty, STOP_CHAR(tty));
if (tty->termios->c_cflag & CRTSCTS)
uart_clear_mctrl(state->uart_port, TIOCM_RTS);
}
static void uart_unthrottle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
if (I_IXOFF(tty)) {
if (port->x_char)
port->x_char = 0;
else
uart_send_xchar(tty, START_CHAR(tty));
}
if (tty->termios->c_cflag & CRTSCTS)
uart_set_mctrl(port, TIOCM_RTS);
}
static int uart_get_info(struct uart_state *state,
struct serial_struct __user *retinfo)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
struct serial_struct tmp;
memset(&tmp, 0, sizeof(tmp));
/* Ensure the state we copy is consistent and no hardware changes
occur as we go */
mutex_lock(&port->mutex);
tmp.type = uport->type;
tmp.line = uport->line;
tmp.port = uport->iobase;
if (HIGH_BITS_OFFSET)
tmp.port_high = (long) uport->iobase >> HIGH_BITS_OFFSET;
tmp.irq = uport->irq;
tmp.flags = uport->flags;
tmp.xmit_fifo_size = uport->fifosize;
tmp.baud_base = uport->uartclk / 16;
tmp.close_delay = port->close_delay / 10;
tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
port->closing_wait / 10;
tmp.custom_divisor = uport->custom_divisor;
tmp.hub6 = uport->hub6;
tmp.io_type = uport->iotype;
tmp.iomem_reg_shift = uport->regshift;
tmp.iomem_base = (void *)(unsigned long)uport->mapbase;
mutex_unlock(&port->mutex);
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
}
static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
struct serial_struct __user *newinfo)
{
struct serial_struct new_serial;
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long new_port;
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
int retval = 0;
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
new_port = new_serial.port;
if (HIGH_BITS_OFFSET)
new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
new_serial.irq = irq_canonicalize(new_serial.irq);
close_delay = new_serial.close_delay * 10;
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
/*
* This semaphore protects port->count. It is also
* very useful to prevent opens. Also, take the
* port configuration semaphore to make sure that a
* module insertion/removal doesn't change anything
* under us.
*/
mutex_lock(&port->mutex);
change_irq = !(uport->flags & UPF_FIXED_PORT)
&& new_serial.irq != uport->irq;
/*
* Since changing the 'type' of the port changes its resource
* allocations, we should treat type changes the same as
* IO port changes.
*/
change_port = !(uport->flags & UPF_FIXED_PORT)
&& (new_port != uport->iobase ||
(unsigned long)new_serial.iomem_base != uport->mapbase ||
new_serial.hub6 != uport->hub6 ||
new_serial.io_type != uport->iotype ||
new_serial.iomem_reg_shift != uport->regshift ||
new_serial.type != uport->type);
old_flags = uport->flags;
new_flags = new_serial.flags;
old_custom_divisor = uport->custom_divisor;
if (!capable(CAP_SYS_ADMIN)) {
retval = -EPERM;
if (change_irq || change_port ||
(new_serial.baud_base != uport->uartclk / 16) ||
(close_delay != port->close_delay) ||
(closing_wait != port->closing_wait) ||
(new_serial.xmit_fifo_size &&
new_serial.xmit_fifo_size != uport->fifosize) ||
(((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
goto exit;
uport->flags = ((uport->flags & ~UPF_USR_MASK) |
(new_flags & UPF_USR_MASK));
uport->custom_divisor = new_serial.custom_divisor;
goto check_and_exit;
}
/*
* Ask the low level driver to verify the settings.
*/
if (uport->ops->verify_port)
retval = uport->ops->verify_port(uport, &new_serial);
if ((new_serial.irq >= nr_irqs) || (new_serial.irq < 0) ||
(new_serial.baud_base < 9600))
retval = -EINVAL;
if (retval)
goto exit;
if (change_port || change_irq) {
retval = -EBUSY;
/*
* Make sure that we are the sole user of this port.
*/
if (tty_port_users(port) > 1)
goto exit;
/*
* We need to shutdown the serial port at the old
* port/type/irq combination.
*/
uart_shutdown(tty, state);
}
if (change_port) {
unsigned long old_iobase, old_mapbase;
unsigned int old_type, old_iotype, old_hub6, old_shift;
old_iobase = uport->iobase;
old_mapbase = uport->mapbase;
old_type = uport->type;
old_hub6 = uport->hub6;
old_iotype = uport->iotype;
old_shift = uport->regshift;
/*
* Free and release old regions
*/
if (old_type != PORT_UNKNOWN)
uport->ops->release_port(uport);
uport->iobase = new_port;
uport->type = new_serial.type;
uport->hub6 = new_serial.hub6;
uport->iotype = new_serial.io_type;
uport->regshift = new_serial.iomem_reg_shift;
uport->mapbase = (unsigned long)new_serial.iomem_base;
/*
* Claim and map the new regions
*/
if (uport->type != PORT_UNKNOWN) {
retval = uport->ops->request_port(uport);
} else {
/* Always success - Jean II */
retval = 0;
}
/*
* If we fail to request resources for the
* new port, try to restore the old settings.
*/
if (retval && old_type != PORT_UNKNOWN) {
uport->iobase = old_iobase;
uport->type = old_type;
uport->hub6 = old_hub6;
uport->iotype = old_iotype;
uport->regshift = old_shift;
uport->mapbase = old_mapbase;
retval = uport->ops->request_port(uport);
/*
* If we failed to restore the old settings,
* we fail like this.
*/
if (retval)
uport->type = PORT_UNKNOWN;
/*
* We failed anyway.
*/
retval = -EBUSY;
/* Added to return the correct error -Ram Gupta */
goto exit;
}
}
if (change_irq)
uport->irq = new_serial.irq;
if (!(uport->flags & UPF_FIXED_PORT))
uport->uartclk = new_serial.baud_base * 16;
uport->flags = (uport->flags & ~UPF_CHANGE_MASK) |
(new_flags & UPF_CHANGE_MASK);
uport->custom_divisor = new_serial.custom_divisor;
port->close_delay = close_delay;
port->closing_wait = closing_wait;
if (new_serial.xmit_fifo_size)
uport->fifosize = new_serial.xmit_fifo_size;
if (port->tty)
port->tty->low_latency =
(uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
check_and_exit:
retval = 0;
if (uport->type == PORT_UNKNOWN)
goto exit;
if (port->flags & ASYNC_INITIALIZED) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
/*
* If they're setting up a custom divisor or speed,
* instead of clearing it, then bitch about it. No
* need to rate-limit; it's CAP_SYS_ADMIN only.
*/
if (uport->flags & UPF_SPD_MASK) {
char buf[64];
printk(KERN_NOTICE
"%s sets custom speed on %s. This "
"is deprecated.\n", current->comm,
tty_name(port->tty, buf));
}
uart_change_speed(tty, state, NULL);
}
} else
retval = uart_startup(tty, state, 1);
exit:
mutex_unlock(&port->mutex);
return retval;
}
/**
* uart_get_lsr_info - get line status register info
* @tty: tty associated with the UART
* @state: UART being queried
* @value: returned modem value
*
* Note: uart_ioctl protects us against hangups.
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
{
struct uart_port *uport = state->uart_port;
unsigned int result;
result = uport->ops->tx_empty(uport);
/*
* If we're about to load something into the transmit
* register, we'll pretend the transmitter isn't empty to
* avoid a race condition (depending on when the transmit
* interrupt happens).
*/
if (uport->x_char ||
((uart_circ_chars_pending(&state->xmit) > 0) &&
!tty->stopped && !tty->hw_stopped))
result &= ~TIOCSER_TEMT;
return put_user(result, value);
}
static int uart_tiocmget(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport = state->uart_port;
int result = -EIO;
mutex_lock(&port->mutex);
if (!(tty->flags & (1 << TTY_IO_ERROR))) {
result = uport->mctrl;
spin_lock_irq(&uport->lock);
result |= uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
}
mutex_unlock(&port->mutex);
return result;
}
static int
uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
int ret = -EIO;
mutex_lock(&port->mutex);
if (!(tty->flags & (1 << TTY_IO_ERROR))) {
uart_update_mctrl(uport, set, clear);
ret = 0;
}
mutex_unlock(&port->mutex);
return ret;
}
static int uart_break_ctl(struct tty_struct *tty, int break_state)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
struct uart_port *uport = state->uart_port;
mutex_lock(&port->mutex);
if (uport->type != PORT_UNKNOWN)
uport->ops->break_ctl(uport, break_state);
mutex_unlock(&port->mutex);
return 0;
}
static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
int flags, ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/*
* Take the per-port semaphore. This prevents count from
* changing, and hence any extra opens of the port while
* we're auto-configuring.
*/
if (mutex_lock_interruptible(&port->mutex))
return -ERESTARTSYS;
ret = -EBUSY;
if (tty_port_users(port) == 1) {
uart_shutdown(tty, state);
/*
* If we already have a port type configured,
* we must release its resources.
*/
if (uport->type != PORT_UNKNOWN)
uport->ops->release_port(uport);
flags = UART_CONFIG_TYPE;
if (uport->flags & UPF_AUTO_IRQ)
flags |= UART_CONFIG_IRQ;
/*
* This will claim the ports resources if
* a port is found.
*/
uport->ops->config_port(uport, flags);
ret = uart_startup(tty, state, 1);
}
mutex_unlock(&port->mutex);
return ret;
}
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*
* FIXME: This wants extracting into a common all driver implementation
* of TIOCMWAIT using tty_port.
*/
static int
uart_wait_modem_status(struct uart_state *state, unsigned long arg)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
DECLARE_WAITQUEUE(wait, current);
struct uart_icount cprev, cnow;
int ret;
/*
* note the counters on entry
*/
spin_lock_irq(&uport->lock);
memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
/*
* Force modem status interrupts on
*/
uport->ops->enable_ms(uport);
spin_unlock_irq(&uport->lock);
add_wait_queue(&port->delta_msr_wait, &wait);
for (;;) {
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
ret = 0;
break;
}
schedule();
/* see if a signal did it */
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
cprev = cnow;
}
current->state = TASK_RUNNING;
remove_wait_queue(&port->delta_msr_wait, &wait);
return ret;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: write counters to the user passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int uart_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct uart_state *state = tty->driver_data;
struct uart_icount cnow;
struct uart_port *uport = state->uart_port;
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
/*
* Called via sys_ioctl. We can use spin_lock_irq() here.
*/
static int
uart_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
void __user *uarg = (void __user *)arg;
int ret = -ENOIOCTLCMD;
/*
* These ioctls don't rely on the hardware to be present.
*/
switch (cmd) {
case TIOCGSERIAL:
ret = uart_get_info(state, uarg);
break;
case TIOCSSERIAL:
ret = uart_set_info(tty, state, uarg);
break;
case TIOCSERCONFIG:
ret = uart_do_autoconfig(tty, state);
break;
case TIOCSERGWILD: /* obsolete */
case TIOCSERSWILD: /* obsolete */
ret = 0;
break;
}
if (ret != -ENOIOCTLCMD)
goto out;
if (tty->flags & (1 << TTY_IO_ERROR)) {
ret = -EIO;
goto out;
}
/*
* The following should only be used when hardware is present.
*/
switch (cmd) {
case TIOCMIWAIT:
ret = uart_wait_modem_status(state, arg);
break;
}
if (ret != -ENOIOCTLCMD)
goto out;
mutex_lock(&port->mutex);
if (tty->flags & (1 << TTY_IO_ERROR)) {
ret = -EIO;
goto out_up;
}
/*
* All these rely on hardware being present and need to be
* protected against the tty being hung up.
*/
switch (cmd) {
case TIOCSERGETLSR: /* Get line status register */
ret = uart_get_lsr_info(tty, state, uarg);
break;
default: {
struct uart_port *uport = state->uart_port;
if (uport->ops->ioctl)
ret = uport->ops->ioctl(uport, cmd, arg);
break;
}
}
out_up:
mutex_unlock(&port->mutex);
out:
return ret;
}
static void uart_set_ldisc(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport = state->uart_port;
if (uport->ops->set_ldisc)
uport->ops->set_ldisc(uport, tty->termios->c_line);
}
static void uart_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct uart_state *state = tty->driver_data;
unsigned long flags;
unsigned int cflag = tty->termios->c_cflag;
/*
* These are the bits that are used to setup various
* flags in the low level driver. We can ignore the Bfoo
* bits in c_cflag; c_[io]speed will always be set
* appropriately by set_termios() in tty_ioctl.c
*/
#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
if ((cflag ^ old_termios->c_cflag) == 0 &&
tty->termios->c_ospeed == old_termios->c_ospeed &&
tty->termios->c_ispeed == old_termios->c_ispeed &&
RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) {
return;
}
uart_change_speed(tty, state, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
uart_clear_mctrl(state->uart_port, TIOCM_RTS | TIOCM_DTR);
/* Handle transition away from B0 status */
else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
unsigned int mask = TIOCM_DTR;
if (!(cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags))
mask |= TIOCM_RTS;
uart_set_mctrl(state->uart_port, mask);
}
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
spin_lock_irqsave(&state->uart_port->lock, flags);
tty->hw_stopped = 0;
__uart_start(tty);
spin_unlock_irqrestore(&state->uart_port->lock, flags);
}
/* Handle turning on CRTSCTS */
else if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
spin_lock_irqsave(&state->uart_port->lock, flags);
if (!(state->uart_port->ops->get_mctrl(state->uart_port) & TIOCM_CTS)) {
tty->hw_stopped = 1;
state->uart_port->ops->stop_tx(state->uart_port);
}
spin_unlock_irqrestore(&state->uart_port->lock, flags);
}
}
/*
* In 2.4.5, calls to this will be serialized via the BKL in
* linux/drivers/char/tty_io.c:tty_release()
* linux/drivers/char/tty_io.c:do_tty_hangup()
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port;
struct uart_port *uport;
unsigned long flags;
BUG_ON(!tty_locked());
if (!state)
return;
uport = state->uart_port;
port = &state->port;
pr_debug("uart_close(%d) called\n", uport->line);
mutex_lock(&port->mutex);
spin_lock_irqsave(&port->lock, flags);
if (tty_hung_up_p(filp)) {
spin_unlock_irqrestore(&port->lock, flags);
goto done;
}
if ((tty->count == 1) && (port->count != 1)) {
/*
* Uh, oh. tty->count is 1, which means that the tty
* structure will be freed. port->count should always
* be one in these conditions. If it's greater than
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
"port->count is %d\n", port->count);
port->count = 1;
}
if (--port->count < 0) {
printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
tty->name, port->count);
port->count = 0;
}
if (port->count) {
spin_unlock_irqrestore(&port->lock, flags);
goto done;
}
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters by
* setting tty->closing.
*/
tty->closing = 1;
spin_unlock_irqrestore(&port->lock, flags);
if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
/*
* hack: open-coded tty_wait_until_sent to avoid
* recursive tty_lock
*/
long timeout = msecs_to_jiffies(port->closing_wait);
if (wait_event_interruptible_timeout(tty->write_wait,
!tty_chars_in_buffer(tty), timeout) >= 0)
__uart_wait_until_sent(uport, timeout);
}
/*
* At this point, we stop accepting input. To do this, we
* disable the receive line status interrupts.
*/
if (port->flags & ASYNC_INITIALIZED) {
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
uport->ops->stop_rx(uport);
spin_unlock_irqrestore(&uport->lock, flags);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
__uart_wait_until_sent(uport, uport->timeout);
}
uart_shutdown(tty, state);
uart_flush_buffer(tty);
tty_ldisc_flush(tty);
tty_port_tty_set(port, NULL);
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
msleep_interruptible(port->close_delay);
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
uart_change_pm(state, 3);
spin_lock_irqsave(&port->lock, flags);
}
/*
* Wake up anyone trying to open this port.
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
done:
mutex_unlock(&port->mutex);
}
static void __uart_wait_until_sent(struct uart_port *port, int timeout)
{
unsigned long char_time, expire;
if (port->type == PORT_UNKNOWN || port->fifosize == 0)
return;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = (port->timeout - HZ/50) / port->fifosize;
char_time = char_time / 5;
if (char_time == 0)
char_time = 1;
if (timeout && timeout < char_time)
char_time = timeout;
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than port->timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2*port->timeout.
*/
if (timeout == 0 || timeout > 2 * port->timeout)
timeout = 2 * port->timeout;
expire = jiffies + timeout;
pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n",
port->line, jiffies, expire);
/*
* Check whether the transmitter is empty every 'char_time'.
* 'timeout' / 'expire' give us the maximum amount of time
* we wait.
*/
while (!port->ops->tx_empty(port)) {
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (time_after(jiffies, expire))
break;
}
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
tty_lock();
__uart_wait_until_sent(port, timeout);
tty_unlock();
}
/*
* This is called with the BKL held in
* linux/drivers/char/tty_io.c:do_tty_hangup()
* We're called from the eventd thread, so we can sleep for
* a _short_ time only.
*/
static void uart_hangup(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
unsigned long flags;
BUG_ON(!tty_locked());
pr_debug("uart_hangup(%d)\n", state->uart_port->line);
mutex_lock(&port->mutex);
if (port->flags & ASYNC_NORMAL_ACTIVE) {
uart_flush_buffer(tty);
uart_shutdown(tty, state);
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
tty_port_tty_set(port, NULL);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->delta_msr_wait);
}
mutex_unlock(&port->mutex);
}
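/*
 * tty_port callback: report whether carrier (DCD) is currently asserted.
 * Modem-status interrupts are enabled here so that later carrier changes
 * are seen by the core.
 */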
static int uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
int mctrl;
spin_lock_irq(&uport->lock);
uport->ops->enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
if (mctrl & TIOCM_CAR)
return 1;
return 0;
}
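/* tty_port callback: assert or drop DTR and RTS together. */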
static void uart_dtr_rts(struct tty_port *port, int onoff)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
if (onoff)
uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
else
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
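/*
 * Look up the uart_state for a line and pin it: on success the port
 * mutex is held and port->count has been incremented; otherwise an
 * ERR_PTR() is returned (dead port or interrupted lock).
 */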
static struct uart_state *uart_get(struct uart_driver *drv, int line)
{
struct uart_state *state;
struct tty_port *port;
int ret = 0;
state = drv->state + line;
port = &state->port;
if (mutex_lock_interruptible(&port->mutex)) {
ret = -ERESTARTSYS;
goto err;
}
port->count++;
if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
ret = -ENXIO;
goto err_unlock;
}
return state;
err_unlock:
port->count--;
mutex_unlock(&port->mutex);
err:
return ERR_PTR(ret);
}
/*
* calls to uart_open are serialised by the BKL in
* fs/char_dev.c:chrdev_open()
* Note that if this fails, then uart_close() _will_ be called.
*
* In time, we want to scrap the "opening nonpresent ports"
* behaviour and implement an alternative way for setserial
* to set base addresses/ports/types. This will allow us to
* get rid of a certain amount of extra tests.
*/
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
struct uart_state *state;
struct tty_port *port;
int retval, line = tty->index;
BUG_ON(!tty_locked());
pr_debug("uart_open(%d) called\n", line);
/*
* We take the semaphore inside uart_get to guarantee that we won't
* be re-entered while allocating the state structure, or while we
* request any IRQs that the driver may need. This also has the nice
* side-effect that it delays the action of uart_hangup, so we can
* guarantee that state->port.tty will always contain something
* reasonable.
*/
state = uart_get(drv, line);
if (IS_ERR(state)) {
retval = PTR_ERR(state);
goto fail;
}
port = &state->port;
/*
* Once we set tty->driver_data here, we are guaranteed that
* uart_close() will decrement the driver module use count.
* Any failures from here onwards should not touch the count.
*/
tty->driver_data = state;
state->uart_port->state = state;
tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
tty->alt_speed = 0;
tty_port_tty_set(port, tty);
/*
* If the port is in the middle of closing, bail out now.
*/
if (tty_hung_up_p(filp)) {
retval = -EAGAIN;
port->count--;
mutex_unlock(&port->mutex);
goto fail;
}
/*
* Make sure the device is in D0 state.
*/
if (port->count == 1)
uart_change_pm(state, 0);
/*
* Start up the serial port.
*/
retval = uart_startup(tty, state, 0);
/*
* If we succeeded, wait until the port is ready.
*/
mutex_unlock(&port->mutex);
if (retval == 0)
retval = tty_port_block_til_ready(port, tty, filp);
fail:
return retval;
}
static const char *uart_type(struct uart_port *port)
{
const char *str = NULL;
if (port->ops->type)
str = port->ops->type(port);
if (!str)
str = "unknown";
return str;
}
#ifdef CONFIG_PROC_FS
static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
{
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
int pm_state;
struct uart_port *uport = state->uart_port;
char stat_buf[32];
unsigned int status;
int mmio;
if (!uport)
return;
mmio = uport->iotype >= UPIO_MEM;
seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
uport->line, uart_type(uport),
mmio ? "mmio:0x" : "port:",
mmio ? (unsigned long long)uport->mapbase
: (unsigned long long)uport->iobase,
uport->irq);
if (uport->type == PORT_UNKNOWN) {
seq_putc(m, '\n');
return;
}
if (capable(CAP_SYS_ADMIN)) {
mutex_lock(&port->mutex);
pm_state = state->pm_state;
if (pm_state)
uart_change_pm(state, 0);
spin_lock_irq(&uport->lock);
status = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
if (pm_state)
uart_change_pm(state, pm_state);
mutex_unlock(&port->mutex);
seq_printf(m, " tx:%d rx:%d",
uport->icount.tx, uport->icount.rx);
if (uport->icount.frame)
seq_printf(m, " fe:%d",
uport->icount.frame);
if (uport->icount.parity)
seq_printf(m, " pe:%d",
uport->icount.parity);
if (uport->icount.brk)
seq_printf(m, " brk:%d",
uport->icount.brk);
if (uport->icount.overrun)
seq_printf(m, " oe:%d",
uport->icount.overrun);
#define INFOBIT(bit, str) \
if (uport->mctrl & (bit)) \
strncat(stat_buf, (str), sizeof(stat_buf) - \
strlen(stat_buf) - 2)
#define STATBIT(bit, str) \
if (status & (bit)) \
strncat(stat_buf, (str), sizeof(stat_buf) - \
strlen(stat_buf) - 2)
stat_buf[0] = '\0';
stat_buf[1] = '\0';
INFOBIT(TIOCM_RTS, "|RTS");
STATBIT(TIOCM_CTS, "|CTS");
INFOBIT(TIOCM_DTR, "|DTR");
STATBIT(TIOCM_DSR, "|DSR");
STATBIT(TIOCM_CAR, "|CD");
STATBIT(TIOCM_RNG, "|RI");
if (stat_buf[0])
stat_buf[0] = ' ';
seq_puts(m, stat_buf);
}
seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
}
static int uart_proc_show(struct seq_file *m, void *v)
{
struct tty_driver *ttydrv = m->private;
struct uart_driver *drv = ttydrv->driver_state;
int i;
seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
"", "", "");
for (i = 0; i < drv->nr; i++)
uart_line_info(m, drv, i);
return 0;
}
static int uart_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, uart_proc_show, PDE(inode)->data);
}
static const struct file_operations uart_proc_fops = {
.owner = THIS_MODULE,
.open = uart_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* uart_console_write - write a console message to a serial port
* @port: the port to write the message
* @s: array of characters
* @count: number of characters in string to write
 * @putchar: function to write a character to the port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
void (*putchar)(struct uart_port *, int))
{
unsigned int i;
for (i = 0; i < count; i++, s++) {
if (*s == '\n')
putchar(port, '\r');
putchar(port, *s);
}
}
EXPORT_SYMBOL_GPL(uart_console_write);
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
* console support.
*/
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
{
int idx = co->index;
if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 &&
ports[idx].membase == NULL))
for (idx = 0; idx < nr; idx++)
if (ports[idx].iobase != 0 ||
ports[idx].membase != NULL)
break;
co->index = idx;
return ports + idx;
}
/**
 * uart_parse_options - Parse serial port baud/parity/bits/flow control.
* @options: pointer to option string
* @baud: pointer to an 'int' variable for the baud rate.
* @parity: pointer to an 'int' variable for the parity.
* @bits: pointer to an 'int' variable for the number of data bits.
* @flow: pointer to an 'int' variable for the flow control character.
*
* uart_parse_options decodes a string containing the serial console
* options. The format of the string is <baud><parity><bits><flow>,
* eg: 115200n8r
*/
void
uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
{
char *s = options;
*baud = simple_strtoul(s, NULL, 10);
while (*s >= '0' && *s <= '9')
s++;
if (*s)
*parity = *s++;
if (*s)
*bits = *s++ - '0';
if (*s)
*flow = *s;
}
EXPORT_SYMBOL_GPL(uart_parse_options);
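/*
 * Illustrative only: an option string of "115200n8r" parsed with
 * uart_parse_options() yields baud=115200, parity='n', bits=8 and
 * flow='r' (RTS/CTS handshaking).
 */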
struct baud_rates {
unsigned int rate;
unsigned int cflag;
};
static const struct baud_rates baud_rates[] = {
{ 921600, B921600 },
{ 460800, B460800 },
{ 230400, B230400 },
{ 115200, B115200 },
{ 57600, B57600 },
{ 38400, B38400 },
{ 19200, B19200 },
{ 9600, B9600 },
{ 4800, B4800 },
{ 2400, B2400 },
{ 1200, B1200 },
{ 0, B38400 }
};
/**
* uart_set_options - setup the serial console parameters
* @port: pointer to the serial ports uart_port structure
* @co: console pointer
* @baud: baud rate
* @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
* @bits: number of data bits
* @flow: flow control character - 'r' (rts)
*/
int
uart_set_options(struct uart_port *port, struct console *co,
int baud, int parity, int bits, int flow)
{
struct ktermios termios;
static struct ktermios dummy;
int i;
/*
* Ensure that the serial console lock is initialised
* early.
*/
spin_lock_init(&port->lock);
lockdep_set_class(&port->lock, &port_lock_key);
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag = CREAD | HUPCL | CLOCAL;
/*
* Construct a cflag setting.
*/
for (i = 0; baud_rates[i].rate; i++)
if (baud_rates[i].rate <= baud)
break;
termios.c_cflag |= baud_rates[i].cflag;
if (bits == 7)
termios.c_cflag |= CS7;
else
termios.c_cflag |= CS8;
switch (parity) {
case 'o': case 'O':
termios.c_cflag |= PARODD;
/*fall through*/
case 'e': case 'E':
termios.c_cflag |= PARENB;
break;
}
if (flow == 'r')
termios.c_cflag |= CRTSCTS;
	/*
	 * Some UARTs on the other side of the link can't cope with the
	 * absence of flow control, so assert DTR on the host UART to
	 * keep them happy.
	 */
port->mctrl |= TIOCM_DTR;
port->ops->set_termios(port, &termios, &dummy);
/*
* Allow the setting of the UART parameters with a NULL console
* too:
*/
if (co)
co->cflag = termios.c_cflag;
return 0;
}
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE */
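/*
 * Move the port to a new power state via the driver's pm() hook,
 * skipping the call when the port is already in that state.
 */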
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->uart_port;
if (state->pm_state != pm_state) {
if (port->ops->pm)
port->ops->pm(port, pm_state, state->pm_state);
state->pm_state = pm_state;
}
}
struct uart_match {
struct uart_port *port;
struct uart_driver *driver;
};
static int serial_match_port(struct device *dev, void *data)
{
struct uart_match *match = data;
struct tty_driver *tty_drv = match->driver->tty_driver;
dev_t devt = MKDEV(tty_drv->major, tty_drv->minor_start) +
match->port->line;
return dev->devt == devt; /* Actually, only one tty per port */
}
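/*
 * Suspend a port.  If the matching tty device is wakeup-enabled, the IRQ
 * is armed as a wake source and the hardware is left running; otherwise
 * the transmitter is drained, the port shut down and powered down.
 */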
int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (device_may_wakeup(tty_dev)) {
if (!enable_irq_wake(uport->irq))
uport->irq_wake = 1;
put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
if (console_suspend_enabled || !uart_console(uport))
uport->suspended = 1;
if (port->flags & ASYNC_INITIALIZED) {
const struct uart_ops *ops = uport->ops;
int tries;
if (console_suspend_enabled || !uart_console(uport)) {
set_bit(ASYNCB_SUSPENDED, &port->flags);
clear_bit(ASYNCB_INITIALIZED, &port->flags);
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
ops->set_mctrl(uport, 0);
ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
}
/*
* Wait for the transmitter to empty.
*/
for (tries = 3; !ops->tx_empty(uport) && tries; tries--)
msleep(10);
if (!tries)
printk(KERN_ERR "%s%s%s%d: Unable to drain "
"transmitter\n",
uport->dev ? dev_name(uport->dev) : "",
uport->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + uport->line);
if (console_suspend_enabled || !uart_console(uport))
ops->shutdown(uport);
}
/*
* Disable the console device before suspending.
*/
if (console_suspend_enabled && uart_console(uport))
console_stop(uport->cons);
if (console_suspend_enabled || !uart_console(uport))
uart_change_pm(state, 3);
mutex_unlock(&port->mutex);
return 0;
}
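/*
 * Resume a port.  A port left running for wakeup only needs its wake IRQ
 * disarmed; a fully suspended port has its termios restored (console
 * cflag first, falling back to the tty's), is powered up and restarted.
 */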
int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
struct ktermios termios;
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
if (uport->irq_wake) {
disable_irq_wake(uport->irq);
uport->irq_wake = 0;
}
put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
uport->suspended = 0;
/*
* Re-enable the console device after suspending.
*/
if (uart_console(uport)) {
/*
* First try to use the console cflag setting.
*/
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag = uport->cons->cflag;
/*
* If that's unset, use the tty termios setting.
*/
if (port->tty && port->tty->termios && termios.c_cflag == 0)
termios = *(port->tty->termios);
		/*
		 * Bring the port back to full power first, since the UART
		 * clock rate needs to be set back to 7.3 MHz before the
		 * termios settings can be reprogrammed.
		 */
if (console_suspend_enabled)
uart_change_pm(state, 0);
uport->ops->set_termios(uport, &termios, NULL);
if (console_suspend_enabled)
console_start(uport->cons);
}
if (port->flags & ASYNC_SUSPENDED) {
const struct uart_ops *ops = uport->ops;
int ret;
uart_change_pm(state, 0);
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
struct tty_struct *tty = port->tty;
ret = ops->startup(uport);
if (ret == 0) {
if (tty)
uart_change_speed(tty, state, NULL);
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, uport->mctrl);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
set_bit(ASYNCB_INITIALIZED, &port->flags);
} else {
/*
* Failed to resume - maybe hardware went away?
* Clear the "initialized" flag so we won't try
* to call the low level drivers shutdown method.
*/
uart_shutdown(tty, state);
}
}
clear_bit(ASYNCB_SUSPENDED, &port->flags);
}
mutex_unlock(&port->mutex);
return 0;
}
static inline void
uart_report_port(struct uart_driver *drv, struct uart_port *port)
{
char address[64];
switch (port->iotype) {
case UPIO_PORT:
snprintf(address, sizeof(address), "I/O 0x%lx", port->iobase);
break;
case UPIO_HUB6:
snprintf(address, sizeof(address),
"I/O 0x%lx offset 0x%x", port->iobase, port->hub6);
break;
case UPIO_MEM:
case UPIO_MEM32:
case UPIO_AU:
case UPIO_TSI:
case UPIO_DWAPB:
case UPIO_DWAPB32:
snprintf(address, sizeof(address),
"MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
default:
strlcpy(address, "*unknown*", sizeof(address));
break;
}
printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n",
port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
address, port->irq, uart_type(port));
}
static void
uart_configure_port(struct uart_driver *drv, struct uart_state *state,
struct uart_port *port)
{
unsigned int flags;
/*
* If there isn't a port here, don't do anything further.
*/
if (!port->iobase && !port->mapbase && !port->membase)
return;
/*
* Now do the auto configuration stuff. Note that config_port
* is expected to claim the resources and map the port for us.
*/
flags = 0;
if (port->flags & UPF_AUTO_IRQ)
flags |= UART_CONFIG_IRQ;
if (port->flags & UPF_BOOT_AUTOCONF) {
if (!(port->flags & UPF_FIXED_TYPE)) {
port->type = PORT_UNKNOWN;
flags |= UART_CONFIG_TYPE;
}
port->ops->config_port(port, flags);
}
if (port->type != PORT_UNKNOWN) {
unsigned long flags;
uart_report_port(drv, port);
/* Power up port for set_mctrl() */
uart_change_pm(state, 0);
		/*
		 * Ensure that the modem control lines are de-activated,
		 * but keep any DTR setting made in uart_set_options().
		 * We probably don't need a spinlock around this, but be
		 * safe.
		 */
spin_lock_irqsave(&port->lock, flags);
port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
spin_unlock_irqrestore(&port->lock, flags);
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
* It may be that the port was not available.
*/
if (port->cons && !(port->cons->flags & CON_ENABLED))
register_console(port->cons);
/*
* Power down all ports by default, except the
* console if we have one.
*/
if (!uart_console(port))
uart_change_pm(state, 3);
}
}
#ifdef CONFIG_CONSOLE_POLL
static int uart_poll_init(struct tty_driver *driver, int line, char *options)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (!state || !state->uart_port)
return -1;
port = state->uart_port;
if (!(port->ops->poll_get_char && port->ops->poll_put_char))
return -1;
if (options) {
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(port, NULL, baud, parity, bits, flow);
}
return 0;
}
static int uart_poll_get_char(struct tty_driver *driver, int line)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
if (!state || !state->uart_port)
return -1;
port = state->uart_port;
return port->ops->poll_get_char(port);
}
static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
if (!state || !state->uart_port)
return;
port = state->uart_port;
port->ops->poll_put_char(port, ch);
}
#endif
static const struct tty_operations uart_ops = {
.open = uart_open,
.close = uart_close,
.write = uart_write,
.put_char = uart_put_char,
.flush_chars = uart_flush_chars,
.write_room = uart_write_room,
.chars_in_buffer= uart_chars_in_buffer,
.flush_buffer = uart_flush_buffer,
.ioctl = uart_ioctl,
.throttle = uart_throttle,
.unthrottle = uart_unthrottle,
.send_xchar = uart_send_xchar,
.set_termios = uart_set_termios,
.set_ldisc = uart_set_ldisc,
.stop = uart_stop,
.start = uart_start,
.hangup = uart_hangup,
.break_ctl = uart_break_ctl,
.wait_until_sent= uart_wait_until_sent,
#ifdef CONFIG_PROC_FS
.proc_fops = &uart_proc_fops,
#endif
.tiocmget = uart_tiocmget,
.tiocmset = uart_tiocmset,
.get_icount = uart_get_icount,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = uart_poll_init,
.poll_get_char = uart_poll_get_char,
.poll_put_char = uart_poll_put_char,
#endif
};
static const struct tty_port_operations uart_port_ops = {
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
};
/**
* uart_register_driver - register a driver with the uart core layer
* @drv: low level driver structure
*
* Register a uart driver with the core driver. We in turn register
* with the tty layer, and initialise the core driver per-port state.
*
* We have a proc file in /proc/tty/driver which is named after the
* normal driver.
*
* drv->port should be NULL, and the per-port structures should be
* registered using uart_add_one_port after this call has succeeded.
*/
int uart_register_driver(struct uart_driver *drv)
{
struct tty_driver *normal;
int i, retval;
BUG_ON(drv->state);
/*
* Maybe we should be using a slab cache for this, especially if
* we have a large number of ports to handle.
*/
drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
if (!drv->state)
goto out;
normal = alloc_tty_driver(drv->nr);
if (!normal)
goto out_kfree;
drv->tty_driver = normal;
normal->owner = drv->owner;
normal->driver_name = drv->driver_name;
normal->name = drv->dev_name;
normal->major = drv->major;
normal->minor_start = drv->minor;
normal->type = TTY_DRIVER_TYPE_SERIAL;
normal->subtype = SERIAL_TYPE_NORMAL;
normal->init_termios = tty_std_termios;
normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
normal->driver_state = drv;
tty_set_operations(normal, &uart_ops);
/*
* Initialise the UART state(s).
*/
for (i = 0; i < drv->nr; i++) {
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
tty_port_init(port);
port->ops = &uart_port_ops;
port->close_delay = 500; /* .5 seconds */
port->closing_wait = 30000; /* 30 seconds */
tasklet_init(&state->tlet, uart_tasklet_action,
(unsigned long)state);
}
retval = tty_register_driver(normal);
if (retval >= 0)
return retval;
put_tty_driver(normal);
out_kfree:
kfree(drv->state);
out:
return -ENOMEM;
}
/**
* uart_unregister_driver - remove a driver from the uart core layer
* @drv: low level driver structure
*
* Remove all references to a driver from the core driver. The low
 * level driver must have removed all its ports via
 * uart_remove_one_port() if it registered them with uart_add_one_port().
* (ie, drv->port == NULL)
*/
void uart_unregister_driver(struct uart_driver *drv)
{
struct tty_driver *p = drv->tty_driver;
tty_unregister_driver(p);
put_tty_driver(p);
kfree(drv->state);
drv->state = NULL;
drv->tty_driver = NULL;
}
struct tty_driver *uart_console_device(struct console *co, int *index)
{
struct uart_driver *p = co->data;
*index = co->index;
return p->tty_driver;
}
/**
* uart_add_one_port - attach a driver-defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure to use for this port.
*
* This allows the driver to register its own uart_port structure
* with the core driver. The main purpose is to allow the low
* level uart drivers to expand uart_port, rather than having yet
* more levels of structures.
*/
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state;
struct tty_port *port;
int ret = 0;
struct device *tty_dev;
BUG_ON(in_interrupt());
if (uport->line >= drv->nr)
return -EINVAL;
state = drv->state + uport->line;
port = &state->port;
mutex_lock(&port_mutex);
mutex_lock(&port->mutex);
if (state->uart_port) {
ret = -EINVAL;
goto out;
}
state->uart_port = uport;
state->pm_state = -1;
uport->cons = drv->cons;
uport->state = state;
/*
* If this port is a console, then the spinlock is already
* initialised.
*/
if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) {
spin_lock_init(&uport->lock);
lockdep_set_class(&uport->lock, &port_lock_key);
}
uart_configure_port(drv, state, uport);
/*
* Register the port whether it's detected or not. This allows
 * setserial to be used to alter this port's parameters.
*/
tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev);
if (likely(!IS_ERR(tty_dev))) {
device_init_wakeup(tty_dev, 1);
device_set_wakeup_enable(tty_dev, 0);
} else
printk(KERN_ERR "Cannot register tty device on line %d\n",
uport->line);
/*
* Ensure UPF_DEAD is not set.
*/
uport->flags &= ~UPF_DEAD;
out:
mutex_unlock(&port->mutex);
mutex_unlock(&port_mutex);
return ret;
}
/**
* uart_remove_one_port - detach a driver defined port structure
* @drv: pointer to the uart low level driver structure for this port
* @uport: uart port structure for this port
*
* This unhooks (and hangs up) the specified port structure from the
* core driver. No further calls will be made to the low-level code
* for this port.
*/
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
BUG_ON(in_interrupt());
if (state->uart_port != uport)
printk(KERN_ALERT "Removing wrong port: %p != %p\n",
state->uart_port, uport);
mutex_lock(&port_mutex);
/*
* Mark the port "dead" - this prevents any opens from
* succeeding while we shut down the port.
*/
mutex_lock(&port->mutex);
uport->flags |= UPF_DEAD;
mutex_unlock(&port->mutex);
/*
* Remove the devices from the tty layer
*/
tty_unregister_device(drv->tty_driver, uport->line);
if (port->tty)
tty_vhangup(port->tty);
/*
* Free the port IO and memory resources, if any.
*/
if (uport->type != PORT_UNKNOWN)
uport->ops->release_port(uport);
/*
* Indicate that there isn't a port here anymore.
*/
uport->type = PORT_UNKNOWN;
/*
* Kill the tasklet, and free resources.
*/
tasklet_kill(&state->tlet);
state->uart_port = NULL;
mutex_unlock(&port_mutex);
return 0;
}
/*
* Are the two ports equivalent?
*/
int uart_match_port(struct uart_port *port1, struct uart_port *port2)
{
if (port1->iotype != port2->iotype)
return 0;
switch (port1->iotype) {
case UPIO_PORT:
return (port1->iobase == port2->iobase);
case UPIO_HUB6:
return (port1->iobase == port2->iobase) &&
(port1->hub6 == port2->hub6);
case UPIO_MEM:
case UPIO_MEM32:
case UPIO_AU:
case UPIO_TSI:
case UPIO_DWAPB:
case UPIO_DWAPB32:
return (port1->mapbase == port2->mapbase);
}
return 0;
}
EXPORT_SYMBOL(uart_match_port);
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
EXPORT_SYMBOL(uart_suspend_port);
EXPORT_SYMBOL(uart_resume_port);
EXPORT_SYMBOL(uart_add_one_port);
EXPORT_SYMBOL(uart_remove_one_port);
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
| thewisenerd/android_kernel_htc_pico | drivers/tty/serial/serial_core.c | C | gpl-2.0 | 63,333 |
svn export --force http://svn.red-bean.com/bob/macholib/trunk/macholib/ .
| apierz/fipp | source/build/exe.macosx-10.12-x86_64-3.6/lib/python3.6/ctypes/macholib/fetch_macholib.bat | Batchfile | gpl-2.0 | 75 |
// Copyright Eric Friedman 2002-2003
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id$
// $Date$
// $Revision$
#include <boost/mpl/max_element.hpp>
#include <boost/mpl/list_c.hpp>
#include <boost/mpl/aux_/test.hpp>
MPL_TEST_CASE()
{
typedef list_c<int,3,4,2,0,-5,8,-1,7>::type numbers;
typedef max_element< numbers >::type iter;
typedef deref<iter>::type max_value;
MPL_ASSERT_RELATION( max_value::value, ==, 8 );
}
| gwq5210/litlib | thirdparty/sources/boost_1_60_0/libs/mpl/test/max_element.cpp | C++ | gpl-3.0 | 620 |
/*
* QEMU Floppy disk emulator (Intel 82078)
*
* Copyright (c) 2003, 2007 Jocelyn Mayer
* Copyright (c) 2008 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/*
* The controller is used in Sun4m systems in a slightly different
* way. There are changes in DOR register and DMA is not available.
*/
#include "hw.h"
#include "fdc.h"
#include "qemu-error.h"
#include "qemu-timer.h"
#include "isa.h"
#include "sysbus.h"
#include "qdev-addr.h"
#include "blockdev.h"
#include "sysemu.h"
/********************************************************/
/* debug Floppy devices */
//#define DEBUG_FLOPPY
#ifdef DEBUG_FLOPPY
#define FLOPPY_DPRINTF(fmt, ...) \
do { printf("FLOPPY: " fmt , ## __VA_ARGS__); } while (0)
#else
#define FLOPPY_DPRINTF(fmt, ...)
#endif
#define FLOPPY_ERROR(fmt, ...) \
do { printf("FLOPPY ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
/********************************************************/
/* Floppy drive emulation */
#define GET_CUR_DRV(fdctrl) ((fdctrl)->cur_drv)
#define SET_CUR_DRV(fdctrl, drive) ((fdctrl)->cur_drv = (drive))
/* Will always be a fixed parameter for us */
#define FD_SECTOR_LEN 512
#define FD_SECTOR_SC 2 /* Sector size code */
#define FD_RESET_SENSEI_COUNT 4 /* Number of sense interrupts on RESET */
/* Floppy disk drive emulation */
typedef enum FDiskFlags {
FDISK_DBL_SIDES = 0x01,
} FDiskFlags;
typedef struct FDrive {
BlockDriverState *bs;
/* Drive status */
FDriveType drive;
uint8_t perpendicular; /* 2.88 MB access mode */
/* Position */
uint8_t head;
uint8_t track;
uint8_t sect;
/* Media */
FDiskFlags flags;
uint8_t last_sect; /* Nb sector per track */
uint8_t max_track; /* Nb of tracks */
uint16_t bps; /* Bytes per sector */
uint8_t ro; /* Is read-only */
uint8_t media_changed; /* Is media changed */
} FDrive;
static void fd_init(FDrive *drv)
{
/* Drive */
drv->drive = FDRIVE_DRV_NONE;
drv->perpendicular = 0;
/* Disk */
drv->last_sect = 0;
drv->max_track = 0;
}
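/* Convert a CHS position (head, track, sector) into a linear sector number. */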
static int fd_sector_calc(uint8_t head, uint8_t track, uint8_t sect,
uint8_t last_sect)
{
return (((track * 2) + head) * last_sect) + sect - 1;
}
/* Returns current position, in sectors, for given drive */
static int fd_sector(FDrive *drv)
{
return fd_sector_calc(drv->head, drv->track, drv->sect, drv->last_sect);
}
/* Seek to a new position:
* returns 0 if already on right track
* returns 1 if track changed
* returns 2 if track is invalid
* returns 3 if sector is invalid
* returns 4 if seek is disabled
*/
static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect,
int enable_seek)
{
uint32_t sector;
int ret;
if (track > drv->max_track ||
(head != 0 && (drv->flags & FDISK_DBL_SIDES) == 0)) {
FLOPPY_DPRINTF("try to read %d %02x %02x (max=%d %d %02x %02x)\n",
head, track, sect, 1,
(drv->flags & FDISK_DBL_SIDES) == 0 ? 0 : 1,
drv->max_track, drv->last_sect);
return 2;
}
if (sect > drv->last_sect) {
FLOPPY_DPRINTF("try to read %d %02x %02x (max=%d %d %02x %02x)\n",
head, track, sect, 1,
(drv->flags & FDISK_DBL_SIDES) == 0 ? 0 : 1,
drv->max_track, drv->last_sect);
return 3;
}
sector = fd_sector_calc(head, track, sect, drv->last_sect);
ret = 0;
if (sector != fd_sector(drv)) {
#if 0
if (!enable_seek) {
FLOPPY_ERROR("no implicit seek %d %02x %02x (max=%d %02x %02x)\n",
head, track, sect, 1, drv->max_track, drv->last_sect);
return 4;
}
#endif
drv->head = head;
if (drv->track != track)
ret = 1;
drv->track = track;
drv->sect = sect;
}
return ret;
}
/* Set drive back to track 0 */
static void fd_recalibrate(FDrive *drv)
{
FLOPPY_DPRINTF("recalibrate\n");
drv->head = 0;
drv->track = 0;
drv->sect = 1;
}
/* Revalidate a disk drive after a disk change */
static void fd_revalidate(FDrive *drv)
{
int nb_heads, max_track, last_sect, ro;
FDriveType drive;
FLOPPY_DPRINTF("revalidate\n");
if (drv->bs != NULL && bdrv_is_inserted(drv->bs)) {
ro = bdrv_is_read_only(drv->bs);
bdrv_get_floppy_geometry_hint(drv->bs, &nb_heads, &max_track,
&last_sect, drv->drive, &drive);
if (nb_heads != 0 && max_track != 0 && last_sect != 0) {
FLOPPY_DPRINTF("User defined disk (%d %d %d)",
nb_heads - 1, max_track, last_sect);
} else {
FLOPPY_DPRINTF("Floppy disk (%d h %d t %d s) %s\n", nb_heads,
max_track, last_sect, ro ? "ro" : "rw");
}
if (nb_heads == 1) {
drv->flags &= ~FDISK_DBL_SIDES;
} else {
drv->flags |= FDISK_DBL_SIDES;
}
drv->max_track = max_track;
drv->last_sect = last_sect;
drv->ro = ro;
drv->drive = drive;
} else {
FLOPPY_DPRINTF("No disk in drive\n");
drv->last_sect = 0;
drv->max_track = 0;
drv->flags &= ~FDISK_DBL_SIDES;
}
}
/********************************************************/
/* Intel 82078 floppy disk controller emulation */
typedef struct FDCtrl FDCtrl;
static void fdctrl_reset(FDCtrl *fdctrl, int do_irq);
static void fdctrl_reset_fifo(FDCtrl *fdctrl);
static int fdctrl_transfer_handler (void *opaque, int nchan,
int dma_pos, int dma_len);
static void fdctrl_raise_irq(FDCtrl *fdctrl, uint8_t status0);
static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl);
static uint32_t fdctrl_read_statusB(FDCtrl *fdctrl);
static uint32_t fdctrl_read_dor(FDCtrl *fdctrl);
static void fdctrl_write_dor(FDCtrl *fdctrl, uint32_t value);
static uint32_t fdctrl_read_tape(FDCtrl *fdctrl);
static void fdctrl_write_tape(FDCtrl *fdctrl, uint32_t value);
static uint32_t fdctrl_read_main_status(FDCtrl *fdctrl);
static void fdctrl_write_rate(FDCtrl *fdctrl, uint32_t value);
static uint32_t fdctrl_read_data(FDCtrl *fdctrl);
static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value);
static uint32_t fdctrl_read_dir(FDCtrl *fdctrl);
enum {
FD_DIR_WRITE = 0,
FD_DIR_READ = 1,
FD_DIR_SCANE = 2,
FD_DIR_SCANL = 3,
FD_DIR_SCANH = 4,
};
enum {
FD_STATE_MULTI = 0x01, /* multi track flag */
FD_STATE_FORMAT = 0x02, /* format flag */
FD_STATE_SEEK = 0x04, /* seek flag */
};
enum {
FD_REG_SRA = 0x00,
FD_REG_SRB = 0x01,
FD_REG_DOR = 0x02,
FD_REG_TDR = 0x03,
FD_REG_MSR = 0x04,
FD_REG_DSR = 0x04,
FD_REG_FIFO = 0x05,
FD_REG_DIR = 0x07,
};
enum {
FD_CMD_READ_TRACK = 0x02,
FD_CMD_SPECIFY = 0x03,
FD_CMD_SENSE_DRIVE_STATUS = 0x04,
FD_CMD_WRITE = 0x05,
FD_CMD_READ = 0x06,
FD_CMD_RECALIBRATE = 0x07,
FD_CMD_SENSE_INTERRUPT_STATUS = 0x08,
FD_CMD_WRITE_DELETED = 0x09,
FD_CMD_READ_ID = 0x0a,
FD_CMD_READ_DELETED = 0x0c,
FD_CMD_FORMAT_TRACK = 0x0d,
FD_CMD_DUMPREG = 0x0e,
FD_CMD_SEEK = 0x0f,
FD_CMD_VERSION = 0x10,
FD_CMD_SCAN_EQUAL = 0x11,
FD_CMD_PERPENDICULAR_MODE = 0x12,
FD_CMD_CONFIGURE = 0x13,
FD_CMD_LOCK = 0x14,
FD_CMD_VERIFY = 0x16,
FD_CMD_POWERDOWN_MODE = 0x17,
FD_CMD_PART_ID = 0x18,
FD_CMD_SCAN_LOW_OR_EQUAL = 0x19,
FD_CMD_SCAN_HIGH_OR_EQUAL = 0x1d,
FD_CMD_SAVE = 0x2e,
FD_CMD_OPTION = 0x33,
FD_CMD_RESTORE = 0x4e,
FD_CMD_DRIVE_SPECIFICATION_COMMAND = 0x8e,
FD_CMD_RELATIVE_SEEK_OUT = 0x8f,
FD_CMD_FORMAT_AND_WRITE = 0xcd,
FD_CMD_RELATIVE_SEEK_IN = 0xcf,
};
enum {
FD_CONFIG_PRETRK = 0xff, /* Pre-compensation set to track 0 */
FD_CONFIG_FIFOTHR = 0x0f, /* FIFO threshold set to 1 byte */
FD_CONFIG_POLL = 0x10, /* Poll enabled */
FD_CONFIG_EFIFO = 0x20, /* FIFO disabled */
FD_CONFIG_EIS = 0x40, /* No implied seeks */
};
enum {
FD_SR0_EQPMT = 0x10,
FD_SR0_SEEK = 0x20,
FD_SR0_ABNTERM = 0x40,
FD_SR0_INVCMD = 0x80,
FD_SR0_RDYCHG = 0xc0,
};
enum {
FD_SR1_EC = 0x80, /* End of cylinder */
};
enum {
FD_SR2_SNS = 0x04, /* Scan not satisfied */
FD_SR2_SEH = 0x08, /* Scan equal hit */
};
enum {
FD_SRA_DIR = 0x01,
FD_SRA_nWP = 0x02,
FD_SRA_nINDX = 0x04,
FD_SRA_HDSEL = 0x08,
FD_SRA_nTRK0 = 0x10,
FD_SRA_STEP = 0x20,
FD_SRA_nDRV2 = 0x40,
FD_SRA_INTPEND = 0x80,
};
enum {
FD_SRB_MTR0 = 0x01,
FD_SRB_MTR1 = 0x02,
FD_SRB_WGATE = 0x04,
FD_SRB_RDATA = 0x08,
FD_SRB_WDATA = 0x10,
FD_SRB_DR0 = 0x20,
};
enum {
#if MAX_FD == 4
FD_DOR_SELMASK = 0x03,
#else
FD_DOR_SELMASK = 0x01,
#endif
FD_DOR_nRESET = 0x04,
FD_DOR_DMAEN = 0x08,
FD_DOR_MOTEN0 = 0x10,
FD_DOR_MOTEN1 = 0x20,
FD_DOR_MOTEN2 = 0x40,
FD_DOR_MOTEN3 = 0x80,
};
enum {
#if MAX_FD == 4
FD_TDR_BOOTSEL = 0x0c,
#else
FD_TDR_BOOTSEL = 0x04,
#endif
};
enum {
FD_DSR_DRATEMASK= 0x03,
FD_DSR_PWRDOWN = 0x40,
FD_DSR_SWRESET = 0x80,
};
enum {
FD_MSR_DRV0BUSY = 0x01,
FD_MSR_DRV1BUSY = 0x02,
FD_MSR_DRV2BUSY = 0x04,
FD_MSR_DRV3BUSY = 0x08,
FD_MSR_CMDBUSY = 0x10,
FD_MSR_NONDMA = 0x20,
FD_MSR_DIO = 0x40,
FD_MSR_RQM = 0x80,
};
enum {
FD_DIR_DSKCHG = 0x80,
};
#define FD_MULTI_TRACK(state) ((state) & FD_STATE_MULTI)
#define FD_DID_SEEK(state) ((state) & FD_STATE_SEEK)
#define FD_FORMAT_CMD(state) ((state) & FD_STATE_FORMAT)
struct FDCtrl {
qemu_irq irq;
/* Controller state */
QEMUTimer *result_timer;
int dma_chann;
/* Controller's identification */
uint8_t version;
/* HW */
uint8_t sra;
uint8_t srb;
uint8_t dor;
uint8_t dor_vmstate; /* only used as temp during vmstate */
uint8_t tdr;
uint8_t dsr;
uint8_t msr;
uint8_t cur_drv;
uint8_t status0;
uint8_t status1;
uint8_t status2;
/* Command FIFO */
uint8_t *fifo;
int32_t fifo_size;
uint32_t data_pos;
uint32_t data_len;
uint8_t data_state;
uint8_t data_dir;
uint8_t eot; /* last wanted sector */
/* States kept only to be returned back */
/* precompensation */
uint8_t precomp_trk;
uint8_t config;
uint8_t lock;
    /* Power down config (also with status regB access mode) */
uint8_t pwrd;
/* Floppy drives */
uint8_t num_floppies;
/* Sun4m quirks? */
int sun4m;
FDrive drives[MAX_FD];
int reset_sensei;
/* Timers state */
uint8_t timer0;
uint8_t timer1;
};
typedef struct FDCtrlSysBus {
SysBusDevice busdev;
struct FDCtrl state;
} FDCtrlSysBus;
typedef struct FDCtrlISABus {
ISADevice busdev;
struct FDCtrl state;
int32_t bootindexA;
int32_t bootindexB;
} FDCtrlISABus;
static uint32_t fdctrl_read (void *opaque, uint32_t reg)
{
FDCtrl *fdctrl = opaque;
uint32_t retval;
reg &= 7;
switch (reg) {
case FD_REG_SRA:
retval = fdctrl_read_statusA(fdctrl);
break;
case FD_REG_SRB:
retval = fdctrl_read_statusB(fdctrl);
break;
case FD_REG_DOR:
retval = fdctrl_read_dor(fdctrl);
break;
case FD_REG_TDR:
retval = fdctrl_read_tape(fdctrl);
break;
case FD_REG_MSR:
retval = fdctrl_read_main_status(fdctrl);
break;
case FD_REG_FIFO:
retval = fdctrl_read_data(fdctrl);
break;
case FD_REG_DIR:
retval = fdctrl_read_dir(fdctrl);
break;
default:
retval = (uint32_t)(-1);
break;
}
FLOPPY_DPRINTF("read reg%d: 0x%02x\n", reg & 7, retval);
return retval;
}
static void fdctrl_write (void *opaque, uint32_t reg, uint32_t value)
{
FDCtrl *fdctrl = opaque;
FLOPPY_DPRINTF("write reg%d: 0x%02x\n", reg & 7, value);
reg &= 7;
switch (reg) {
case FD_REG_DOR:
fdctrl_write_dor(fdctrl, value);
break;
case FD_REG_TDR:
fdctrl_write_tape(fdctrl, value);
break;
case FD_REG_DSR:
fdctrl_write_rate(fdctrl, value);
break;
case FD_REG_FIFO:
fdctrl_write_data(fdctrl, value);
break;
default:
break;
}
}
static uint32_t fdctrl_read_mem (void *opaque, target_phys_addr_t reg)
{
return fdctrl_read(opaque, (uint32_t)reg);
}
static void fdctrl_write_mem (void *opaque,
target_phys_addr_t reg, uint32_t value)
{
fdctrl_write(opaque, (uint32_t)reg, value);
}
static CPUReadMemoryFunc * const fdctrl_mem_read[3] = {
fdctrl_read_mem,
fdctrl_read_mem,
fdctrl_read_mem,
};
static CPUWriteMemoryFunc * const fdctrl_mem_write[3] = {
fdctrl_write_mem,
fdctrl_write_mem,
fdctrl_write_mem,
};
static CPUReadMemoryFunc * const fdctrl_mem_read_strict[3] = {
fdctrl_read_mem,
NULL,
NULL,
};
static CPUWriteMemoryFunc * const fdctrl_mem_write_strict[3] = {
fdctrl_write_mem,
NULL,
NULL,
};
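/*
 * Migration: the media_changed flag travels in a vmstate subsection that
 * is only sent when a disk is inserted and the flag holds a value other
 * than 1.
 */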
static bool fdrive_media_changed_needed(void *opaque)
{
FDrive *drive = opaque;
return (drive->bs != NULL && drive->media_changed != 1);
}
static const VMStateDescription vmstate_fdrive_media_changed = {
.name = "fdrive/media_changed",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT8(media_changed, FDrive),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_fdrive = {
.name = "fdrive",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT8(head, FDrive),
VMSTATE_UINT8(track, FDrive),
VMSTATE_UINT8(sect, FDrive),
VMSTATE_END_OF_LIST()
},
.subsections = (VMStateSubsection[]) {
{
.vmsd = &vmstate_fdrive_media_changed,
.needed = &fdrive_media_changed_needed,
} , {
/* empty */
}
}
};
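/*
 * For migration the current drive selection is folded into the saved DOR
 * image (pre_save) and split out again after load (post_load).
 */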
static void fdc_pre_save(void *opaque)
{
FDCtrl *s = opaque;
s->dor_vmstate = s->dor | GET_CUR_DRV(s);
}
static int fdc_post_load(void *opaque, int version_id)
{
FDCtrl *s = opaque;
SET_CUR_DRV(s, s->dor_vmstate & FD_DOR_SELMASK);
s->dor = s->dor_vmstate & ~FD_DOR_SELMASK;
return 0;
}
static const VMStateDescription vmstate_fdc = {
.name = "fdc",
.version_id = 2,
.minimum_version_id = 2,
.minimum_version_id_old = 2,
.pre_save = fdc_pre_save,
.post_load = fdc_post_load,
.fields = (VMStateField []) {
/* Controller State */
VMSTATE_UINT8(sra, FDCtrl),
VMSTATE_UINT8(srb, FDCtrl),
VMSTATE_UINT8(dor_vmstate, FDCtrl),
VMSTATE_UINT8(tdr, FDCtrl),
VMSTATE_UINT8(dsr, FDCtrl),
VMSTATE_UINT8(msr, FDCtrl),
VMSTATE_UINT8(status0, FDCtrl),
VMSTATE_UINT8(status1, FDCtrl),
VMSTATE_UINT8(status2, FDCtrl),
/* Command FIFO */
VMSTATE_VARRAY_INT32(fifo, FDCtrl, fifo_size, 0, vmstate_info_uint8,
uint8_t),
VMSTATE_UINT32(data_pos, FDCtrl),
VMSTATE_UINT32(data_len, FDCtrl),
VMSTATE_UINT8(data_state, FDCtrl),
VMSTATE_UINT8(data_dir, FDCtrl),
VMSTATE_UINT8(eot, FDCtrl),
/* States kept only to be returned back */
VMSTATE_UINT8(timer0, FDCtrl),
VMSTATE_UINT8(timer1, FDCtrl),
VMSTATE_UINT8(precomp_trk, FDCtrl),
VMSTATE_UINT8(config, FDCtrl),
VMSTATE_UINT8(lock, FDCtrl),
VMSTATE_UINT8(pwrd, FDCtrl),
VMSTATE_UINT8_EQUAL(num_floppies, FDCtrl),
VMSTATE_STRUCT_ARRAY(drives, FDCtrl, MAX_FD, 1,
vmstate_fdrive, FDrive),
VMSTATE_END_OF_LIST()
}
};
static void fdctrl_external_reset_sysbus(DeviceState *d)
{
FDCtrlSysBus *sys = container_of(d, FDCtrlSysBus, busdev.qdev);
FDCtrl *s = &sys->state;
fdctrl_reset(s, 0);
}
static void fdctrl_external_reset_isa(DeviceState *d)
{
FDCtrlISABus *isa = container_of(d, FDCtrlISABus, busdev.qdev);
FDCtrl *s = &isa->state;
fdctrl_reset(s, 0);
}
static void fdctrl_handle_tc(void *opaque, int irq, int level)
{
//FDCtrl *s = opaque;
if (level) {
// XXX
FLOPPY_DPRINTF("TC pulsed\n");
}
}
/* Change IRQ state */
static void fdctrl_reset_irq(FDCtrl *fdctrl)
{
if (!(fdctrl->sra & FD_SRA_INTPEND))
return;
FLOPPY_DPRINTF("Reset interrupt\n");
qemu_set_irq(fdctrl->irq, 0);
fdctrl->sra &= ~FD_SRA_INTPEND;
}
static void fdctrl_raise_irq(FDCtrl *fdctrl, uint8_t status0)
{
/* Sparc mutation */
if (fdctrl->sun4m && (fdctrl->msr & FD_MSR_CMDBUSY)) {
/* XXX: not sure */
fdctrl->msr &= ~FD_MSR_CMDBUSY;
fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO;
fdctrl->status0 = status0;
return;
}
if (!(fdctrl->sra & FD_SRA_INTPEND)) {
qemu_set_irq(fdctrl->irq, 1);
fdctrl->sra |= FD_SRA_INTPEND;
}
fdctrl->reset_sensei = 0;
fdctrl->status0 = status0;
FLOPPY_DPRINTF("Set interrupt status to 0x%02x\n", fdctrl->status0);
}
/* Reset controller */
static void fdctrl_reset(FDCtrl *fdctrl, int do_irq)
{
int i;
FLOPPY_DPRINTF("reset controller\n");
fdctrl_reset_irq(fdctrl);
/* Initialise controller */
fdctrl->sra = 0;
fdctrl->srb = 0xc0;
if (!fdctrl->drives[1].bs)
fdctrl->sra |= FD_SRA_nDRV2;
fdctrl->cur_drv = 0;
fdctrl->dor = FD_DOR_nRESET;
fdctrl->dor |= (fdctrl->dma_chann != -1) ? FD_DOR_DMAEN : 0;
fdctrl->msr = FD_MSR_RQM;
/* FIFO state */
fdctrl->data_pos = 0;
fdctrl->data_len = 0;
fdctrl->data_state = 0;
fdctrl->data_dir = FD_DIR_WRITE;
for (i = 0; i < MAX_FD; i++)
fd_recalibrate(&fdctrl->drives[i]);
fdctrl_reset_fifo(fdctrl);
if (do_irq) {
fdctrl_raise_irq(fdctrl, FD_SR0_RDYCHG);
fdctrl->reset_sensei = FD_RESET_SENSEI_COUNT;
}
}
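/*
 * Map logical drive numbers to physical drives according to the boot
 * drive selected through the TDR BOOTSEL bits.
 */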
static inline FDrive *drv0(FDCtrl *fdctrl)
{
return &fdctrl->drives[(fdctrl->tdr & FD_TDR_BOOTSEL) >> 2];
}
static inline FDrive *drv1(FDCtrl *fdctrl)
{
if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (1 << 2))
return &fdctrl->drives[1];
else
return &fdctrl->drives[0];
}
#if MAX_FD == 4
static inline FDrive *drv2(FDCtrl *fdctrl)
{
if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (2 << 2))
return &fdctrl->drives[2];
else
return &fdctrl->drives[1];
}
static inline FDrive *drv3(FDCtrl *fdctrl)
{
if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (3 << 2))
return &fdctrl->drives[3];
else
return &fdctrl->drives[2];
}
#endif
static FDrive *get_cur_drv(FDCtrl *fdctrl)
{
switch (fdctrl->cur_drv) {
case 0: return drv0(fdctrl);
case 1: return drv1(fdctrl);
#if MAX_FD == 4
case 2: return drv2(fdctrl);
case 3: return drv3(fdctrl);
#endif
default: return NULL;
}
}
/* Status A register : 0x00 (read-only) */
static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->sra;
FLOPPY_DPRINTF("status register A: 0x%02x\n", retval);
return retval;
}
/* Status B register : 0x01 (read-only) */
static uint32_t fdctrl_read_statusB(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->srb;
FLOPPY_DPRINTF("status register B: 0x%02x\n", retval);
return retval;
}
/* Digital output register : 0x02 */
static uint32_t fdctrl_read_dor(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->dor;
/* Selected drive */
retval |= fdctrl->cur_drv;
FLOPPY_DPRINTF("digital output register: 0x%02x\n", retval);
return retval;
}
static void fdctrl_write_dor(FDCtrl *fdctrl, uint32_t value)
{
FLOPPY_DPRINTF("digital output register set to 0x%02x\n", value);
/* Motors */
if (value & FD_DOR_MOTEN0)
fdctrl->srb |= FD_SRB_MTR0;
else
fdctrl->srb &= ~FD_SRB_MTR0;
if (value & FD_DOR_MOTEN1)
fdctrl->srb |= FD_SRB_MTR1;
else
fdctrl->srb &= ~FD_SRB_MTR1;
/* Drive */
if (value & 1)
fdctrl->srb |= FD_SRB_DR0;
else
fdctrl->srb &= ~FD_SRB_DR0;
/* Reset */
if (!(value & FD_DOR_nRESET)) {
if (fdctrl->dor & FD_DOR_nRESET) {
FLOPPY_DPRINTF("controller enter RESET state\n");
}
} else {
if (!(fdctrl->dor & FD_DOR_nRESET)) {
FLOPPY_DPRINTF("controller out of RESET state\n");
fdctrl_reset(fdctrl, 1);
fdctrl->dsr &= ~FD_DSR_PWRDOWN;
}
}
/* Selected drive */
fdctrl->cur_drv = value & FD_DOR_SELMASK;
fdctrl->dor = value;
}
/* Tape drive register : 0x03 */
static uint32_t fdctrl_read_tape(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->tdr;
FLOPPY_DPRINTF("tape drive register: 0x%02x\n", retval);
return retval;
}
static void fdctrl_write_tape(FDCtrl *fdctrl, uint32_t value)
{
/* Reset mode */
if (!(fdctrl->dor & FD_DOR_nRESET)) {
FLOPPY_DPRINTF("Floppy controller in RESET state !\n");
return;
}
FLOPPY_DPRINTF("tape drive register set to 0x%02x\n", value);
/* Disk boot selection indicator */
fdctrl->tdr = value & FD_TDR_BOOTSEL;
/* Tape indicators: never allow */
}
/* Main status register : 0x04 (read) */
static uint32_t fdctrl_read_main_status(FDCtrl *fdctrl)
{
uint32_t retval = fdctrl->msr;
fdctrl->dsr &= ~FD_DSR_PWRDOWN;
fdctrl->dor |= FD_DOR_nRESET;
/* Sparc mutation */
if (fdctrl->sun4m) {
retval |= FD_MSR_DIO;
fdctrl_reset_irq(fdctrl);
    }
FLOPPY_DPRINTF("main status register: 0x%02x\n", retval);
return retval;
}
/* Data select rate register : 0x04 (write) */
static void fdctrl_write_rate(FDCtrl *fdctrl, uint32_t value)
{
/* Reset mode */
if (!(fdctrl->dor & FD_DOR_nRESET)) {
FLOPPY_DPRINTF("Floppy controller in RESET state !\n");
return;
}
FLOPPY_DPRINTF("select rate register set to 0x%02x\n", value);
/* Reset: autoclear */
if (value & FD_DSR_SWRESET) {
fdctrl->dor &= ~FD_DOR_nRESET;
fdctrl_reset(fdctrl, 1);
fdctrl->dor |= FD_DOR_nRESET;
}
if (value & FD_DSR_PWRDOWN) {
fdctrl_reset(fdctrl, 1);
}
fdctrl->dsr = value;
}
static int fdctrl_media_changed(FDrive *drv)
{
int ret;
if (!drv->bs)
return 0;
if (drv->media_changed) {
drv->media_changed = 0;
ret = 1;
} else {
ret = bdrv_media_changed(drv->bs);
if (ret < 0) {
ret = 0; /* we don't know, assume no */
}
}
if (ret) {
fd_revalidate(drv);
}
return ret;
}
/* Digital input register : 0x07 (read-only) */
static uint32_t fdctrl_read_dir(FDCtrl *fdctrl)
{
uint32_t retval = 0;
if (fdctrl_media_changed(drv0(fdctrl))
|| fdctrl_media_changed(drv1(fdctrl))
#if MAX_FD == 4
|| fdctrl_media_changed(drv2(fdctrl))
|| fdctrl_media_changed(drv3(fdctrl))
#endif
)
retval |= FD_DIR_DSKCHG;
if (retval != 0) {
FLOPPY_DPRINTF("Floppy digital input register: 0x%02x\n", retval);
}
return retval;
}
/* FIFO state control */
static void fdctrl_reset_fifo(FDCtrl *fdctrl)
{
fdctrl->data_dir = FD_DIR_WRITE;
fdctrl->data_pos = 0;
fdctrl->msr &= ~(FD_MSR_CMDBUSY | FD_MSR_DIO);
}
/* Set FIFO status for the host to read */
static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len, int do_irq)
{
fdctrl->data_dir = FD_DIR_READ;
fdctrl->data_len = fifo_len;
fdctrl->data_pos = 0;
fdctrl->msr |= FD_MSR_CMDBUSY | FD_MSR_RQM | FD_MSR_DIO;
if (do_irq)
fdctrl_raise_irq(fdctrl, 0x00);
}
/* Set an error: unimplemented/unknown command */
static void fdctrl_unimplemented(FDCtrl *fdctrl, int direction)
{
FLOPPY_ERROR("unimplemented command 0x%02x\n", fdctrl->fifo[0]);
fdctrl->fifo[0] = FD_SR0_INVCMD;
fdctrl_set_fifo(fdctrl, 1, 0);
}
/* Seek to next sector */
static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv)
{
FLOPPY_DPRINTF("seek to next sector (%d %02x %02x => %d)\n",
cur_drv->head, cur_drv->track, cur_drv->sect,
fd_sector(cur_drv));
/* XXX: cur_drv->sect >= cur_drv->last_sect should be an
error in fact */
if (cur_drv->sect >= cur_drv->last_sect ||
cur_drv->sect == fdctrl->eot) {
cur_drv->sect = 1;
if (FD_MULTI_TRACK(fdctrl->data_state)) {
if (cur_drv->head == 0 &&
(cur_drv->flags & FDISK_DBL_SIDES) != 0) {
cur_drv->head = 1;
} else {
cur_drv->head = 0;
cur_drv->track++;
if ((cur_drv->flags & FDISK_DBL_SIDES) == 0)
return 0;
}
} else {
cur_drv->track++;
return 0;
}
FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n",
cur_drv->head, cur_drv->track,
cur_drv->sect, fd_sector(cur_drv));
} else {
cur_drv->sect++;
}
return 1;
}
/* Callback for transfer end (stop or abort) */
static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0,
uint8_t status1, uint8_t status2)
{
FDrive *cur_drv;
cur_drv = get_cur_drv(fdctrl);
FLOPPY_DPRINTF("transfer status: %02x %02x %02x (%02x)\n",
status0, status1, status2,
status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl));
fdctrl->fifo[0] = status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
fdctrl->fifo[1] = status1;
fdctrl->fifo[2] = status2;
fdctrl->fifo[3] = cur_drv->track;
fdctrl->fifo[4] = cur_drv->head;
fdctrl->fifo[5] = cur_drv->sect;
fdctrl->fifo[6] = FD_SECTOR_SC;
fdctrl->data_dir = FD_DIR_READ;
if (!(fdctrl->msr & FD_MSR_NONDMA)) {
DMA_release_DREQ(fdctrl->dma_chann);
}
fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO;
fdctrl->msr &= ~FD_MSR_NONDMA;
fdctrl_set_fifo(fdctrl, 7, 1);
}
/* Prepare a data transfer (either DMA or FIFO) */
static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
uint8_t kh, kt, ks;
int did_seek = 0;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
kt = fdctrl->fifo[2];
kh = fdctrl->fifo[3];
ks = fdctrl->fifo[4];
FLOPPY_DPRINTF("Start transfer at %d %d %02x %02x (%d)\n",
GET_CUR_DRV(fdctrl), kh, kt, ks,
fd_sector_calc(kh, kt, ks, cur_drv->last_sect));
switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) {
case 2:
/* sect too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 3:
/* track too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 4:
/* No seek enabled */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 1:
did_seek = 1;
break;
default:
break;
}
/* Set the FIFO state */
fdctrl->data_dir = direction;
fdctrl->data_pos = 0;
fdctrl->msr |= FD_MSR_CMDBUSY;
if (fdctrl->fifo[0] & 0x80)
fdctrl->data_state |= FD_STATE_MULTI;
else
fdctrl->data_state &= ~FD_STATE_MULTI;
if (did_seek)
fdctrl->data_state |= FD_STATE_SEEK;
else
fdctrl->data_state &= ~FD_STATE_SEEK;
if (fdctrl->fifo[5] == 00) {
fdctrl->data_len = fdctrl->fifo[8];
} else {
int tmp;
fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]);
tmp = (fdctrl->fifo[6] - ks + 1);
if (fdctrl->fifo[0] & 0x80)
tmp += fdctrl->fifo[6];
fdctrl->data_len *= tmp;
}
fdctrl->eot = fdctrl->fifo[6];
if (fdctrl->dor & FD_DOR_DMAEN) {
int dma_mode;
/* DMA transfer are enabled. Check if DMA channel is well programmed */
dma_mode = DMA_get_channel_mode(fdctrl->dma_chann);
dma_mode = (dma_mode >> 2) & 3;
FLOPPY_DPRINTF("dma_mode=%d direction=%d (%d - %d)\n",
dma_mode, direction,
(128 << fdctrl->fifo[5]) *
(cur_drv->last_sect - ks + 1), fdctrl->data_len);
if (((direction == FD_DIR_SCANE || direction == FD_DIR_SCANL ||
direction == FD_DIR_SCANH) && dma_mode == 0) ||
(direction == FD_DIR_WRITE && dma_mode == 2) ||
(direction == FD_DIR_READ && dma_mode == 1)) {
/* No access is allowed until DMA transfer has completed */
fdctrl->msr &= ~FD_MSR_RQM;
/* Now, we just have to wait for the DMA controller to
* recall us...
*/
DMA_hold_DREQ(fdctrl->dma_chann);
DMA_schedule(fdctrl->dma_chann);
return;
} else {
FLOPPY_ERROR("dma_mode=%d direction=%d\n", dma_mode, direction);
}
}
FLOPPY_DPRINTF("start non-DMA transfer\n");
fdctrl->msr |= FD_MSR_NONDMA;
if (direction != FD_DIR_WRITE)
fdctrl->msr |= FD_MSR_DIO;
/* IO based transfer: calculate len */
fdctrl_raise_irq(fdctrl, 0x00);
return;
}
/* Prepare a transfer of deleted data */
static void fdctrl_start_transfer_del(FDCtrl *fdctrl, int direction)
{
FLOPPY_ERROR("fdctrl_start_transfer_del() unimplemented\n");
/* We don't handle deleted data,
* so we don't return *ANYTHING*
*/
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
}
/* handlers for DMA transfers */
static int fdctrl_transfer_handler (void *opaque, int nchan,
int dma_pos, int dma_len)
{
FDCtrl *fdctrl;
FDrive *cur_drv;
int len, start_pos, rel_pos;
uint8_t status0 = 0x00, status1 = 0x00, status2 = 0x00;
fdctrl = opaque;
if (fdctrl->msr & FD_MSR_RQM) {
FLOPPY_DPRINTF("Not in DMA transfer mode !\n");
return 0;
}
cur_drv = get_cur_drv(fdctrl);
if (fdctrl->data_dir == FD_DIR_SCANE || fdctrl->data_dir == FD_DIR_SCANL ||
fdctrl->data_dir == FD_DIR_SCANH)
status2 = FD_SR2_SNS;
if (dma_len > fdctrl->data_len)
dma_len = fdctrl->data_len;
if (cur_drv->bs == NULL) {
if (fdctrl->data_dir == FD_DIR_WRITE)
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
else
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
len = 0;
goto transfer_error;
}
rel_pos = fdctrl->data_pos % FD_SECTOR_LEN;
for (start_pos = fdctrl->data_pos; fdctrl->data_pos < dma_len;) {
len = dma_len - fdctrl->data_pos;
if (len + rel_pos > FD_SECTOR_LEN)
len = FD_SECTOR_LEN - rel_pos;
FLOPPY_DPRINTF("copy %d bytes (%d %d %d) %d pos %d %02x "
"(%d-0x%08x 0x%08x)\n", len, dma_len, fdctrl->data_pos,
fdctrl->data_len, GET_CUR_DRV(fdctrl), cur_drv->head,
cur_drv->track, cur_drv->sect, fd_sector(cur_drv),
fd_sector(cur_drv) * FD_SECTOR_LEN);
if (fdctrl->data_dir != FD_DIR_WRITE ||
len < FD_SECTOR_LEN || rel_pos != 0) {
/* READ & SCAN commands and realign to a sector for WRITE */
if (bdrv_read(cur_drv->bs, fd_sector(cur_drv),
fdctrl->fifo, 1) < 0) {
FLOPPY_DPRINTF("Floppy: error getting sector %d\n",
fd_sector(cur_drv));
/* Sure, image size is too small... */
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
}
}
switch (fdctrl->data_dir) {
case FD_DIR_READ:
/* READ commands */
DMA_write_memory (nchan, fdctrl->fifo + rel_pos,
fdctrl->data_pos, len);
break;
case FD_DIR_WRITE:
/* WRITE commands */
DMA_read_memory (nchan, fdctrl->fifo + rel_pos,
fdctrl->data_pos, len);
if (bdrv_write(cur_drv->bs, fd_sector(cur_drv),
fdctrl->fifo, 1) < 0) {
FLOPPY_ERROR("writing sector %d\n", fd_sector(cur_drv));
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
goto transfer_error;
}
break;
default:
/* SCAN commands */
{
uint8_t tmpbuf[FD_SECTOR_LEN];
int ret;
DMA_read_memory (nchan, tmpbuf, fdctrl->data_pos, len);
ret = memcmp(tmpbuf, fdctrl->fifo + rel_pos, len);
if (ret == 0) {
status2 = FD_SR2_SEH;
goto end_transfer;
}
if ((ret < 0 && fdctrl->data_dir == FD_DIR_SCANL) ||
(ret > 0 && fdctrl->data_dir == FD_DIR_SCANH)) {
status2 = 0x00;
goto end_transfer;
}
}
break;
}
fdctrl->data_pos += len;
rel_pos = fdctrl->data_pos % FD_SECTOR_LEN;
if (rel_pos == 0) {
/* Seek to next sector */
if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv))
break;
}
}
end_transfer:
len = fdctrl->data_pos - start_pos;
FLOPPY_DPRINTF("end transfer %d %d %d\n",
fdctrl->data_pos, len, fdctrl->data_len);
if (fdctrl->data_dir == FD_DIR_SCANE ||
fdctrl->data_dir == FD_DIR_SCANL ||
fdctrl->data_dir == FD_DIR_SCANH)
status2 = FD_SR2_SEH;
if (FD_DID_SEEK(fdctrl->data_state))
status0 |= FD_SR0_SEEK;
fdctrl->data_len -= len;
fdctrl_stop_transfer(fdctrl, status0, status1, status2);
transfer_error:
return len;
}
/* Data register : 0x05 */
static uint32_t fdctrl_read_data(FDCtrl *fdctrl)
{
FDrive *cur_drv;
uint32_t retval = 0;
int pos;
cur_drv = get_cur_drv(fdctrl);
fdctrl->dsr &= ~FD_DSR_PWRDOWN;
if (!(fdctrl->msr & FD_MSR_RQM) || !(fdctrl->msr & FD_MSR_DIO)) {
FLOPPY_ERROR("controller not ready for reading\n");
return 0;
}
pos = fdctrl->data_pos;
if (fdctrl->msr & FD_MSR_NONDMA) {
pos %= FD_SECTOR_LEN;
if (pos == 0) {
if (fdctrl->data_pos != 0)
if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) {
FLOPPY_DPRINTF("error seeking to next sector %d\n",
fd_sector(cur_drv));
return 0;
}
if (bdrv_read(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) {
FLOPPY_DPRINTF("error getting sector %d\n",
fd_sector(cur_drv));
/* Sure, image size is too small... */
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
}
}
}
retval = fdctrl->fifo[pos];
if (++fdctrl->data_pos == fdctrl->data_len) {
fdctrl->data_pos = 0;
/* Switch from transfer mode to status mode
* then from status mode to command mode
*/
if (fdctrl->msr & FD_MSR_NONDMA) {
fdctrl_stop_transfer(fdctrl, FD_SR0_SEEK, 0x00, 0x00);
} else {
fdctrl_reset_fifo(fdctrl);
fdctrl_reset_irq(fdctrl);
}
}
FLOPPY_DPRINTF("data register: 0x%02x\n", retval);
return retval;
}
static void fdctrl_format_sector(FDCtrl *fdctrl)
{
FDrive *cur_drv;
uint8_t kh, kt, ks;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
kt = fdctrl->fifo[6];
kh = fdctrl->fifo[7];
ks = fdctrl->fifo[8];
FLOPPY_DPRINTF("format sector at %d %d %02x %02x (%d)\n",
GET_CUR_DRV(fdctrl), kh, kt, ks,
fd_sector_calc(kh, kt, ks, cur_drv->last_sect));
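    /*
     * Note (added for clarity; an assumption about fd_sector_calc(), which is
     * defined elsewhere): the linear sector number is conventionally
     * (track * 2 + head) * last_sect + sect - 1 for a two-sided disk, e.g.
     * head 0, track 1, sector 1 on an 18-sector track maps to sector 36.
     */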
switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) {
case 2:
/* sect too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 3:
/* track too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 4:
/* No seek enabled */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 1:
fdctrl->data_state |= FD_STATE_SEEK;
break;
default:
break;
}
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
if (cur_drv->bs == NULL ||
bdrv_write(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) {
FLOPPY_ERROR("formatting sector %d\n", fd_sector(cur_drv));
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
} else {
if (cur_drv->sect == cur_drv->last_sect) {
fdctrl->data_state &= ~FD_STATE_FORMAT;
/* Last sector done */
if (FD_DID_SEEK(fdctrl->data_state))
fdctrl_stop_transfer(fdctrl, FD_SR0_SEEK, 0x00, 0x00);
else
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
} else {
/* More to do */
fdctrl->data_pos = 0;
fdctrl->data_len = 4;
}
}
}
static void fdctrl_handle_lock(FDCtrl *fdctrl, int direction)
{
fdctrl->lock = (fdctrl->fifo[0] & 0x80) ? 1 : 0;
fdctrl->fifo[0] = fdctrl->lock << 4;
fdctrl_set_fifo(fdctrl, 1, fdctrl->lock);
}
static void fdctrl_handle_dumpreg(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
/* Drives position */
fdctrl->fifo[0] = drv0(fdctrl)->track;
fdctrl->fifo[1] = drv1(fdctrl)->track;
#if MAX_FD == 4
fdctrl->fifo[2] = drv2(fdctrl)->track;
fdctrl->fifo[3] = drv3(fdctrl)->track;
#else
fdctrl->fifo[2] = 0;
fdctrl->fifo[3] = 0;
#endif
/* timers */
fdctrl->fifo[4] = fdctrl->timer0;
fdctrl->fifo[5] = (fdctrl->timer1 << 1) | (fdctrl->dor & FD_DOR_DMAEN ? 1 : 0);
fdctrl->fifo[6] = cur_drv->last_sect;
fdctrl->fifo[7] = (fdctrl->lock << 7) |
(cur_drv->perpendicular << 2);
fdctrl->fifo[8] = fdctrl->config;
fdctrl->fifo[9] = fdctrl->precomp_trk;
fdctrl_set_fifo(fdctrl, 10, 0);
}
static void fdctrl_handle_version(FDCtrl *fdctrl, int direction)
{
/* Controller's version */
fdctrl->fifo[0] = fdctrl->version;
fdctrl_set_fifo(fdctrl, 1, 1);
}
static void fdctrl_handle_partid(FDCtrl *fdctrl, int direction)
{
fdctrl->fifo[0] = 0x41; /* Stepping 1 */
fdctrl_set_fifo(fdctrl, 1, 0);
}
static void fdctrl_handle_restore(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
/* Drives position */
drv0(fdctrl)->track = fdctrl->fifo[3];
drv1(fdctrl)->track = fdctrl->fifo[4];
#if MAX_FD == 4
drv2(fdctrl)->track = fdctrl->fifo[5];
drv3(fdctrl)->track = fdctrl->fifo[6];
#endif
/* timers */
fdctrl->timer0 = fdctrl->fifo[7];
fdctrl->timer1 = fdctrl->fifo[8];
cur_drv->last_sect = fdctrl->fifo[9];
fdctrl->lock = fdctrl->fifo[10] >> 7;
cur_drv->perpendicular = (fdctrl->fifo[10] >> 2) & 0xF;
fdctrl->config = fdctrl->fifo[11];
fdctrl->precomp_trk = fdctrl->fifo[12];
fdctrl->pwrd = fdctrl->fifo[13];
fdctrl_reset_fifo(fdctrl);
}
static void fdctrl_handle_save(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
fdctrl->fifo[0] = 0;
fdctrl->fifo[1] = 0;
/* Drives position */
fdctrl->fifo[2] = drv0(fdctrl)->track;
fdctrl->fifo[3] = drv1(fdctrl)->track;
#if MAX_FD == 4
fdctrl->fifo[4] = drv2(fdctrl)->track;
fdctrl->fifo[5] = drv3(fdctrl)->track;
#else
fdctrl->fifo[4] = 0;
fdctrl->fifo[5] = 0;
#endif
/* timers */
fdctrl->fifo[6] = fdctrl->timer0;
fdctrl->fifo[7] = fdctrl->timer1;
fdctrl->fifo[8] = cur_drv->last_sect;
fdctrl->fifo[9] = (fdctrl->lock << 7) |
(cur_drv->perpendicular << 2);
fdctrl->fifo[10] = fdctrl->config;
fdctrl->fifo[11] = fdctrl->precomp_trk;
fdctrl->fifo[12] = fdctrl->pwrd;
fdctrl->fifo[13] = 0;
fdctrl->fifo[14] = 0;
fdctrl_set_fifo(fdctrl, 15, 1);
}
static void fdctrl_handle_readid(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
/* XXX: should set main status register to busy */
cur_drv->head = (fdctrl->fifo[1] >> 2) & 1;
qemu_mod_timer(fdctrl->result_timer,
qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / 50));
}
static void fdctrl_handle_format_track(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
fdctrl->data_state |= FD_STATE_FORMAT;
if (fdctrl->fifo[0] & 0x80)
fdctrl->data_state |= FD_STATE_MULTI;
else
fdctrl->data_state &= ~FD_STATE_MULTI;
fdctrl->data_state &= ~FD_STATE_SEEK;
cur_drv->bps =
fdctrl->fifo[2] > 7 ? 16384 : 128 << fdctrl->fifo[2];
#if 0
cur_drv->last_sect =
cur_drv->flags & FDISK_DBL_SIDES ? fdctrl->fifo[3] :
fdctrl->fifo[3] / 2;
#else
cur_drv->last_sect = fdctrl->fifo[3];
#endif
    /* TODO: implement format using DMA expected by the Bochs BIOS
     * and Linux fdformat (read 3 bytes per sector via DMA and fill
     * the sector with the specified fill byte)
     */
fdctrl->data_state &= ~FD_STATE_FORMAT;
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
}
static void fdctrl_handle_specify(FDCtrl *fdctrl, int direction)
{
fdctrl->timer0 = (fdctrl->fifo[1] >> 4) & 0xF;
fdctrl->timer1 = fdctrl->fifo[2] >> 1;
if (fdctrl->fifo[2] & 1)
fdctrl->dor &= ~FD_DOR_DMAEN;
else
fdctrl->dor |= FD_DOR_DMAEN;
/* No result back */
fdctrl_reset_fifo(fdctrl);
}
static void fdctrl_handle_sense_drive_status(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
cur_drv->head = (fdctrl->fifo[1] >> 2) & 1;
/* 1 Byte status back */
fdctrl->fifo[0] = (cur_drv->ro << 6) |
(cur_drv->track == 0 ? 0x10 : 0x00) |
(cur_drv->head << 2) |
GET_CUR_DRV(fdctrl) |
0x28;
fdctrl_set_fifo(fdctrl, 1, 0);
}
static void fdctrl_handle_recalibrate(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
fd_recalibrate(cur_drv);
fdctrl_reset_fifo(fdctrl);
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
if(fdctrl->reset_sensei > 0) {
fdctrl->fifo[0] =
FD_SR0_RDYCHG + FD_RESET_SENSEI_COUNT - fdctrl->reset_sensei;
fdctrl->reset_sensei--;
} else {
/* XXX: status0 handling is broken for read/write
commands, so we do this hack. It should be suppressed
ASAP */
fdctrl->fifo[0] =
FD_SR0_SEEK | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
}
fdctrl->fifo[1] = cur_drv->track;
fdctrl_set_fifo(fdctrl, 2, 0);
fdctrl_reset_irq(fdctrl);
fdctrl->status0 = FD_SR0_RDYCHG;
}
static void fdctrl_handle_seek(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
fdctrl_reset_fifo(fdctrl);
if (fdctrl->fifo[2] > cur_drv->max_track) {
fdctrl_raise_irq(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK);
} else {
cur_drv->track = fdctrl->fifo[2];
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
}
static void fdctrl_handle_perpendicular_mode(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[1] & 0x80)
cur_drv->perpendicular = fdctrl->fifo[1] & 0x7;
/* No result back */
fdctrl_reset_fifo(fdctrl);
}
static void fdctrl_handle_configure(FDCtrl *fdctrl, int direction)
{
fdctrl->config = fdctrl->fifo[2];
fdctrl->precomp_trk = fdctrl->fifo[3];
/* No result back */
fdctrl_reset_fifo(fdctrl);
}
static void fdctrl_handle_powerdown_mode(FDCtrl *fdctrl, int direction)
{
fdctrl->pwrd = fdctrl->fifo[1];
fdctrl->fifo[0] = fdctrl->fifo[1];
fdctrl_set_fifo(fdctrl, 1, 1);
}
static void fdctrl_handle_option(FDCtrl *fdctrl, int direction)
{
/* No result back */
fdctrl_reset_fifo(fdctrl);
}
static void fdctrl_handle_drive_specification_command(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[fdctrl->data_pos - 1] & 0x80) {
/* Command parameters done */
if (fdctrl->fifo[fdctrl->data_pos - 1] & 0x40) {
fdctrl->fifo[0] = fdctrl->fifo[1];
fdctrl->fifo[2] = 0;
fdctrl->fifo[3] = 0;
fdctrl_set_fifo(fdctrl, 4, 1);
} else {
fdctrl_reset_fifo(fdctrl);
}
} else if (fdctrl->data_len > 7) {
/* ERROR */
fdctrl->fifo[0] = 0x80 |
(cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
fdctrl_set_fifo(fdctrl, 1, 1);
}
}
static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[2] + cur_drv->track >= cur_drv->max_track) {
cur_drv->track = cur_drv->max_track - 1;
} else {
cur_drv->track += fdctrl->fifo[2];
}
fdctrl_reset_fifo(fdctrl);
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[2] > cur_drv->track) {
cur_drv->track = 0;
} else {
cur_drv->track -= fdctrl->fifo[2];
}
fdctrl_reset_fifo(fdctrl);
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
static const struct {
uint8_t value;
uint8_t mask;
const char* name;
int parameters;
void (*handler)(FDCtrl *fdctrl, int direction);
int direction;
} handlers[] = {
{ FD_CMD_READ, 0x1f, "READ", 8, fdctrl_start_transfer, FD_DIR_READ },
{ FD_CMD_WRITE, 0x3f, "WRITE", 8, fdctrl_start_transfer, FD_DIR_WRITE },
{ FD_CMD_SEEK, 0xff, "SEEK", 2, fdctrl_handle_seek },
{ FD_CMD_SENSE_INTERRUPT_STATUS, 0xff, "SENSE INTERRUPT STATUS", 0, fdctrl_handle_sense_interrupt_status },
{ FD_CMD_RECALIBRATE, 0xff, "RECALIBRATE", 1, fdctrl_handle_recalibrate },
{ FD_CMD_FORMAT_TRACK, 0xbf, "FORMAT TRACK", 5, fdctrl_handle_format_track },
{ FD_CMD_READ_TRACK, 0xbf, "READ TRACK", 8, fdctrl_start_transfer, FD_DIR_READ },
{ FD_CMD_RESTORE, 0xff, "RESTORE", 17, fdctrl_handle_restore }, /* part of READ DELETED DATA */
{ FD_CMD_SAVE, 0xff, "SAVE", 0, fdctrl_handle_save }, /* part of READ DELETED DATA */
{ FD_CMD_READ_DELETED, 0x1f, "READ DELETED DATA", 8, fdctrl_start_transfer_del, FD_DIR_READ },
{ FD_CMD_SCAN_EQUAL, 0x1f, "SCAN EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANE },
{ FD_CMD_VERIFY, 0x1f, "VERIFY", 8, fdctrl_unimplemented },
{ FD_CMD_SCAN_LOW_OR_EQUAL, 0x1f, "SCAN LOW OR EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANL },
{ FD_CMD_SCAN_HIGH_OR_EQUAL, 0x1f, "SCAN HIGH OR EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANH },
{ FD_CMD_WRITE_DELETED, 0x3f, "WRITE DELETED DATA", 8, fdctrl_start_transfer_del, FD_DIR_WRITE },
{ FD_CMD_READ_ID, 0xbf, "READ ID", 1, fdctrl_handle_readid },
{ FD_CMD_SPECIFY, 0xff, "SPECIFY", 2, fdctrl_handle_specify },
{ FD_CMD_SENSE_DRIVE_STATUS, 0xff, "SENSE DRIVE STATUS", 1, fdctrl_handle_sense_drive_status },
{ FD_CMD_PERPENDICULAR_MODE, 0xff, "PERPENDICULAR MODE", 1, fdctrl_handle_perpendicular_mode },
{ FD_CMD_CONFIGURE, 0xff, "CONFIGURE", 3, fdctrl_handle_configure },
{ FD_CMD_POWERDOWN_MODE, 0xff, "POWERDOWN MODE", 2, fdctrl_handle_powerdown_mode },
{ FD_CMD_OPTION, 0xff, "OPTION", 1, fdctrl_handle_option },
{ FD_CMD_DRIVE_SPECIFICATION_COMMAND, 0xff, "DRIVE SPECIFICATION COMMAND", 5, fdctrl_handle_drive_specification_command },
{ FD_CMD_RELATIVE_SEEK_OUT, 0xff, "RELATIVE SEEK OUT", 2, fdctrl_handle_relative_seek_out },
{ FD_CMD_FORMAT_AND_WRITE, 0xff, "FORMAT AND WRITE", 10, fdctrl_unimplemented },
{ FD_CMD_RELATIVE_SEEK_IN, 0xff, "RELATIVE SEEK IN", 2, fdctrl_handle_relative_seek_in },
{ FD_CMD_LOCK, 0x7f, "LOCK", 0, fdctrl_handle_lock },
{ FD_CMD_DUMPREG, 0xff, "DUMPREG", 0, fdctrl_handle_dumpreg },
{ FD_CMD_VERSION, 0xff, "VERSION", 0, fdctrl_handle_version },
{ FD_CMD_PART_ID, 0xff, "PART ID", 0, fdctrl_handle_partid },
{ FD_CMD_WRITE, 0x1f, "WRITE (BeOS)", 8, fdctrl_start_transfer, FD_DIR_WRITE }, /* not in specification ; BeOS 4.5 bug */
{ 0, 0, "unknown", 0, fdctrl_unimplemented }, /* default handler */
};
/* Associate command to an index in the 'handlers' array */
static uint8_t command_to_handler[256];
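/*
 * Illustration (added for clarity, not in the original source): lookup is by
 * mask/value, so a command byte such as 0xe6 (the usual MT/MFM/SK bits ORed
 * onto READ) still selects the READ entry, because 0xe6 & 0x1f == 0x06,
 * which is the customary FD_CMD_READ value. command_to_handler[] caches that
 * resolution for all 256 possible command bytes.
 */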
static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value)
{
FDrive *cur_drv;
int pos;
/* Reset mode */
if (!(fdctrl->dor & FD_DOR_nRESET)) {
FLOPPY_DPRINTF("Floppy controller in RESET state !\n");
return;
}
if (!(fdctrl->msr & FD_MSR_RQM) || (fdctrl->msr & FD_MSR_DIO)) {
FLOPPY_ERROR("controller not ready for writing\n");
return;
}
fdctrl->dsr &= ~FD_DSR_PWRDOWN;
/* Is it write command time ? */
if (fdctrl->msr & FD_MSR_NONDMA) {
/* FIFO data write */
pos = fdctrl->data_pos++;
pos %= FD_SECTOR_LEN;
fdctrl->fifo[pos] = value;
if (pos == FD_SECTOR_LEN - 1 ||
fdctrl->data_pos == fdctrl->data_len) {
cur_drv = get_cur_drv(fdctrl);
if (bdrv_write(cur_drv->bs, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) {
FLOPPY_ERROR("writing sector %d\n", fd_sector(cur_drv));
return;
}
if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) {
FLOPPY_DPRINTF("error seeking to next sector %d\n",
fd_sector(cur_drv));
return;
}
}
/* Switch from transfer mode to status mode
* then from status mode to command mode
*/
if (fdctrl->data_pos == fdctrl->data_len)
fdctrl_stop_transfer(fdctrl, FD_SR0_SEEK, 0x00, 0x00);
return;
}
if (fdctrl->data_pos == 0) {
/* Command */
pos = command_to_handler[value & 0xff];
FLOPPY_DPRINTF("%s command\n", handlers[pos].name);
fdctrl->data_len = handlers[pos].parameters + 1;
}
FLOPPY_DPRINTF("%s: %02x\n", __func__, value);
fdctrl->fifo[fdctrl->data_pos++] = value;
if (fdctrl->data_pos == fdctrl->data_len) {
/* We now have all parameters
* and will be able to treat the command
*/
if (fdctrl->data_state & FD_STATE_FORMAT) {
fdctrl_format_sector(fdctrl);
return;
}
pos = command_to_handler[fdctrl->fifo[0] & 0xff];
FLOPPY_DPRINTF("treat %s command\n", handlers[pos].name);
(*handlers[pos].handler)(fdctrl, handlers[pos].direction);
}
}
static void fdctrl_result_timer(void *opaque)
{
FDCtrl *fdctrl = opaque;
FDrive *cur_drv = get_cur_drv(fdctrl);
/* Pretend we are spinning.
* This is needed for Coherent, which uses READ ID to check for
* sector interleaving.
*/
if (cur_drv->last_sect != 0) {
cur_drv->sect = (cur_drv->sect % cur_drv->last_sect) + 1;
}
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
}
static void fdctrl_change_cb(void *opaque, bool load)
{
FDrive *drive = opaque;
drive->media_changed = 1;
}
static const BlockDevOps fdctrl_block_ops = {
.change_media_cb = fdctrl_change_cb,
};
/* Init functions */
static int fdctrl_connect_drives(FDCtrl *fdctrl)
{
unsigned int i;
FDrive *drive;
for (i = 0; i < MAX_FD; i++) {
drive = &fdctrl->drives[i];
if (drive->bs) {
if (bdrv_get_on_error(drive->bs, 0) != BLOCK_ERR_STOP_ENOSPC) {
error_report("fdc doesn't support drive option werror");
return -1;
}
if (bdrv_get_on_error(drive->bs, 1) != BLOCK_ERR_REPORT) {
error_report("fdc doesn't support drive option rerror");
return -1;
}
}
fd_init(drive);
fd_revalidate(drive);
if (drive->bs) {
drive->media_changed = 1;
bdrv_set_dev_ops(drive->bs, &fdctrl_block_ops, drive);
}
}
return 0;
}
void fdctrl_init_sysbus(qemu_irq irq, int dma_chann,
target_phys_addr_t mmio_base, DriveInfo **fds)
{
FDCtrl *fdctrl;
DeviceState *dev;
FDCtrlSysBus *sys;
dev = qdev_create(NULL, "sysbus-fdc");
sys = DO_UPCAST(FDCtrlSysBus, busdev.qdev, dev);
fdctrl = &sys->state;
fdctrl->dma_chann = dma_chann; /* FIXME */
if (fds[0]) {
qdev_prop_set_drive_nofail(dev, "driveA", fds[0]->bdrv);
}
if (fds[1]) {
qdev_prop_set_drive_nofail(dev, "driveB", fds[1]->bdrv);
}
qdev_init_nofail(dev);
sysbus_connect_irq(&sys->busdev, 0, irq);
sysbus_mmio_map(&sys->busdev, 0, mmio_base);
}
void sun4m_fdctrl_init(qemu_irq irq, target_phys_addr_t io_base,
DriveInfo **fds, qemu_irq *fdc_tc)
{
DeviceState *dev;
FDCtrlSysBus *sys;
dev = qdev_create(NULL, "SUNW,fdtwo");
if (fds[0]) {
qdev_prop_set_drive_nofail(dev, "drive", fds[0]->bdrv);
}
qdev_init_nofail(dev);
sys = DO_UPCAST(FDCtrlSysBus, busdev.qdev, dev);
sysbus_connect_irq(&sys->busdev, 0, irq);
sysbus_mmio_map(&sys->busdev, 0, io_base);
*fdc_tc = qdev_get_gpio_in(dev, 0);
}
static int fdctrl_init_common(FDCtrl *fdctrl)
{
int i, j;
static int command_tables_inited = 0;
/* Fill 'command_to_handler' lookup table */
if (!command_tables_inited) {
command_tables_inited = 1;
for (i = ARRAY_SIZE(handlers) - 1; i >= 0; i--) {
for (j = 0; j < sizeof(command_to_handler); j++) {
if ((j & handlers[i].mask) == handlers[i].value) {
command_to_handler[j] = i;
}
}
}
}
FLOPPY_DPRINTF("init controller\n");
fdctrl->fifo = qemu_memalign(512, FD_SECTOR_LEN);
fdctrl->fifo_size = 512;
fdctrl->result_timer = qemu_new_timer_ns(vm_clock,
fdctrl_result_timer, fdctrl);
fdctrl->version = 0x90; /* Intel 82078 controller */
fdctrl->config = FD_CONFIG_EIS | FD_CONFIG_EFIFO; /* Implicit seek, polling & FIFO enabled */
fdctrl->num_floppies = MAX_FD;
if (fdctrl->dma_chann != -1)
DMA_register_channel(fdctrl->dma_chann, &fdctrl_transfer_handler, fdctrl);
return fdctrl_connect_drives(fdctrl);
}
static const MemoryRegionPortio fdc_portio_list[] = {
{ 1, 5, 1, .read = fdctrl_read, .write = fdctrl_write },
{ 7, 1, 1, .read = fdctrl_read, .write = fdctrl_write },
PORTIO_END_OF_LIST(),
};
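/*
 * Layout note (added for clarity; an assumption based on the usual ISA FDC
 * mapping): relative to the 0x3f0 base programmed below, the first entry
 * covers offsets 1-5 (status and data registers) and the second covers
 * offset 7 (the digital input / configuration control register); offset 6 is
 * skipped because it is traditionally owned by the IDE controller.
 */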
static int isabus_fdc_init1(ISADevice *dev)
{
FDCtrlISABus *isa = DO_UPCAST(FDCtrlISABus, busdev, dev);
FDCtrl *fdctrl = &isa->state;
int iobase = 0x3f0;
int isairq = 6;
int dma_chann = 2;
int ret;
isa_register_portio_list(dev, iobase, fdc_portio_list, fdctrl, "fdc");
isa_init_irq(&isa->busdev, &fdctrl->irq, isairq);
fdctrl->dma_chann = dma_chann;
qdev_set_legacy_instance_id(&dev->qdev, iobase, 2);
ret = fdctrl_init_common(fdctrl);
add_boot_device_path(isa->bootindexA, &dev->qdev, "/floppy@0");
add_boot_device_path(isa->bootindexB, &dev->qdev, "/floppy@1");
return ret;
}
static int sysbus_fdc_init1(SysBusDevice *dev)
{
FDCtrlSysBus *sys = DO_UPCAST(FDCtrlSysBus, busdev, dev);
FDCtrl *fdctrl = &sys->state;
int io;
int ret;
io = cpu_register_io_memory(fdctrl_mem_read, fdctrl_mem_write, fdctrl,
DEVICE_NATIVE_ENDIAN);
sysbus_init_mmio(dev, 0x08, io);
sysbus_init_irq(dev, &fdctrl->irq);
qdev_init_gpio_in(&dev->qdev, fdctrl_handle_tc, 1);
fdctrl->dma_chann = -1;
qdev_set_legacy_instance_id(&dev->qdev, io, 2);
ret = fdctrl_init_common(fdctrl);
return ret;
}
static int sun4m_fdc_init1(SysBusDevice *dev)
{
FDCtrl *fdctrl = &(FROM_SYSBUS(FDCtrlSysBus, dev)->state);
int io;
io = cpu_register_io_memory(fdctrl_mem_read_strict,
fdctrl_mem_write_strict, fdctrl,
DEVICE_NATIVE_ENDIAN);
sysbus_init_mmio(dev, 0x08, io);
sysbus_init_irq(dev, &fdctrl->irq);
qdev_init_gpio_in(&dev->qdev, fdctrl_handle_tc, 1);
fdctrl->sun4m = 1;
qdev_set_legacy_instance_id(&dev->qdev, io, 2);
return fdctrl_init_common(fdctrl);
}
void fdc_get_bs(BlockDriverState *bs[], ISADevice *dev)
{
FDCtrlISABus *isa = DO_UPCAST(FDCtrlISABus, busdev, dev);
FDCtrl *fdctrl = &isa->state;
int i;
for (i = 0; i < MAX_FD; i++) {
bs[i] = fdctrl->drives[i].bs;
}
}
static const VMStateDescription vmstate_isa_fdc = {
.name = "fdc",
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField []) {
VMSTATE_STRUCT(state, FDCtrlISABus, 0, vmstate_fdc, FDCtrl),
VMSTATE_END_OF_LIST()
}
};
static ISADeviceInfo isa_fdc_info = {
.init = isabus_fdc_init1,
.qdev.name = "isa-fdc",
.qdev.fw_name = "fdc",
.qdev.size = sizeof(FDCtrlISABus),
.qdev.no_user = 1,
.qdev.vmsd = &vmstate_isa_fdc,
.qdev.reset = fdctrl_external_reset_isa,
.qdev.props = (Property[]) {
DEFINE_PROP_DRIVE("driveA", FDCtrlISABus, state.drives[0].bs),
DEFINE_PROP_DRIVE("driveB", FDCtrlISABus, state.drives[1].bs),
DEFINE_PROP_INT32("bootindexA", FDCtrlISABus, bootindexA, -1),
DEFINE_PROP_INT32("bootindexB", FDCtrlISABus, bootindexB, -1),
DEFINE_PROP_END_OF_LIST(),
},
};
static const VMStateDescription vmstate_sysbus_fdc = {
.name = "fdc",
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField []) {
VMSTATE_STRUCT(state, FDCtrlSysBus, 0, vmstate_fdc, FDCtrl),
VMSTATE_END_OF_LIST()
}
};
static SysBusDeviceInfo sysbus_fdc_info = {
.init = sysbus_fdc_init1,
.qdev.name = "sysbus-fdc",
.qdev.size = sizeof(FDCtrlSysBus),
.qdev.vmsd = &vmstate_sysbus_fdc,
.qdev.reset = fdctrl_external_reset_sysbus,
.qdev.props = (Property[]) {
DEFINE_PROP_DRIVE("driveA", FDCtrlSysBus, state.drives[0].bs),
DEFINE_PROP_DRIVE("driveB", FDCtrlSysBus, state.drives[1].bs),
DEFINE_PROP_END_OF_LIST(),
},
};
static SysBusDeviceInfo sun4m_fdc_info = {
.init = sun4m_fdc_init1,
.qdev.name = "SUNW,fdtwo",
.qdev.size = sizeof(FDCtrlSysBus),
.qdev.vmsd = &vmstate_sysbus_fdc,
.qdev.reset = fdctrl_external_reset_sysbus,
.qdev.props = (Property[]) {
DEFINE_PROP_DRIVE("drive", FDCtrlSysBus, state.drives[0].bs),
DEFINE_PROP_END_OF_LIST(),
},
};
static void fdc_register_devices(void)
{
isa_qdev_register(&isa_fdc_info);
sysbus_register_withprop(&sysbus_fdc_info);
sysbus_register_withprop(&sun4m_fdc_info);
}
device_init(fdc_register_devices)
| KernelAnalysisPlatform/KlareDbg | tracers/qemu/decaf/hw/fdc.c | C | gpl-3.0 | 61,584 |
package org.springframework.security.oauth2.provider.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabase;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabaseBuilder;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.oauth2.provider.ClientAlreadyExistsException;
import org.springframework.security.oauth2.provider.ClientDetails;
import org.springframework.security.oauth2.provider.NoSuchClientException;
import org.springframework.security.oauth2.provider.client.BaseClientDetails;
import org.springframework.security.oauth2.provider.client.JdbcClientDetailsService;
public class JdbcClientDetailsServiceTests {
private JdbcClientDetailsService service;
private JdbcTemplate jdbcTemplate;
private EmbeddedDatabase db;
private static final String SELECT_SQL = "select client_id, client_secret, resource_ids, scope, authorized_grant_types, web_server_redirect_uri, authorities, access_token_validity, refresh_token_validity from oauth_client_details where client_id=?";
private static final String INSERT_SQL = "insert into oauth_client_details (client_id, client_secret, resource_ids, scope, authorized_grant_types, web_server_redirect_uri, authorities, access_token_validity, refresh_token_validity, autoapprove) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static final String CUSTOM_INSERT_SQL = "insert into ClientDetails (appId, appSecret, resourceIds, scope, grantTypes, redirectUrl, authorities) values (?, ?, ?, ?, ?, ?, ?)";
@Before
public void setUp() throws Exception {
// creates a HSQL in-memory db populated from default scripts
// classpath:schema.sql and classpath:data.sql
db = new EmbeddedDatabaseBuilder().addDefaultScripts().build();
jdbcTemplate = new JdbcTemplate(db);
service = new JdbcClientDetailsService(db);
}
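    // Sketch (added for clarity; an assumption inferred from the SQL constants
    // above rather than the actual classpath:schema.sql): the default script is
    // expected to create something along the lines of
    //   create table oauth_client_details (
    //     client_id varchar(256) primary key, client_secret varchar(256),
    //     resource_ids varchar(256), scope varchar(256),
    //     authorized_grant_types varchar(256), web_server_redirect_uri varchar(256),
    //     authorities varchar(256), access_token_validity integer,
    //     refresh_token_validity integer, additional_information varchar(4096),
    //     autoapprove varchar(256));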
@After
public void tearDown() throws Exception {
db.shutdown();
}
@Test(expected = NoSuchClientException.class)
public void testLoadingClientForNonExistingClientId() {
service.loadClientByClientId("nonExistingClientId");
}
@Test
public void testLoadingClientIdWithNoDetails() {
jdbcTemplate.update(INSERT_SQL, "clientIdWithNoDetails", null, null,
null, null, null, null, null, null, null);
ClientDetails clientDetails = service
.loadClientByClientId("clientIdWithNoDetails");
assertEquals("clientIdWithNoDetails", clientDetails.getClientId());
assertFalse(clientDetails.isSecretRequired());
assertNull(clientDetails.getClientSecret());
assertFalse(clientDetails.isScoped());
assertEquals(0, clientDetails.getScope().size());
assertEquals(2, clientDetails.getAuthorizedGrantTypes().size());
assertNull(clientDetails.getRegisteredRedirectUri());
assertEquals(0, clientDetails.getAuthorities().size());
        assertEquals(null, clientDetails.getAccessTokenValiditySeconds());
        assertEquals(null, clientDetails.getRefreshTokenValiditySeconds());
}
@Test
public void testLoadingClientIdWithAdditionalInformation() {
jdbcTemplate.update(INSERT_SQL, "clientIdWithAddInfo", null, null,
null, null, null, null, null, null, null);
jdbcTemplate
.update("update oauth_client_details set additional_information=? where client_id=?",
"{\"foo\":\"bar\"}", "clientIdWithAddInfo");
ClientDetails clientDetails = service
.loadClientByClientId("clientIdWithAddInfo");
assertEquals("clientIdWithAddInfo", clientDetails.getClientId());
assertEquals(Collections.singletonMap("foo", "bar"),
clientDetails.getAdditionalInformation());
}
@Test
public void testLoadingClientIdWithSingleDetails() {
jdbcTemplate.update(INSERT_SQL, "clientIdWithSingleDetails",
"mySecret", "myResource", "myScope", "myAuthorizedGrantType",
"myRedirectUri", "myAuthority", 100, 200, "true");
ClientDetails clientDetails = service
.loadClientByClientId("clientIdWithSingleDetails");
assertEquals("clientIdWithSingleDetails", clientDetails.getClientId());
assertTrue(clientDetails.isSecretRequired());
assertEquals("mySecret", clientDetails.getClientSecret());
assertTrue(clientDetails.isScoped());
assertEquals(1, clientDetails.getScope().size());
assertEquals("myScope", clientDetails.getScope().iterator().next());
assertEquals(1, clientDetails.getResourceIds().size());
assertEquals("myResource", clientDetails.getResourceIds().iterator()
.next());
assertEquals(1, clientDetails.getAuthorizedGrantTypes().size());
assertEquals("myAuthorizedGrantType", clientDetails
.getAuthorizedGrantTypes().iterator().next());
assertEquals("myRedirectUri", clientDetails.getRegisteredRedirectUri()
.iterator().next());
assertEquals(1, clientDetails.getAuthorities().size());
assertEquals("myAuthority", clientDetails.getAuthorities().iterator()
.next().getAuthority());
assertEquals(new Integer(100),
clientDetails.getAccessTokenValiditySeconds());
assertEquals(new Integer(200),
clientDetails.getRefreshTokenValiditySeconds());
}
@Test
public void testLoadingClientIdWithSingleDetailsInCustomTable() {
jdbcTemplate.update(CUSTOM_INSERT_SQL, "clientIdWithSingleDetails",
"mySecret", "myResource", "myScope", "myAuthorizedGrantType",
"myRedirectUri", "myAuthority");
JdbcClientDetailsService customService = new JdbcClientDetailsService(
db);
customService
.setSelectClientDetailsSql("select appId, appSecret, resourceIds, scope, "
+ "grantTypes, redirectUrl, authorities, access_token_validity, refresh_token_validity, additionalInformation, autoApproveScopes from ClientDetails where appId = ?");
ClientDetails clientDetails = customService
.loadClientByClientId("clientIdWithSingleDetails");
assertEquals("clientIdWithSingleDetails", clientDetails.getClientId());
assertTrue(clientDetails.isSecretRequired());
assertEquals("mySecret", clientDetails.getClientSecret());
assertTrue(clientDetails.isScoped());
assertEquals(1, clientDetails.getScope().size());
assertEquals("myScope", clientDetails.getScope().iterator().next());
assertEquals(1, clientDetails.getResourceIds().size());
assertEquals("myResource", clientDetails.getResourceIds().iterator()
.next());
assertEquals(1, clientDetails.getAuthorizedGrantTypes().size());
assertEquals("myAuthorizedGrantType", clientDetails
.getAuthorizedGrantTypes().iterator().next());
assertEquals("myRedirectUri", clientDetails.getRegisteredRedirectUri()
.iterator().next());
assertEquals(1, clientDetails.getAuthorities().size());
assertEquals("myAuthority", clientDetails.getAuthorities().iterator()
.next().getAuthority());
}
@Test
public void testLoadingClientIdWithMultipleDetails() {
jdbcTemplate.update(INSERT_SQL, "clientIdWithMultipleDetails",
"mySecret", "myResource1,myResource2", "myScope1,myScope2",
"myAuthorizedGrantType1,myAuthorizedGrantType2",
"myRedirectUri1,myRedirectUri2", "myAuthority1,myAuthority2",
100, 200, "read,write");
ClientDetails clientDetails = service
.loadClientByClientId("clientIdWithMultipleDetails");
assertEquals("clientIdWithMultipleDetails", clientDetails.getClientId());
assertTrue(clientDetails.isSecretRequired());
assertEquals("mySecret", clientDetails.getClientSecret());
assertTrue(clientDetails.isScoped());
assertEquals(2, clientDetails.getResourceIds().size());
Iterator<String> resourceIds = clientDetails.getResourceIds()
.iterator();
assertEquals("myResource1", resourceIds.next());
assertEquals("myResource2", resourceIds.next());
assertEquals(2, clientDetails.getScope().size());
Iterator<String> scope = clientDetails.getScope().iterator();
assertEquals("myScope1", scope.next());
assertEquals("myScope2", scope.next());
assertEquals(2, clientDetails.getAuthorizedGrantTypes().size());
Iterator<String> grantTypes = clientDetails.getAuthorizedGrantTypes()
.iterator();
assertEquals("myAuthorizedGrantType1", grantTypes.next());
assertEquals("myAuthorizedGrantType2", grantTypes.next());
assertEquals(2, clientDetails.getRegisteredRedirectUri().size());
Iterator<String> redirectUris = clientDetails
.getRegisteredRedirectUri().iterator();
assertEquals("myRedirectUri1", redirectUris.next());
assertEquals("myRedirectUri2", redirectUris.next());
assertEquals(2, clientDetails.getAuthorities().size());
Iterator<GrantedAuthority> authorities = clientDetails.getAuthorities()
.iterator();
assertEquals("myAuthority1", authorities.next().getAuthority());
assertEquals("myAuthority2", authorities.next().getAuthority());
assertEquals(new Integer(100),
clientDetails.getAccessTokenValiditySeconds());
assertEquals(new Integer(200),
clientDetails.getRefreshTokenValiditySeconds());
assertTrue(clientDetails.isAutoApprove("read"));
}
@Test
public void testAddClientWithNoDetails() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("addedClientIdWithNoDetails");
service.addClientDetails(clientDetails);
Map<String, Object> map = jdbcTemplate.queryForMap(SELECT_SQL,
"addedClientIdWithNoDetails");
assertEquals("addedClientIdWithNoDetails", map.get("client_id"));
assertTrue(map.containsKey("client_secret"));
assertEquals(null, map.get("client_secret"));
}
@Test(expected = ClientAlreadyExistsException.class)
public void testInsertDuplicateClient() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("duplicateClientIdWithNoDetails");
service.addClientDetails(clientDetails);
service.addClientDetails(clientDetails);
}
@Test
public void testUpdateClientSecret() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("newClientIdWithNoDetails");
service.setPasswordEncoder(new PasswordEncoder() {
public boolean matches(CharSequence rawPassword,
String encodedPassword) {
return true;
}
public String encode(CharSequence rawPassword) {
return "BAR";
}
});
service.addClientDetails(clientDetails);
service.updateClientSecret(clientDetails.getClientId(), "foo");
Map<String, Object> map = jdbcTemplate.queryForMap(SELECT_SQL,
"newClientIdWithNoDetails");
assertEquals("newClientIdWithNoDetails", map.get("client_id"));
assertTrue(map.containsKey("client_secret"));
assertEquals("BAR", map.get("client_secret"));
}
@Test
public void testUpdateClientRedirectURI() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("newClientIdWithNoDetails");
service.addClientDetails(clientDetails);
String[] redirectURI = { "http://localhost:8080",
"http://localhost:9090" };
clientDetails.setRegisteredRedirectUri(new HashSet<String>(Arrays
.asList(redirectURI)));
service.updateClientDetails(clientDetails);
Map<String, Object> map = jdbcTemplate.queryForMap(SELECT_SQL,
"newClientIdWithNoDetails");
assertEquals("newClientIdWithNoDetails", map.get("client_id"));
assertTrue(map.containsKey("web_server_redirect_uri"));
assertEquals("http://localhost:8080,http://localhost:9090",
map.get("web_server_redirect_uri"));
}
@Test(expected = NoSuchClientException.class)
public void testUpdateNonExistentClient() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("nosuchClientIdWithNoDetails");
service.updateClientDetails(clientDetails);
}
@Test
public void testRemoveClient() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("deletedClientIdWithNoDetails");
service.addClientDetails(clientDetails);
service.removeClientDetails(clientDetails.getClientId());
int count = jdbcTemplate.queryForObject(
"select count(*) from oauth_client_details where client_id=?",
Integer.class, "deletedClientIdWithNoDetails");
assertEquals(0, count);
}
@Test(expected = NoSuchClientException.class)
public void testRemoveNonExistentClient() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("nosuchClientIdWithNoDetails");
service.removeClientDetails(clientDetails.getClientId());
}
@Test
public void testFindClients() {
BaseClientDetails clientDetails = new BaseClientDetails();
clientDetails.setClientId("aclient");
service.addClientDetails(clientDetails);
int count = service.listClientDetails().size();
assertEquals(1, count);
}
}
| 280455936/spring-security-oauth | spring-security-oauth2/src/test/java/org/springframework/security/oauth2/provider/client/JdbcClientDetailsServiceTests.java | Java | apache-2.0 | 12,900 |
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.annotations;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.LOCAL_VARIABLE;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.PARAMETER;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Denotes that a parameter, field or method return value can never be null.
* <p>
* This is a marker annotation and it has no specific attributes.
*/
@Documented
@Retention(RetentionPolicy.SOURCE)
@Target({METHOD,PARAMETER,LOCAL_VARIABLE,FIELD})
public @interface NonNull {
}
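// Illustrative usage (added for clarity, not part of the original file):
//   public @NonNull String greet(@NonNull String name) { return "hi " + name; }
// A static analyser can then flag any null passed in or returned here.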
| dsyang/buck | third-party/java/dx/src/com/android/annotations/NonNull.java | Java | apache-2.0 | 1,331 |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/client-go/1.5/pkg/api/unversioned"
"k8s.io/client-go/1.5/pkg/api/v1"
"k8s.io/client-go/1.5/pkg/runtime"
versionedwatch "k8s.io/client-go/1.5/pkg/watch/versioned"
)
// GroupName is the group name used in this package
const GroupName = "batch"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Job{},
&JobList{},
&v1.ListOptions{},
&v1.DeleteOptions{},
)
versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
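// Illustrative wiring (added for clarity; an assumption rather than part of
// this file): callers typically register the batch/v1 group on a scheme with
//
//	if err := AddToScheme(api.Scheme); err != nil { /* handle error */ }
//
// after which Job and JobList can be encoded, decoded and watched through it.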
| jnewland/kops | vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/1.5/pkg/apis/batch/v1/register.go | GO | apache-2.0 | 1,437 |
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans.step;
import org.pentaho.di.trans.Trans;
public class StepAdapter implements StepListener {
@Override
public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
}
@Override
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
}
}
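// Illustrative use (added for clarity, not part of the original file): extend
// the adapter and override only the callback you need, e.g.
//   StepListener listener = new StepAdapter() {
//       @Override
//       public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) {
//           // react to step completion here
//       }
//   };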
| codek/pentaho-kettle | engine/src/org/pentaho/di/trans/step/StepAdapter.java | Java | apache-2.0 | 1,230 |
<?php
/*
* This file is part of PHPUnit.
*
* (c) Sebastian Bergmann <sebastian@phpunit.de>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
* A TestRunner for the Command Line Interface (CLI)
* PHP SAPI Module.
*
* @since Class available since Release 3.0.0
*/
class PHPUnit_TextUI_Command
{
/**
* @var array
*/
protected $arguments = [
'listGroups' => false,
'listSuites' => false,
'loader' => null,
'useDefaultConfiguration' => true,
'loadedExtensions' => [],
'notLoadedExtensions' => []
];
/**
* @var array
*/
protected $options = [];
/**
* @var array
*/
protected $longOptions = [
'atleast-version=' => null,
'bootstrap=' => null,
'colors==' => null,
'columns=' => null,
'configuration=' => null,
'coverage-clover=' => null,
'coverage-crap4j=' => null,
'coverage-html=' => null,
'coverage-php=' => null,
'coverage-text==' => null,
'coverage-xml=' => null,
'debug' => null,
'disallow-test-output' => null,
'disallow-resource-usage' => null,
'disallow-todo-tests' => null,
'enforce-time-limit' => null,
'exclude-group=' => null,
'filter=' => null,
'generate-configuration' => null,
'group=' => null,
'help' => null,
'include-path=' => null,
'list-groups' => null,
'list-suites' => null,
'loader=' => null,
'log-json=' => null,
'log-junit=' => null,
'log-tap=' => null,
'log-teamcity=' => null,
'no-configuration' => null,
'no-coverage' => null,
'no-extensions' => null,
'no-globals-backup' => null,
'printer=' => null,
'process-isolation' => null,
'repeat=' => null,
'report-useless-tests' => null,
'reverse-list' => null,
'static-backup' => null,
'stderr' => null,
'stop-on-error' => null,
'stop-on-failure' => null,
'stop-on-warning' => null,
'stop-on-incomplete' => null,
'stop-on-risky' => null,
'stop-on-skipped' => null,
'fail-on-warning' => null,
'fail-on-risky' => null,
'strict-coverage' => null,
'disable-coverage-ignore' => null,
'strict-global-state' => null,
'tap' => null,
'teamcity' => null,
'testdox' => null,
'testdox-group=' => null,
'testdox-exclude-group=' => null,
'testdox-html=' => null,
'testdox-text=' => null,
'testdox-xml=' => null,
'test-suffix=' => null,
'testsuite=' => null,
'verbose' => null,
'version' => null,
'whitelist=' => null
];
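    // Note (added for clarity, based on the usual PHPUnit_Util_Getopt
    // convention rather than anything declared in this file): a trailing '='
    // marks an option that requires a value, '==' (as on 'colors' and
    // 'coverage-text') marks an option whose value is optional, and no
    // suffix marks a plain on/off switch.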
/**
* @var bool
*/
private $versionStringPrinted = false;
/**
* @param bool $exit
*/
public static function main($exit = true)
{
$command = new static;
return $command->run($_SERVER['argv'], $exit);
}
/**
* @param array $argv
* @param bool $exit
*
* @return int
*/
public function run(array $argv, $exit = true)
{
$this->handleArguments($argv);
$runner = $this->createRunner();
if (is_object($this->arguments['test']) &&
$this->arguments['test'] instanceof PHPUnit_Framework_Test) {
$suite = $this->arguments['test'];
} else {
$suite = $runner->getTest(
$this->arguments['test'],
$this->arguments['testFile'],
$this->arguments['testSuffixes']
);
}
if ($this->arguments['listGroups']) {
$this->printVersionString();
print "Available test group(s):\n";
$groups = $suite->getGroups();
sort($groups);
foreach ($groups as $group) {
print " - $group\n";
}
if ($exit) {
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
} else {
return PHPUnit_TextUI_TestRunner::SUCCESS_EXIT;
}
}
if ($this->arguments['listSuites']) {
$this->printVersionString();
print "Available test suite(s):\n";
$configuration = PHPUnit_Util_Configuration::getInstance(
$this->arguments['configuration']
);
$suiteNames = $configuration->getTestSuiteNames();
foreach ($suiteNames as $suiteName) {
print " - $suiteName\n";
}
if ($exit) {
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
} else {
return PHPUnit_TextUI_TestRunner::SUCCESS_EXIT;
}
}
unset($this->arguments['test']);
unset($this->arguments['testFile']);
try {
$result = $runner->doRun($suite, $this->arguments, $exit);
} catch (PHPUnit_Framework_Exception $e) {
print $e->getMessage() . "\n";
}
$return = PHPUnit_TextUI_TestRunner::FAILURE_EXIT;
if (isset($result) && $result->wasSuccessful(false)) {
$return = PHPUnit_TextUI_TestRunner::SUCCESS_EXIT;
} elseif (!isset($result) || $result->errorCount() > 0) {
$return = PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT;
}
if ($exit) {
exit($return);
}
return $return;
}
/**
* Create a TestRunner, override in subclasses.
*
* @return PHPUnit_TextUI_TestRunner
*
* @since Method available since Release 3.6.0
*/
protected function createRunner()
{
return new PHPUnit_TextUI_TestRunner($this->arguments['loader']);
}
/**
* Handles the command-line arguments.
*
     * A child class of PHPUnit_TextUI_Command can hook into the argument
     * parsing by adding the switch(es) to the $longOptions array and pointing
     * to a callback method that handles the switch(es) in the child class,
     * like this:
*
* <code>
* <?php
* class MyCommand extends PHPUnit_TextUI_Command
* {
* public function __construct()
* {
* // my-switch won't accept a value, it's an on/off
* $this->longOptions['my-switch'] = 'myHandler';
* // my-secondswitch will accept a value - note the equals sign
* $this->longOptions['my-secondswitch='] = 'myOtherHandler';
* }
*
* // --my-switch -> myHandler()
* protected function myHandler()
* {
* }
*
* // --my-secondswitch foo -> myOtherHandler('foo')
* protected function myOtherHandler ($value)
* {
* }
*
* // You will also need this - the static keyword in the
* // PHPUnit_TextUI_Command will mean that it'll be
* // PHPUnit_TextUI_Command that gets instantiated,
* // not MyCommand
* public static function main($exit = true)
* {
* $command = new static;
*
* return $command->run($_SERVER['argv'], $exit);
* }
*
* }
* </code>
*
* @param array $argv
*/
protected function handleArguments(array $argv)
{
if (defined('__PHPUNIT_PHAR__')) {
$this->longOptions['check-version'] = null;
$this->longOptions['selfupdate'] = null;
$this->longOptions['self-update'] = null;
$this->longOptions['selfupgrade'] = null;
$this->longOptions['self-upgrade'] = null;
}
try {
$this->options = PHPUnit_Util_Getopt::getopt(
$argv,
'd:c:hv',
array_keys($this->longOptions)
);
} catch (PHPUnit_Framework_Exception $e) {
$this->showError($e->getMessage());
}
foreach ($this->options[0] as $option) {
switch ($option[0]) {
case '--colors':
$this->arguments['colors'] = $option[1] ?: PHPUnit_TextUI_ResultPrinter::COLOR_AUTO;
break;
case '--bootstrap':
$this->arguments['bootstrap'] = $option[1];
break;
case '--columns':
if (is_numeric($option[1])) {
$this->arguments['columns'] = (int) $option[1];
} elseif ($option[1] == 'max') {
$this->arguments['columns'] = 'max';
}
break;
case 'c':
case '--configuration':
$this->arguments['configuration'] = $option[1];
break;
case '--coverage-clover':
$this->arguments['coverageClover'] = $option[1];
break;
case '--coverage-crap4j':
$this->arguments['coverageCrap4J'] = $option[1];
break;
case '--coverage-html':
$this->arguments['coverageHtml'] = $option[1];
break;
case '--coverage-php':
$this->arguments['coveragePHP'] = $option[1];
break;
case '--coverage-text':
if ($option[1] === null) {
$option[1] = 'php://stdout';
}
$this->arguments['coverageText'] = $option[1];
$this->arguments['coverageTextShowUncoveredFiles'] = false;
$this->arguments['coverageTextShowOnlySummary'] = false;
break;
case '--coverage-xml':
$this->arguments['coverageXml'] = $option[1];
break;
case 'd':
$ini = explode('=', $option[1]);
if (isset($ini[0])) {
if (isset($ini[1])) {
ini_set($ini[0], $ini[1]);
} else {
ini_set($ini[0], true);
}
}
break;
case '--debug':
$this->arguments['debug'] = true;
break;
case 'h':
case '--help':
$this->showHelp();
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
break;
case '--filter':
$this->arguments['filter'] = $option[1];
break;
case '--testsuite':
$this->arguments['testsuite'] = $option[1];
break;
case '--generate-configuration':
$this->printVersionString();
printf(
"Generating phpunit.xml in %s\n\n",
getcwd()
);
print 'Bootstrap script (relative to path shown above; default: vendor/autoload.php): ';
$bootstrapScript = trim(fgets(STDIN));
print 'Tests directory (relative to path shown above; default: tests): ';
$testsDirectory = trim(fgets(STDIN));
print 'Source directory (relative to path shown above; default: src): ';
$src = trim(fgets(STDIN));
if ($bootstrapScript == '') {
$bootstrapScript = 'vendor/autoload.php';
}
if ($testsDirectory == '') {
$testsDirectory = 'tests';
}
if ($src == '') {
$src = 'src';
}
$generator = new PHPUnit_Util_ConfigurationGenerator;
file_put_contents(
'phpunit.xml',
$generator->generateDefaultConfiguration(
PHPUnit_Runner_Version::series(),
$bootstrapScript,
$testsDirectory,
$src
)
);
printf(
"\nGenerated phpunit.xml in %s\n",
getcwd()
);
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
break;
case '--group':
$this->arguments['groups'] = explode(',', $option[1]);
break;
case '--exclude-group':
$this->arguments['excludeGroups'] = explode(
',',
$option[1]
);
break;
case '--test-suffix':
$this->arguments['testSuffixes'] = explode(
',',
$option[1]
);
break;
case '--include-path':
$includePath = $option[1];
break;
case '--list-groups':
$this->arguments['listGroups'] = true;
break;
case '--list-suites':
$this->arguments['listSuites'] = true;
break;
case '--printer':
$this->arguments['printer'] = $option[1];
break;
case '--loader':
$this->arguments['loader'] = $option[1];
break;
case '--log-json':
$this->arguments['jsonLogfile'] = $option[1];
break;
case '--log-junit':
$this->arguments['junitLogfile'] = $option[1];
break;
case '--log-tap':
$this->arguments['tapLogfile'] = $option[1];
break;
case '--log-teamcity':
$this->arguments['teamcityLogfile'] = $option[1];
break;
case '--process-isolation':
$this->arguments['processIsolation'] = true;
break;
case '--repeat':
$this->arguments['repeat'] = (int) $option[1];
break;
case '--stderr':
$this->arguments['stderr'] = true;
break;
case '--stop-on-error':
$this->arguments['stopOnError'] = true;
break;
case '--stop-on-failure':
$this->arguments['stopOnFailure'] = true;
break;
case '--stop-on-warning':
$this->arguments['stopOnWarning'] = true;
break;
case '--stop-on-incomplete':
$this->arguments['stopOnIncomplete'] = true;
break;
case '--stop-on-risky':
$this->arguments['stopOnRisky'] = true;
break;
case '--stop-on-skipped':
$this->arguments['stopOnSkipped'] = true;
break;
case '--fail-on-warning':
$this->arguments['failOnWarning'] = true;
break;
case '--fail-on-risky':
$this->arguments['failOnRisky'] = true;
break;
case '--tap':
$this->arguments['printer'] = 'PHPUnit_Util_Log_TAP';
break;
case '--teamcity':
$this->arguments['printer'] = 'PHPUnit_Util_Log_TeamCity';
break;
case '--testdox':
$this->arguments['printer'] = 'PHPUnit_Util_TestDox_ResultPrinter_Text';
break;
case '--testdox-group':
$this->arguments['testdoxGroups'] = explode(
',',
$option[1]
);
break;
case '--testdox-exclude-group':
$this->arguments['testdoxExcludeGroups'] = explode(
',',
$option[1]
);
break;
case '--testdox-html':
$this->arguments['testdoxHTMLFile'] = $option[1];
break;
case '--testdox-text':
$this->arguments['testdoxTextFile'] = $option[1];
break;
case '--testdox-xml':
$this->arguments['testdoxXMLFile'] = $option[1];
break;
case '--no-configuration':
$this->arguments['useDefaultConfiguration'] = false;
break;
case '--no-extensions':
$this->arguments['noExtensions'] = true;
break;
case '--no-coverage':
$this->arguments['noCoverage'] = true;
break;
case '--no-globals-backup':
$this->arguments['backupGlobals'] = false;
break;
case '--static-backup':
$this->arguments['backupStaticAttributes'] = true;
break;
case 'v':
case '--verbose':
$this->arguments['verbose'] = true;
break;
case '--atleast-version':
exit(version_compare(PHPUnit_Runner_Version::id(), $option[1], '>=')
? PHPUnit_TextUI_TestRunner::SUCCESS_EXIT
: PHPUnit_TextUI_TestRunner::FAILURE_EXIT
);
break;
case '--version':
$this->printVersionString();
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
break;
case '--report-useless-tests':
$this->arguments['reportUselessTests'] = true;
break;
case '--strict-coverage':
$this->arguments['strictCoverage'] = true;
break;
case '--disable-coverage-ignore':
$this->arguments['disableCodeCoverageIgnore'] = true;
break;
case '--strict-global-state':
$this->arguments['beStrictAboutChangesToGlobalState'] = true;
break;
case '--disallow-test-output':
$this->arguments['disallowTestOutput'] = true;
break;
case '--disallow-resource-usage':
$this->arguments['beStrictAboutResourceUsageDuringSmallTests'] = true;
break;
case '--enforce-time-limit':
$this->arguments['enforceTimeLimit'] = true;
break;
case '--disallow-todo-tests':
$this->arguments['disallowTodoAnnotatedTests'] = true;
break;
case '--reverse-list':
$this->arguments['reverseList'] = true;
break;
case '--check-version':
$this->handleVersionCheck();
break;
case '--selfupdate':
case '--self-update':
$this->handleSelfUpdate();
break;
case '--selfupgrade':
case '--self-upgrade':
$this->handleSelfUpdate(true);
break;
case '--whitelist':
$this->arguments['whitelist'] = $option[1];
break;
default:
$optionName = str_replace('--', '', $option[0]);
if (isset($this->longOptions[$optionName])) {
$handler = $this->longOptions[$optionName];
} elseif (isset($this->longOptions[$optionName . '='])) {
$handler = $this->longOptions[$optionName . '='];
}
if (isset($handler) && is_callable([$this, $handler])) {
$this->$handler($option[1]);
}
}
}
$this->handleCustomTestSuite();
if (!isset($this->arguments['test'])) {
if (isset($this->options[1][0])) {
$this->arguments['test'] = $this->options[1][0];
}
if (isset($this->options[1][1])) {
$this->arguments['testFile'] = realpath($this->options[1][1]);
} else {
$this->arguments['testFile'] = '';
}
if (isset($this->arguments['test']) &&
is_file($this->arguments['test']) &&
substr($this->arguments['test'], -5, 5) != '.phpt') {
$this->arguments['testFile'] = realpath($this->arguments['test']);
$this->arguments['test'] = substr($this->arguments['test'], 0, strrpos($this->arguments['test'], '.'));
}
}
if (!isset($this->arguments['testSuffixes'])) {
$this->arguments['testSuffixes'] = ['Test.php', '.phpt'];
}
if (isset($includePath)) {
ini_set(
'include_path',
$includePath . PATH_SEPARATOR . ini_get('include_path')
);
}
if ($this->arguments['loader'] !== null) {
$this->arguments['loader'] = $this->handleLoader($this->arguments['loader']);
}
if (isset($this->arguments['configuration']) &&
is_dir($this->arguments['configuration'])) {
$configurationFile = $this->arguments['configuration'] . '/phpunit.xml';
if (file_exists($configurationFile)) {
$this->arguments['configuration'] = realpath(
$configurationFile
);
} elseif (file_exists($configurationFile . '.dist')) {
$this->arguments['configuration'] = realpath(
$configurationFile . '.dist'
);
}
} elseif (!isset($this->arguments['configuration']) &&
$this->arguments['useDefaultConfiguration']) {
if (file_exists('phpunit.xml')) {
$this->arguments['configuration'] = realpath('phpunit.xml');
} elseif (file_exists('phpunit.xml.dist')) {
$this->arguments['configuration'] = realpath(
'phpunit.xml.dist'
);
}
}
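        // Resolution order recap (added for clarity): an explicit -c/--configuration
        // wins; if it names a directory, phpunit.xml inside it is preferred over
        // phpunit.xml.dist; otherwise, when default configuration is allowed,
        // phpunit.xml in the working directory is tried before phpunit.xml.dist.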
if (isset($this->arguments['configuration'])) {
try {
$configuration = PHPUnit_Util_Configuration::getInstance(
$this->arguments['configuration']
);
} catch (Throwable $e) {
print $e->getMessage() . "\n";
exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
} catch (Exception $e) {
print $e->getMessage() . "\n";
exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
}
$phpunitConfiguration = $configuration->getPHPUnitConfiguration();
$configuration->handlePHPConfiguration();
/*
* Issue #1216
*/
if (isset($this->arguments['bootstrap'])) {
$this->handleBootstrap($this->arguments['bootstrap']);
} elseif (isset($phpunitConfiguration['bootstrap'])) {
$this->handleBootstrap($phpunitConfiguration['bootstrap']);
}
/*
* Issue #657
*/
if (isset($phpunitConfiguration['stderr']) && ! isset($this->arguments['stderr'])) {
$this->arguments['stderr'] = $phpunitConfiguration['stderr'];
}
if (isset($phpunitConfiguration['extensionsDirectory']) && !isset($this->arguments['noExtensions']) && extension_loaded('phar')) {
$this->handleExtensions($phpunitConfiguration['extensionsDirectory']);
}
if (isset($phpunitConfiguration['columns']) && ! isset($this->arguments['columns'])) {
$this->arguments['columns'] = $phpunitConfiguration['columns'];
}
if (!isset($this->arguments['printer']) && isset($phpunitConfiguration['printerClass'])) {
if (isset($phpunitConfiguration['printerFile'])) {
$file = $phpunitConfiguration['printerFile'];
} else {
$file = '';
}
$this->arguments['printer'] = $this->handlePrinter(
$phpunitConfiguration['printerClass'],
$file
);
}
if (isset($phpunitConfiguration['testSuiteLoaderClass'])) {
if (isset($phpunitConfiguration['testSuiteLoaderFile'])) {
$file = $phpunitConfiguration['testSuiteLoaderFile'];
} else {
$file = '';
}
$this->arguments['loader'] = $this->handleLoader(
$phpunitConfiguration['testSuiteLoaderClass'],
$file
);
}
if (!isset($this->arguments['test'])) {
$testSuite = $configuration->getTestSuiteConfiguration(isset($this->arguments['testsuite']) ? $this->arguments['testsuite'] : null);
if ($testSuite !== null) {
$this->arguments['test'] = $testSuite;
}
}
} elseif (isset($this->arguments['bootstrap'])) {
$this->handleBootstrap($this->arguments['bootstrap']);
}
if (isset($this->arguments['printer']) &&
is_string($this->arguments['printer'])) {
$this->arguments['printer'] = $this->handlePrinter($this->arguments['printer']);
}
if (isset($this->arguments['test']) && is_string($this->arguments['test']) && substr($this->arguments['test'], -5, 5) == '.phpt') {
$test = new PHPUnit_Extensions_PhptTestCase($this->arguments['test']);
$this->arguments['test'] = new PHPUnit_Framework_TestSuite;
$this->arguments['test']->addTest($test);
}
if (!isset($this->arguments['test']) ||
(isset($this->arguments['testDatabaseLogRevision']) && !isset($this->arguments['testDatabaseDSN']))) {
$this->showHelp();
exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
}
}
/**
* Handles the loading of the PHPUnit_Runner_TestSuiteLoader implementation.
*
* @param string $loaderClass
* @param string $loaderFile
*
* @return PHPUnit_Runner_TestSuiteLoader
*/
protected function handleLoader($loaderClass, $loaderFile = '')
{
if (!class_exists($loaderClass, false)) {
if ($loaderFile == '') {
$loaderFile = PHPUnit_Util_Filesystem::classNameToFilename(
$loaderClass
);
}
$loaderFile = stream_resolve_include_path($loaderFile);
if ($loaderFile) {
require $loaderFile;
}
}
if (class_exists($loaderClass, false)) {
$class = new ReflectionClass($loaderClass);
if ($class->implementsInterface('PHPUnit_Runner_TestSuiteLoader') &&
$class->isInstantiable()) {
return $class->newInstance();
}
}
if ($loaderClass == 'PHPUnit_Runner_StandardTestSuiteLoader') {
return;
}
$this->showError(
sprintf(
'Could not use "%s" as loader.',
$loaderClass
)
);
}
/**
* Handles the loading of the PHPUnit_Util_Printer implementation.
*
* @param string $printerClass
* @param string $printerFile
*
* @return PHPUnit_Util_Printer|string
*/
protected function handlePrinter($printerClass, $printerFile = '')
{
if (!class_exists($printerClass, false)) {
if ($printerFile == '') {
$printerFile = PHPUnit_Util_Filesystem::classNameToFilename(
$printerClass
);
}
$printerFile = stream_resolve_include_path($printerFile);
if ($printerFile) {
require $printerFile;
}
}
if (class_exists($printerClass)) {
$class = new ReflectionClass($printerClass);
if ($class->implementsInterface('PHPUnit_Framework_TestListener') &&
$class->isSubclassOf('PHPUnit_Util_Printer') &&
$class->isInstantiable()) {
if ($class->isSubclassOf('PHPUnit_TextUI_ResultPrinter')) {
return $printerClass;
}
$outputStream = isset($this->arguments['stderr']) ? 'php://stderr' : null;
return $class->newInstance($outputStream);
}
}
$this->showError(
sprintf(
'Could not use "%s" as printer.',
$printerClass
)
);
}
/**
* Loads a bootstrap file.
*
* @param string $filename
*/
protected function handleBootstrap($filename)
{
try {
PHPUnit_Util_Fileloader::checkAndLoad($filename);
} catch (PHPUnit_Framework_Exception $e) {
$this->showError($e->getMessage());
}
}
/**
* @since Method available since Release 4.0.0
*/
protected function handleSelfUpdate($upgrade = false)
{
$this->printVersionString();
if ($upgrade) {
print "Warning: Deprecated --self-upgrade used\n\n";
} else {
print "Warning: Deprecated --self-update used\n\n";
}
$localFilename = realpath($_SERVER['argv'][0]);
if (!is_writable($localFilename)) {
print 'No write permission to update ' . $localFilename . "\n";
exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
}
if (!extension_loaded('openssl')) {
print "The OpenSSL extension is not loaded.\n";
exit(PHPUnit_TextUI_TestRunner::EXCEPTION_EXIT);
}
if (!$upgrade) {
$remoteFilename = sprintf(
'https://phar.phpunit.de/phpunit-%s.phar',
file_get_contents(
sprintf(
'https://phar.phpunit.de/latest-version-of/phpunit-%s',
PHPUnit_Runner_Version::series()
)
)
);
} else {
$remoteFilename = sprintf(
'https://phar.phpunit.de/phpunit%s.phar',
PHPUnit_Runner_Version::getReleaseChannel()
);
}
$tempFilename = tempnam(sys_get_temp_dir(), 'phpunit') . '.phar';
// Workaround for https://bugs.php.net/bug.php?id=65538
$caFile = dirname($tempFilename) . '/ca.pem';
copy(__PHPUNIT_PHAR_ROOT__ . '/ca.pem', $caFile);
print 'Updating the PHPUnit PHAR ... ';
$options = [
'ssl' => [
'allow_self_signed' => false,
'cafile' => $caFile,
'verify_peer' => true
]
];
file_put_contents(
$tempFilename,
file_get_contents(
$remoteFilename,
false,
stream_context_create($options)
)
);
chmod($tempFilename, 0777 & ~umask());
try {
$phar = new Phar($tempFilename);
unset($phar);
rename($tempFilename, $localFilename);
unlink($caFile);
} catch (Throwable $_e) {
$e = $_e;
} catch (Exception $_e) {
$e = $_e;
}
if (isset($e)) {
unlink($caFile);
unlink($tempFilename);
print " done\n\n" . $e->getMessage() . "\n";
exit(2);
}
print " done\n";
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
}
/**
* @since Method available since Release 4.8.0
*/
protected function handleVersionCheck()
{
$this->printVersionString();
$latestVersion = file_get_contents('https://phar.phpunit.de/latest-version-of/phpunit');
$isOutdated = version_compare($latestVersion, PHPUnit_Runner_Version::id(), '>');
if ($isOutdated) {
print "You are not using the latest version of PHPUnit.\n";
print 'Use "phpunit --self-upgrade" to install PHPUnit ' . $latestVersion . "\n";
} else {
print "You are using the latest version of PHPUnit.\n";
}
exit(PHPUnit_TextUI_TestRunner::SUCCESS_EXIT);
}
/**
* Show the help message.
*/
protected function showHelp()
{
$this->printVersionString();
print <<<EOT
Usage: phpunit [options] UnitTest [UnitTest.php]
phpunit [options] <directory>
Code Coverage Options:
--coverage-clover <file> Generate code coverage report in Clover XML format.
--coverage-crap4j <file> Generate code coverage report in Crap4J XML format.
--coverage-html <dir> Generate code coverage report in HTML format.
--coverage-php <file> Export PHP_CodeCoverage object to file.
--coverage-text=<file> Generate code coverage report in text format.
Default: Standard output.
--coverage-xml <dir> Generate code coverage report in PHPUnit XML format.
--whitelist <dir> Whitelist <dir> for code coverage analysis.
--disable-coverage-ignore Disable annotations for ignoring code coverage.
Logging Options:
--log-junit <file> Log test execution in JUnit XML format to file.
--log-teamcity <file> Log test execution in TeamCity format to file.
--testdox-html <file> Write agile documentation in HTML format to file.
--testdox-text <file> Write agile documentation in Text format to file.
--testdox-xml <file> Write agile documentation in XML format to file.
--reverse-list Print defects in reverse order
Test Selection Options:
--filter <pattern> Filter which tests to run.
--testsuite <name> Filter which testsuite to run.
--group ... Only runs tests from the specified group(s).
--exclude-group ... Exclude tests from the specified group(s).
--list-groups List available test groups.
--list-suites List available test suites.
 --test-suffix ... Only search for tests in files with specified
suffix(es). Default: Test.php,.phpt
Test Execution Options:
--report-useless-tests Be strict about tests that do not test anything.
--strict-coverage Be strict about @covers annotation usage.
--strict-global-state Be strict about changes to global state
--disallow-test-output Be strict about output during tests.
--disallow-resource-usage Be strict about resource usage during small tests.
--enforce-time-limit Enforce time limit based on test size.
--disallow-todo-tests Disallow @todo-annotated tests.
--process-isolation Run each test in a separate PHP process.
--no-globals-backup Do not backup and restore \$GLOBALS for each test.
--static-backup Backup and restore static attributes for each test.
--colors=<flag> Use colors in output ("never", "auto" or "always").
--columns <n> Number of columns to use for progress output.
--columns max Use maximum number of columns for progress output.
--stderr Write to STDERR instead of STDOUT.
--stop-on-error Stop execution upon first error.
--stop-on-failure Stop execution upon first error or failure.
--stop-on-warning Stop execution upon first warning.
--stop-on-risky Stop execution upon first risky test.
--stop-on-skipped Stop execution upon first skipped test.
--stop-on-incomplete Stop execution upon first incomplete test.
--fail-on-warning Treat tests with warnings as failures.
--fail-on-risky Treat risky tests as failures.
-v|--verbose Output more verbose information.
--debug Display debugging information during test execution.
--loader <loader> TestSuiteLoader implementation to use.
--repeat <times> Runs the test(s) repeatedly.
--teamcity Report test execution progress in TeamCity format.
--testdox Report test execution progress in TestDox format.
--testdox-group Only include tests from the specified group(s).
--testdox-exclude-group Exclude tests from the specified group(s).
--printer <printer> TestListener implementation to use.
Configuration Options:
--bootstrap <file> A "bootstrap" PHP file that is run before the tests.
-c|--configuration <file> Read configuration from XML file.
--no-configuration Ignore default configuration file (phpunit.xml).
--no-coverage Ignore code coverage configuration.
--no-extensions Do not load PHPUnit extensions.
--include-path <path(s)> Prepend PHP's include_path with given path(s).
-d key[=value] Sets a php.ini value.
--generate-configuration Generate configuration file with suggested settings.
Miscellaneous Options:
-h|--help Prints this usage information.
--version Prints the version and exits.
--atleast-version <min> Checks that version is greater than min and exits.
EOT;
if (defined('__PHPUNIT_PHAR__')) {
print "\n --check-version Check whether PHPUnit is the latest version.";
}
}
/**
* Custom callback for test suite discovery.
*/
protected function handleCustomTestSuite()
{
}
private function printVersionString()
{
if ($this->versionStringPrinted) {
return;
}
print PHPUnit_Runner_Version::getVersionString() . "\n\n";
$this->versionStringPrinted = true;
}
/**
* @param string $message
*/
private function showError($message)
{
$this->printVersionString();
print $message . "\n";
exit(PHPUnit_TextUI_TestRunner::FAILURE_EXIT);
}
/**
* @param string $directory
*/
private function handleExtensions($directory)
{
$facade = new File_Iterator_Facade;
foreach ($facade->getFilesAsArray($directory, '.phar') as $file) {
require $file;
$this->arguments['loadedExtensions'][] = $file;
}
}
}
|
illyaAvdeuk/bella-v
|
vendor/phpunit/phpunit/src/TextUI/Command.php
|
PHP
|
bsd-3-clause
| 40,388
|
/**
* ag-grid - Advanced Data Grid / Data Table supporting Javascript / React / AngularJS / Web Components
* @version v5.0.0-alpha.4
* @link http://www.ag-grid.com/
* @license MIT
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var utils_1 = require("../utils");
var context_1 = require("../context/context");
var cellRendererFactory_1 = require("./cellRendererFactory");
/** Class to use a cellRenderer. */
var CellRendererService = (function () {
function CellRendererService() {
}
    /** Uses a cellRenderer and returns the cellRenderer instance if it is a class implementing ICellRenderer.
     * @cellRendererKey: The cellRenderer to use. Can be: a) a class that we call 'new' on, b) a function that we call,
     * or c) a string that we use to look up the cellRenderer.
     * @params: The params to pass to the cellRenderer if it's a function or a class.
     * @eTarget: The DOM element we will put the rendered result into.
     * @return: For option a), the created class instance. */
CellRendererService.prototype.useCellRenderer = function (cellRendererKey, eTarget, params) {
var cellRenderer = this.lookUpCellRenderer(cellRendererKey);
if (utils_1.Utils.missing(cellRenderer)) {
            // this is a bug in the user's config: they specified a cellRenderer that doesn't exist,
            // the factory has already printed to the console, so here we just skip
return;
}
var resultFromRenderer;
var iCellRendererInstance = null;
this.checkForDeprecatedItems(cellRenderer);
// we check if the class has the 'getGui' method to know if it's a component
var rendererIsAComponent = this.doesImplementICellRenderer(cellRenderer);
// if it's a component, we create and initialise it
if (rendererIsAComponent) {
var CellRendererClass = cellRenderer;
iCellRendererInstance = new CellRendererClass();
this.context.wireBean(iCellRendererInstance);
if (iCellRendererInstance.init) {
iCellRendererInstance.init(params);
}
resultFromRenderer = iCellRendererInstance.getGui();
}
else {
// otherwise it's a function, so we just use it
var cellRendererFunc = cellRenderer;
resultFromRenderer = cellRendererFunc(params);
}
if (resultFromRenderer === null || resultFromRenderer === '') {
return;
}
if (utils_1.Utils.isNodeOrElement(resultFromRenderer)) {
// a dom node or element was returned, so add child
eTarget.appendChild(resultFromRenderer);
}
else {
// otherwise assume it was html, so just insert
eTarget.innerHTML = resultFromRenderer;
}
return iCellRendererInstance;
};
CellRendererService.prototype.checkForDeprecatedItems = function (cellRenderer) {
if (cellRenderer && cellRenderer.renderer) {
console.warn('ag-grid: colDef.cellRenderer should not be an object, it should be a string, function or class. this ' +
'changed in v4.1.x, please check the documentation on Cell Rendering, or if you are doing grouping, look at the grouping examples.');
}
};
CellRendererService.prototype.doesImplementICellRenderer = function (cellRenderer) {
        // see if the class has a prototype that defines a getGui method. this is very rough,
        // but javascript doesn't have types, so this is the only way!
return cellRenderer.prototype && 'getGui' in cellRenderer.prototype;
};
CellRendererService.prototype.lookUpCellRenderer = function (cellRendererKey) {
if (typeof cellRendererKey === 'string') {
return this.cellRendererFactory.getCellRenderer(cellRendererKey);
}
else {
return cellRendererKey;
}
};
__decorate([
context_1.Autowired('cellRendererFactory'),
__metadata('design:type', cellRendererFactory_1.CellRendererFactory)
], CellRendererService.prototype, "cellRendererFactory", void 0);
__decorate([
context_1.Autowired('context'),
__metadata('design:type', context_1.Context)
], CellRendererService.prototype, "context", void 0);
CellRendererService = __decorate([
context_1.Bean('cellRendererService'),
__metadata('design:paramtypes', [])
], CellRendererService);
return CellRendererService;
})();
exports.CellRendererService = CellRendererService;
|
dlueth/cdnjs
|
ajax/libs/ag-grid/5.0.0-alpha.4/lib/rendering/cellRendererService.js
|
JavaScript
|
mit
| 5,277
|
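/* Workaround header for non-Windows builds: map the Windows TCHAR entry point
 * (_tmain) and character type (_TCHAR) to their plain main / char equivalents. */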
#define _tmain main
#define _TCHAR char
|
gogoba/triglav
|
src/main/cmake/workarounds/tchar/tchar.h
|
C
|
gpl-2.0
| 39
|
/*
** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/include/nic_cmd_event.h#1 $
*/
/*! \file "nic_cmd_event.h"
 \brief This file contains the declarations of the WLAN OID processing routines
 of the Windows driver for MediaTek Inc. 802.11 Wireless LAN Adapters.
*/
/*
** $Log: nic_cmd_event.h $
**
** 03 11 2014 eason.tsai
** [ALPS01070904] [Need Patch] [Volunteer Patch][MT6630][Driver]MT6630 Wi-Fi Patch
** update rssi command
**
** 01 15 2014 eason.tsai
** [ALPS01070904] [Need Patch] [Volunteer Patch][MT6630][Driver]MT6630 Wi-Fi Patch
** Merging
**
** //ALPS_SW/DEV/ALPS.JB2.MT6630.DEV/alps/mediatek/kernel/drivers/combo/drv_wlan/mt6630/wlan/...
**
** to //ALPS_SW/TRUNK/KK/alps/mediatek/kernel/drivers/combo/drv_wlan/mt6630/wlan/...
**
** 12 27 2013 eason.tsai
** [ALPS01070904] [Need Patch] [Volunteer Patch][MT6630][Driver]MT6630 Wi-Fi Patch
** update code for ICAP & nvram
**
** 09 03 2013 tsaiyuan.hsu
** [BORA00002775] MT6630 unified MAC ROAMING
** 1. modify roaming fsm.
** 2. add roaming control.
**
** 08 26 2013 eason.tsai
** [BORA00002255] [MT6630 Wi-Fi][Driver] develop
** revise host code for ICAP structure
**
** 08 23 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Add GTK re-key driver handle function
**
** 08 20 2013 eason.tsai
** [BORA00002255] [MT6630 Wi-Fi][Driver] develop
** Icap function
**
** 08 15 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** enlarge match_ssid_num to 16 for PNO support
**
** 08 13 2013 terry.wu
** [BORA00002207] [MT6630 Wi-Fi] TXM & MQM Implementation
** 1. Assign TXD.PID by wlan index
** 2. Some bug fix
**
** 08 13 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Support the AP mode with security (TKIP, CCMP)
**
** 08 09 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** 1. integrate scheduled scan functionality
** 2. condition compilation for linux-3.4 & linux-3.8 compatibility
** 3. correct CMD queue access to reduce lock scope
**
** 08 05 2013 terry.wu
** [BORA00002207] [MT6630 Wi-Fi] TXM & MQM Implementation
** 1. Add SW rate definition
** 2. Add HW default rate selection logic from FW
**
** 07 28 2013 eddie.chen
** [BORA00002450] [WIFISYS][MT6630] New design for mt6630
** Save the compileflag and featureflag
**
** 07 23 2013 yuche.tsai
** [BORA00002398] [MT6630][Volunteer Patch] P2P Driver Re-Design for Multiple BSS support
** Update driver for Hot-Spot Role port.
**
** 07 22 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Handle the add key done event
**
** 07 17 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** fix and modify some security code
**
** 07 02 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Refine security BMC wlan index assign
** Fix some compiling warning
**
** 06 19 2013 cp.wu
** [BORA00002227] [MT6630 Wi-Fi][Driver] Update for Makefile and HIFSYS modifications
** update MAC address handling logic
**
** 06 18 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** Get MAC address by NIC_CAPABILITY command
**
** 06 18 2013 terry.wu
** [BORA00002207] [MT6630 Wi-Fi] TXM & MQM Implementation
** Update for 1st connection
**
** 06 14 2013 eddie.chen
** [BORA00002450] [WIFISYS][MT6630] New design for mt6630
** Add full mcsset. Add more vht info in sta update
**
** 03 27 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** add default ket handler
**
** 03 20 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Add the security code for wlan table assign operation
**
** 03 15 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Modify some security part code
**
** 03 13 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** .remove non-used code
**
** 03 12 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** .
**
** 03 08 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** Modify code for security design
**
** 03 07 2013 yuche.tsai
** [BORA00002398] [MT6630][Volunteer Patch] P2P Driver Re-Design for Multiple BSS support
** Add wlan_p2p.c, but still need to FIX many place.
**
** 03 06 2013 wh.su
** [BORA00002446] [MT6630] [Wi-Fi] [Driver] Update the security function code
** submit some code related with security.
**
** 02 18 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** New feature to remove all sta records by BssIndex
**
** 02 05 2013 yuche.tsai
** [BORA00002398] [MT6630][Volunteer Patch] P2P Driver Re-Design for Multiple BSS support
** Code update for FW development.
**
** 02 01 2013 cp.wu
** [BORA00002227] [MT6630 Wi-Fi][Driver] Update for Makefile and HIFSYS modifications
** 1. eliminate MT5931/MT6620/MT6628 logic
** 2. add firmware download control sequence
**
** 01 28 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** Sync CMD format
**
** 01 24 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** Mark some code segment for compiling error
**
** 01 22 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** .add driver side NLO state machine
**
** 01 22 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** modification for ucBssIndex migration
**
** 01 21 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** 1. Create rP2pDevInfo structure
** 2. Support 80/160 MHz channel bandwidth for channel privilege
**
** 01 17 2013 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** Use ucBssIndex to replace eNetworkTypeIndex
**
** 01 07 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** update NLO_NETWORK definition to add padding for 4-bytes alignment
**
** 01 07 2013 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** correct CMD_NLO_REQ command format
**
** 11 19 2012 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** update SCAN command definition for specifying number of probe request frame.
**
** 11 06 2012 eason.tsai
** [BORA00002255] [MT6630 Wi-Fi][Driver] develop
** .
**
** 11 06 2012 cp.wu
** [BORA00002253] [MT6630 Wi-Fi][Driver][Firmware] Add NLO and timeout mechanism to SCN module
** add interface for NLO and modified SCAN support
**
** 11 01 2012 cp.wu
** [BORA00002227] [MT6630 Wi-Fi][Driver] Update for Makefile and HIFSYS modifications
** update to MT6630 CMD/EVENT definitions.
**
** 09 17 2012 cm.chang
** [BORA00002149] [MT6630 Wi-Fi] Initial software development
** Duplicate source from MT6620 v2.3 driver branch
** (Davinci label: MT6620_WIFI_Driver_V2_3_120913_1942_As_MT6630_Base)
*
* 03 29 2012 eason.tsai
* [WCXRP00001216] [MT6628 Wi-Fi][Driver]add conditional define
* add conditional define.
*
* 03 04 2012 eason.tsai
* NULL
* modify the cal fail report code.
*
* 01 06 2012 wh.su
* [WCXRP00001153] [MT6620 Wi-Fi][Driver] Adding the get_ch_list and set_tx_power proto type function
* redefine the CMD_ID_SET_TXPWR_CTRL value.
*
* 01 05 2012 wh.su
* [WCXRP00001153] [MT6620 Wi-Fi][Driver] Adding the get_ch_list and set_tx_power proto type function
* Adding the related ioctl / wlan oid function to set the Tx power cfg.
*
* 11 30 2011 cm.chang
* [WCXRP00001128] [MT5931 Wi-Fi][FW] Update BB/RF setting based on RF doc v0.7 for LGE spec
* 1. Add a new CMD for driver to set device mode
* 2. Update calibration parameters
*
* 11 19 2011 yuche.tsai
* NULL
* Update RSSI for P2P.
*
* 11 18 2011 yuche.tsai
* NULL
* CONFIG P2P support RSSI query, default turned off.
*
* 11 10 2011 eddie.chen
* [WCXRP00001096] [MT6620 Wi-Fi][Driver/FW] Enhance the log function (xlog)
* Add TX_DONE status detail information.
*
* 11 08 2011 tsaiyuan.hsu
* [WCXRP00001083] [MT6620 Wi-Fi][DRV]] dump debug counter or frames when debugging is triggered
 * check if CFG_SUPPORT_SWCR is defined to avoid compiler error.
*
* 11 07 2011 tsaiyuan.hsu
* [WCXRP00001083] [MT6620 Wi-Fi][DRV]] dump debug counter or frames when debugging is triggered
* add debug counters and periodically dump counters for debugging.
*
* 10 26 2011 cp.wu
* [WCXRP00001065] [MT6620 Wi-Fi][MT5931][FW][DRV] Adding parameter for controlling minimum channel dwell time for scanning
* add interface for control minimum channel dwell time for scanning.
*
* 09 20 2011 cm.chang
* [WCXRP00000997] [MT6620 Wi-Fi][Driver][FW] Handle change of BSS preamble type and slot time
* New CMD definition about RLM parameters
*
* 08 31 2011 cm.chang
* [WCXRP00000969] [MT6620 Wi-Fi][Driver][FW] Channel list for 5G band based on country code
* .
*
* 08 25 2011 chinghwa.yu
* [WCXRP00000612] [MT6620 Wi-Fi] [FW] CSD update SWRDD algorithm
* Add DFS switch.
*
* 08 24 2011 chinghwa.yu
* [WCXRP00000612] [MT6620 Wi-Fi] [FW] CSD update SWRDD algorithm
* Update RDD test mode cases.
*
* 08 15 2011 cp.wu
* [WCXRP00000851] [MT6628 Wi-Fi][Driver] Add HIFSYS related definition to driver source tree
* add MT6628-specific definitions.
*
* 08 11 2011 cp.wu
* [WCXRP00000830] [MT6620 Wi-Fi][Firmware] Use MDRDY counter to detect empty channel for shortening scan time
* sparse channel detection:
* driver: collect sparse channel information with scan-done event
*
* 08 09 2011 cp.wu
* [WCXRP00000702] [MT5931][Driver] Modify initialization sequence for E1 ASIC[WCXRP00000913] [MT6620 Wi-Fi] create repository of source code dedicated for MT6620 E6 ASIC
* add CCK-DSSS TX-PWR control field in NVRAM and CMD definition for MT5931-MP
*
* 08 03 2011 terry.wu
* [WCXRP00000899] [MT6620] [FW] Reply probe rsp in FW for hotspot mode
* Reply Probe Rsp in FW for Hotspot Mode.
*
*
*
* 08 03 2011 terry.wu
* [WCXRP00000899] [MT6620] [FW] Reply probe rsp in FW for hotspot mode
* Reply Probe Rsp in FW for Hotspot Mode.
*
*
* 08 03 2011 terry.wu
* [WCXRP00000899] [MT6620] [FW] Reply probe rsp in FW for hotspot mode
* Reply Probe Rsp in FW for Hotspot Mode.
*
* 08 03 2011 terry.wu
* [WCXRP00000899] [MT6620] [FW] Reply probe rsp in FW for hotspot mode
* Reply Probe Rsp in FW for Hotspot Mode.
*
* 07 28 2011 chinghwa.yu
* [WCXRP00000063] Update BCM CoEx design and settings
* Add BWCS cmd and event.
*
* 07 22 2011 jeffrey.chang
* [WCXRP00000864] [MT5931] Add command to adjust OSC stable time
* add osc stable time command structure
*
* 07 22 2011 jeffrey.chang
* [WCXRP00000864] [MT5931] Add command to adjust OSC stable time
* modify driver to set OSC stable time after f/w download
*
* 07 18 2011 chinghwa.yu
* [WCXRP00000063] Update BCM CoEx design and settings[WCXRP00000612] [MT6620 Wi-Fi] [FW] CSD update SWRDD algorithm
* Add CMD/Event for RDD and BWCS.
*
* 07 18 2011 cp.wu
* [WCXRP00000858] [MT5931][Driver][Firmware] Add support for scan to search for more than one SSID in a single scanning request
* add framework in driver domain for supporting new SCAN_REQ_V2 for more than 1 SSID support as well as uProbeDelay in NDIS 6.x driver model
*
* 06 23 2011 cp.wu
* [WCXRP00000812] [MT6620 Wi-Fi][Driver] not show NVRAM when there is no valid MAC address in NVRAM content
* check with firmware for valid MAC address.
*
* 06 23 2011 cp.wu
* [WCXRP00000798] [MT6620 Wi-Fi][Firmware] Follow-ups for WAPI frequency offset workaround in firmware SCN module
* change parameter name from PeerAddr to BSSID
*
* 06 20 2011 cp.wu
* [WCXRP00000798] [MT6620 Wi-Fi][Firmware] Follow-ups for WAPI frequency offset workaround in firmware SCN module
* 1. specify target's BSSID when requesting channel privilege.
* 2. pass BSSID information to firmware domain
*
* 06 09 2011 tsaiyuan.hsu
* [WCXRP00000760] [MT5931 Wi-Fi][FW] Refine rxmHandleMacRxDone to reduce code size
* move send_auth at rxmHandleMacRxDone in firmware to driver to reduce code size.
*
* 05 27 2011 cp.wu
* [WCXRP00000749] [MT6620 Wi-Fi][Driver] Add band edge tx power control to Wi-Fi NVRAM
* invoke CMD_ID_SET_EDGE_TXPWR_LIMIT when there is valid data exist in NVRAM content.
*
* 04 18 2011 terry.wu
* [WCXRP00000660] [MT6620 Wi-Fi][Driver] Remove flag CFG_WIFI_DIRECT_MOVED
* Remove flag CFG_WIFI_DIRECT_MOVED.
*
* 03 31 2011 chinglan.wang
* [WCXRP00000613] [MT6620 Wi-Fi] [FW] [Driver] BssInfo can get the security mode which is WPA/WPA2/WAPI or not.
* .
*
* 03 18 2011 cm.chang
* [WCXRP00000576] [MT6620 Wi-Fi][Driver][FW] Remove P2P compile option in scan req/cancel command
* As CR title
*
* 03 17 2011 yarco.yang
* [WCXRP00000569] [MT6620 Wi-Fi][F/W][Driver] Set multicast address support current network usage
* .
*
* 03 07 2011 wh.su
* [WCXRP00000506] [MT6620 Wi-Fi][Driver][FW] Add Security check related code
* rename the define to anti_pviracy.
*
* 03 05 2011 wh.su
* [WCXRP00000506] [MT6620 Wi-Fi][Driver][FW] Add Security check related code
 * add the code to get the check response and indicate it to the app.
*
* 03 02 2011 wh.su
* [WCXRP00000506] [MT6620 Wi-Fi][Driver][FW] Add Security check related code
* Add Security check related code.
*
* 03 02 2011 george.huang
* [WCXRP00000504] [MT6620 Wi-Fi][FW] Support Sigma CAPI for power saving related command
* Support UAPSD/OppPS/NoA parameter setting
*
* 02 16 2011 cm.chang
* [WCXRP00000447] [MT6620 Wi-Fi][FW] Support new NVRAM update mechanism
* .
*
* 02 10 2011 cp.wu
* [WCXRP00000434] [MT6620 Wi-Fi][Driver] Obsolete unused event packet handlers
* EVENT_ID_CONNECTION_STATUS has been obsoleted and no need to handle.
*
* 02 08 2011 eddie.chen
 * [WCXRP00000426] [MT6620 Wi-Fi][FW/Driver] Add STA aging timeout and defaultHwRate in AP mode
 * Add event for STA aging timeout
*
* 01 27 2011 tsaiyuan.hsu
* [WCXRP00000392] [MT6620 Wi-Fi][Driver] Add Roaming Support
* add roaming fsm
* 1. not support 11r, only use strength of signal to determine roaming.
* 2. not enable CFG_SUPPORT_ROAMING until completion of full test.
* 3. in 6620, adopt work-around to avoid sign extension problem of cck of hw
* 4. assume that change of link quality in smooth way.
*
* 01 25 2011 yuche.tsai
 * [WCXRP00000352] [Volunteer Patch][MT6620][Driver] P2P Station Record Client List Issue
* Update cmd format of BSS INFO, always sync OwnMac to FW no matter P2P is enabled or not..
*
* 01 20 2011 eddie.chen
* [WCXRP00000374] [MT6620 Wi-Fi][DRV] SW debug control
* Add Oid for sw control debug command
*
* 01 15 2011 puff.wen
* NULL
* Add Stress test
*
* 01 12 2011 cm.chang
* [WCXRP00000354] [MT6620 Wi-Fi][Driver][FW] Follow NVRAM bandwidth setting
* Sync HT operation element information from host to FW
*
* 01 12 2011 cm.chang
* [WCXRP00000354] [MT6620 Wi-Fi][Driver][FW] Follow NVRAM bandwidth setting
* User-defined bandwidth is for 2.4G and 5G individually
*
* 12 29 2010 eddie.chen
* [WCXRP00000322] Add WMM IE in beacon,
 * Add per-station flow control when STA is in PS
* 1) PS flow control event
*
* 2) WMM IE in beacon, assoc resp, probe resp
*
* 12 28 2010 cp.wu
* [WCXRP00000269] [MT6620 Wi-Fi][Driver][Firmware] Prepare for v1.1 branch release
* report EEPROM used flag via NIC_CAPABILITY
*
* 12 28 2010 cp.wu
* [WCXRP00000269] [MT6620 Wi-Fi][Driver][Firmware] Prepare for v1.1 branch release
* integrate with 'EEPROM used' flag for reporting correct capability to Engineer Mode/META and other tools
*
* 12 23 2010 george.huang
* [WCXRP00000152] [MT6620 Wi-Fi] AP mode power saving function
* 1. update WMM IE parsing, with ASSOC REQ handling
* 2. extend U-APSD parameter passing from driver to FW
*
* 12 07 2010 cm.chang
* [WCXRP00000239] MT6620 Wi-Fi][Driver][FW] Merge concurrent branch back to maintrunk
* 1. BSSINFO include RLM parameter
* 2. free all sta records when network is disconnected
*
* 12 07 2010 cm.chang
* [WCXRP00000238] MT6620 Wi-Fi][Driver][FW] Support regulation domain setting from NVRAM and supplicant
* 1. Country code is from NVRAM or supplicant
* 2. Change band definition in CMD/EVENT.
*
* 11 29 2010 cm.chang
* [WCXRP00000210] [MT6620 Wi-Fi][Driver][FW] Set RCPI value in STA_REC for initial TX rate selection of auto-rate algorithm
* Sync RCPI of STA_REC to FW as reference of initial TX rate
*
* 11 08 2010 cm.chang
* [WCXRP00000169] [MT6620 Wi-Fi][Driver][FW] Remove unused CNM recover message ID
 * Remove CNM channel recover message ID
*
* 11 01 2010 cp.wu
* [WCXRP00000056] [MT6620 Wi-Fi][Driver] NVRAM implementation with Version Check[WCXRP00000150] [MT6620 Wi-Fi][Driver] Add implementation for querying current TX rate from firmware auto rate module
* 1) Query link speed (TX rate) from firmware directly with buffering mechanism to reduce overhead
* 2) Remove CNM CH-RECOVER event handling
* 3) cfg read/write API renamed with kal prefix for unified naming rules.
*
* 10 26 2010 cp.wu
* [WCXRP00000056] [MT6620 Wi-Fi][Driver] NVRAM implementation with Version Check[WCXRP00000137] [MT6620 Wi-Fi] [FW] Support NIC capability query command
* 1) update NVRAM content template to ver 1.02
* 2) add compile option for querying NIC capability (default: off)
* 3) modify AIS 5GHz support to run-time option, which could be turned on by registry or NVRAM setting
* 4) correct auto-rate compiler error under linux (treat warning as error)
* 5) simplify usage of NVRAM and REG_INFO_T
* 6) add version checking between driver and firmware
*
* 10 25 2010 cp.wu
* [WCXRP00000133] [MT6620 Wi-Fi] [FW][Driver] Change TX power offset band definition
* follow-up for CMD_5G_PWR_OFFSET_T definition change
*
* 10 20 2010 cp.wu
* [WCXRP00000117] [MT6620 Wi-Fi][Driver] Add logic for suspending driver when MT6620 is not responding anymore
* use OID_CUSTOM_TEST_MODE as indication for driver reset
* by dropping pending TX packets
*
* 10 20 2010 wh.su
* [WCXRP00000124] [MT6620 Wi-Fi] [Driver] Support the dissolve P2P Group
* Add the code to support disconnect p2p group
*
* 09 15 2010 cm.chang
* NULL
* Add new CMD for TX power, 5G power offset and power parameters
*
* 09 07 2010 yuche.tsai
* NULL
* Add a pointer in P2P SCAN RESULT structure. This pointer
* is pointed to a IE buffer for this P2p device.
*
* 09 07 2010 wh.su
* NULL
* adding the code for beacon/probe req/ probe rsp wsc ie at p2p.
*
* 09 03 2010 kevin.huang
* NULL
* Refine #include sequence and solve recursive/nested #include issue
*
* 08 23 2010 chinghwa.yu
* NULL
* Update for BOW.
*
* 08 20 2010 cm.chang
* NULL
* Migrate RLM code to host from FW
*
* 08 16 2010 george.huang
* NULL
* add new CMD ID definition
*
* 08 16 2010 yuche.tsai
* NULL
* Add a field in BSS INFO cmd to change interface address for P2P. (switching between Device Addr & Interface Addr)
*
* 08 12 2010 yuche.tsai
* NULL
* Add interface address indication when indicate connection status.
* It is requested by supplicant to do 4 way handshake.
*
* 08 07 2010 wh.su
* NULL
* adding the privacy related code for P2P network
*
* 08 05 2010 yuche.tsai
* NULL
* Change data structure for P2P Device scan result, all channel time for scan command.
*
* 08 04 2010 george.huang
* NULL
* handle change PS mode OID/ CMD
*
* 08 04 2010 yarco.yang
* NULL
* Add TX_AMPDU and ADDBA_REJECT command
*
* 08 03 2010 george.huang
* NULL
* handle event for updating NOA parameters indicated from FW
*
* 08 02 2010 george.huang
* NULL
* add WMM-PS test related OID/ CMD handlers
*
* 07 28 2010 cp.wu
* NULL
* sync. CMD_BSS_INFO structure change to CMD-EVENT v0.15.
*
* 07 26 2010 yuche.tsai
*
* Add P2P Device Found Event.
* Channel extention option in scan abort command.
*
* 07 23 2010 cp.wu
*
* add AIS-FSM handling for beacon timeout event.
*
* 07 21 2010 yuche.tsai
*
* Add for P2P Scan Result Parsing & Saving.
*
* 07 20 2010 george.huang
*
* DWORD align for the CMD data structure
*
* 07 20 2010 cp.wu
*
* pass band information for scan in an efficient way by mapping ENUM_BAND_T into UINT_8..
*
* 07 19 2010 wh.su
*
* update for security supporting.
*
* 07 19 2010 cm.chang
*
* Set RLM parameters and enable CNM channel manager
*
* 07 16 2010 yarco.yang
*
* 1. Support BSS Absence/Presence Event
* 2. Support STA change PS mode Event
* 3. Support BMC forwarding for AP mode.
*
* 07 14 2010 cp.wu
*
* [WPD00003833] [MT6620 and MT5931] Driver migration.
* pass band with channel number information as scan parameter
*
* 07 14 2010 yarco.yang
*
* 1. Remove CFG_MQM_MIGRATION
* 2. Add CMD_UPDATE_WMM_PARMS command
*
* 07 09 2010 cp.wu
*
* reorder members of CMD_SET_BSS_INFO.
*
* 07 08 2010 cp.wu
*
* [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository.
*
* 07 07 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* update prStaRecOfAP with BSS-INFO.
*
* 07 07 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Support state of STA record change from 1 to 1
*
* 07 01 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Support sync command of STA_REC
*
* 07 01 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* implementation of DRV-SCN and related mailbox message handling.
*
* 06 30 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* sync. with CMD/EVENT document ver0.07.
*
* 06 29 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* correct variable naming for 8-bit variable used in CMD_BEACON_TEMPLATE_UPDATE.
*
* 06 29 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* 1) sync to. CMD/EVENT document v0.03
* 2) simplify DTIM period parsing in scan.c only, bss.c no longer parses it again.
* 3) send command packet to indicate FW-PM after
* a) 1st beacon is received after AIS has connected to an AP
* b) IBSS-ALONE has been created
 * c) IBSS-MERGE has occurred
*
* 06 28 2010 george.huang
* [WPD00001556]Basic power managemenet function
* Create beacon update path, with expose bssUpdateBeaconContent()
*
* 06 22 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* 1) add command warpper for STA-REC/BSS-INFO sync.
* 2) enhance command packet sending procedure for non-oid part
* 3) add command packet definitions for STA-REC/BSS-INFO sync.
*
* 06 21 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* add BSS/STA_REC commands for integration.
*
* 06 21 2010 yarco.yang
* [WPD00003837][MT6620]Data Path Refine
* Add TX Done Event handle entry
*
* 06 10 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* 1) eliminate CFG_CMD_EVENT_VERSION_0_9
* 2) when disconnected, indicate nic directly (no event is needed)
*
* 06 06 2010 kevin.huang
* [WPD00003832][MT6620 5931] Create driver base
* [MT6620 5931] Create driver base
*
* 05 20 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) integrate OID_GEN_NETWORK_LAYER_ADDRESSES with CMD_ID_SET_IP_ADDRESS
* 2) buffer statistics data for 2 seconds
* 3) use default value for adhoc parameters instead of 0
*
* 05 19 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) do not take timeout mechanism for power mode oids
* 2) retrieve network type from connection status
* 3) after disassciation, set radio state to off
* 4) TCP option over IPv6 is supported
*
* 05 17 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* correct OID_802_11_DISASSOCIATE handling.
*
* 05 17 2010 cp.wu
* [WPD00003831][MT6620 Wi-Fi] Add framework for Wi-Fi Direct support
* 1) add timeout handler mechanism for pending command packets
* 2) add p2p add/removal key
*
* 04 13 2010 cp.wu
* [WPD00003823][MT6620 Wi-Fi] Add Bluetooth-over-Wi-Fi support
* add framework for BT-over-Wi-Fi support.
 *    1) prPendingCmdInfo is replaced by a queue for multiple handler capability
 *    2) command sequence number is now increased atomically
 *    3) private data can be held and used for other purposes
*
* 04 06 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* sync statistics data structure definition with firmware implementation
*
* 03 30 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* statistics information OIDs are now handled by querying from firmware domain
*
* 03 26 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* indicate media stream mode after set is done
*
* 03 26 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* add a temporary flag for integration with CMD/EVENT v0.9.
*
* 03 25 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) correct OID_802_11_CONFIGURATION with frequency setting behavior.
 *    the frequency is used for adhoc connection only
 * 2) update with SD1 v0.9 CMD/EVENT documentation
*
* 03 24 2010 jeffrey.chang
* [WPD00003826]Initial import for Linux port
* initial import for Linux port
*
* 03 22 2010 cp.wu
* [WPD00003824][MT6620 Wi-Fi][New Feature] Add support of large scan list
* Implement feature needed by CR: WPD00003824: refining association command by pasting scanning result
*
* 03 19 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) add ACPI D0/D3 state switching support
 *    2) use a more formal way to handle the interrupt when the status is retrieved from the enhanced RX response
*
* 03 15 2010 kevin.huang
* [WPD00003820][MT6620 Wi-Fi] Modify the code for meet the WHQL test
* Add event for activate STA_RECORD_T
*
* 03 03 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* implement custom OID: EEPROM read/write access
*
* 03 03 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* implement OID_802_3_MULTICAST_LIST oid handling
*
* 02 26 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* move EVENT_ID_ASSOC_INFO from nic_rx.c to gl_kal_ndis_51.c
* 'cause it involves OS dependent data structure handling
*
* 02 25 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* send CMD_ID_INFRASTRUCTURE when handling OID_802_11_INFRASTRUCTURE_MODE set.
*
* 02 09 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1. Permanent and current MAC address are now retrieved by CMD/EVENT packets instead of hard-coded address
 *    2. follow MSDN-defined behavior when associating to another AP
 *    3. for firmware download, packet size can be up to 2048 bytes
*
* 01 27 2010 wh.su
* [WPD00003816][MT6620 Wi-Fi] Adding the security support
* .
*
* 01 27 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1. eliminate improper variable in rHifInfo
 *    2. block TX/ordinary OID when RF test mode is engaged
 *    3. wait until firmware finishes the operation when entering and leaving RF test mode
 *    4. correct some HAL implementation
*
* 01 22 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* implement following 802.11 OIDs:
 *    OID_802_11_RSSI,
 *    OID_802_11_RSSI_TRIGGER,
 *    OID_802_11_STATISTICS,
 *    OID_802_11_DISASSOCIATE,
 *    OID_802_11_POWER_MODE
*
* 01 21 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* implement OID_802_11_MEDIA_STREAM_MODE
*
* 01 21 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* implement OID_802_11_SUPPORTED_RATES / OID_802_11_DESIRED_RATES
*
* 12 30 2009 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) According to CMD/EVENT documentation v0.8,
 *    OID_CUSTOM_TEST_RX_STATUS & OID_CUSTOM_TEST_TX_STATUS are no longer used,
 *    and the result is retrieved via get ATInfo instead
 * 2) add 4 counters for recording aggregation statistics
** \main\maintrunk.MT6620WiFiDriver_Prj\20 2009-12-11 18:35:07 GMT mtk02752
** add CMD added in CMD/EVEN document v0.8
** \main\maintrunk.MT6620WiFiDriver_Prj\19 2009-12-10 16:39:37 GMT mtk02752
** eliminate unused definitions
** \main\maintrunk.MT6620WiFiDriver_Prj\18 2009-12-10 09:55:11 GMT mtk02752
** command ID/event ID revised
** \main\maintrunk.MT6620WiFiDriver_Prj\17 2009-12-09 13:57:37 GMT MTK02468
** Added event ids (EVENT_ID_RX_ADDBA and EVENT_ID_RX_DELBA)
** \main\maintrunk.MT6620WiFiDriver_Prj\16 2009-12-08 17:35:39 GMT mtk02752
** + add event ID for EVENT_ID_TEST_STATUS (rf test)
** \main\maintrunk.MT6620WiFiDriver_Prj\15 2009-12-07 23:01:09 GMT mtk02752
** add data structure for RF_TEST
** \main\maintrunk.MT6620WiFiDriver_Prj\14 2009-12-03 16:22:56 GMT mtk01461
** Modify the element - i4RSSI in EVENT of SCAN RESULT
** \main\maintrunk.MT6620WiFiDriver_Prj\13 2009-11-30 10:54:44 GMT mtk02752
** 1st DW of WIFI_CMD_T is shared with HIF_TX_HEADER_T, while 1st DW of WIFI_EVENT_T is shared with HIF_RX_HEADER_T
** \main\maintrunk.MT6620WiFiDriver_Prj\12 2009-11-26 10:16:58 GMT mtk02752
** resync EVENT_CONNECTION_STATUS
** \main\maintrunk.MT6620WiFiDriver_Prj\11 2009-11-25 21:34:01 GMT mtk02752
** sync. EVENT_SCAN_RESULT_T with firmware
** \main\maintrunk.MT6620WiFiDriver_Prj\10 2009-11-25 21:03:48 GMT mtk02752
** refine MGMT_FRAME
** \main\maintrunk.MT6620WiFiDriver_Prj\9 2009-11-25 18:17:47 GMT mtk02752
** refine GL_WLAN_INFO_T for buffering scan result and presume max. ie length = 600 bytes
** \main\maintrunk.MT6620WiFiDriver_Prj\8 2009-11-24 22:41:20 GMT mtk02752
** add EVENT_SCAN_RESULT_T definition
** \main\maintrunk.MT6620WiFiDriver_Prj\7 2009-11-23 20:29:16 GMT mtk02752
** fix typo
** \main\maintrunk.MT6620WiFiDriver_Prj\6 2009-11-23 14:46:01 GMT mtk02752
** add new command/event structure upon CM@SD1's documentation
** \main\maintrunk.MT6620WiFiDriver_Prj\5 2009-11-13 15:13:40 GMT mtk02752
** add command definition for CMD_BUILD_CONNECTION and EVENT_CONNECTION_STATUS
** \main\maintrunk.MT6620WiFiDriver_Prj\4 2009-05-20 12:22:22 GMT mtk01461
** Add SeqNum field to Event Header
** \main\maintrunk.MT6620WiFiDriver_Prj\3 2009-04-29 15:42:11 GMT mtk01461
** Update structure of HIF_EVENT_HEADER_T and EVENT_HDR_SIZE
** \main\maintrunk.MT6620WiFiDriver_Prj\2 2009-04-21 12:10:36 GMT mtk01461
** Add Common Set CMD Callback for MCR Write and other Set OID
** \main\maintrunk.MT6620WiFiDriver_Prj\1 2009-04-21 01:40:17 GMT mtk01461
** Command Done Handler
*/
#ifndef _NIC_CMD_EVENT_H
#define _NIC_CMD_EVENT_H
/*******************************************************************************
* C O M P I L E R F L A G S
********************************************************************************
*/
/*******************************************************************************
* E X T E R N A L R E F E R E N C E S
********************************************************************************
*/
/*******************************************************************************
* C O N S T A N T S
********************************************************************************
*/
#define CMD_PQ_ID (0x8000)
#define CMD_PACKET_TYPE_ID (0xA0)
#define CMD_STATUS_SUCCESS 0
#define CMD_STATUS_REJECTED 1
#define CMD_STATUS_UNKNOWN 2
#define EVENT_HDR_SIZE OFFSET_OF(WIFI_EVENT_T, aucBuffer[0])
#define MAX_IE_LENGTH (600)
#define MAX_WSC_IE_LENGTH (400)
/* Action field in structure CMD_CH_PRIVILEGE_T */
#define CMD_CH_ACTION_REQ 0
#define CMD_CH_ACTION_ABORT 1
/* Status field in structure EVENT_CH_PRIVILEGE_T */
#define EVENT_CH_STATUS_GRANT 0
/*CMD_POWER_OFFSET_T , follow 5G sub-band*/
/* #define MAX_SUBBAND_NUM 8 */
/* */
/* */
/* */
/* */
typedef enum _ENUM_CMD_ID_T {
CMD_ID_TEST_CTRL = 0x01, /* 0x01 (Set) */
CMD_ID_BASIC_CONFIG, /* 0x02 (Set) */
CMD_ID_SCAN_REQ_V2, /* 0x03 (Set) */
CMD_ID_NIC_POWER_CTRL, /* 0x04 (Set) */
CMD_ID_POWER_SAVE_MODE, /* 0x05 (Set) */
CMD_ID_LINK_ATTRIB, /* 0x06 (Set) */
CMD_ID_ADD_REMOVE_KEY, /* 0x07 (Set) */
CMD_ID_DEFAULT_KEY_ID, /* 0x08 (Set) */
CMD_ID_INFRASTRUCTURE, /* 0x09 (Set) */
CMD_ID_SET_RX_FILTER, /* 0x0a (Set) */
CMD_ID_DOWNLOAD_BUF, /* 0x0b (Set) */
CMD_ID_WIFI_START, /* 0x0c (Set) */
CMD_ID_CMD_BT_OVER_WIFI, /* 0x0d (Set) */
CMD_ID_SET_MEDIA_CHANGE_DELAY_TIME, /* 0x0e (Set) */
CMD_ID_SET_DOMAIN_INFO, /* 0x0f (Set) */
CMD_ID_SET_IP_ADDRESS, /* 0x10 (Set) */
CMD_ID_BSS_ACTIVATE_CTRL, /* 0x11 (Set) */
CMD_ID_SET_BSS_INFO, /* 0x12 (Set) */
CMD_ID_UPDATE_STA_RECORD, /* 0x13 (Set) */
CMD_ID_REMOVE_STA_RECORD, /* 0x14 (Set) */
CMD_ID_INDICATE_PM_BSS_CREATED, /* 0x15 (Set) */
CMD_ID_INDICATE_PM_BSS_CONNECTED, /* 0x16 (Set) */
CMD_ID_INDICATE_PM_BSS_ABORT, /* 0x17 (Set) */
CMD_ID_UPDATE_BEACON_CONTENT, /* 0x18 (Set) */
CMD_ID_SET_BSS_RLM_PARAM, /* 0x19 (Set) */
CMD_ID_SCAN_REQ, /* 0x1a (Set) */
CMD_ID_SCAN_CANCEL, /* 0x1b (Set) */
CMD_ID_CH_PRIVILEGE, /* 0x1c (Set) */
CMD_ID_UPDATE_WMM_PARMS, /* 0x1d (Set) */
CMD_ID_SET_WMM_PS_TEST_PARMS, /* 0x1e (Set) */
CMD_ID_TX_AMPDU, /* 0x1f (Set) */
CMD_ID_ADDBA_REJECT, /* 0x20 (Set) */
CMD_ID_SET_PS_PROFILE_ADV, /* 0x21 (Set) */
CMD_ID_SET_RAW_PATTERN, /* 0x22 (Set) */
CMD_ID_CONFIG_PATTERN_FUNC, /* 0x23 (Set) */
CMD_ID_SET_TX_PWR, /* 0x24 (Set) */
CMD_ID_SET_PWR_PARAM, /* 0x25 (Set) */
CMD_ID_P2P_ABORT, /* 0x26 (Set) */
/* SLT commands */
CMD_ID_RANDOM_RX_RESET_EN = 0x2C, /* 0x2C (Set ) */
CMD_ID_RANDOM_RX_RESET_DE = 0x2D, /* 0x2D (Set ) */
CMD_ID_SAPP_EN = 0x2E, /* 0x2E (Set ) */
CMD_ID_SAPP_DE = 0x2F, /* 0x2F (Set ) */
CMD_ID_ROAMING_TRANSIT = 0x30, /* 0x30 (Set) */
CMD_ID_SET_PHY_PARAM, /* 0x31 (Set) */
CMD_ID_SET_NOA_PARAM, /* 0x32 (Set) */
CMD_ID_SET_OPPPS_PARAM, /* 0x33 (Set) */
CMD_ID_SET_UAPSD_PARAM, /* 0x34 (Set) */
CMD_ID_SET_SIGMA_STA_SLEEP, /* 0x35 (Set) */
CMD_ID_SET_EDGE_TXPWR_LIMIT, /* 0x36 (Set) */
CMD_ID_SET_DEVICE_MODE, /* 0x37 (Set) */
CMD_ID_SET_TXPWR_CTRL, /* 0x38 (Set) */
CMD_ID_SET_AUTOPWR_CTRL, /* 0x39 (Set) */
CMD_ID_SET_WFD_CTRL, /* 0x3a (Set) */
CMD_ID_SET_NLO_REQ, /* 0x3b (Set) */
CMD_ID_SET_NLO_CANCEL, /* 0x3c (Set) */
CMD_ID_SET_GTK_REKEY_DATA, /* 0x3d (Set) */
CMD_ID_ROAMING_CONTROL, /* 0x3e (Set) */
/* CFG_M0VE_BA_TO_DRIVER */
CMD_ID_RESET_BA_SCOREBOARD = 0x3f, /* 0x3f (Set) */
CMD_ID_SET_EDGE_TXPWR_LIMIT_5G = 0x40, /* 0x40 (Set) */
CMD_ID_SET_CHANNEL_PWR_OFFSET, /* 0x41 (Set) */
CMD_ID_SET_80211AC_TX_PWR, /* 0x42 (Set) */
CMD_ID_SET_PATH_COMPASATION, /* 0x43 (Set) */
CMD_ID_SET_BATCH_REQ = 0x47, /* 0x47 (Set) */
CMD_ID_GET_NIC_CAPABILITY = 0x80, /* 0x80 (Query) */
CMD_ID_GET_LINK_QUALITY, /* 0x81 (Query) */
CMD_ID_GET_STATISTICS, /* 0x82 (Query) */
CMD_ID_ACCESS_REG = 0xc0, /* 0xc0 (Set / Query) */
CMD_ID_MAC_MCAST_ADDR, /* 0xc1 (Set / Query) */
CMD_ID_802_11_PMKID, /* 0xc2 (Set / Query) */
CMD_ID_ACCESS_EEPROM, /* 0xc3 (Set / Query) */
CMD_ID_SW_DBG_CTRL, /* 0xc4 (Set / Query) */
CMD_ID_SEC_CHECK, /* 0xc5 (Set / Query) */
CMD_ID_DUMP_MEM, /* 0xc6 (Query) */
CMD_ID_RESOURCE_CONFIG, /* 0xc7 (Set / Query) */
CMD_ID_CHIP_CONFIG = 0xCA, /* 0xca (Set / Query) */
CMD_ID_SET_RDD_CH = 0xE1,
CMD_ID_SET_BWCS = 0xF1,
CMD_ID_SET_OSC = 0xF2,
CMD_ID_END
} ENUM_CMD_ID_T, *P_ENUM_CMD_ID_T;
typedef enum _ENUM_EVENT_ID_T {
EVENT_ID_NIC_CAPABILITY = 0x01, /* 0x01 (Query) */
EVENT_ID_LINK_QUALITY, /* 0x02 (Query / Unsolicited) */
EVENT_ID_STATISTICS, /* 0x03 (Query) */
EVENT_ID_MIC_ERR_INFO, /* 0x04 (Unsolicited) */
EVENT_ID_ACCESS_REG, /* 0x05 (Query - CMD_ID_ACCESS_REG) */
EVENT_ID_ACCESS_EEPROM, /* 0x06 (Query - CMD_ID_ACCESS_EEPROM) */
EVENT_ID_SLEEPY_INFO, /* 0x07 (Unsolicited) */
EVENT_ID_BT_OVER_WIFI, /* 0x08 (Unsolicited) */
EVENT_ID_TEST_STATUS, /* 0x09 (Query - CMD_ID_TEST_CTRL) */
EVENT_ID_RX_ADDBA, /* 0x0a (Unsolicited) */
EVENT_ID_RX_DELBA, /* 0x0b (Unsolicited) */
EVENT_ID_ACTIVATE_STA_REC, /* 0x0c (Response) */
EVENT_ID_SCAN_DONE, /* 0x0d (Unsolicited) */
EVENT_ID_RX_FLUSH, /* 0x0e (Unsolicited) */
EVENT_ID_TX_DONE, /* 0x0f (Unsolicited) */
EVENT_ID_CH_PRIVILEGE, /* 0x10 (Unsolicited) */
EVENT_ID_BSS_ABSENCE_PRESENCE, /* 0x11 (Unsolicited) */
EVENT_ID_STA_CHANGE_PS_MODE, /* 0x12 (Unsolicited) */
EVENT_ID_BSS_BEACON_TIMEOUT, /* 0x13 (Unsolicited) */
EVENT_ID_UPDATE_NOA_PARAMS, /* 0x14 (Unsolicited) */
EVENT_ID_AP_OBSS_STATUS, /* 0x15 (Unsolicited) */
EVENT_ID_STA_UPDATE_FREE_QUOTA, /* 0x16 (Unsolicited) */
EVENT_ID_SW_DBG_CTRL, /* 0x17 (Query - CMD_ID_SW_DBG_CTRL) */
EVENT_ID_ROAMING_STATUS, /* 0x18 (Unsolicited) */
EVENT_ID_STA_AGING_TIMEOUT, /* 0x19 (Unsolicited) */
EVENT_ID_SEC_CHECK_RSP, /* 0x1a (Query - CMD_ID_SEC_CHECK) */
EVENT_ID_SEND_DEAUTH, /* 0x1b (Unsolicited) */
EVENT_ID_UPDATE_RDD_STATUS, /* 0x1c (Unsolicited) */
EVENT_ID_UPDATE_BWCS_STATUS, /* 0x1d (Unsolicited) */
EVENT_ID_UPDATE_BCM_DEBUG, /* 0x1e (Unsolicited) */
EVENT_ID_RX_ERR, /* 0x1f (Unsolicited) */
EVENT_ID_DUMP_MEM = 0x20, /* 0x20 (Query - CMD_ID_DUMP_MEM) */
EVENT_ID_STA_STATISTICS, /* 0x21 (Query ) */
EVENT_ID_STA_STATISTICS_UPDATE, /* 0x22 (Unsolicited) */
EVENT_ID_NLO_DONE, /* 0x23 (Unsolicited) */
EVENT_ID_ADD_PKEY_DONE, /* 0x24 (Unsolicited) */
EVENT_ID_ICAP_DONE, /* 0x25 (Unsolicited) */
EVENT_ID_RESOURCE_CONFIG = 0x26, /* 0x26 (Query - CMD_ID_RESOURCE_CONFIG) */
EVENT_ID_DEBUG_MSG = 0x27, /* 0x27 (Unsolicited) */
EVENT_ID_RTT_CALIBR_DONE = 0x28, /* 0x28 (Unsolicited) */
EVENT_ID_RTT_UPDATE_RANGE = 0x29, /* 0x29 (Unsolicited) */
EVENT_ID_CHECK_REORDER_BUBBLE = 0x2a, /* 0x2a (Unsolicited) */
EVENT_ID_BATCH_RESULT = 0x2b, /* 0x2b (Query) */
EVENT_ID_UART_ACK = 0x40, /* 0x40 (Unsolicited) */
EVENT_ID_UART_NAK, /* 0x41 (Unsolicited) */
EVENT_ID_GET_CHIPID, /* 0x42 (Query - CMD_ID_GET_CHIPID) */
EVENT_ID_SLT_STATUS, /* 0x43 (Query - CMD_ID_SET_SLTINFO) */
EVENT_ID_CHIP_CONFIG, /* 0x44 (Query - CMD_ID_CHIP_CONFIG) */
EVENT_ID_END
} ENUM_EVENT_ID_T, *P_ENUM_EVENT_ID_T;
/*******************************************************************************
* D A T A T Y P E S
********************************************************************************
*/
#ifndef LINUX
typedef UINT_8 CMD_STATUS;
#endif
/* for Command Packet (via HIF-TX) */
/* following CM's documentation v0.7 */
typedef struct _WIFI_CMD_T {
UINT_16 u2TxByteCount; /* Max value is over 2048 */
UINT_16 u2PQ_ID; /* Must be 0x8000 (Port1, Queue 0) */
UINT_8 ucCID;
UINT_8 ucPktTypeID; /* Must be 0x20 (CMD Packet) */
UINT_8 ucSetQuery;
UINT_8 ucSeqNum;
UINT_8 aucBuffer[0];
} WIFI_CMD_T, *P_WIFI_CMD_T;
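/*
 * Illustrative sketch: filling the common WIFI_CMD_T header before the packet
 * is handed to the HIF-TX path. The helper name is an assumption introduced
 * here; only fields and constants defined in this file are used, and the
 * byte-count, packet-type and ucSetQuery conventions should be confirmed
 * against the CMD/EVENT document.
 */
static inline void nicFillCmdHdrSketch(P_WIFI_CMD_T prCmd, UINT_16 u2PayloadLen,
				       UINT_8 ucCID, UINT_8 ucSetQuery, UINT_8 ucSeqNum)
{
	/* total TX byte count = fixed command header + command payload */
	prCmd->u2TxByteCount = (UINT_16) (OFFSET_OF(WIFI_CMD_T, aucBuffer[0]) + u2PayloadLen);
	prCmd->u2PQ_ID = CMD_PQ_ID;	/* Port1, Queue 0 */
	prCmd->ucPktTypeID = CMD_PACKET_TYPE_ID;	/* command packet type */
	prCmd->ucCID = ucCID;
	prCmd->ucSetQuery = ucSetQuery;
	prCmd->ucSeqNum = ucSeqNum;
}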
/* for Event Packet (via HIF-RX) */
/* following CM's documentation v0.7 */
typedef struct _WIFI_EVENT_T {
UINT_16 u2PacketLength;
UINT_16 u2PacketType; /* Must be filled with 0xE000 (EVENT Packet) */
UINT_8 ucEID;
UINT_8 ucSeqNum;
UINT_8 aucReserved[2];
UINT_8 aucBuffer[0];
} WIFI_EVENT_T, *P_WIFI_EVENT_T;
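/*
 * Illustrative sketch: minimal sanity check on a received WIFI_EVENT_T before
 * dispatching on ucEID. The helper name is an assumption; u2PacketLength is
 * treated here as covering the fixed header plus aucBuffer[], matching the
 * EVENT_HDR_SIZE definition above.
 */
static inline UINT_16 nicEventPayloadLenSketch(P_WIFI_EVENT_T prEvent)
{
	if (prEvent->u2PacketLength < EVENT_HDR_SIZE)
		return 0;	/* malformed: shorter than the fixed event header */
	return (UINT_16) (prEvent->u2PacketLength - EVENT_HDR_SIZE);
}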
/* CMD_ID_TEST_CTRL */
typedef struct _CMD_TEST_CTRL_T {
UINT_8 ucAction;
UINT_8 aucReserved[3];
union {
UINT_32 u4OpMode;
UINT_32 u4ChannelFreq;
PARAM_MTK_WIFI_TEST_STRUC_T rRfATInfo;
} u;
} CMD_TEST_CTRL_T, *P_CMD_TEST_CTRL_T;
/* EVENT_TEST_STATUS */
typedef struct _PARAM_CUSTOM_RFTEST_TX_STATUS_STRUC_T {
UINT_32 u4PktSentStatus;
UINT_32 u4PktSentCount;
UINT_16 u2AvgAlc;
UINT_8 ucCckGainControl;
UINT_8 ucOfdmGainControl;
} PARAM_CUSTOM_RFTEST_TX_STATUS_STRUC_T, *P_PARAM_CUSTOM_RFTEST_TX_STATUS_STRUC_T;
typedef struct _PARAM_CUSTOM_RFTEST_RX_STATUS_STRUC_T {
UINT_32 u4IntRxOk; /*!< number of packets that Rx ok from interrupt */
UINT_32 u4IntCrcErr; /*!< number of packets that CRC error from interrupt */
UINT_32 u4IntShort; /*!< number of packets that is short preamble from interrupt */
UINT_32 u4IntLong; /*!< number of packets that is long preamble from interrupt */
UINT_32 u4PauRxPktCount; /*!< number of packets that Rx ok from PAU */
UINT_32 u4PauCrcErrCount; /*!< number of packets that CRC error from PAU */
UINT_32 u4PauRxFifoFullCount; /*!< number of packets that is short preamble from PAU */
UINT_32 u4PauCCACount; /*!< CCA rising edge count */
} PARAM_CUSTOM_RFTEST_RX_STATUS_STRUC_T, *P_PARAM_CUSTOM_RFTEST_RX_STATUS_STRUC_T;
typedef union _EVENT_TEST_STATUS {
PARAM_MTK_WIFI_TEST_STRUC_T rATInfo;
/* PARAM_CUSTOM_RFTEST_TX_STATUS_STRUC_T rTxStatus; */
/* PARAM_CUSTOM_RFTEST_RX_STATUS_STRUC_T rRxStatus; */
} EVENT_TEST_STATUS, *P_EVENT_TEST_STATUS;
/* CMD_BUILD_CONNECTION */
typedef struct _CMD_BUILD_CONNECTION {
UINT_8 ucInfraMode;
UINT_8 ucAuthMode;
UINT_8 ucEncryptStatus;
UINT_8 ucSsidLen;
UINT_8 aucSsid[PARAM_MAX_LEN_SSID];
UINT_8 aucBssid[PARAM_MAC_ADDR_LEN];
/* Ad-hoc mode */
UINT_16 u2BeaconPeriod;
UINT_16 u2ATIMWindow;
UINT_8 ucJoinOnly;
UINT_8 ucReserved;
UINT_32 u4FreqInKHz;
/* for faster connection */
UINT_8 aucScanResult[0];
} CMD_BUILD_CONNECTION, *P_CMD_BUILD_CONNECTION;
/* CMD_ADD_REMOVE_KEY */
typedef struct _CMD_802_11_KEY {
UINT_8 ucAddRemove;
UINT_8 ucTxKey;
UINT_8 ucKeyType;
UINT_8 ucIsAuthenticator;
UINT_8 aucPeerAddr[6];
UINT_8 ucBssIdx;
UINT_8 ucAlgorithmId;
UINT_8 ucKeyId;
UINT_8 ucKeyLen;
UINT_8 ucWlanIndex;
UINT_8 ucReverved;
UINT_8 aucKeyMaterial[32];
UINT_8 aucKeyRsc[16];
} CMD_802_11_KEY, *P_CMD_802_11_KEY;
/* CMD_ID_DEFAULT_KEY_ID */
typedef struct _CMD_DEFAULT_KEY {
UINT_8 ucBssIdx;
UINT_8 ucKeyId;
UINT_8 ucUnicast;
UINT_8 ucMulticast;
} CMD_DEFAULT_KEY, *P_CMD_DEFAULT_KEY;
/* WPA2 PMKID cache structure */
typedef struct _PMKID_ENTRY_T {
PARAM_BSSID_INFO_T rBssidInfo;
BOOLEAN fgPmkidExist;
} PMKID_ENTRY_T, *P_PMKID_ENTRY_T;
typedef struct _CMD_802_11_PMKID {
ULONG u4BSSIDInfoCount;
P_PMKID_ENTRY_T arPMKIDInfo[1];
} CMD_802_11_PMKID, *P_CMD_802_11_PMKID;
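/*
 * Illustrative sketch: CMD_802_11_PMKID ends with a length-1 trailing array,
 * so a buffer carrying u4Count entries is sized as below. The macro name is
 * an assumption introduced here for clarity.
 */
#define CMD_802_11_PMKID_SIZE_SKETCH(_u4Count) \
	(OFFSET_OF(CMD_802_11_PMKID, arPMKIDInfo[0]) + (_u4Count) * sizeof(P_PMKID_ENTRY_T))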
typedef struct _CMD_GTK_REKEY_DATA_T {
UINT_8 aucKek[16];
UINT_8 aucKck[16];
UINT_8 aucReplayCtr[8];
} CMD_GTK_REKEY_DATA_T, *P_CMD_GTK_REKEY_DATA_T;
/* CMD_BASIC_CONFIG */
typedef struct _CMD_CSUM_OFFLOAD_T {
UINT_16 u2RxChecksum; /* bit0: IP, bit1: UDP, bit2: TCP */
UINT_16 u2TxChecksum; /* bit0: IP, bit1: UDP, bit2: TCP */
} CMD_CSUM_OFFLOAD_T, *P_CMD_CSUM_OFFLOAD_T;
typedef struct _CMD_BASIC_CONFIG_T {
UINT_8 ucNative80211;
UINT_8 aucReserved[3];
CMD_CSUM_OFFLOAD_T rCsumOffload;
} CMD_BASIC_CONFIG_T, *P_CMD_BASIC_CONFIG_T;
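/*
 * Illustrative sketch: the checksum-offload bitmaps in CMD_CSUM_OFFLOAD_T
 * follow the field comments above (bit0: IP, bit1: UDP, bit2: TCP). The macro
 * names are assumptions introduced here; the set actually enabled is
 * hardware-dependent.
 */
#define CSUM_OFFLOAD_IP_SKETCH		(1U << 0)	/* bit0: IP header checksum */
#define CSUM_OFFLOAD_UDP_SKETCH		(1U << 1)	/* bit1: UDP checksum */
#define CSUM_OFFLOAD_TCP_SKETCH		(1U << 2)	/* bit2: TCP checksum */
/* Example: enable all three in both directions on a CMD_BASIC_CONFIG_T named rCfg:
 *   rCfg.rCsumOffload.u2RxChecksum = rCfg.rCsumOffload.u2TxChecksum =
 *       CSUM_OFFLOAD_IP_SKETCH | CSUM_OFFLOAD_UDP_SKETCH | CSUM_OFFLOAD_TCP_SKETCH;
 */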
/* CMD_MAC_MCAST_ADDR */
typedef struct _CMD_MAC_MCAST_ADDR {
UINT_32 u4NumOfGroupAddr;
UINT_8 ucBssIndex;
UINT_8 aucReserved[3];
PARAM_MAC_ADDRESS arAddress[MAX_NUM_GROUP_ADDR];
} CMD_MAC_MCAST_ADDR, *P_CMD_MAC_MCAST_ADDR, EVENT_MAC_MCAST_ADDR, *P_EVENT_MAC_MCAST_ADDR;
/* CMD_ACCESS_EEPROM */
typedef struct _CMD_ACCESS_EEPROM {
UINT_16 u2Offset;
UINT_16 u2Data;
} CMD_ACCESS_EEPROM, *P_CMD_ACCESS_EEPROM, EVENT_ACCESS_EEPROM, *P_EVENT_ACCESS_EEPROM;
typedef struct _CMD_CUSTOM_NOA_PARAM_STRUC_T {
UINT_32 u4NoaDurationMs;
UINT_32 u4NoaIntervalMs;
UINT_32 u4NoaCount;
} CMD_CUSTOM_NOA_PARAM_STRUC_T, *P_CMD_CUSTOM_NOA_PARAM_STRUC_T;
typedef struct _CMD_CUSTOM_OPPPS_PARAM_STRUC_T {
UINT_32 u4CTwindowMs;
} CMD_CUSTOM_OPPPS_PARAM_STRUC_T, *P_CMD_CUSTOM_OPPPS_PARAM_STRUC_T;
typedef struct _CMD_CUSTOM_UAPSD_PARAM_STRUC_T {
UINT_8 fgEnAPSD;
UINT_8 fgEnAPSD_AcBe;
UINT_8 fgEnAPSD_AcBk;
UINT_8 fgEnAPSD_AcVo;
UINT_8 fgEnAPSD_AcVi;
UINT_8 ucMaxSpLen;
UINT_8 aucResv[2];
} CMD_CUSTOM_UAPSD_PARAM_STRUC_T, *P_CMD_CUSTOM_UAPSD_PARAM_STRUC_T;
#if CFG_M0VE_BA_TO_DRIVER
typedef struct _CMD_RESET_BA_SCOREBOARD_T {
UINT_8 ucflag;
UINT_8 ucTID;
UINT_8 aucMacAddr[PARAM_MAC_ADDR_LEN];
} CMD_RESET_BA_SCOREBOARD_T, *P_CMD_RESET_BA_SCOREBOARD_T;
#endif
/* EVENT_CONNECTION_STATUS */
typedef struct _EVENT_CONNECTION_STATUS {
UINT_8 ucMediaStatus;
UINT_8 ucReasonOfDisconnect;
UINT_8 ucInfraMode;
UINT_8 ucSsidLen;
UINT_8 aucSsid[PARAM_MAX_LEN_SSID];
UINT_8 aucBssid[PARAM_MAC_ADDR_LEN];
UINT_8 ucAuthenMode;
UINT_8 ucEncryptStatus;
UINT_16 u2BeaconPeriod;
UINT_16 u2AID;
UINT_16 u2ATIMWindow;
UINT_8 ucNetworkType;
UINT_8 aucReserved[1];
UINT_32 u4FreqInKHz;
#if CFG_ENABLE_WIFI_DIRECT
UINT_8 aucInterfaceAddr[PARAM_MAC_ADDR_LEN];
#endif
} EVENT_CONNECTION_STATUS, *P_EVENT_CONNECTION_STATUS;
/* EVENT_NIC_CAPABILITY */
typedef struct _EVENT_NIC_CAPABILITY_T {
UINT_16 u2ProductID;
UINT_16 u2FwVersion;
UINT_16 u2DriverVersion;
UINT_8 ucHw5GBandDisabled;
UINT_8 ucEepromUsed;
UINT_8 aucMacAddr[6];
UINT_8 ucEndianOfMacAddrNumber;
UINT_8 ucReserved;
UINT_8 ucRfVersion;
UINT_8 ucPhyVersion;
UINT_8 ucRfCalFail;
UINT_8 ucBbCalFail;
UINT_8 aucDateCode[16];
UINT_32 u4FeatureFlag0;
UINT_32 u4FeatureFlag1;
UINT_32 u4CompileFlag0;
UINT_32 u4CompileFlag1;
UINT_8 aucReserved0[64];
} EVENT_NIC_CAPABILITY_T, *P_EVENT_NIC_CAPABILITY_T;
/* modified version of WLAN_BEACON_FRAME_BODY_T for simpler buffering */
typedef struct _WLAN_BEACON_FRAME_BODY_T_LOCAL {
/* Beacon frame body */
UINT_32 au4Timestamp[2]; /* Timestamp */
UINT_16 u2BeaconInterval; /* Beacon Interval */
UINT_16 u2CapInfo; /* Capability */
UINT_8 aucInfoElem[MAX_IE_LENGTH]; /* Various IEs, start from SSID */
UINT_16 u2IELength; /* This field is *NOT* carried by F/W but calculated by nic_rx */
} WLAN_BEACON_FRAME_BODY_T_LOCAL, *P_WLAN_BEACON_FRAME_BODY_T_LOCAL;
/* EVENT_SCAN_RESULT */
typedef struct _EVENT_SCAN_RESULT_T {
INT_32 i4RSSI;
UINT_32 u4LinkQuality;
UINT_32 u4DSConfig; /* Center frequency */
UINT_32 u4DomainInfo; /* Require CM opinion */
UINT_32 u4Reserved;
UINT_8 ucNetworkType;
UINT_8 ucOpMode;
UINT_8 aucBssid[MAC_ADDR_LEN];
UINT_8 aucRatesEx[PARAM_MAX_LEN_RATES_EX];
WLAN_BEACON_FRAME_BODY_T_LOCAL rBeaconFrameBody;
} EVENT_SCAN_RESULT_T, *P_EVENT_SCAN_RESULT_T;
/* event of tkip mic error */
typedef struct _EVENT_MIC_ERR_INFO {
UINT_32 u4Flags;
} EVENT_MIC_ERR_INFO, *P_EVENT_MIC_ERR_INFO;
/* event of add key done for port control */
typedef struct _EVENT_ADD_KEY_DONE_INFO {
UINT_8 ucBSSIndex;
UINT_8 ucReserved;
UINT_8 aucStaAddr[6];
} EVENT_ADD_KEY_DONE_INFO, *P_EVENT_ADD_KEY_DONE_INFO;
typedef struct _EVENT_PMKID_CANDIDATE_LIST_T {
UINT_32 u4Version; /*!< Version */
UINT_32 u4NumCandidates; /*!< How many candidates follow */
PARAM_PMKID_CANDIDATE_T arCandidateList[1];
} EVENT_PMKID_CANDIDATE_LIST_T, *P_EVENT_PMKID_CANDIDATE_LIST_T;
typedef struct _EVENT_CMD_RESULT {
UINT_8 ucCmdID;
UINT_8 ucStatus;
UINT_8 aucReserved[2];
} EVENT_CMD_RESULT, *P_EVENT_CMD_RESULT;
/* CMD_ID_ACCESS_REG & EVENT_ID_ACCESS_REG */
typedef struct _CMD_ACCESS_REG {
UINT_32 u4Address;
UINT_32 u4Data;
} CMD_ACCESS_REG, *P_CMD_ACCESS_REG;
/* CMD_DUMP_MEMORY */
typedef struct _CMD_DUMP_MEM {
UINT_32 u4Address;
UINT_32 u4Length;
UINT_32 u4RemainLength;
UINT_8 ucFragNum;
} CMD_DUMP_MEM, *P_CMD_DUMP_MEM;
typedef struct _EVENT_DUMP_MEM_T {
UINT_32 u4Address;
UINT_32 u4Length;
UINT_32 u4RemainLength;
UINT_8 ucFragNum;
UINT_8 aucBuffer[1];
} EVENT_DUMP_MEM_T, *P_EVENT_DUMP_MEM_T;
typedef struct _CMD_SW_DBG_CTRL_T {
UINT_32 u4Id;
UINT_32 u4Data;
/* Debug Support */
UINT_32 u4DebugCnt[64];
} CMD_SW_DBG_CTRL_T, *P_CMD_SW_DBG_CTRL_T;
typedef struct _CMD_CHIP_CONFIG_T {
UINT_16 u2Id;
UINT_8 ucType;
UINT_8 ucRespType;
UINT_16 u2MsgSize;
UINT_8 aucReserved0[2];
UINT_8 aucCmd[CHIP_CONFIG_RESP_SIZE];
} CMD_CHIP_CONFIG_T, *P_CMD_CHIP_CONFIG_T;
/* CMD_ID_LINK_ATTRIB */
typedef struct _CMD_LINK_ATTRIB {
INT_8 cRssiTrigger;
UINT_8 ucDesiredRateLen;
UINT_16 u2DesiredRate[32];
UINT_8 ucMediaStreamMode;
UINT_8 aucReserved[1];
} CMD_LINK_ATTRIB, *P_CMD_LINK_ATTRIB;
/* CMD_ID_NIC_POWER_CTRL */
typedef struct _CMD_NIC_POWER_CTRL {
UINT_8 ucPowerMode;
UINT_8 aucReserved[3];
} CMD_NIC_POWER_CTRL, *P_CMD_NIC_POWER_CTRL;
/* CMD_ID_POWER_SAVE_MODE */
typedef struct _CMD_PS_PROFILE_T {
UINT_8 ucBssIndex;
UINT_8 ucPsProfile;
UINT_8 aucReserved[2];
} CMD_PS_PROFILE_T, *P_CMD_PS_PROFILE_T;
/* EVENT_LINK_QUALITY */
#if 1
typedef struct _LINK_QUALITY_ {
INT_8 cRssi; /* AIS Network. */
INT_8 cLinkQuality;
UINT_16 u2LinkSpeed; /* TX rate1 */
UINT_8 ucMediumBusyPercentage; /* Read clear */
UINT_8 ucIsLQ0Rdy; /* Link Quality BSS0 Ready. */
} LINK_QUALITY, *P_LINK_QUALITY;
typedef struct _EVENT_LINK_QUALITY_V2 {
LINK_QUALITY rLq[BSSID_NUM];
} EVENT_LINK_QUALITY_V2, *P_EVENT_LINK_QUALITY_V2;
typedef struct _EVENT_LINK_QUALITY {
INT_8 cRssi;
INT_8 cLinkQuality;
UINT_16 u2LinkSpeed;
UINT_8 ucMediumBusyPercentage;
} EVENT_LINK_QUALITY, *P_EVENT_LINK_QUALITY;
#endif
#if CFG_SUPPORT_P2P_RSSI_QUERY
/* EVENT_LINK_QUALITY */
typedef struct _EVENT_LINK_QUALITY_EX {
INT_8 cRssi;
INT_8 cLinkQuality;
UINT_16 u2LinkSpeed;
UINT_8 ucMediumBusyPercentage;
UINT_8 ucIsLQ0Rdy;
INT_8 cRssiP2P; /* For P2P Network. */
INT_8 cLinkQualityP2P;
UINT_16 u2LinkSpeedP2P;
UINT_8 ucMediumBusyPercentageP2P;
UINT_8 ucIsLQ1Rdy;
} EVENT_LINK_QUALITY_EX, *P_EVENT_LINK_QUALITY_EX;
#endif
/* EVENT_ID_STATISTICS */
typedef struct _EVENT_STATISTICS {
LARGE_INTEGER rTransmittedFragmentCount;
LARGE_INTEGER rMulticastTransmittedFrameCount;
LARGE_INTEGER rFailedCount;
LARGE_INTEGER rRetryCount;
LARGE_INTEGER rMultipleRetryCount;
LARGE_INTEGER rRTSSuccessCount;
LARGE_INTEGER rRTSFailureCount;
LARGE_INTEGER rACKFailureCount;
LARGE_INTEGER rFrameDuplicateCount;
LARGE_INTEGER rReceivedFragmentCount;
LARGE_INTEGER rMulticastReceivedFrameCount;
LARGE_INTEGER rFCSErrorCount;
} EVENT_STATISTICS, *P_EVENT_STATISTICS;
/* EVENT_ID_FW_SLEEPY_NOTIFY */
typedef struct _EVENT_SLEEPY_INFO_T {
UINT_8 ucSleepyState;
UINT_8 aucReserved[3];
} EVENT_SLEEPY_INFO_T, *P_EVENT_SLEEPY_INFO_T;
typedef struct _EVENT_ACTIVATE_STA_REC_T {
UINT_8 aucMacAddr[6];
UINT_8 ucStaRecIdx;
UINT_8 ucBssIndex;
} EVENT_ACTIVATE_STA_REC_T, *P_EVENT_ACTIVATE_STA_REC_T;
typedef struct _EVENT_DEACTIVATE_STA_REC_T {
UINT_8 ucStaRecIdx;
UINT_8 aucReserved[3];
} EVENT_DEACTIVATE_STA_REC_T, *P_EVENT_DEACTIVATE_STA_REC_T;
/* CMD_BT_OVER_WIFI */
typedef struct _CMD_BT_OVER_WIFI {
UINT_8 ucAction; /* 0: query, 1: setup, 2: destroy */
UINT_8 ucChannelNum;
PARAM_MAC_ADDRESS rPeerAddr;
UINT_16 u2BeaconInterval;
UINT_8 ucTimeoutDiscovery;
UINT_8 ucTimeoutInactivity;
UINT_8 ucRole;
UINT_8 PAL_Capabilities;
UINT_8 cMaxTxPower;
UINT_8 ucChannelBand;
UINT_8 ucReserved[1];
} CMD_BT_OVER_WIFI, *P_CMD_BT_OVER_WIFI;
/* EVENT_BT_OVER_WIFI */
typedef struct _EVENT_BT_OVER_WIFI {
UINT_8 ucLinkStatus;
UINT_8 ucSelectedChannel;
INT_8 cRSSI;
UINT_8 ucReserved[1];
} EVENT_BT_OVER_WIFI, *P_EVENT_BT_OVER_WIFI;
/* Same with DOMAIN_SUBBAND_INFO */
typedef struct _CMD_SUBBAND_INFO {
UINT_8 ucRegClass;
UINT_8 ucBand;
UINT_8 ucChannelSpan;
UINT_8 ucFirstChannelNum;
UINT_8 ucNumChannels;
UINT_8 aucReserved[3];
} CMD_SUBBAND_INFO, *P_CMD_SUBBAND_INFO;
/* CMD_SET_DOMAIN_INFO */
typedef struct _CMD_SET_DOMAIN_INFO_T {
UINT_16 u2CountryCode;
UINT_16 u2Reserved;
CMD_SUBBAND_INFO rSubBand[6];
UINT_8 uc2G4Bandwidth; /* CONFIG_BW_20_40M or CONFIG_BW_20M */
UINT_8 uc5GBandwidth; /* CONFIG_BW_20_40M or CONFIG_BW_20M */
UINT_8 aucReserved[2];
} CMD_SET_DOMAIN_INFO_T, *P_CMD_SET_DOMAIN_INFO_T;
/* CMD_SET_IP_ADDRESS */
typedef struct _IPV4_NETWORK_ADDRESS {
UINT_8 aucIpAddr[4];
} IPV4_NETWORK_ADDRESS, *P_IPV4_NETWORK_ADDRESS;
typedef struct _CMD_SET_NETWORK_ADDRESS_LIST {
UINT_8 ucBssIndex;
UINT_8 ucAddressCount;
UINT_8 ucReserved[2];
IPV4_NETWORK_ADDRESS arNetAddress[1];
} CMD_SET_NETWORK_ADDRESS_LIST, *P_CMD_SET_NETWORK_ADDRESS_LIST;
typedef struct _PATTERN_DESCRIPTION {
UINT_8 fgCheckBcA1;
UINT_8 fgCheckMcA1;
UINT_8 ePatternHeader;
UINT_8 fgAndOp;
UINT_8 fgNotOp;
UINT_8 ucPatternMask;
UINT_16 u2PatternOffset;
UINT_8 aucPattern[8];
} PATTERN_DESCRIPTION, *P_PATTERN_DESCRIPTION;
typedef struct _CMD_RAW_PATTERN_CONFIGURATION_T {
PATTERN_DESCRIPTION arPatternDesc[4];
} CMD_RAW_PATTERN_CONFIGURATION_T, *P_CMD_RAW_PATTERN_CONFIGURATION_T;
typedef struct _CMD_PATTERN_FUNC_CONFIG {
BOOLEAN fgBcA1En;
BOOLEAN fgMcA1En;
BOOLEAN fgBcA1MatchDrop;
BOOLEAN fgMcA1MatchDrop;
} CMD_PATTERN_FUNC_CONFIG, *P_CMD_PATTERN_FUNC_CONFIG;
typedef struct _EVENT_TX_DONE_T {
UINT_8 ucPacketSeq;
UINT_8 ucStatus;
UINT_16 u2SequenceNumber;
UINT_8 ucWlanIndex;
UINT_8 aucReserved1[3];
UINT_32 au4Reserved2;
UINT_32 au4Reserved3;
} EVENT_TX_DONE_T, *P_EVENT_TX_DONE_T;
typedef struct _CMD_BSS_ACTIVATE_CTRL {
UINT_8 ucBssIndex;
UINT_8 ucActive;
UINT_8 ucNetworkType;
UINT_8 ucOwnMacAddrIndex;
UINT_8 aucBssMacAddr[6];
UINT_8 ucBMCWlanIndex;
UINT_8 ucReserved;
} CMD_BSS_ACTIVATE_CTRL, *P_CMD_BSS_ACTIVATE_CTRL;
typedef struct _CMD_SET_BSS_RLM_PARAM_T {
UINT_8 ucBssIndex;
UINT_8 ucRfBand;
UINT_8 ucPrimaryChannel;
UINT_8 ucRfSco;
UINT_8 ucErpProtectMode;
UINT_8 ucHtProtectMode;
UINT_8 ucGfOperationMode;
UINT_8 ucTxRifsMode;
UINT_16 u2HtOpInfo3;
UINT_16 u2HtOpInfo2;
UINT_8 ucHtOpInfo1;
UINT_8 ucUseShortPreamble;
UINT_8 ucUseShortSlotTime;
UINT_8 ucVhtChannelWidth;
UINT_8 ucVhtChannelFrequencyS1;
UINT_8 ucVhtChannelFrequencyS2;
UINT_16 u2VhtBasicMcsSet;
} CMD_SET_BSS_RLM_PARAM_T, *P_CMD_SET_BSS_RLM_PARAM_T;
typedef struct _CMD_SET_BSS_INFO {
UINT_8 ucBssIndex;
UINT_8 ucConnectionState;
UINT_8 ucCurrentOPMode;
UINT_8 ucSSIDLen;
UINT_8 aucSSID[32];
UINT_8 aucBSSID[6];
UINT_8 ucIsQBSS;
UINT_8 ucReserved1;
UINT_16 u2OperationalRateSet;
UINT_16 u2BSSBasicRateSet;
UINT_8 ucStaRecIdxOfAP;
UINT_16 u2HwDefaultFixedRateCode;
UINT_8 ucNonHTBasicPhyType; /* For Slot Time and CWmin */
UINT_8 ucAuthMode;
UINT_8 ucEncStatus;
UINT_8 ucPhyTypeSet;
UINT_8 ucWapiMode;
UINT_8 ucIsApMode;
UINT_8 ucBMCWlanIndex;
UINT_8 ucHiddenSsidMode;
UINT_8 aucRsv[1];
UINT_32 u4PrivateData;
CMD_SET_BSS_RLM_PARAM_T rBssRlmParam;
} CMD_SET_BSS_INFO, *P_CMD_SET_BSS_INFO;
typedef enum _ENUM_RTS_POLICY_T {
RTS_POLICY_AUTO,
RTS_POLICY_STATIC_BW,
RTS_POLICY_DYNAMIC_BW,
RTS_POLICY_LEGACY,
RTS_POLICY_NO_RTS
} ENUM_RTS_POLICY;
typedef struct _CMD_UPDATE_STA_RECORD_T {
UINT_8 ucStaIndex;
UINT_8 ucStaType;
UINT_8 aucMacAddr[MAC_ADDR_LEN]; /* This field should assign at create and keep consistency for update usage */
UINT_16 u2AssocId;
UINT_16 u2ListenInterval;
UINT_8 ucBssIndex; /* This field should assign at create and keep consistency for update usage */
UINT_8 ucDesiredPhyTypeSet;
UINT_16 u2DesiredNonHTRateSet;
UINT_16 u2BSSBasicRateSet;
UINT_8 ucIsQoS;
UINT_8 ucIsUapsdSupported;
UINT_8 ucStaState;
UINT_8 ucMcsSet;
UINT_8 ucSupMcs32;
UINT_8 aucReserved1[1];
UINT_8 aucRxMcsBitmask[10];
UINT_16 u2RxHighestSupportedRate;
UINT_32 u4TxRateInfo;
UINT_16 u2HtCapInfo;
UINT_16 u2HtExtendedCap;
UINT_32 u4TxBeamformingCap;
UINT_8 ucAmpduParam;
UINT_8 ucAselCap;
UINT_8 ucRCPI;
UINT_8 ucNeedResp;
UINT_8 ucUapsdAc; /* b0~3: Trigger enabled, b4~7: Delivery enabled */
UINT_8 ucUapsdSp; /* 0: all, 1: max 2, 2: max 4, 3: max 6 */
UINT_8 ucWlanIndex; /* This field should assign at create and keep consistency for update usage */
UINT_8 ucBMCWlanIndex; /* This field should assign at create and keep consistency for update usage */
UINT_32 u4VhtCapInfo;
UINT_16 u2VhtRxMcsMap;
UINT_16 u2VhtRxHighestSupportedDataRate;
UINT_16 u2VhtTxMcsMap;
UINT_16 u2VhtTxHighestSupportedDataRate;
UINT_8 ucRtsPolicy; /* 0: auto 1: Static BW 2: Dynamic BW 3: Legacy 7: WoRts */
UINT_8 aucReserved2[1];
UINT_8 ucTrafficDataType; /* 0: auto 1: data 2: video 3: voice */
UINT_8 ucTxGfMode;
UINT_8 ucTxSgiMode;
UINT_8 ucTxStbcMode;
UINT_16 u2HwDefaultFixedRateCode;
UINT_8 ucTxAmpdu;
UINT_8 ucRxAmpdu;
UINT_32 u4FixedPhyRate; /* */
UINT_16 u2MaxLinkSpeed; /* unit is 0.5 Mbps */
UINT_16 u2MinLinkSpeed;
UINT_32 u4Flags;
UINT_8 aucReserved4[32];
} CMD_UPDATE_STA_RECORD_T, *P_CMD_UPDATE_STA_RECORD_T;
typedef struct _CMD_REMOVE_STA_RECORD_T {
UINT_8 ucActionType;
UINT_8 ucStaIndex;
UINT_8 ucBssIndex;
UINT_8 ucReserved;
} CMD_REMOVE_STA_RECORD_T, *P_CMD_REMOVE_STA_RECORD_T;
typedef struct _CMD_INDICATE_PM_BSS_CREATED_T {
UINT_8 ucBssIndex;
UINT_8 ucDtimPeriod;
UINT_16 u2BeaconInterval;
UINT_16 u2AtimWindow;
UINT_8 aucReserved[2];
} CMD_INDICATE_PM_BSS_CREATED, *P_CMD_INDICATE_PM_BSS_CREATED;
typedef struct _CMD_INDICATE_PM_BSS_CONNECTED_T {
UINT_8 ucBssIndex;
UINT_8 ucDtimPeriod;
UINT_16 u2AssocId;
UINT_16 u2BeaconInterval;
UINT_16 u2AtimWindow;
UINT_8 fgIsUapsdConnection;
UINT_8 ucBmpDeliveryAC;
UINT_8 ucBmpTriggerAC;
UINT_8 aucReserved[1];
} CMD_INDICATE_PM_BSS_CONNECTED, *P_CMD_INDICATE_PM_BSS_CONNECTED;
typedef struct _CMD_INDICATE_PM_BSS_ABORT {
UINT_8 ucBssIndex;
UINT_8 aucReserved[3];
} CMD_INDICATE_PM_BSS_ABORT, *P_CMD_INDICATE_PM_BSS_ABORT;
typedef struct _CMD_BEACON_TEMPLATE_UPDATE {
UINT_8 ucUpdateMethod; /* 0: update randomly, 1: update all, 2: delete all (1 and 2 will update directly without search) */
UINT_8 ucBssIndex;
UINT_8 aucReserved[2];
UINT_16 u2Capability;
UINT_16 u2IELen;
UINT_8 aucIE[MAX_IE_LENGTH];
} CMD_BEACON_TEMPLATE_UPDATE, *P_CMD_BEACON_TEMPLATE_UPDATE;
typedef struct _CMD_SET_WMM_PS_TEST_STRUC_T {
UINT_8 ucBssIndex;
UINT_8 bmfgApsdEnAc; /* b0~3: trigger-en AC0~3. b4~7: delivery-en AC0~3 */
UINT_8 ucIsEnterPsAtOnce; /* enter PS immediately, without the 5-second guard time after connecting */
UINT_8 ucIsDisableUcTrigger; /* do not send a UC trigger when the beacon TIM matches (under U-APSD) */
} CMD_SET_WMM_PS_TEST_STRUC_T, *P_CMD_SET_WMM_PS_TEST_STRUC_T;
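/*
 * Illustrative example (not part of the firmware interface definition): with
 * the bmfgApsdEnAc encoding above, 0x11 marks AC0 (BE) as trigger-enabled
 * (bit 0) and delivery-enabled (bit 4), while 0xFF enables all four ACs in
 * both roles.
 */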
/* Definition for CHANNEL_INFO.ucBand:
* 0: Reserved
* 1: BAND_2G4
* 2: BAND_5G
* Others: Reserved
*/
typedef struct _CHANNEL_INFO_T {
UINT_8 ucBand;
UINT_8 ucChannelNum;
} CHANNEL_INFO_T, *P_CHANNEL_INFO_T;
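/*
 * Illustrative example (not part of the firmware interface definition):
 * following the ucBand definition above, a 2.4 GHz channel 6 entry would be
 * initialized as { 1, 6 }, i.e. ucBand = 1 (BAND_2G4), ucChannelNum = 6.
 */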
typedef struct _CMD_SCAN_REQ_T {
UINT_8 ucSeqNum;
UINT_8 ucBssIndex;
UINT_8 ucScanType;
UINT_8 ucSSIDType; /* BIT(0) wildcard / BIT(1) P2P-wildcard / BIT(2) specific */
UINT_8 ucSSIDLength;
UINT_8 ucNumProbeReq;
UINT_16 u2ChannelMinDwellTime;
UINT_16 u2ChannelDwellTime;
UINT_16 u2TimeoutValue;
UINT_8 aucSSID[32];
UINT_8 ucChannelType;
UINT_8 ucChannelListNum;
UINT_8 aucReserved[2];
CHANNEL_INFO_T arChannelList[32];
UINT_16 u2IELen;
UINT_8 aucIE[MAX_IE_LENGTH];
} CMD_SCAN_REQ, *P_CMD_SCAN_REQ;
typedef struct _CMD_SCAN_REQ_V2_T {
UINT_8 ucSeqNum;
UINT_8 ucBssIndex;
UINT_8 ucScanType;
UINT_8 ucSSIDType;
UINT_8 ucSSIDNum;
UINT_8 ucNumProbeReq;
UINT_8 aucReserved[2];
PARAM_SSID_T arSSID[4];
UINT_16 u2ProbeDelayTime;
UINT_16 u2ChannelDwellTime;
UINT_16 u2TimeoutValue;
UINT_8 ucChannelType;
UINT_8 ucChannelListNum;
CHANNEL_INFO_T arChannelList[32];
UINT_16 u2IELen;
UINT_8 aucIE[MAX_IE_LENGTH];
} CMD_SCAN_REQ_V2, *P_CMD_SCAN_REQ_V2;
typedef struct _CMD_SCAN_CANCEL_T {
UINT_8 ucSeqNum;
UINT_8 ucIsExtChannel; /* For P2P channel extension. */
UINT_8 aucReserved[2];
} CMD_SCAN_CANCEL, *P_CMD_SCAN_CANCEL;
typedef struct _EVENT_SCAN_DONE_T {
UINT_8 ucSeqNum;
UINT_8 ucSparseChannelValid;
CHANNEL_INFO_T rSparseChannel;
} EVENT_SCAN_DONE, *P_EVENT_SCAN_DONE;
#if CFG_SUPPORT_BATCH_SCAN
typedef struct _CMD_BATCH_REQ_T {
UINT_8 ucSeqNum;
UINT_8 ucNetTypeIndex;
UINT_8 ucCmd; /* Start/ Stop */
UINT_8 ucMScan; /* number of scans per batch */
UINT_8 ucBestn; /* maximum number of APs to remember per scan */
UINT_8 ucRtt; /* number of strongest APs for which an approximate distance should be reported */
UINT_8 ucChannel; /* channels */
UINT_8 ucChannelType;
UINT_8 ucChannelListNum;
UINT_8 aucReserved[3];
UINT_32 u4Scanfreq; /* an integer number of seconds between scans */
CHANNEL_INFO_T arChannelList[32]; /* channels */
} CMD_BATCH_REQ_T, *P_CMD_BATCH_REQ_T;
typedef struct _EVENT_BATCH_RESULT_ENTRY_T {
UINT_8 aucBssid[MAC_ADDR_LEN];
UINT_8 aucSSID[ELEM_MAX_LEN_SSID];
UINT_8 ucSSIDLen;
INT_8 cRssi;
UINT_32 ucFreq;
UINT_32 u4Age;
UINT_32 u4Dist;
UINT_32 u4Distsd;
} EVENT_BATCH_RESULT_ENTRY_T, *P_EVENT_BATCH_RESULT_ENTRY_T;
typedef struct _EVENT_BATCH_RESULT_T {
UINT_8 ucScanCount;
UINT_8 aucReserved[3];
EVENT_BATCH_RESULT_ENTRY_T arBatchResult[12]; /* Must be the same with SCN_BATCH_STORE_MAX_NUM */
} EVENT_BATCH_RESULT_T, *P_EVENT_BATCH_RESULT_T;
#endif
typedef struct _CMD_CH_PRIVILEGE_T {
UINT_8 ucBssIndex;
UINT_8 ucTokenID;
UINT_8 ucAction;
UINT_8 ucPrimaryChannel;
UINT_8 ucRfSco;
UINT_8 ucRfBand;
UINT_8 ucRfChannelWidth; /* To support 80/160MHz bandwidth */
UINT_8 ucRfCenterFreqSeg1; /* To support 80/160MHz bandwidth */
UINT_8 ucRfCenterFreqSeg2; /* To support 80/160MHz bandwidth */
UINT_8 ucReqType;
UINT_8 aucReserved[2];
UINT_32 u4MaxInterval; /* In unit of ms */
} CMD_CH_PRIVILEGE_T, *P_CMD_CH_PRIVILEGE_T;
typedef struct _CMD_TX_PWR_T {
INT_8 cTxPwr2G4Cck; /* signed, in unit of 0.5dBm */
INT_8 cTxPwr2G4Dsss; /* signed, in unit of 0.5dBm */
INT_8 acReserved[2];
INT_8 cTxPwr2G4OFDM_BPSK;
INT_8 cTxPwr2G4OFDM_QPSK;
INT_8 cTxPwr2G4OFDM_16QAM;
INT_8 cTxPwr2G4OFDM_Reserved;
INT_8 cTxPwr2G4OFDM_48Mbps;
INT_8 cTxPwr2G4OFDM_54Mbps;
INT_8 cTxPwr2G4HT20_BPSK;
INT_8 cTxPwr2G4HT20_QPSK;
INT_8 cTxPwr2G4HT20_16QAM;
INT_8 cTxPwr2G4HT20_MCS5;
INT_8 cTxPwr2G4HT20_MCS6;
INT_8 cTxPwr2G4HT20_MCS7;
INT_8 cTxPwr2G4HT40_BPSK;
INT_8 cTxPwr2G4HT40_QPSK;
INT_8 cTxPwr2G4HT40_16QAM;
INT_8 cTxPwr2G4HT40_MCS5;
INT_8 cTxPwr2G4HT40_MCS6;
INT_8 cTxPwr2G4HT40_MCS7;
INT_8 cTxPwr5GOFDM_BPSK;
INT_8 cTxPwr5GOFDM_QPSK;
INT_8 cTxPwr5GOFDM_16QAM;
INT_8 cTxPwr5GOFDM_Reserved;
INT_8 cTxPwr5GOFDM_48Mbps;
INT_8 cTxPwr5GOFDM_54Mbps;
INT_8 cTxPwr5GHT20_BPSK;
INT_8 cTxPwr5GHT20_QPSK;
INT_8 cTxPwr5GHT20_16QAM;
INT_8 cTxPwr5GHT20_MCS5;
INT_8 cTxPwr5GHT20_MCS6;
INT_8 cTxPwr5GHT20_MCS7;
INT_8 cTxPwr5GHT40_BPSK;
INT_8 cTxPwr5GHT40_QPSK;
INT_8 cTxPwr5GHT40_16QAM;
INT_8 cTxPwr5GHT40_MCS5;
INT_8 cTxPwr5GHT40_MCS6;
INT_8 cTxPwr5GHT40_MCS7;
} CMD_TX_PWR_T, *P_CMD_TX_PWR_T;
typedef struct _CMD_TX_AC_PWR_T {
INT_8 ucBand;
#if 0
INT_8 c11AcTxPwr_BPSK;
INT_8 c11AcTxPwr_QPSK;
INT_8 c11AcTxPwr_16QAM;
INT_8 c11AcTxPwr_MCS5_MCS6;
INT_8 c11AcTxPwr_MCS7;
INT_8 c11AcTxPwr_MCS8;
INT_8 c11AcTxPwr_MCS9;
INT_8 c11AcTxPwrVht40_OFFSET;
INT_8 c11AcTxPwrVht80_OFFSET;
INT_8 c11AcTxPwrVht160_OFFSET;
#else
AC_PWR_SETTING_STRUCT rAcPwr;
#endif
} CMD_TX_AC_PWR_T, *P_CMD_TX_AC_PWR_T;
typedef struct _CMD_RSSI_PATH_COMPASATION_T {
INT_8 c2GRssiCompensation;
INT_8 c5GRssiCompensation;
} CMD_RSSI_PATH_COMPASATION_T, *P_CMD_RSSI_PATH_COMPASATION_T;
typedef struct _CMD_5G_PWR_OFFSET_T {
INT_8 cOffsetBand0; /* 4.915-4.980G */
INT_8 cOffsetBand1; /* 5.000-5.080G */
INT_8 cOffsetBand2; /* 5.160-5.180G */
INT_8 cOffsetBand3; /* 5.200-5.280G */
INT_8 cOffsetBand4; /* 5.300-5.340G */
INT_8 cOffsetBand5; /* 5.500-5.580G */
INT_8 cOffsetBand6; /* 5.600-5.680G */
INT_8 cOffsetBand7; /* 5.700-5.825G */
} CMD_5G_PWR_OFFSET_T, *P_CMD_5G_PWR_OFFSET_T;
typedef struct _CMD_PWR_PARAM_T {
UINT_32 au4Data[28];
UINT_32 u4RefValue1;
UINT_32 u4RefValue2;
} CMD_PWR_PARAM_T, *P_CMD_PWR_PARAM_T;
typedef struct _CMD_PHY_PARAM_T {
UINT_8 aucData[144]; /* eFuse content */
} CMD_PHY_PARAM_T, *P_CMD_PHY_PARAM_T;
typedef struct _CMD_AUTO_POWER_PARAM_T {
UINT_8 ucType; /* 0: Disable, 1: Enable, 0x10: Change parameters */
UINT_8 ucBssIndex;
UINT_8 aucReserved[2];
UINT_8 aucLevelRcpiTh[3];
UINT_8 aucReserved2[1];
INT_8 aicLevelPowerOffset[3]; /* signed, in unit of 0.5dBm */
UINT_8 aucReserved3[1];
UINT_8 aucReserved4[8];
} CMD_AUTO_POWER_PARAM_T, *P_CMD_AUTO_POWER_PARAM_T;
typedef struct _EVENT_CH_PRIVILEGE_T {
UINT_8 ucBssIndex;
UINT_8 ucTokenID;
UINT_8 ucStatus;
UINT_8 ucPrimaryChannel;
UINT_8 ucRfSco;
UINT_8 ucRfBand;
UINT_8 ucRfChannelWidth; /* To support 80/160MHz bandwidth */
UINT_8 ucRfCenterFreqSeg1; /* To support 80/160MHz bandwidth */
UINT_8 ucRfCenterFreqSeg2; /* To support 80/160MHz bandwidth */
UINT_8 ucReqType;
UINT_8 aucReserved[2];
UINT_32 u4GrantInterval; /* In unit of ms */
} EVENT_CH_PRIVILEGE_T, *P_EVENT_CH_PRIVILEGE_T;
typedef struct _EVENT_BSS_BEACON_TIMEOUT_T {
UINT_8 ucBssIndex;
UINT_8 aucReserved[3];
} EVENT_BSS_BEACON_TIMEOUT_T, *P_EVENT_BSS_BEACON_TIMEOUT_T;
typedef struct _EVENT_STA_AGING_TIMEOUT_T {
UINT_8 ucStaRecIdx;
UINT_8 aucReserved[3];
} EVENT_STA_AGING_TIMEOUT_T, *P_EVENT_STA_AGING_TIMEOUT_T;
typedef struct _EVENT_NOA_TIMING_T {
UINT_8 ucIsInUse; /* Indicate if this entry is in use or not */
UINT_8 ucCount; /* Count */
UINT_8 aucReserved[2];
UINT_32 u4Duration; /* Duration */
UINT_32 u4Interval; /* Interval */
UINT_32 u4StartTime; /* Start Time */
} EVENT_NOA_TIMING_T, *P_EVENT_NOA_TIMING_T;
typedef struct _EVENT_UPDATE_NOA_PARAMS_T {
UINT_8 ucBssIndex;
UINT_8 aucReserved[2];
UINT_8 ucEnableOppPS;
UINT_16 u2CTWindow;
UINT_8 ucNoAIndex;
UINT_8 ucNoATimingCount; /* Number of NoA Timing */
EVENT_NOA_TIMING_T arEventNoaTiming[8 /*P2P_MAXIMUM_NOA_COUNT */];
} EVENT_UPDATE_NOA_PARAMS_T, *P_EVENT_UPDATE_NOA_PARAMS_T;
typedef struct _EVENT_AP_OBSS_STATUS_T {
UINT_8 ucBssIndex;
UINT_8 ucObssErpProtectMode;
UINT_8 ucObssHtProtectMode;
UINT_8 ucObssGfOperationMode;
UINT_8 ucObssRifsOperationMode;
UINT_8 ucObssBeaconForcedTo20M;
UINT_8 aucReserved[2];
} EVENT_AP_OBSS_STATUS_T, *P_EVENT_AP_OBSS_STATUS_T;
typedef struct _EVENT_DEBUG_MSG_T {
UINT_16 u2DebugMsgId;
UINT_8 ucMsgType;
UINT_8 ucFlags; /* unused */
UINT_32 u4Value; /* memory address or ... */
UINT_16 u2MsgSize;
UINT_8 aucReserved0[2];
UINT_8 aucMsg[1];
} EVENT_DEBUG_MSG_T, *P_EVENT_DEBUG_MSG_T;
typedef struct _CMD_EDGE_TXPWR_LIMIT_T {
INT_8 cBandEdgeMaxPwrCCK;
INT_8 cBandEdgeMaxPwrOFDM20;
INT_8 cBandEdgeMaxPwrOFDM40;
INT_8 cBandEdgeMaxPwrOFDM80;
} CMD_EDGE_TXPWR_LIMIT_T, *P_CMD_EDGE_TXPWR_LIMIT_T;
typedef struct _CMD_POWER_OFFSET_T {
UINT_8 ucBand; /*1:2.4G ; 2:5G */
UINT_8 ucSubBandOffset[MAX_SUBBAND_NUM]; /* the maximum number of sub-bands applies to 5G, which is divided into 8 sub-bands */
UINT_8 aucReverse[3];
} CMD_POWER_OFFSET_T, *P_CMD_POWER_OFFSET_T;
typedef struct _CMD_SET_DEVICE_MODE_T {
UINT_16 u2ChipID;
UINT_16 u2Mode;
} CMD_SET_DEVICE_MODE_T, *P_CMD_SET_DEVICE_MODE_T;
#if CFG_SUPPORT_RDD_TEST_MODE
typedef struct _CMD_RDD_CH_T {
UINT_8 ucRddTestMode;
UINT_8 ucRddShutCh;
UINT_8 ucRddStartCh;
UINT_8 ucRddStopCh;
UINT_8 ucRddDfs;
UINT_8 ucReserved;
UINT_8 ucReserved1;
UINT_8 ucReserved2;
} CMD_RDD_CH_T, *P_CMD_RDD_CH_T;
typedef struct _EVENT_RDD_STATUS_T {
UINT_8 ucRddStatus;
UINT_8 aucReserved[3];
} EVENT_RDD_STATUS_T, *P_EVENT_RDD_STATUS_T;
#endif
typedef struct _EVENT_ICAP_STATUS_T {
UINT_8 ucRddStatus;
UINT_8 aucReserved[3];
UINT_32 u4StartAddress;
UINT_32 u4IcapSieze;
} EVENT_ICAP_STATUS_T, *P_EVENT_ICAP_STATUS_T;
typedef struct _CMD_SET_TXPWR_CTRL_T {
INT_8 c2GLegacyStaPwrOffset; /* Unit: 0.5dBm, default: 0 */
INT_8 c2GHotspotPwrOffset;
INT_8 c2GP2pPwrOffset;
INT_8 c2GBowPwrOffset;
INT_8 c5GLegacyStaPwrOffset; /* Unit: 0.5dBm, default: 0 */
INT_8 c5GHotspotPwrOffset;
INT_8 c5GP2pPwrOffset;
INT_8 c5GBowPwrOffset;
UINT_8 ucConcurrencePolicy; /* TX power policy when concurrence
in the same channel
0: Highest power has priority
1: Lowest power has priority */
INT_8 acReserved1[3]; /* Must be zero */
/* Power limit by channel for all data rates */
INT_8 acTxPwrLimit2G[14]; /* Channel 1~14, Unit: 0.5dBm */
INT_8 acTxPwrLimit5G[4]; /* UNII 1~4 */
INT_8 acReserved2[2]; /* Must be zero */
} CMD_SET_TXPWR_CTRL_T, *P_CMD_SET_TXPWR_CTRL_T;
typedef enum _ENUM_NLO_CIPHER_ALGORITHM {
NLO_CIPHER_ALGO_NONE = 0x00,
NLO_CIPHER_ALGO_WEP40 = 0x01,
NLO_CIPHER_ALGO_TKIP = 0x02,
NLO_CIPHER_ALGO_CCMP = 0x04,
NLO_CIPHER_ALGO_WEP104 = 0x05,
NLO_CIPHER_ALGO_WPA_USE_GROUP = 0x100,
NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
NLO_CIPHER_ALGO_WEP = 0x101,
} ENUM_NLO_CIPHER_ALGORITHM, *P_ENUM_NLO_CIPHER_ALGORITHM;
typedef enum _ENUM_NLO_AUTH_ALGORITHM {
NLO_AUTH_ALGO_80211_OPEN = 1,
NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
NLO_AUTH_ALGO_WPA = 3,
NLO_AUTH_ALGO_WPA_PSK = 4,
NLO_AUTH_ALGO_WPA_NONE = 5,
NLO_AUTH_ALGO_RSNA = 6,
NLO_AUTH_ALGO_RSNA_PSK = 7,
} ENUM_NLO_AUTH_ALGORITHM, *P_ENUM_NLO_AUTH_ALGORITHM;
typedef struct _NLO_NETWORK {
UINT_8 ucNumChannelHint[4];
UINT_8 ucSSIDLength;
UINT_8 ucCipherAlgo;
UINT_16 u2AuthAlgo;
UINT_8 aucSSID[32];
} NLO_NETWORK, *P_NLO_NETWORK;
typedef struct _CMD_NLO_REQ {
UINT_8 ucSeqNum;
UINT_8 ucBssIndex;
UINT_8 fgStopAfterIndication;
UINT_8 ucFastScanIteration;
UINT_16 u2FastScanPeriod;
UINT_16 u2SlowScanPeriod;
UINT_8 ucEntryNum;
UINT_8 ucReserved;
UINT_16 u2IELen;
NLO_NETWORK arNetworkList[16];
UINT_8 aucIE[0];
} CMD_NLO_REQ, *P_CMD_NLO_REQ;
typedef struct _CMD_NLO_CANCEL_T {
UINT_8 ucSeqNum;
UINT_8 aucReserved[3];
} CMD_NLO_CANCEL, *P_CMD_NLO_CANCEL;
typedef struct _EVENT_NLO_DONE_T {
UINT_8 ucSeqNum;
UINT_8 ucStatus;
UINT_8 aucReserved[2];
} EVENT_NLO_DONE_T, *P_EVENT_NLO_DONE_T;
typedef struct _CMD_GET_STA_STATISTICS_T {
UINT_8 ucIndex;
UINT_8 ucFlags;
UINT_8 ucReadClear;
UINT_8 aucReserved0[1];
UINT_8 aucMacAddr[MAC_ADDR_LEN];
UINT_8 aucReserved1[2];
UINT_8 aucReserved2[16];
} CMD_GET_STA_STATISTICS_T, *P_CMD_GET_STA_STATISTICS_T;
/* CFG_SUPPORT_WFD */
typedef struct _EVENT_STA_STATISTICS_T {
/* Event header */
/* UINT_16 u2Length; */
/* UINT_16 u2Reserved1; */ /* Must be filled with 0x0001 (EVENT Packet) */
/* UINT_8 ucEID; */
/* UINT_8 ucSeqNum; */
/* UINT_8 aucReserved2[2]; */
/* Event Body */
UINT_8 ucVersion;
UINT_8 aucReserved1[3];
UINT_32 u4Flags; /* Bit0: valid */
UINT_8 ucStaRecIdx;
UINT_8 ucNetworkTypeIndex;
UINT_8 ucWTEntry;
UINT_8 aucReserved4[1];
UINT_8 ucMacAddr[MAC_ADDR_LEN];
UINT_8 ucPer; /* base: 128 */
UINT_8 ucRcpi;
UINT_32 u4PhyMode; /* SGI BW */
UINT_16 u2LinkSpeed; /* unit is 0.5 Mbps */
UINT_8 ucLinkQuality;
UINT_8 ucLinkReserved;
UINT_32 u4TxCount;
UINT_32 u4TxFailCount;
UINT_32 u4TxLifeTimeoutCount;
UINT_32 u4TxDoneAirTime;
UINT_8 aucReserved[64];
} EVENT_STA_STATISTICS_T, *P_EVENT_STA_STATISTICS_T;
/*******************************************************************************
* P U B L I C D A T A
********************************************************************************
*/
/*******************************************************************************
* P R I V A T E D A T A
********************************************************************************
*/
/*******************************************************************************
* M A C R O S
********************************************************************************
*/
/*******************************************************************************
* F U N C T I O N D E C L A R A T I O N S
********************************************************************************
*/
VOID
nicCmdEventQueryMcrRead(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryMemDump(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQuerySwCtrlRead(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryChipConfig(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRfTestATInfo(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventSetCommon(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventSetDisassociate(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventSetIpAddress(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryLinkQuality(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryLinkSpeed(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryStatistics(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventEnterRfTest(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventLeaveRfTest(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryMcastAddr(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryEepromRead(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventSetMediaStreamMode(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventSetStopSchedScan(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
/* Statistics responder */
VOID
nicCmdEventQueryXmitOk(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRecvOk(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryXmitError(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRecvError(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRecvNoBuffer(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRecvCrcError(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryRecvErrorAlignment(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryXmitOneCollision(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryXmitMoreCollisions(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
VOID
nicCmdEventQueryXmitMaxCollisions(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
/* for timeout check */
VOID nicOidCmdTimeoutCommon(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo);
VOID nicCmdTimeoutCommon(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo);
VOID nicOidCmdEnterRFTestTimeout(IN P_ADAPTER_T prAdapter, IN P_CMD_INFO_T prCmdInfo);
#if CFG_SUPPORT_BATCH_SCAN
VOID
nicCmdEventBatchScanResult(IN P_ADAPTER_T prAdapter,
IN P_CMD_INFO_T prCmdInfo, IN PUINT_8 pucEventBuf);
#endif
/*******************************************************************************
* F U N C T I O N S
********************************************************************************
*/
#endif /* _NIC_CMD_EVENT_H */
|
emceethemouth/kernel_androidone
|
drivers/misc/mediatek/combo/drv_wlan/mt6630/wlan/include/nic_cmd_event.h
|
C
|
gpl-2.0
| 74,754
|
<?php
/*
* LibreNMS
*
* Copyright (c) 2016 Søren Friis Rosiak <sorenrosiak@gmail.com>
* This program is free software: you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version. Please see LICENSE.txt at the top level of
* the source code distribution for details.
*/
$temp = snmpwalk_cache_multi_oid($device, 'cfwHardwareStatusTable', array(), 'CISCO-FIREWALL-MIB');
$cur_oid = '.1.3.6.1.4.1.9.9.147.1.2.1.1.1.3.';
if (is_array($temp)) {
//Create State Index
if (strstr($temp['netInterface']['cfwHardwareStatusDetail'], 'not Configured') === false) {
$state_name = 'cfwHardwareStatus';
$state_index_id = create_state_index($state_name);
//Create State Translation
if ($state_index_id !== null) {
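// Each row below maps to one state_translations insert:
// (state_index_id, state_descr, state_draw_graph, state_value, state_generic_value),
// where state_generic_value follows the LibreNMS convention 0 = ok, 1 = warning, 2 = critical.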
$states = array(
array($state_index_id,'other',0,1,2) ,
array($state_index_id,'up',0,2,0) ,
array($state_index_id,'down',0,3,2) ,
array($state_index_id,'error',0,4,2) ,
array($state_index_id,'overTemp',0,5,2) ,
array($state_index_id,'busy',0,6,2) ,
array($state_index_id,'noMedia',0,7,2) ,
array($state_index_id,'backup',0,8,2) ,
array($state_index_id,'active',0,9,0) ,
array($state_index_id,'standby',0,10,0)
);
foreach ($states as $value) {
$insert = array(
'state_index_id' => $value[0],
'state_descr' => $value[1],
'state_draw_graph' => $value[2],
'state_value' => $value[3],
'state_generic_value' => $value[4]
);
dbInsert($insert, 'state_translations');
}
}
foreach ($temp as $index => $entry) {
$descr = ucwords(trim(preg_replace('/\s*\([^\s)]*\)/', '', $temp[$index]['cfwHardwareInformation'])));
if ($index == 'netInterface') {
$index = 4;
} elseif ($index == 'primaryUnit') {
$index = 6;
} elseif ($index == 'secondaryUnit') {
$index = 7;
}
//Discover Sensors
discover_sensor($valid['sensor'], 'state', $device, $cur_oid.$index, $index, $state_name, $descr, '1', '1', null, null, null, null, $entry['cfwHardwareStatusValue'], 'snmp', $index);
//Create Sensor To State Index
create_sensor_to_state_index($device, $state_name, $index);
}
}
}
|
wrgeorge1983/librenms
|
includes/discovery/sensors/states/asa.inc.php
|
PHP
|
gpl-3.0
| 2,706
|
/**
* This Source Code Form is subject to the terms of the Mozilla Public License,
* v. 2.0. If a copy of the MPL was not distributed with this file, You can
* obtain one at http://mozilla.org/MPL/2.0/. OpenMRS is also distributed under
* the terms of the Healthcare Disclaimer located at http://openmrs.org/license.
*
* Copyright (C) OpenMRS Inc. OpenMRS is a registered trademark and the OpenMRS
* graphic logo is a trademark of OpenMRS Inc.
*/
package org.openmrs.api.db.hibernate.search;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.queryparser.classic.QueryParser.Operator;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.hibernate.Session;
import org.hibernate.search.FullTextQuery;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.hibernate.search.query.dsl.QueryBuilder;
import org.openmrs.collection.ListPart;
/**
* Performs Lucene queries.
*
* @since 1.11
*/
public abstract class LuceneQuery<T> extends SearchQuery<T> {
private FullTextQuery fullTextQuery;
private Set<Set<Term>> includeTerms = new HashSet<Set<Term>>();
private Set<Term> excludeTerms = new HashSet<Term>();
/**
* The preferred way to create a Lucene query using the query parser.
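* <p>
* A minimal usage sketch (the entity class and query string are illustrative
* only, not taken from this code base):
* <pre>{@code
* List<Concept> hits = LuceneQuery
*     .newQuery(Concept.class, session, "aspirin")
*     .list();
* }</pre>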
* @param type filters on type
* @param session the Hibernate session to run the query against
* @param query the query string to be parsed
*
* @return the Lucene query
*/
public static <T> LuceneQuery<T> newQuery(final Class<T> type, final Session session, final String query) {
return new LuceneQuery<T>(
type, session) {
@Override
protected Query prepareQuery() throws ParseException {
if (query.isEmpty()) {
return new MatchAllDocsQuery();
}
return newQueryParser().parse(query);
}
};
}
/**
* Escape any characters that can be interpreted by the query parser.
*
* @param query
* @return the escaped query
*/
public static String escapeQuery(final String query) {
return QueryParser.escape(query);
}
public LuceneQuery(Class<T> type, Session session) {
super(session, type);
buildQuery();
}
/**
* Include items with the given value in the specified field.
* <p>
* It is a filter applied before the query.
*
* @param field
* @param value
* @return the query
*/
public LuceneQuery<T> include(String field, Object value) {
if (value != null) {
include(field, new Object[] { value });
}
return this;
}
public LuceneQuery<T> include(String field, Collection<?> values) {
if (values != null) {
include(field, values.toArray());
}
return this;
}
/**
* Include items with any of the given values in the specified field.
* <p>
* It is a filter applied before the query.
*
* @param field
* @param values
* @return the query
*/
public LuceneQuery<T> include(String field, Object[] values) {
if (values != null && values.length != 0) {
Set<Term> terms = new HashSet<Term>();
for (Object value : values) {
terms.add(new Term(field, value.toString()));
}
includeTerms.add(terms);
fullTextQuery.enableFullTextFilter("termsFilterFactory").setParameter("includeTerms", includeTerms)
.setParameter("excludeTerms", excludeTerms);
}
return this;
}
/**
* Exclude any items with the given value in the specified field.
* <p>
* It is a filter applied before the query.
*
* @param field
* @param value
* @return the query
*/
public LuceneQuery<T> exclude(String field, Object value) {
if (value != null) {
exclude(field, new Object[] { value });
}
return this;
}
/**
* Exclude any items with the given values in the specified field.
* <p>
* It is a filter applied before the query.
*
* @param field
* @param values
* @return the query
*/
public LuceneQuery<T> exclude(String field, Object[] values) {
if (values != null && values.length != 0) {
for (Object value : values) {
excludeTerms.add(new Term(field, value.toString()));
}
fullTextQuery.enableFullTextFilter("termsFilterFactory").setParameter("includeTerms", includeTerms)
.setParameter("excludeTerms", excludeTerms);
}
return this;
}
/**
* It is called by the constructor to get an instance of a query.
* <p>
* To construct the query you can use {@link #newQueryBuilder()} or {@link #newQueryParser()},
* which are created for the proper type.
*
* @return the query
* @throws ParseException
*/
protected abstract Query prepareQuery() throws ParseException;
/**
* It is called by the constructor after creating {@link FullTextQuery}.
* <p>
* You can override it to adjust the full text query, e.g. add a filter.
*
* @param fullTextQuery
*/
protected void adjustFullTextQuery(FullTextQuery fullTextQuery) {
}
/**
* You can use it in {@link #prepareQuery()}.
*
* @return the query builder
*/
protected QueryBuilder newQueryBuilder() {
return getFullTextSession().getSearchFactory().buildQueryBuilder().forEntity(getType()).get();
}
/**
* You can use it in {@link #prepareQuery()}.
*
* @return the query parser
*/
protected QueryParser newQueryParser() {
Analyzer analyzer = getFullTextSession().getSearchFactory().getAnalyzer(getType());
QueryParser queryParser = new QueryParser(null, analyzer);
queryParser.setDefaultOperator(Operator.AND);
return queryParser;
}
/**
* Gives you access to the full text session.
*
* @return the full text session
*/
protected FullTextSession getFullTextSession() {
return Search.getFullTextSession(getSession());
}
/**
* Skip elements whose value repeats in the given field.
* <p>
* Only the first element will be included in the results.
* <p>
* <b>Note:</b> For performance reasons you should call this method last when constructing a
* query. When called it will project the query and create a filter to eliminate duplicates.
*
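* An illustrative example (the field name is hypothetical):
* <pre>{@code
* query.skipSame("conceptId").list();
* }</pre>
*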
* @param field
* @return this
*/
public LuceneQuery<T> skipSame(String field) {
String idPropertyName = getSession().getSessionFactory().getClassMetadata(getType()).getIdentifierPropertyName();
List<Object> documents = listProjection(idPropertyName, field);
TermsFilter termsFilter = null;
if (!documents.isEmpty()) {
Set<Object> uniqueFieldValues = new HashSet<Object>();
List<Term> terms = new ArrayList<Term>();
for (Object document : documents) {
Object[] row = (Object[]) document;
if (uniqueFieldValues.add(row[1])) {
terms.add(new Term(idPropertyName, row[0].toString()));
}
}
termsFilter = new TermsFilter(terms);
}
buildQuery();
if (termsFilter != null) {
fullTextQuery.setFilter(termsFilter);
}
return this;
}
@Override
public T uniqueResult() {
@SuppressWarnings("unchecked")
T result = (T) fullTextQuery.uniqueResult();
return result;
}
@Override
public List<T> list() {
@SuppressWarnings("unchecked")
List<T> list = fullTextQuery.list();
return list;
}
@Override
public ListPart<T> listPart(Long firstResult, Long maxResults) {
applyPartialResults(fullTextQuery, firstResult, maxResults);
@SuppressWarnings("unchecked")
List<T> list = fullTextQuery.list();
return ListPart.newListPart(list, firstResult, maxResults, Long.valueOf(fullTextQuery.getResultSize()),
!fullTextQuery.hasPartialResults());
}
/**
* @see org.openmrs.api.db.hibernate.search.SearchQuery#resultSize()
*/
@Override
public long resultSize() {
return fullTextQuery.getResultSize();
}
public List<Object> listProjection(String... fields) {
fullTextQuery.setProjection(fields);
@SuppressWarnings("unchecked")
List<Object> list = fullTextQuery.list();
return list;
}
public ListPart<Object> listPartProjection(Long firstResult, Long maxResults, String... fields) {
applyPartialResults(fullTextQuery, firstResult, maxResults);
fullTextQuery.setProjection(fields);
@SuppressWarnings("unchecked")
List<Object> list = fullTextQuery.list();
return ListPart.newListPart(list, firstResult, maxResults, Long.valueOf(fullTextQuery.getResultSize()),
!fullTextQuery.hasPartialResults());
}
public ListPart<Object> listPartProjection(Integer firstResult, Integer maxResults, String... fields) {
Long first = (firstResult != null) ? Long.valueOf(firstResult) : null;
Long max = (maxResults != null) ? Long.valueOf(maxResults) : null;
return listPartProjection(first, max, fields);
}
private void buildQuery() {
Query query;
try {
query = prepareQuery();
}
catch (ParseException e) {
throw new IllegalStateException("Invalid query", e);
}
fullTextQuery = getFullTextSession().createFullTextQuery(query, getType());
adjustFullTextQuery(fullTextQuery);
}
private void applyPartialResults(FullTextQuery fullTextQuery, Long firstResult, Long maxResults) {
if (firstResult != null) {
fullTextQuery.setFirstResult(firstResult.intValue());
}
if (maxResults != null) {
fullTextQuery.setMaxResults(maxResults.intValue());
}
}
}
|
joansmith/openmrs-core
|
api/src/main/java/org/openmrs/api/db/hibernate/search/LuceneQuery.java
|
Java
|
mpl-2.0
| 9,429
|
{{+partials.standard_extensions_article article:intros.api_index is_apps:false}}
|
plxaye/chromium
|
src/chrome/common/extensions/docs/templates/public/extensions/api_index.html
|
HTML
|
apache-2.0
| 81
|
// +build go1.6
package gotool
import (
"go/build"
"path/filepath"
"runtime"
)
var gorootSrc = filepath.Join(runtime.GOROOT(), "src")
func shouldIgnoreImport(p *build.Package) bool {
return p == nil || len(p.InvalidGoFiles) == 0
}
|
jcgruenhage/dendrite
|
vendor/src/github.com/alecthomas/gometalinter/_linters/src/github.com/kisielk/gotool/go16.go
|
GO
|
apache-2.0
| 239
|
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jetbrains.python.codeInsight.stdlib;
import com.intellij.openapi.diagnostic.Logger;
import com.jetbrains.python.PythonHelpersLocator;
import org.jetbrains.annotations.Nullable;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
/**
* @author vlan
*/
public class PyStdlibUtil {
@Nullable private static Set<String> PACKAGES = loadStdlibPackagesList();
private PyStdlibUtil() {
}
@Nullable
public static Collection<String> getPackages() {
return PACKAGES;
}
@Nullable
private static Set<String> loadStdlibPackagesList() {
final Logger log = Logger.getInstance(PyStdlibUtil.class.getName());
final String helperPath = PythonHelpersLocator.getHelperPath("/tools/stdlib_packages.txt");
try {
final BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(helperPath)));
try {
final Set<String> result = new HashSet<String>();
String line;
while ((line = reader.readLine()) != null) {
result.add(line);
}
return result;
}
finally {
reader.close();
}
}
catch (IOException e) {
log.error("Cannot read list of standard library packages: " + e.getMessage());
}
return null;
}
}
|
akosyakov/intellij-community
|
python/src/com/jetbrains/python/codeInsight/stdlib/PyStdlibUtil.java
|
Java
|
apache-2.0
| 2,013
|
<html>
<head>
<script>
window.onunload = function () {
try {
var url = '1251.html';
var xhr = new XMLHttpRequest();
xhr.open('GET', url, false);
xhr.send(null);
window.parent.completed("PASS: sync XHR completed successfully");
} catch (e) {
window.parent.completed("FAIL: sync XHR during unload failed: " + e.message);
}
};
window.onload = function () {
window.parent.subframeLoaded();
};
</script>
</head>
<body>
</body>
</html>
|
XiaosongWei/chromium-crosswalk
|
third_party/WebKit/LayoutTests/http/tests/xmlhttprequest/resources/xmlhttprequest-in-unload-sync.html
|
HTML
|
bsd-3-clause
| 494
|
<?php
/**
* Util
*
* @copyright Copyright (c) Gjero Krsteski (http://krsteski.de)
* @license http://krsteski.de/new-bsd-license New BSD License
*/
namespace Pimf\Util;
/**
* Due to PHP Bug #39736, serialize() consumes an insane amount of RAM.
*
* Now we can put objects, strings, integers or arrays. Even instances of SimpleXMLElement can be put too!
*
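* A minimal usage sketch (the payload is illustrative only):
*
* <code>
* $blob = Serializer::serialize(array('answer' => 42));
* $restored = Serializer::unserialize($blob); // array('answer' => 42)
* </code>
*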
* @package Util
* @link https://bugs.php.net/bug.php?id=39736
* @author Gjero Krsteski <gjero@krsteski.de>
*/
class Serializer
{
/**
* Serialize things.
*
* @param mixed $object Item you want - string, array, integer, object
*
* @return string Containing a byte-stream representation.
*/
public static function serialize($object)
{
$masked = false;
if (false === is_object($object)) {
$object = self::mask($object);
$masked = true;
}
$capsule = new \stdClass();
$capsule->type = get_class($object);
$capsule->object = $object;
$capsule->fake = $masked;
if ($object instanceof \SimpleXMLElement) {
$capsule->object = $object->asXml();
}
return '' . self::serializeNative($capsule);
}
/**
* Unserialize things.
*
* @param string $object Serialized object.
*
* @return mixed
*/
public static function unserialize($object)
{
$capsule = self::unserializeNative($object);
if (true === $capsule->fake) {
$capsule->object = self::unmask($capsule->object);
}
if ($capsule->type == 'SimpleXMLElement') {
$capsule->object = simplexml_load_string($capsule->object);
}
return $capsule->object;
}
/**
* @param \stdClass $value Item value.
*
* @throws \RuntimeException If error during serialize.
* @return string
*/
public static function serializeNative($value)
{
$ret = (extension_loaded('igbinary') && function_exists('igbinary_serialize')) ? @igbinary_serialize($value) : @serialize($value);
self::bombIf($ret);
return $ret;
}
/**
* @param string $serialized The serialized item-string.
*
* @throws \RuntimeException If error during unserialize.
* @return mixed
*/
public static function unserializeNative($serialized)
{
$ret = (extension_loaded('igbinary') && function_exists('igbinary_unserialize'))
? @igbinary_unserialize($serialized)
: @unserialize(
$serialized
);
self::bombIf($ret);
return $ret;
}
/**
* @param mixed $item Item
*
* @return \stdClass
*/
private static function mask($item)
{
return (object)$item;
}
/**
* @param mixed $item Item
*
* @return array
*/
private static function unmask($item)
{
if (isset($item->scalar)) {
return $item->scalar;
}
return (array)$item;
}
/**
* @param boolean $valid
*
* @throws \RuntimeException
*/
private static function bombIf($valid)
{
if ($valid === false) {
$err = error_get_last();
throw new \RuntimeException($err['message']);
}
}
}
|
seem-sky/FrameworkBenchmarks
|
php-pimf/pimf-framework/core/Pimf/Util/Serializer.php
|
PHP
|
bsd-3-clause
| 3,028
|
module ChunkyPNG
class Canvas
# Methods for encoding a Canvas instance into a PNG datastream.
#
# Overview of the encoding process:
#
# * The image is split up in scanlines (i.e. rows of pixels);
# * All pixels are encoded as a pixelstream, based on the color mode.
# * All the pixel bytes in the pixelstream are adjusted using a filtering
# method if one is specified.
# * Compress the resulting string using deflate compression.
# * Split compressed data over one or more PNG chunks.
# * These chunks should be embedded in a datastream with at least a IHDR and
# IEND chunk and possibly a PLTE chunk.
#
# For interlaced images, the initial image is first split into 7 subimages.
# These images get encoded exactly as above, and the result gets combined
# before the compression step.
#
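# A minimal usage sketch (file name and canvas contents are illustrative only):
#
#   canvas = ChunkyPNG::Canvas.new(16, 16, ChunkyPNG::Color.rgb(255, 0, 0))
#   canvas.save('example.png', :best_compression)  # or: canvas.to_blob(:fast_rgba)
#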
# @see ChunkyPNG::Canvas::PNGDecoding
# @see http://www.w3.org/TR/PNG/ The W3C PNG format specification
module PNGEncoding
# The palette used for encoding the image. This is only used for images
# that get encoded using indexed colors.
# @return [ChunkyPNG::Palette]
attr_accessor :encoding_palette
# Writes the canvas to an IO stream, encoded as a PNG image.
# @param [IO] io The output stream to write to.
# @param constraints (see ChunkyPNG::Canvas::PNGEncoding#to_datastream)
# @return [void]
def write(io, constraints = {})
to_datastream(constraints).write(io)
end
# Writes the canvas to a file, encoded as a PNG image.
# @param [String] filename The file to save the PNG image to.
# @param constraints (see ChunkyPNG::Canvas::PNGEncoding#to_datastream)
# @return [void]
def save(filename, constraints = {})
File.open(filename, 'wb') { |io| write(io, constraints) }
end
# Encodes the canvas to a PNG formatted string.
# @param constraints (see ChunkyPNG::Canvas::PNGEncoding#to_datastream)
# @return [String] The PNG encoded canvas as string.
def to_blob(constraints = {})
to_datastream(constraints).to_blob
end
alias_method :to_string, :to_blob
alias_method :to_s, :to_blob
# Converts this Canvas to a datastream, so that it can be saved as a PNG image.
# @param [Hash, Symbol] constraints The constraints to use when encoding the canvas.
# This can either be a hash with different constraints, or a symbol which acts as a
# preset for some constraints. If no constraints are given, ChunkyPNG will decide
# for itself how to best create the PNG datastream.
# Supported presets are <tt>:fast_rgba</tt> for quickly saving images with transparency,
# <tt>:fast_rgb</tt> for quickly saving opaque images, and <tt>:best_compression</tt> to
# obtain the smallest possible filesize.
# @option constraints [Fixnum] :color_mode The color mode to use. Use one of the
# ChunkyPNG::COLOR_* constants.
# @option constraints [true, false] :interlace Whether to use interlacing.
# @option constraints [Fixnum] :compression The compression level for Zlib. This can be a
# value between 0 and 9, or a Zlib constant like Zlib::BEST_COMPRESSION.
# @option constraints [Fixnum] :bit_depth The bit depth to use. This option is only used
# for indexed images, in which case it overrides the determined minimal bit depth. For
# all the other color modes, a bit depth of 8 is used.
# @return [ChunkyPNG::Datastream] The PNG datastream containing the encoded canvas.
# @see ChunkyPNG::Canvas::PNGEncoding#determine_png_encoding
def to_datastream(constraints = {})
encoding = determine_png_encoding(constraints)
ds = Datastream.new
ds.header_chunk = Chunk::Header.new(:width => width, :height => height,
:color => encoding[:color_mode], :depth => encoding[:bit_depth], :interlace => encoding[:interlace])
if encoding[:color_mode] == ChunkyPNG::COLOR_INDEXED
ds.palette_chunk = encoding_palette.to_plte_chunk
ds.transparency_chunk = encoding_palette.to_trns_chunk unless encoding_palette.opaque?
end
data = encode_png_pixelstream(encoding[:color_mode], encoding[:bit_depth], encoding[:interlace], encoding[:filtering])
ds.data_chunks = Chunk::ImageData.split_in_chunks(data, encoding[:compression])
ds.end_chunk = Chunk::End.new
return ds
end
protected
# Determines the best possible PNG encoding variables for this image, by analyzing
# the colors used for the image.
#
# You can provide constraints for the encoding variables by passing a hash with
# encoding variables to this method.
#
# @param [Hash, Symbol] constraints The constraints for the encoding. This can be a
# Hash or a preset symbol.
# @return [Hash] A hash with encoding options for {ChunkyPNG::Canvas::PNGEncoding#to_datastream}
def determine_png_encoding(constraints = {})
encoding = case constraints
when :fast_rgb; { :color_mode => ChunkyPNG::COLOR_TRUECOLOR, :compression => Zlib::BEST_SPEED }
when :fast_rgba; { :color_mode => ChunkyPNG::COLOR_TRUECOLOR_ALPHA, :compression => Zlib::BEST_SPEED }
when :best_compression; { :compression => Zlib::BEST_COMPRESSION, :filtering => ChunkyPNG::FILTER_PAETH }
when :good_compression; { :compression => Zlib::BEST_COMPRESSION, :filtering => ChunkyPNG::FILTER_NONE }
when :no_compression; { :compression => Zlib::NO_COMPRESSION }
when :black_and_white; { :color_mode => ChunkyPNG::COLOR_GRAYSCALE, :bit_depth => 1 }
when Hash; constraints
else raise ChunkyPNG::Exception, "Unknown encoding preset: #{constraints.inspect}"
end
# Do not create a palette when the encoding is given and does not require a palette.
if encoding[:color_mode]
if encoding[:color_mode] == ChunkyPNG::COLOR_INDEXED
self.encoding_palette = self.palette
encoding[:bit_depth] ||= self.encoding_palette.determine_bit_depth
else
encoding[:bit_depth] ||= 8
end
else
self.encoding_palette = self.palette
suggested_color_mode, suggested_bit_depth = encoding_palette.best_color_settings
encoding[:color_mode] ||= suggested_color_mode
encoding[:bit_depth] ||= suggested_bit_depth
end
# Use Zlib's default for compression unless otherwise provided.
encoding[:compression] ||= Zlib::DEFAULT_COMPRESSION
encoding[:interlace] = case encoding[:interlace]
when nil, false, ChunkyPNG::INTERLACING_NONE; ChunkyPNG::INTERLACING_NONE
when true, ChunkyPNG::INTERLACING_ADAM7; ChunkyPNG::INTERLACING_ADAM7
else encoding[:interlace]
end
encoding[:filtering] ||= case encoding[:compression]
when Zlib::BEST_COMPRESSION; ChunkyPNG::FILTER_PAETH
when Zlib::NO_COMPRESSION..Zlib::BEST_SPEED; ChunkyPNG::FILTER_NONE
else ChunkyPNG::FILTER_UP
end
return encoding
end
# Encodes the canvas according to the PNG format specification with a given color
# mode, possibly with interlacing.
# @param [Integer] color_mode The color mode to use for encoding.
# @param [Integer] bit_depth The bit depth of the image.
# @param [Integer] interlace The interlacing method to use.
# @param [Integer] filtering The filtering method to use.
# @return [String] The PNG encoded canvas as string.
def encode_png_pixelstream(color_mode = ChunkyPNG::COLOR_TRUECOLOR, bit_depth = 8, interlace = ChunkyPNG::INTERLACING_NONE, filtering = ChunkyPNG::FILTER_NONE)
if color_mode == ChunkyPNG::COLOR_INDEXED
raise ChunkyPNG::ExpectationFailed, "This palette is not suitable for encoding!" if encoding_palette.nil? || !encoding_palette.can_encode?
raise ChunkyPNG::ExpectationFailed, "This palette has too many colors!" if encoding_palette.size > (1 << bit_depth)
end
case interlace
when ChunkyPNG::INTERLACING_NONE; encode_png_image_without_interlacing(color_mode, bit_depth, filtering)
when ChunkyPNG::INTERLACING_ADAM7; encode_png_image_with_interlacing(color_mode, bit_depth, filtering)
else raise ChunkyPNG::NotSupported, "Unknown interlacing method: #{interlace}!"
end
end
# Encodes the canvas according to the PNG format specification with a given color mode.
# @param [Integer] color_mode The color mode to use for encoding.
# @param [Integer] bit_depth The bit depth of the image.
# @param [Integer] filtering The filtering method to use.
# @return [String] The PNG encoded canvas as string.
def encode_png_image_without_interlacing(color_mode, bit_depth = 8, filtering = ChunkyPNG::FILTER_NONE)
stream = ChunkyPNG::Datastream.empty_bytearray
encode_png_image_pass_to_stream(stream, color_mode, bit_depth, filtering)
stream
end
# Encodes the canvas according to the PNG format specification with a given color
# mode and Adam7 interlacing.
#
# This method will split the original canvas in 7 smaller canvases and encode them
# one by one, concatenating the resulting strings.
#
# @param [Integer] color_mode The color mode to use for encoding.
# @param [Integer] bit_depth The bit depth of the image.
# @param [Integer] filtering The filtering method to use.
# @return [String] The PNG encoded canvas as string.
def encode_png_image_with_interlacing(color_mode, bit_depth = 8, filtering = ChunkyPNG::FILTER_NONE)
stream = ChunkyPNG::Datastream.empty_bytearray
0.upto(6) do |pass|
subcanvas = self.class.adam7_extract_pass(pass, self)
subcanvas.encoding_palette = encoding_palette
subcanvas.encode_png_image_pass_to_stream(stream, color_mode, bit_depth, filtering)
end
stream
end
# Encodes the canvas to a stream, in a given color mode.
# @param [String] stream The stream to write to.
# @param [Integer] color_mode The color mode to use for encoding.
# @param [Integer] bit_depth The bit depth of the image.
# @param [Integer] filtering The filtering method to use.
def encode_png_image_pass_to_stream(stream, color_mode, bit_depth, filtering)
start_pos = stream.bytesize
pixel_size = Color.pixel_bytesize(color_mode)
line_width = Color.scanline_bytesize(color_mode, bit_depth, width)
# Determine the filter method
encode_method = encode_png_pixels_to_scanline_method(color_mode, bit_depth)
filter_method = case filtering
when ChunkyPNG::FILTER_SUB; :encode_png_str_scanline_sub
when ChunkyPNG::FILTER_UP; :encode_png_str_scanline_up
when ChunkyPNG::FILTER_AVERAGE; :encode_png_str_scanline_average
when ChunkyPNG::FILTER_PAETH; :encode_png_str_scanline_paeth
else nil
end
0.upto(height - 1) do |y|
stream << send(encode_method, row(y))
end
# Now, apply filtering if any
if filter_method
(height - 1).downto(0) do |y|
pos = start_pos + y * (line_width + 1)
prev_pos = (y == 0) ? nil : pos - (line_width + 1)
send(filter_method, stream, pos, prev_pos, line_width, pixel_size)
end
end
end
# Encodes a line of pixels using 8-bit truecolor mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_truecolor_8bit(pixels)
pixels.pack('x' + ('NX' * width))
end
# Encodes a line of pixels using 8-bit truecolor alpha mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_truecolor_alpha_8bit(pixels)
pixels.pack("xN#{width}")
end
# Encodes a line of pixels using 1-bit indexed mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_indexed_1bit(pixels)
chars = []
pixels.each_slice(8) do |p1, p2, p3, p4, p5, p6, p7, p8|
chars << ((encoding_palette.index(p1) << 7) |
(encoding_palette.index(p2) << 6) |
(encoding_palette.index(p3) << 5) |
(encoding_palette.index(p4) << 4) |
(encoding_palette.index(p5) << 3) |
(encoding_palette.index(p6) << 2) |
(encoding_palette.index(p7) << 1) |
(encoding_palette.index(p8)))
end
chars.pack('xC*')
end
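# Note (added for clarity): the leading 'x' in the pack format writes the
# scanline's filter-type placeholder byte, and each following byte packs up to
# eight 1-bit palette indices, most significant bit first.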
# Encodes a line of pixels using 2-bit indexed mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_indexed_2bit(pixels)
chars = []
pixels.each_slice(4) do |p1, p2, p3, p4|
chars << ((encoding_palette.index(p1) << 6) |
(encoding_palette.index(p2) << 4) |
(encoding_palette.index(p3) << 2) |
(encoding_palette.index(p4)))
end
chars.pack('xC*')
end
# Encodes a line of pixels using 4-bit indexed mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_indexed_4bit(pixels)
chars = []
pixels.each_slice(2) do |p1, p2|
chars << ((encoding_palette.index(p1) << 4) | (encoding_palette.index(p2)))
end
chars.pack('xC*')
end
# Encodes a line of pixels using 8-bit indexed mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_indexed_8bit(pixels)
pixels.map { |p| encoding_palette.index(p) }.pack("xC#{width}")
end
# Encodes a line of pixels using 1-bit grayscale mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_grayscale_1bit(pixels)
chars = []
pixels.each_slice(8) do |p1, p2, p3, p4, p5, p6, p7, p8|
chars << ((p1.nil? ? 0 : (p1 & 0x0000ffff) >> 15 << 7) |
(p2.nil? ? 0 : (p2 & 0x0000ffff) >> 15 << 6) |
(p3.nil? ? 0 : (p3 & 0x0000ffff) >> 15 << 5) |
(p4.nil? ? 0 : (p4 & 0x0000ffff) >> 15 << 4) |
(p5.nil? ? 0 : (p5 & 0x0000ffff) >> 15 << 3) |
(p6.nil? ? 0 : (p6 & 0x0000ffff) >> 15 << 2) |
(p7.nil? ? 0 : (p7 & 0x0000ffff) >> 15 << 1) |
(p8.nil? ? 0 : (p8 & 0x0000ffff) >> 15))
end
chars.pack('xC*')
end
# Encodes a line of pixels using 2-bit grayscale mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_grayscale_2bit(pixels)
chars = []
pixels.each_slice(4) do |p1, p2, p3, p4|
chars << ((p1.nil? ? 0 : (p1 & 0x0000ffff) >> 14 << 6) |
(p2.nil? ? 0 : (p2 & 0x0000ffff) >> 14 << 4) |
(p3.nil? ? 0 : (p3 & 0x0000ffff) >> 14 << 2) |
(p4.nil? ? 0 : (p4 & 0x0000ffff) >> 14))
end
chars.pack('xC*')
end
# Encodes a line of pixels using 4-bit grayscale mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_grayscale_4bit(pixels)
chars = []
pixels.each_slice(2) do |p1, p2|
chars << ((p1.nil? ? 0 : ((p1 & 0x0000ffff) >> 12) << 4) | (p2.nil? ? 0 : ((p2 & 0x0000ffff) >> 12)))
end
chars.pack('xC*')
end
# Encodes a line of pixels using 8-bit grayscale mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_grayscale_8bit(pixels)
pixels.map { |p| p >> 8 }.pack("xC#{width}")
end
# Encodes a line of pixels using 8-bit grayscale alpha mode.
# @param [Array<Integer>] pixels A row of pixels of the original image.
# @return [String] The encoded scanline as binary string
def encode_png_pixels_to_scanline_grayscale_alpha_8bit(pixels)
pixels.pack("xn#{width}")
end
# Returns the method name to use to encode a row of pixels into a scanline.
# @param [Integer] color_mode The color mode of the image.
# @param [Integer] depth The bit depth of the image.
# @return [Symbol] The method name to use for encoding, to be called on the canvas class.
# @raise [ChunkyPNG::NotSupported] when the color_mode and/or bit depth is not supported.
def encode_png_pixels_to_scanline_method(color_mode, depth)
encoder_method = case color_mode
when ChunkyPNG::COLOR_TRUECOLOR; :"encode_png_pixels_to_scanline_truecolor_#{depth}bit"
when ChunkyPNG::COLOR_TRUECOLOR_ALPHA; :"encode_png_pixels_to_scanline_truecolor_alpha_#{depth}bit"
when ChunkyPNG::COLOR_INDEXED; :"encode_png_pixels_to_scanline_indexed_#{depth}bit"
when ChunkyPNG::COLOR_GRAYSCALE; :"encode_png_pixels_to_scanline_grayscale_#{depth}bit"
when ChunkyPNG::COLOR_GRAYSCALE_ALPHA; :"encode_png_pixels_to_scanline_grayscale_alpha_#{depth}bit"
else nil
end
raise ChunkyPNG::NotSupported, "No encoder found for color mode #{color_mode} and #{depth}-bit depth!" unless respond_to?(encoder_method, true)
encoder_method
end
# Encodes a scanline of a pixelstream without filtering. This is a no-op.
# @param [String] stream The pixelstream to work on. This string will be modified.
# @param [Integer] pos The starting position of the scanline.
# @param [Integer, nil] prev_pos The starting position of the previous scanline. <tt>nil</tt> if
# this is the first line.
# @param [Integer] line_width The number of bytes in this scanline, without counting the filtering
# method byte.
# @param [Integer] pixel_size The number of bytes used per pixel.
# @return [void]
def encode_png_str_scanline_none(stream, pos, prev_pos, line_width, pixel_size)
# noop - this method shouldn't get called at all.
end
# Encodes a scanline of a pixelstream using SUB filtering. This will modify the stream.
# @param (see #encode_png_str_scanline_none)
# @return [void]
def encode_png_str_scanline_sub(stream, pos, prev_pos, line_width, pixel_size)
line_width.downto(1) do |i|
a = (i > pixel_size) ? stream.getbyte(pos + i - pixel_size) : 0
stream.setbyte(pos + i, (stream.getbyte(pos + i) - a) & 0xff)
end
stream.setbyte(pos, ChunkyPNG::FILTER_SUB)
end
# Encodes a scanline of a pixelstream using UP filtering. This will modify the stream.
# @param (see #encode_png_str_scanline_none)
# @return [void]
def encode_png_str_scanline_up(stream, pos, prev_pos, line_width, pixel_size)
line_width.downto(1) do |i|
b = prev_pos ? stream.getbyte(prev_pos + i) : 0
stream.setbyte(pos + i, (stream.getbyte(pos + i) - b) & 0xff)
end
stream.setbyte(pos, ChunkyPNG::FILTER_UP)
end
# Encodes a scanline of a pixelstream using AVERAGE filtering. This will modify the stream.
# @param (see #encode_png_str_scanline_none)
# @return [void]
def encode_png_str_scanline_average(stream, pos, prev_pos, line_width, pixel_size)
line_width.downto(1) do |i|
a = (i > pixel_size) ? stream.getbyte(pos + i - pixel_size) : 0
b = prev_pos ? stream.getbyte(prev_pos + i) : 0
stream.setbyte(pos + i, (stream.getbyte(pos + i) - ((a + b) >> 1)) & 0xff)
end
stream.setbyte(pos, ChunkyPNG::FILTER_AVERAGE)
end
# Encodes a scanline of a pixelstream using PAETH filtering. This will modify the stream.
# @param (see #encode_png_str_scanline_none)
# @return [void]
def encode_png_str_scanline_paeth(stream, pos, prev_pos, line_width, pixel_size)
line_width.downto(1) do |i|
a = (i > pixel_size) ? stream.getbyte(pos + i - pixel_size) : 0
b = (prev_pos) ? stream.getbyte(prev_pos + i) : 0
c = (prev_pos && i > pixel_size) ? stream.getbyte(prev_pos + i - pixel_size) : 0
p = a + b - c
pa = (p - a).abs
pb = (p - b).abs
pc = (p - c).abs
pr = (pa <= pb && pa <= pc) ? a : (pb <= pc ? b : c)
stream.setbyte(pos + i, (stream.getbyte(pos + i) - pr) & 0xff)
end
stream.setbyte(pos, ChunkyPNG::FILTER_PAETH)
end
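# Worked example (illustrative): for bytes a = 10 (left), b = 4 (up) and
# c = 2 (upper-left), the estimate is p = a + b - c = 12, the distances are
# pa = 2, pb = 8 and pc = 10, so the left byte a is the closest predictor and
# 10 is subtracted from the raw byte before the scanline is stored.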
end
end
end
|
skynet-tx/photoblog
|
wp-content/themes/videoblog/resources/lib/compass/lib/ruby/gems/1.9.1/gems/chunky_png-1.2.7/lib/chunky_png/canvas/png_encoding.rb
|
Ruby
|
gpl-2.0
| 21,812
|
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by defaulter-gen. Do not edit it manually!
package v1alpha1
import (
v1alpha1 "k8s.io/api/settings/v1alpha1"
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/kubernetes/pkg/api/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&v1alpha1.PodPreset{}, func(obj interface{}) { SetObjectDefaults_PodPreset(obj.(*v1alpha1.PodPreset)) })
scheme.AddTypeDefaultingFunc(&v1alpha1.PodPresetList{}, func(obj interface{}) { SetObjectDefaults_PodPresetList(obj.(*v1alpha1.PodPresetList)) })
return nil
}
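// Illustrative usage (not part of the generated file): these defaulters are
// normally wired into the group's scheme builder, e.g.
//
//	var SchemeBuilder = runtime.NewSchemeBuilder(RegisterDefaults)
//
// after which scheme.Default(obj) applies SetObjectDefaults_PodPreset to any
// *v1alpha1.PodPreset passed through the scheme.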
func SetObjectDefaults_PodPreset(in *v1alpha1.PodPreset) {
for i := range in.Spec.Env {
a := &in.Spec.Env[i]
if a.ValueFrom != nil {
if a.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef)
}
}
}
for i := range in.Spec.Volumes {
a := &in.Spec.Volumes[i]
v1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
}
if a.VolumeSource.RBD != nil {
v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
}
if a.VolumeSource.DownwardAPI != nil {
v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
}
if a.VolumeSource.Projected != nil {
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
}
}
if a.VolumeSource.ScaleIO != nil {
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
}
}
}
func SetObjectDefaults_PodPresetList(in *v1alpha1.PodPresetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PodPreset(a)
}
}
|
rhamilto/origin
|
vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go
|
GO
|
apache-2.0
| 3,305
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_VIEWS_CONTROLS_BUTTON_CHECKBOX_H_
#define UI_VIEWS_CONTROLS_BUTTON_CHECKBOX_H_
#include <string>
#include "base/compiler_specific.h"
#include "base/strings/string16.h"
#include "ui/views/controls/button/label_button.h"
namespace views {
// A native-themed class representing a checkbox. This class does not use
// platform-specific objects to replicate the native platform's look and feel.
class VIEWS_EXPORT Checkbox : public LabelButton {
public:
static const char kViewClassName[];
explicit Checkbox(const base::string16& label);
virtual ~Checkbox();
// Sets a listener for this checkbox. Checkboxes aren't required to have them
// since their state can be read independently of them being toggled.
void set_listener(ButtonListener* listener) { listener_ = listener; }
// Sets/Gets whether or not the checkbox is checked.
virtual void SetChecked(bool checked);
bool checked() const { return checked_; }
protected:
// Overridden from LabelButton:
virtual void Layout() OVERRIDE;
virtual const char* GetClassName() const OVERRIDE;
virtual void GetAccessibleState(ui::AXViewState* state) OVERRIDE;
virtual void OnFocus() OVERRIDE;
virtual void OnBlur() OVERRIDE;
virtual const gfx::ImageSkia& GetImage(ButtonState for_state) OVERRIDE;
// Set the image shown for each button state depending on whether it is
// [checked] or [focused].
void SetCustomImage(bool checked,
bool focused,
ButtonState for_state,
const gfx::ImageSkia& image);
private:
// Overridden from Button:
virtual void NotifyClick(const ui::Event& event) OVERRIDE;
virtual ui::NativeTheme::Part GetThemePart() const OVERRIDE;
virtual void GetExtraParams(
ui::NativeTheme::ExtraParams* params) const OVERRIDE;
// True if the checkbox is checked.
bool checked_;
// The images for each button state.
gfx::ImageSkia images_[2][2][STATE_COUNT];
DISALLOW_COPY_AND_ASSIGN(Checkbox);
};
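// Illustrative usage (not part of this header): a dialog view might create and
// observe a checkbox roughly as follows, assuming the owning class implements
// ButtonListener:
//
//   views::Checkbox* box = new views::Checkbox(base::ASCIIToUTF16("Enable"));
//   box->set_listener(this);
//   box->SetChecked(true);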
} // namespace views
#endif // UI_VIEWS_CONTROLS_BUTTON_CHECKBOX_H_
|
7kbird/chrome
|
ui/views/controls/button/checkbox.h
|
C
|
bsd-3-clause
| 2,243
|
require "spec_helper"
describe Mongoid::Validatable::UniquenessValidator do
describe "#valid?" do
context "when the document is a root document" do
context "when adding custom persistence options" do
before do
Dictionary.validates_uniqueness_of :name
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when persisting to another collection" do
before do
Dictionary.with(collection: "dicts").create(name: "websters")
end
context "when the document is not valid" do
let(:websters) do
Dictionary.with(collection: "dicts").new(name: "websters")
end
it "performs the validation on the correct collection" do
expect(websters).to_not be_valid
end
it "adds the uniqueness error" do
websters.valid?
expect(websters.errors[:name]).to_not be_nil
end
it "clears the persistence options in the thread local" do
websters.valid?
expect(Dictionary.persistence_options).to be_nil
end
end
context "when the document is valid" do
let(:oxford) do
Dictionary.with(collection: "dicts").new(name: "oxford")
end
it "performs the validation on the correct collection" do
expect(oxford).to be_valid
end
end
end
end
context "when the document contains no compound key" do
context "when validating a relation" do
before do
Word.validates_uniqueness_of :dictionary
end
after do
Word.reset_callbacks(:validate)
end
context "when the attribute id is unique" do
let(:dictionary) do
Dictionary.create
end
let(:word) do
Word.new(dictionary: dictionary)
end
it "returns true" do
expect(word).to be_valid
end
end
end
context "when the field name is aliased" do
before do
Dictionary.create!(language: "en")
end
let(:dictionary) do
Dictionary.new(language: "en")
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the validation uses the aliased name" do
before do
Dictionary.validates_uniqueness_of :language
end
it "correctly detects a uniqueness conflict" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error to the aliased field name" do
dictionary.valid?
expect(dictionary.errors).to have_key(:language)
expect(dictionary.errors[:language]).to eq([ "is already taken" ])
end
end
context "when the validation uses the underlying field name" do
before do
Dictionary.validates_uniqueness_of :l
end
it "correctly detects a uniqueness conflict" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error to the underlying field name" do
dictionary.valid?
expect(dictionary.errors).to have_key(:l)
expect(dictionary.errors[:l]).to eq([ "is already taken" ])
end
end
end
context "when the field is localized" do
context "when no scope is provided" do
context "when case sensitive is true" do
before do
Dictionary.validates_uniqueness_of :description
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
context "when single localization" do
before do
Dictionary.create(description: "english")
end
let(:dictionary) do
Dictionary.new(description: "English")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when multiple localizations" do
before do
Dictionary.
create(description_translations: { "en" => "english", "de" => "german" })
end
let(:dictionary) do
Dictionary.new(description_translations: { "en" => "English", "de" => "German" })
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
context "when single localization" do
before do
Dictionary.create(description: "english")
end
let(:dictionary) do
Dictionary.new(description: "english")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:description]).to eq([ "is already taken" ])
end
end
context "when multiple localizations" do
before do
Dictionary.
create(description_translations: { "en" => "english", "de" => "german" })
end
let(:dictionary) do
Dictionary.new(description_translations: { "en" => "english", "de" => "German" })
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:description]).to eq([ "is already taken" ])
end
end
end
end
end
context "when case sensitive is false" do
before do
Dictionary.validates_uniqueness_of :description, case_sensitive: false
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
context "when there are no special characters" do
before do
Dictionary.create(description: "english")
end
let(:dictionary) do
Dictionary.new(description: "german")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when special characters exist" do
before do
Dictionary.create(description: "english")
end
let(:dictionary) do
Dictionary.new(description: "en@gl.ish")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
context "when signle localization" do
before do
Dictionary.create(description: "english")
end
let(:dictionary) do
Dictionary.new(description: "English")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:description]).to eq([ "is already taken" ])
end
end
context "when multiple localizations" do
before do
Dictionary.
create(description_translations: { "en" => "english", "de" => "german" })
end
let(:dictionary) do
Dictionary.new(description_translations: { "en" => "English", "de" => "German" })
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:description]).to eq([ "is already taken" ])
end
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(description: "english")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
end
context "when a scope is provided" do
before do
Dictionary.validates_uniqueness_of :description, scope: :name
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is not unique in the scope" do
context "when the document is not the match" do
before do
Dictionary.
create(description: "english", name: "test")
end
let(:dictionary) do
Dictionary.new(description: "english", name: "test")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:description]).to eq([ "is already taken" ])
end
end
end
end
end
context "when no scope is provided" do
before do
Dictionary.validates_uniqueness_of :name
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
let!(:oxford) do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
context "when subsequently cloning the document" do
let(:clone) do
oxford.clone
end
it "returns false for the clone" do
expect(clone).to_not be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Dictionary.create(name: "Oxford")
end
let!(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
context "when the field has changed" do
let!(:dictionary) do
Dictionary.create(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the field has not changed" do
before do
Dictionary.default_scoping = nil
end
let!(:dictionary) do
Dictionary.create!(name: "Oxford")
end
let!(:from_db) do
Dictionary.find(dictionary.id)
end
it "returns true" do
expect(from_db).to be_valid
end
it "does not touch the database" do
expect(Dictionary).to receive(:where).never
from_db.valid?
end
end
end
end
end
context "when a default scope is on the model" do
before do
Dictionary.validates_uniqueness_of :name
Dictionary.default_scope(->{ Dictionary.where(year: 1990) })
end
after do
Dictionary.default_scoping = nil
Dictionary.reset_callbacks(:validate)
end
context "when the document with the unqiue attribute is not in default scope" do
context "when the attribute is not unique" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
end
end
end
context "when an aliased scope is provided" do
before do
Dictionary.validates_uniqueness_of :name, scope: :language
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Dictionary.create(name: "Oxford", language: "English")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
Dictionary.create(name: "Oxford", language: "English")
end
let(:dictionary) do
Dictionary.new(name: "Webster", language: "English")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique with no scope" do
before do
Dictionary.create(name: "Oxford", language: "English")
end
let(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
Dictionary.create(name: "Oxford", language: "English")
end
let(:dictionary) do
Dictionary.new(name: "Oxford", language: "Deutsch")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
Dictionary.create(name: "Oxford", language: "English")
end
let(:dictionary) do
Dictionary.new(name: "Oxford", language: "English")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness errors" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(name: "Oxford", language: "English")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
context "when a single scope is provided" do
before do
Dictionary.validates_uniqueness_of :name, scope: :publisher
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Webster", publisher: "Amazon")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when uniqueness is violated due to scope change" do
let(:personal_folder) do
Folder.create!(name: "Personal")
end
let(:public_folder) do
Folder.create!(name: "Public")
end
before do
personal_folder.folder_items << FolderItem.new(name: "non-unique")
public_folder.folder_items << FolderItem.new(name: "non-unique")
end
let(:item) do
public_folder.folder_items.last
end
it "should set an error for associated object not being unique" do
item.update_attributes(folder_id: personal_folder.id)
expect(item.errors.messages[:name].first).to eq("is already taken")
end
end
context "when the attribute is not unique with no scope" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Oxford", publisher: "Addison")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Oxford", publisher: "Amazon")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness errors" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when one of the scopes is a time" do
before do
Dictionary.create(
name: "Oxford",
publisher: "Amazon",
published: 10.days.ago.to_time
)
end
let(:dictionary) do
Dictionary.new(
name: "Oxford",
publisher: "Amazon",
published: 10.days.ago.to_time
)
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness errors" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
end
end
context "when multiple scopes are provided" do
before do
Dictionary.validates_uniqueness_of :name, scope: [ :publisher, :year ]
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
Dictionary.create(
name: "Oxford",
publisher: "Amazon",
year: 2011
)
end
let(:dictionary) do
Dictionary.new(
name: "Webster",
publisher: "Amazon",
year: 2011
)
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique with no scope" do
before do
Dictionary.create(name: "Oxford", publisher: "Amazon")
end
let(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
Dictionary.create(
name: "Oxford",
publisher: "Amazon",
year: 1995
)
end
let(:dictionary) do
Dictionary.new(
name: "Oxford",
publisher: "Addison",
year: 2011
)
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
Dictionary.create(
name: "Oxford",
publisher: "Amazon",
year: 1960
)
end
let(:dictionary) do
Dictionary.new(
name: "Oxford",
publisher: "Amazon",
year: 1960
)
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness errors" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(
name: "Oxford",
publisher: "Amazon",
year: 1960
)
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
context "when case sensitive is true" do
before do
Dictionary.validates_uniqueness_of :name
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Oxford")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
context "when case sensitive is false" do
before do
Dictionary.validates_uniqueness_of :name, case_sensitive: false
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is unique" do
context "when there are no special characters" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Webster")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
context "when special characters exist" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "Web@st.er")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Dictionary.create(name: "Oxford")
end
let(:dictionary) do
Dictionary.new(name: "oxford")
end
it "returns false" do
expect(dictionary).to_not be_valid
end
it "adds the uniqueness error" do
dictionary.valid?
expect(dictionary.errors[:name]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:dictionary) do
Dictionary.create(name: "Oxford")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
context "when not allowing nil" do
it "raises a validation error" do
expect { LineItem.create! }.to raise_error Mongoid::Errors::Validations
end
end
context "when allowing nil" do
before do
Dictionary.validates_uniqueness_of :name, allow_nil: true
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is nil" do
before do
Dictionary.create
end
let(:dictionary) do
Dictionary.new
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
context "when allowing blank" do
before do
Dictionary.validates_uniqueness_of :name, allow_blank: true
end
after do
Dictionary.reset_callbacks(:validate)
end
context "when the attribute is blank" do
before do
Dictionary.create(name: "")
end
let(:dictionary) do
Dictionary.new(name: "")
end
it "returns true" do
expect(dictionary).to be_valid
end
end
end
end
context "when the document contains a compound key" do
context "when no scope is provided" do
before do
Login.validates_uniqueness_of :username
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Webster")
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Oxford")
end
it "returns false" do
expect(login).to_not be_valid
end
it "adds the uniqueness error" do
login.valid?
expect(login.errors[:username]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:login) do
Login.create(username: "Oxford")
end
it "returns true" do
expect(login).to be_valid
end
end
end
end
context "when a single scope is provided" do
before do
Login.validates_uniqueness_of :username, scope: :application_id
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Login.create(username: "Oxford", application_id: 1)
end
let(:login) do
Login.new(username: "Webster")
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
Login.create(username: "Oxford", application_id: 1)
end
let(:login) do
Login.new(username: "Webster", application_id: 1)
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is not unique with no scope" do
before do
Login.create(username: "Oxford", application_id: 1)
end
let(:login) do
Login.new(username: "Oxford")
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
Login.create(username: "Oxford", application_id: 1)
end
let(:login) do
Login.new(username: "Oxford", application_id: 2)
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
Login.create(username: "Oxford", application_id: 1)
end
let(:login) do
Login.new(username: "Oxford", application_id: 1)
end
it "returns false" do
expect(login).to_not be_valid
end
it "adds the uniqueness errors" do
login.valid?
expect(login.errors[:username]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:login) do
Login.create(username: "Oxford", application_id: 1)
end
it "returns true" do
expect(login).to be_valid
end
end
end
end
context "when case sensitive is true" do
before do
Login.validates_uniqueness_of :username
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Webster")
end
it "returns true" do
expect(login).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Oxford")
end
it "returns false" do
expect(login).to_not be_valid
end
it "adds the uniqueness error" do
login.valid?
expect(login.errors[:username]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:login) do
Login.create(username: "Oxford")
end
it "returns true" do
expect(login).to be_valid
end
end
end
end
context "when case sensitive is false" do
before do
Login.validates_uniqueness_of :username, case_sensitive: false
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is unique" do
context "when there are no special characters" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Webster")
end
it "returns true" do
expect(login).to be_valid
end
end
context "when special characters exist" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "Web@st.er")
end
it "returns true" do
expect(login).to be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
Login.create(username: "Oxford")
end
let(:login) do
Login.new(username: "oxford")
end
it "returns false" do
expect(login).to_not be_valid
end
it "adds the uniqueness error" do
login.valid?
expect(login.errors[:username]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:login) do
Login.create(username: "Oxford")
end
it "returns true" do
expect(login).to be_valid
end
end
end
end
context "when allowing nil" do
before do
Login.validates_uniqueness_of :username, allow_nil: true
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is nil" do
before do
Login.create
end
let(:login) do
Login.new
end
it "returns true" do
expect(login).to be_valid
end
end
end
context "when allowing blank" do
before do
Login.validates_uniqueness_of :username, allow_blank: true
end
after do
Login.reset_callbacks(:validate)
end
context "when the attribute is blank" do
before do
Login.create(username: "")
end
let(:login) do
Login.new(username: "")
end
it "returns true" do
expect(login).to be_valid
end
end
end
end
context "when the attribute is a custom type" do
before do
Bar.validates_uniqueness_of :lat_lng
end
after do
Bar.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Bar.create(lat_lng: LatLng.new(52.30, 13.25))
end
let(:unique_bar) do
Bar.new(lat_lng: LatLng.new(54.30, 14.25))
end
it "returns true" do
expect(unique_bar).to be_valid
end
end
context "when the attribute is not unique" do
before do
Bar.create(lat_lng: LatLng.new(52.30, 13.25))
end
let(:non_unique_bar) do
Bar.new(lat_lng: LatLng.new(52.30, 13.25))
end
it "returns false" do
expect(non_unique_bar).to_not be_valid
end
end
end
context "when conditions is set" do
before do
Band.validates_uniqueness_of :name, conditions: ->{ Band.where(active: true) }
end
after do
Band.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
Band.create(name: 'Foo', active: false)
end
let(:unique_band) do
Band.new(name: 'Foo')
end
it "returns true" do
expect(unique_band).to be_valid
end
end
context "when the attribute is not unique" do
before do
Band.create(name: 'Foo')
end
let(:non_unique_band) do
Band.new(name: 'Foo')
end
it "returns false" do
expect(non_unique_band).to_not be_valid
end
end
end
end
end
context "when the document is embedded" do
let(:word) do
Word.create(name: "Schadenfreude")
end
context "when in an embeds_many" do
let!(:def_one) do
word.definitions.create(description: "1")
end
let!(:def_two) do
word.definitions.create(description: "2")
end
context "when a document is being destroyed" do
before do
Definition.validates_uniqueness_of :description
end
after do
Definition.reset_callbacks(:validate)
end
context "when changing a document to the destroyed property" do
let(:attributes) do
{
definitions_attributes: {
"0" => { id: def_one.id, description: "0", "_destroy" => 1 },
"1" => { id: def_two.id, description: "1" }
}
}
end
before do
word.attributes = attributes
end
it "returns true" do
expect(def_two).to be_valid
end
end
end
context "when the document does not use composite keys" do
context "when no scope is provided" do
before do
Definition.validates_uniqueness_of :description
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "Gloating")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns false" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error" do
definition.valid?
expect(definition.errors[:description]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns true" do
expect(definition).to be_valid
end
end
end
end
context "when a single scope is provided" do
before do
Definition.validates_uniqueness_of :description, scope: :part
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
word.definitions.build(
description: "Malicious joy", part: "Noun"
)
end
let(:definition) do
word.definitions.build(description: "Gloating")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(
description: "Gloating",
part: "Noun"
)
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique with no scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Adj"
)
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
it "returns false" do
expect(definition).to_not be_valid
end
it "adds the uniqueness errors" do
definition.valid?
expect(definition.errors[:description]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
it "returns true" do
expect(definition).to be_valid
end
end
end
end
context "when multiple scopes are provided" do
before do
Definition.validates_uniqueness_of :description, scope: [ :part, :regular ]
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(description: "Gloating")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is unique in the scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun",
regular: true
)
end
let(:definition) do
word.definitions.build(
description: "Gloating",
part: "Noun",
regular: true
)
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique with no scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun"
)
end
let(:definition) do
word.definitions.build(description: "Malicious scope")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique in another scope" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun",
regular: true
)
end
let(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Adj",
regular: true
)
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique in the same scope" do
context "when the document is not the match" do
before do
word.definitions.build(
description: "Malicious joy",
part: "Noun",
regular: true
)
end
let(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Noun",
regular: true
)
end
it "returns false" do
expect(definition).to_not be_valid
end
it "adds the uniqueness errors" do
definition.valid?
expect(definition.errors[:description]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:definition) do
word.definitions.build(
description: "Malicious joy",
part: "Noun",
regular: false
)
end
it "returns true" do
expect(definition).to be_valid
end
end
end
end
context "when case sensitive is true" do
before do
Definition.validates_uniqueness_of :description
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
word.definitions.build(description: "Malicious jo")
end
let(:definition) do
word.definitions.build(description: "Gloating")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns false" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error" do
definition.valid?
expect(definition.errors[:description]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns true" do
expect(definition).to be_valid
end
end
end
end
context "when case sensitive is false" do
before do
Definition.validates_uniqueness_of :description, case_sensitive: false
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is unique" do
context "when there are no special characters" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "Gloating")
end
it "returns true" do
expect(definition).to be_valid
end
end
context "when special characters exist" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "M@licious.joy")
end
it "returns true" do
expect(definition).to be_valid
end
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
word.definitions.build(description: "Malicious joy")
end
let(:definition) do
word.definitions.build(description: "Malicious JOY")
end
it "returns false" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error" do
definition.valid?
expect(definition.errors[:description]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:definition) do
word.definitions.build(description: "Malicious joy")
end
it "returns true" do
expect(definition).to be_valid
end
end
end
end
context "when allowing nil" do
before do
Definition.validates_uniqueness_of :description, allow_nil: true
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is nil" do
before do
word.definitions.build
end
let(:definition) do
word.definitions.build
end
it "returns true" do
expect(definition).to be_valid
end
end
end
context "when allowing blank" do
before do
Definition.validates_uniqueness_of :description, allow_blank: true
end
after do
Definition.reset_callbacks(:validate)
end
context "when the attribute is blank" do
before do
word.definitions.build(description: "")
end
let(:definition) do
word.definitions.build(description: "")
end
it "returns true" do
expect(definition).to be_valid
end
end
end
context "when the field name is aliased" do
before do
word.definitions.build(part: "noun", synonyms: "foo")
end
let(:definition) do
word.definitions.build(part: "noun", synonyms: "foo")
end
after do
Definition.reset_callbacks(:validate)
end
context "when the validation uses the aliased name" do
before do
Definition.validates_uniqueness_of :part, case_sensitive: false
end
it "correctly detects a uniqueness conflict" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error to the aliased field name" do
definition.valid?
expect(definition.errors).to have_key(:part)
expect(definition.errors[:part]).to eq([ "is already taken" ])
end
end
context "when the validation uses the underlying field name" do
before do
Definition.validates_uniqueness_of :p, case_sensitive: false
end
it "correctly detects a uniqueness conflict" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error to the underlying field name" do
definition.valid?
expect(definition.errors).to have_key(:p)
expect(definition.errors[:p]).to eq([ "is already taken" ])
end
end
context "when the field is localized" do
context "when the validation uses the aliased name" do
before do
Definition.validates_uniqueness_of :synonyms, case_sensitive: false
end
it "correctly detects a uniqueness conflict" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error to the aliased field name" do
definition.valid?
expect(definition.errors).to have_key(:synonyms)
expect(definition.errors[:synonyms]).to eq([ "is already taken" ])
end
end
context "when the validation uses the underlying field name" do
before do
Definition.validates_uniqueness_of :syn, case_sensitive: false
end
it "correctly detects a uniqueness conflict" do
expect(definition).to_not be_valid
end
it "adds the uniqueness error to the aliased field name" do
definition.valid?
expect(definition.errors).to have_key(:syn)
expect(definition.errors[:syn]).to eq([ "is already taken" ])
end
end
end
end
end
context "when the document uses composite keys" do
context "when no scope is provided" do
before do
WordOrigin.validates_uniqueness_of :origin_id
end
after do
WordOrigin.reset_callbacks(:validate)
end
context "when the attribute is unique" do
before do
word.word_origins.build(origin_id: 1)
end
let(:word_origin) do
word.word_origins.build(origin_id: 2)
end
it "returns true" do
expect(word_origin).to be_valid
end
end
context "when the attribute is not unique" do
context "when the document is not the match" do
before do
word.word_origins.build(origin_id: 1)
end
let(:word_origin) do
word.word_origins.build(origin_id: 1)
end
it "returns false" do
expect(word_origin).to_not be_valid
end
it "adds the uniqueness error" do
word_origin.valid?
expect(word_origin.errors[:origin_id]).to eq([ "is already taken" ])
end
end
context "when the document is the match in the database" do
let!(:word_origin) do
word.word_origins.build(origin_id: 1)
end
it "returns true" do
expect(word_origin).to be_valid
end
end
end
end
context "when allowing nil" do
before do
WordOrigin.validates_uniqueness_of :origin_id, allow_nil: true
end
after do
WordOrigin.reset_callbacks(:validate)
end
context "when the attribute is nil" do
before do
word.word_origins.build
end
let(:word_origin) do
word.word_origins.build
end
it "returns true" do
expect(word_origin).to be_valid
end
end
end
context "when allowing blank" do
before do
WordOrigin.validates_uniqueness_of :origin_id, allow_blank: true
end
after do
WordOrigin.reset_callbacks(:validate)
end
context "when the attribute is blank" do
before do
word.word_origins.build(origin_id: "")
end
let(:word_origin) do
word.word_origins.build(origin_id: "")
end
it "returns true" do
expect(word_origin).to be_valid
end
end
end
end
end
context "when in an embeds_one" do
before do
Pronunciation.validates_uniqueness_of :sound
end
after do
Pronunciation.reset_callbacks(:validate)
end
let(:pronunciation) do
word.build_pronunciation(sound: "Schwa")
end
it "always returns true" do
expect(pronunciation).to be_valid
end
end
end
context "when describing validation on the instance level" do
let!(:dictionary) do
Dictionary.create!(name: "en")
end
let(:validators) do
dictionary.validates_uniqueness_of :name
end
it "adds the validation only to the instance" do
expect(validators).to eq([ described_class ])
end
end
context "when validation works with inheritance" do
class EuropeanActor < Actor
validates_uniqueness_of :name
end
class SpanishActor < EuropeanActor
end
before do
EuropeanActor.create!(name: "Antonio Banderas")
end
let!(:subclass_document_with_duplicated_name) do
SpanishActor.new(name: "Antonio Banderas")
end
it "should be invalid" do
subclass_document_with_duplicated_name.tap do |d|
expect(d).to be_invalid
expect(d.errors[:name]).to eq([ "is already taken" ])
end
end
end
context "when persisting with safe options" do
before do
Person.validates_uniqueness_of(:username)
Person.create_indexes
end
let!(:person) do
Person.create(ssn: "132-11-1111", username: "aaasdaffff")
end
after do
Person.reset_callbacks(:validate)
end
it "transfers the options to the cloned session" do
expect {
Person.create!(ssn: "132-11-1111", username: "asdfsdfA")
}.to raise_error
end
end
end
|
eljojo/mongoid
|
spec/mongoid/validatable/uniqueness_spec.rb
|
Ruby
|
mit
| 62,278
|
<html>
<head>
<title>Vorbisfile - function - ov_bitrate_instant</title>
<link rel=stylesheet href="style.css" type="text/css">
</head>
<body bgcolor=white text=black link="#5555ff" alink="#5555ff" vlink="#5555ff">
<table border=0 width=100%>
<tr>
<td><p class=tiny>Vorbisfile documentation</p></td>
<td align=right><p class=tiny>vorbisfile version 1.2.0 - 20070723</p></td>
</tr>
</table>
<h1>ov_bitrate_instant</h1>
<p><i>declared in "vorbis/vorbisfile.h";</i></p>
<p>Used to find the most recent bitrate played back within the file. Returns 0 if the bitrate has not changed or if playback is still at the beginning of the file.
<br><br>
<table border=0 color=black cellspacing=0 cellpadding=7>
<tr bgcolor=#cccccc>
<td>
<pre><b>
long ov_bitrate_instant(OggVorbis_File *vf);
</b></pre>
</td>
</tr>
</table>
<h3>Parameters</h3>
<dl>
<dt><i>vf</i></dt>
<dd>A pointer to the OggVorbis_File structure--this is used for ALL the externally visible libvorbisfile
functions.
</dl>
<h3>Return Values</h3>
<blockquote>
<li>0 indicates the beginning of the file or unchanged bitrate info.</li>
<li><i>n</i> indicates the actual bitrate since the last call.</li>
<li>OV_FALSE indicates that playback is not in progress, and thus there is no instantaneous bitrate information to report.</li>
<li>OV_EINVAL indicates that the stream represented by <tt>vf</tt> is not open.</li>
</blockquote>
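<h3>Example</h3>
<p>A minimal sketch (not part of the original documentation) of polling the
instantaneous bitrate during playback, assuming an already opened
<tt>OggVorbis_File</tt> named <tt>vf</tt>:</p>
<table border=0 color=black cellspacing=0 cellpadding=7>
<tr bgcolor=#cccccc>
<td>
<pre><b>
long br = ov_bitrate_instant(&amp;vf);
if (br > 0)
  fprintf(stderr, "current bitrate: %ld bps\n", br);
</b></pre>
</td>
</tr>
</table>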
<p>
<br><br>
<hr noshade>
<table border=0 width=100%>
<tr valign=top>
<td><p class=tiny>copyright © 2007 Xiph.org</p></td>
<td align=right><p class=tiny><a href="http://www.xiph.org/ogg/vorbis/">Ogg Vorbis</a></p></td>
</tr><tr>
<td><p class=tiny>Vorbisfile documentation</p></td>
<td align=right><p class=tiny>vorbisfile version 1.2.0 - 20070723</p></td>
</tr>
</table>
</body>
</html>
|
xbmc/atv2
|
xbmc/cores/paplayer/vorbisfile/libvorbis/doc/vorbisfile/ov_bitrate_instant.html
|
HTML
|
gpl-2.0
| 1,765
|
// { dg-do assemble }
// Bug: g++ doesn't notice the overflow in the enum values.
#include <limits.h>
enum COLOR
{
red,
green = ULONG_MAX,
blue // { dg-error "overflow in enumeration" }
};
|
SanDisk-Open-Source/SSD_Dashboard
|
uefi/gcc/gcc-4.6.3/gcc/testsuite/g++.old-deja/g++.jason/rfg10.C
|
C++
|
gpl-2.0
| 205
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
*
* futex-requeue: Block a bunch of threads on futex1 and requeue them
* on futex2, N at a time.
*
* This program is particularly useful to measure the latency of nthread
* requeues without waking up any tasks -- thus mimicking a regular futex_wait.
*/
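/*
 * Example invocation (illustrative): "perf bench futex requeue -t 128 -q 32"
 * blocks 128 threads on futex1 and requeues them onto futex2 32 at a time,
 * using the -t/--threads and -q/--nrequeue options defined below.
 */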
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
static u_int32_t futex1 = 0, futex2 = 0;
/*
* How many tasks to requeue at a time.
* Default to 1 in order to make the kernel work more.
*/
static unsigned int nrequeue = 1;
static pthread_t *worker;
static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
static unsigned int ncpus, threads_starting, nthreads = 0;
static int futex_flag = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
static const char * const bench_futex_requeue_usage[] = {
"perf bench futex requeue <options>",
NULL
};
static void print_summary(void)
{
double requeuetime_avg = avg_stats(&requeuetime_stats);
double requeuetime_stddev = stddev_stats(&requeuetime_stats);
unsigned int requeued_avg = avg_stats(&requeued_stats);
printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
requeued_avg,
nthreads,
requeuetime_avg / USEC_PER_MSEC,
rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
}
static void *workerfn(void *arg __maybe_unused)
{
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
futex_wait(&futex1, 0, NULL, futex_flag);
return NULL;
}
static void block_threads(pthread_t *w,
pthread_attr_t thread_attr)
{
cpu_set_t cpu;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
err(EXIT_FAILURE, "pthread_create");
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_requeue(int argc, const char **argv)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
nthreads = ncpus;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
if (!fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
if (nrequeue > nthreads)
nrequeue = nthreads;
printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
"%d at a time.\n\n", getpid(), nthreads,
fshared ? "shared":"private", &futex1, &futex2, nrequeue);
init_stats(&requeued_stats);
init_stats(&requeuetime_stats);
pthread_attr_init(&thread_attr);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
for (j = 0; j < bench_repeat && !done; j++) {
unsigned int nrequeued = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, thread_attr);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
while (nrequeued < nthreads) {
/*
* Do not wakeup any tasks blocked on futex1, allowing
* us to really measure futex_wait functionality.
*/
nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
nrequeue, futex_flag);
}
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
if (!silent) {
printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n",
j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
}
/* everybody should be blocked on futex2, wake'em up */
nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
if (nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
for (i = 0; i < nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(worker);
return ret;
err:
usage_with_options(bench_futex_requeue_usage, options);
exit(EXIT_FAILURE);
}
|
hannes/linux
|
tools/perf/bench/futex-requeue.c
|
C
|
gpl-2.0
| 5,901
|
/// @ref gtx_common
/// @file glm/gtx/common.hpp
///
/// @see core (dependence)
///
/// @defgroup gtx_common GLM_GTX_common
/// @ingroup gtx
///
/// Include <glm/gtx/common.hpp> to use the features of this extension.
///
/// @brief Provide functions to increase the compatibility with Cg and HLSL languages
#pragma once
// Dependencies:
#include "../vec2.hpp"
#include "../vec3.hpp"
#include "../vec4.hpp"
#include "../gtc/vec1.hpp"
#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
# ifndef GLM_ENABLE_EXPERIMENTAL
# pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
# else
# pragma message("GLM: GLM_GTX_common extension included")
# endif
#endif
namespace glm
{
/// @addtogroup gtx_common
/// @{
/// Returns true if x is a denormalized number
/// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format.
/// This format is less precise but can represent values closer to zero.
///
/// @tparam genType Floating-point scalar or vector types.
///
/// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/isnan.xml">GLSL isnan man page</a>
/// @see <a href="http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf">GLSL 4.20.8 specification, section 8.3 Common Functions</a>
template<typename genType>
GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x);
/// Similar to 'mod' but with a different rounding and integer support.
/// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)'
///
/// @see <a href="http://stackoverflow.com/questions/7610631/glsl-mod-vs-hlsl-fmod">GLSL mod vs HLSL fmod</a>
/// @see <a href="http://www.opengl.org/sdk/docs/manglsl/xhtml/mod.xml">GLSL mod man page</a>
template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fmod(vec<L, T, Q> const& v);
/// Returns whether vector component values are within an interval. An open interval excludes its endpoints, and is denoted with parentheses.
///
/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
/// @tparam T Floating-point or integer scalar types
/// @tparam Q Value from qualifier enum
///
/// @see ext_vector_relational
template <length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
/// Returns whether vector component values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets.
///
/// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector
/// @tparam T Floating-point or integer scalar types
/// @tparam Q Value from qualifier enum
///
/// @see ext_vector_relational
template <length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
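// Illustrative sketch (not part of the original header): with the declarations above,
// glm::openBounded(glm::vec3(0.5f), glm::vec3(0.0f), glm::vec3(1.0f)) yields bvec3(true, true, true),
// and glm::closeBounded(glm::vec3(1.0f), glm::vec3(0.0f), glm::vec3(1.0f)) also yields bvec3(true, true, true),
// since the closed test includes the interval endpoints while the open test excludes them.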
/// @}
}//namespace glm
#include "common.inl"
|
bugbit/mathematical-drawings
|
v2020/Dibuixos/3rdParty/glm-0.9.9.7/glm/gtx/common.hpp
|
C++
|
apache-2.0
| 3,116
|
.dojoxFloatingPane {
background-color:#fff;
position:relative;
border: 1px solid #dedede;
overflow: hidden;
-webkit-box-shadow: 0px 5px 10px #adadad;
}
.dojoxFloatingPaneFg {
-webkit-box-shadow: 0px 8px 20px #525252;
}
/* titleNode */
.dojoxFloatingPaneTitle {
background: #cccccc;
background:#fafafa repeat-x bottom left;
border:1px solid #bfbfbf;
padding:4px 4px 2px 4px;
cursor: pointer;
white-space: nowrap;
}
.soria .dojoxFloatingPaneTitle {
background:#fff url("../../../dijit/themes/soria/images/titleBar.png") repeat-x top left;
border:1px solid #b1badf;
font-size: 0.9em;
font-weight: bold;
line-height:1.2em;
}
.tundra .dojoxFloatingPaneTitle {
background:#fafafa url("../../../dijit/themes/tundra/images/titleBarBg.gif") repeat-x bottom left;
border:1px solid #bfbfbf;
color:#000;
}
/* Icons */
.dojoxFloatingCloseIcon {
background:url('../../../dijit/themes/tundra/images/tabClose.png') no-repeat center center;
width:16px;
height:16px;
overflow:hidden;
float:right;
}
.dojoxFloatingMinimizeIcon {
background:url('../../../dijit/themes/tundra/images/spriteArrows.png') no-repeat 0px center;
width:7px;
margin: 0px 4px;
overflow:hidden;
float:right;
}
.soria .dojoxFloatingMinimizeIcon {
background:url("../../../dijit/themes/soria/images/spriteRoundedIconsSmallBl.png") no-repeat -15px top;
width: 14px;
margin: 0px 2px;
}
.floatingPaneMaximized .dojoxFloatingMaximizeIcon { display:none; }
.dojoxFloatingMaximizeIcon {
background:url('../../../dijit/themes/tundra/images/spriteArrows.png') no-repeat -21px center;
width:7px;
margin: 0px 4px;
overflow:hidden;
float:right;
}
.soria .dojoxFloatingMaximizeIcon {
background:url("../../../dijit/themes/soria/images/spriteRoundedIconsSmallBl.png") no-repeat -45px top;
}
.floatingPaneMaximized .dojoxFloatingRestoreIcon { display:inline; }
.dojoxFloatingRestoreIcon {
background:url('../../../dijit/themes/tundra/images/spriteArrows.png') no-repeat 0px center;
width:7px;
margin: 0px 4px;
float:right;
display:none;
}
.dojoxFloatingResizeHandle {
background:url('icons/resize.png') no-repeat bottom right;
position:absolute;
right:0;
bottom:0;
width:16px;
height:16px;
cursor:nw-resize;
}
.dojoxFloatingCloseIcon {
width:15px;
height:15px;
overflow:hidden;
float:right;
cursor:pointer;
}
.soria .dojoxFloatingCloseIcon {
background:url("../../../dijit/themes/soria/images/spriteRoundedIconsSmallBl.png") no-repeat -60px top;
}
.tundra .dojoxFloatingCloseIcon {
background:url('../../../dijit/themes/tundra/images/tabClose.png') no-repeat center center;
}
/* our un-used dock styles for now */
.dojoxFloatingDockDefault {
position:absolute;
bottom:0px;
left:0px;
overflow:hidden;
margin:0;
margin-bottom:3px;
padding:0px;
width:100%;
z-index:99; /* position the dock _just_ below the lowest pane */
background:transparent;
/* background-color:#fff;
border-top:1px solid #ccc;
*/
}
.dojoxDockList {
padding: 0px;
margin: 0px;
}
.dojoxDockRestoreButton {
background:url('../../../dijit/themes/tundra/images/arrowUp.png') no-repeat center center;
width:16px; height:16px;
overflow:hidden;
float:left;
margin-top:2px;
}
.soria .dojoxDockRestoreButton {
background:url("../../../dijit/themes/soria/images/spriteRoundedIconsSmallBl.png") no-repeat -45px top;
}
.dojoxDockTitleNode {
overflow:hidden;
}
/* Modifications */
.dojoxDock {
display: block;
border: 1px solid black;
position: absolute;
padding:0;
margin:0;
background:#fcfcfc;
}
.dojoxDockNode {
border: 1px solid #adadad;
border-radius: 2px;
-webkit-border-radius: 2px;
-moz-border-radius: 3px;
cursor:pointer;
list-style: none;
padding: 2px;
margin: 0px;
height: 16px;
width: auto;
float: left;
background: #fafafa url("../../../dijit/themes/tundra/images/titleBarBg.gif") repeat-x bottom left;
}
.soria .dojoxDockNode {
background:#b7cdee url("../../../dijit/themes/soria/images/titleBar.png") repeat-x;
}
.dojoxFloatingPaneContent {
overflow: auto;
background-color: #fff;
height: 100%;
width: 100%;
}
.dojoxFloatingPaneCanvas {
background-color:#fff;
}
|
ozoneplatform/owf-framework
|
web-app/js-lib/dojo-release-1.5.0/dojox/layout/resources/FloatingPane.css
|
CSS
|
apache-2.0
| 4,128
|
#
# (C) Copyright 2008 Jelmer Vernooij <jelmer@samba.org>
# (C) Copyright 2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""GSSAPI authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 4752 <http://www.ietf.org/rfc/rfc4752.txt>`__
"""
__docformat__ = "restructuredtext en"
import base64
import kerberos
import logging
from .core import ClientAuthenticator, Response, Success
from .core import sasl_mechanism
logger = logging.getLogger("pyxmpp2.sasl.gssapi")
@sasl_mechanism("GSSAPI", 75)
class GSSAPIClientAuthenticator(ClientAuthenticator):
    """Provides client-side GSSAPI SASL (Kerberos 5) authentication."""
    def __init__(self, password_manager):
        ClientAuthenticator.__init__(self, password_manager)
        self.password_manager = password_manager
        self.username = None
        self._gss = None
        self.step = None
        self.authzid = None
    def start(self, username, authzid):
        self.username = username
        self.authzid = authzid
        _unused, self._gss = kerberos.authGSSClientInit(authzid or
                "{0}@{1}".format("xmpp",
                                 self.password_manager.get_serv_host()))
        self.step = 0
        return self.challenge("")
    def challenge(self, challenge):
        if self.step == 0:
            ret = kerberos.authGSSClientStep(self._gss,
                                             base64.b64encode(challenge))
            if ret != kerberos.AUTH_GSS_CONTINUE:
                self.step = 1
        elif self.step == 1:
            ret = kerberos.authGSSClientUnwrap(self._gss,
                                               base64.b64encode(challenge))
            response = kerberos.authGSSClientResponse(self._gss)
            ret = kerberos.authGSSClientWrap(self._gss, response, self.username)
        response = kerberos.authGSSClientResponse(self._gss)
        if response is None:
            return Response("")
        else:
            return Response(base64.b64decode(response))
    def finish(self, data):
        self.username = kerberos.authGSSClientUserName(self._gss)
        logger.debug("Authenticated as {0!r}".format(
            kerberos.authGSSClientUserName(self._gss)))
        return Success(self.username, None, self.authzid)
# vi: sts=4 et sw=4
|
pforret/python-for-android
|
python3-alpha/python-libs/pyxmpp2/sasl/gssapi.py
|
Python
|
apache-2.0
| 3,008
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
* mappings with different cachability attributes for the same
* page. This avoids data corruption on some CPUs.
*/
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
* not be worth it. Would need a page for it.
*/
#define flush_agp_cache() wbinvd()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif /* _ASM_X86_AGP_H */
|
BPI-SINOVOIP/BPI-Mainline-kernel
|
linux-5.4/arch/x86/include/asm/agp.h
|
C
|
gpl-2.0
| 1,070
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MACROS_H
#define EIGEN_MACROS_H
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 10
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
EIGEN_MINOR_VERSION>=z))))
// Compiler identification, EIGEN_COMP_*
/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
#ifdef __GNUC__
#define EIGEN_COMP_GNUC 1
#else
#define EIGEN_COMP_GNUC 0
#endif
/// \internal EIGEN_COMP_CLANG set to 1 if the compiler is clang (alias for __clang__)
#if defined(__clang__)
#define EIGEN_COMP_CLANG 1
#else
#define EIGEN_COMP_CLANG 0
#endif
/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
#if defined(__llvm__)
#define EIGEN_COMP_LLVM 1
#else
#define EIGEN_COMP_LLVM 0
#endif
/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise
#if defined(__INTEL_COMPILER)
#define EIGEN_COMP_ICC __INTEL_COMPILER
#else
#define EIGEN_COMP_ICC 0
#endif
/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw
#if defined(__MINGW32__)
#define EIGEN_COMP_MINGW 1
#else
#define EIGEN_COMP_MINGW 0
#endif
/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio
#if defined(__SUNPRO_CC)
#define EIGEN_COMP_SUNCC 1
#else
#define EIGEN_COMP_SUNCC 0
#endif
/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.
#if defined(_MSC_VER)
#define EIGEN_COMP_MSVC _MSC_VER
#else
#define EIGEN_COMP_MSVC 0
#endif
/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not, e.g., ICC
#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC)
#define EIGEN_COMP_MSVC_STRICT _MSC_VER
#else
#define EIGEN_COMP_MSVC_STRICT 0
#endif
/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
#if defined(__IBMCPP__) || defined(__xlc__)
#define EIGEN_COMP_IBM 1
#else
#define EIGEN_COMP_IBM 0
#endif
/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
#if defined(__PGI)
#define EIGEN_COMP_PGI 1
#else
#define EIGEN_COMP_PGI 0
#endif
/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
#define EIGEN_COMP_ARM 1
#else
#define EIGEN_COMP_ARM 0
#endif
/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM )
#define EIGEN_COMP_GNUC_STRICT 1
#else
#define EIGEN_COMP_GNUC_STRICT 0
#endif
#if EIGEN_COMP_GNUC
#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
#define EIGEN_GNUC_AT(x,y) ( __GNUC__==x && __GNUC_MINOR__==y )
#else
#define EIGEN_GNUC_AT_LEAST(x,y) 0
#define EIGEN_GNUC_AT_MOST(x,y) 0
#define EIGEN_GNUC_AT(x,y) 0
#endif
// FIXME: could probably be removed as we do not support gcc 3.x anymore
#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)
#define EIGEN_GCC3_OR_OLDER 1
#else
#define EIGEN_GCC3_OR_OLDER 0
#endif
// Architecture identification, EIGEN_ARCH_*
#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
#define EIGEN_ARCH_x86_64 1
#else
#define EIGEN_ARCH_x86_64 0
#endif
#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
#define EIGEN_ARCH_i386 1
#else
#define EIGEN_ARCH_i386 0
#endif
#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386
#define EIGEN_ARCH_i386_OR_x86_64 1
#else
#define EIGEN_ARCH_i386_OR_x86_64 0
#endif
/// \internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM
#if defined(__arm__)
#define EIGEN_ARCH_ARM 1
#else
#define EIGEN_ARCH_ARM 0
#endif
/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
#if defined(__aarch64__)
#define EIGEN_ARCH_ARM64 1
#else
#define EIGEN_ARCH_ARM64 0
#endif
#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
#define EIGEN_ARCH_ARM_OR_ARM64 1
#else
#define EIGEN_ARCH_ARM_OR_ARM64 0
#endif
/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
#if defined(__mips__) || defined(__mips)
#define EIGEN_ARCH_MIPS 1
#else
#define EIGEN_ARCH_MIPS 0
#endif
/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC
#if defined(__sparc__) || defined(__sparc)
#define EIGEN_ARCH_SPARC 1
#else
#define EIGEN_ARCH_SPARC 0
#endif
/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium
#if defined(__ia64__)
#define EIGEN_ARCH_IA64 1
#else
#define EIGEN_ARCH_IA64 0
#endif
/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC
#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
#define EIGEN_ARCH_PPC 1
#else
#define EIGEN_ARCH_PPC 0
#endif
// Operating system identification, EIGEN_OS_*
/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
#if defined(__unix__) || defined(__unix)
#define EIGEN_OS_UNIX 1
#else
#define EIGEN_OS_UNIX 0
#endif
/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel
#if defined(__linux__)
#define EIGEN_OS_LINUX 1
#else
#define EIGEN_OS_LINUX 0
#endif
/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android
// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.
#if defined(__ANDROID__) || defined(ANDROID)
#define EIGEN_OS_ANDROID 1
#else
#define EIGEN_OS_ANDROID 0
#endif
/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU Linux and not Linux-based OS (e.g., not android)
#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)
#define EIGEN_OS_GNULINUX 1
#else
#define EIGEN_OS_GNULINUX 0
#endif
/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
#define EIGEN_OS_BSD 1
#else
#define EIGEN_OS_BSD 0
#endif
/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS
#if defined(__APPLE__)
#define EIGEN_OS_MAC 1
#else
#define EIGEN_OS_MAC 0
#endif
/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX
#if defined(__QNX__)
#define EIGEN_OS_QNX 1
#else
#define EIGEN_OS_QNX 0
#endif
/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based
#if defined(_WIN32)
#define EIGEN_OS_WIN 1
#else
#define EIGEN_OS_WIN 0
#endif
/// \internal EIGEN_OS_WIN64 set to 1 if the OS is Windows 64bits
#if defined(_WIN64)
#define EIGEN_OS_WIN64 1
#else
#define EIGEN_OS_WIN64 0
#endif
/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE
#if defined(_WIN32_WCE)
#define EIGEN_OS_WINCE 1
#else
#define EIGEN_OS_WINCE 0
#endif
/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin
#if defined(__CYGWIN__)
#define EIGEN_OS_CYGWIN 1
#else
#define EIGEN_OS_CYGWIN 0
#endif
/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variants
#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )
#define EIGEN_OS_WIN_STRICT 1
#else
#define EIGEN_OS_WIN_STRICT 0
#endif
/// \internal EIGEN_OS_SUN set to 1 if the OS is SUN
#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SUN 1
#else
#define EIGEN_OS_SUN 0
#endif
/// \internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris
#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SOLARIS 1
#else
#define EIGEN_OS_SOLARIS 0
#endif
#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
// see bug 89
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
#else
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
#endif
// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
// certain common platform (compiler+architecture combinations) to avoid these problems.
// Only static alignment is really problematic (relies on nonstandard compiler extensions that don't
// work everywhere, for example don't work on GCC/ARM), try to keep heap alignment even
// when we have to disable static alignment.
#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ppc__) || defined(__ia64__))
#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
#else
#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
#endif
// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
&& !EIGEN_GCC3_OR_OLDER \
&& !defined(__SUNPRO_CC) \
&& !defined(__QNXNTO__)
#define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
#else
#define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
#endif
#ifdef EIGEN_DONT_ALIGN
#ifndef EIGEN_DONT_ALIGN_STATICALLY
#define EIGEN_DONT_ALIGN_STATICALLY
#endif
#define EIGEN_ALIGN 0
#else
#define EIGEN_ALIGN 1
#endif
// EIGEN_ALIGN_STATICALLY is the true test whether we want to align arrays on the stack or not. It takes into account both the user choice to explicitly disable
// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). Henceforth, only EIGEN_ALIGN_STATICALLY should be used.
#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
#define EIGEN_ALIGN_STATICALLY 1
#else
#define EIGEN_ALIGN_STATICALLY 0
#ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
#define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
#endif
#endif
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
#else
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
#endif
#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
// A Clang feature extension to determine compiler features.
// We use it to determine 'cxx_rvalue_references'
#ifndef __has_feature
# define __has_feature(x) 0
#endif
// Do we support r-value references?
#if (__has_feature(cxx_rvalue_references) || \
(defined(__cplusplus) && __cplusplus >= 201103L) || \
(defined(_MSC_VER) && _MSC_VER >= 1600))
#define EIGEN_HAVE_RVALUE_REFERENCES
#endif
// Cross compiler wrapper around LLVM's __has_builtin
#ifdef __has_builtin
# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
#else
# define EIGEN_HAS_BUILTIN(x) 0
#endif
/** Allows to disable some optimizations which might affect the accuracy of the result.
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
* - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
*/
#ifndef EIGEN_FAST_MATH
#define EIGEN_FAST_MATH 1
#endif
#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
// concatenate two tokens
#define EIGEN_CAT2(a,b) a ## b
#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
// convert a token to a string
#define EIGEN_MAKESTRING2(a) #a
#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
// but GCC is still doing fine with just inline.
#if (defined _MSC_VER) || (defined __INTEL_COMPILER)
#define EIGEN_STRONG_INLINE __forceinline
#else
#define EIGEN_STRONG_INLINE inline
#endif
// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
// attribute to maximize inlining. This should only be used when really necessary: in particular,
// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
// FIXME with the always_inline attribute,
// gcc 3.4.x reports the following compilation error:
// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
// : function body not available
#if EIGEN_GNUC_AT_LEAST(4,0)
#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
#else
#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
#endif
#if (defined __GNUC__)
#define EIGEN_DONT_INLINE __attribute__((noinline))
#elif (defined _MSC_VER)
#define EIGEN_DONT_INLINE __declspec(noinline)
#else
#define EIGEN_DONT_INLINE
#endif
#if (defined __GNUC__)
#define EIGEN_PERMISSIVE_EXPR __extension__
#else
#define EIGEN_PERMISSIVE_EXPR
#endif
// this macro allows to get rid of linking errors about multiply defined functions.
// - static is not very good because it prevents definitions from different object files to be merged.
// So static causes the resulting linked executable to be bloated with multiple copies of the same function.
// - inline is not perfect either as it unwantedly hints the compiler toward inlining the function.
#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
#ifdef NDEBUG
# ifndef EIGEN_NO_DEBUG
# define EIGEN_NO_DEBUG
# endif
#endif
// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
#ifdef EIGEN_NO_DEBUG
#define eigen_plain_assert(x)
#else
#if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
namespace Eigen {
namespace internal {
inline bool copy_bool(bool b) { return b; }
}
}
#define eigen_plain_assert(x) assert(x)
#else
// work around bug 89
#include <cstdlib> // for abort
#include <iostream> // for std::cerr
namespace Eigen {
namespace internal {
// trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
// see bug 89.
namespace {
EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
}
inline void assert_fail(const char *condition, const char *function, const char *file, int line)
{
std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
abort();
}
}
}
#define eigen_plain_assert(x) \
do { \
if(!Eigen::internal::copy_bool(x)) \
Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
} while(false)
#endif
#endif
// eigen_assert can be overridden
#ifndef eigen_assert
#define eigen_assert(x) eigen_plain_assert(x)
#endif
#ifdef EIGEN_INTERNAL_DEBUGGING
#define eigen_internal_assert(x) eigen_assert(x)
#else
#define eigen_internal_assert(x)
#endif
#ifdef EIGEN_NO_DEBUG
#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
#else
#define EIGEN_ONLY_USED_FOR_DEBUG(x)
#endif
#ifndef EIGEN_NO_DEPRECATED_WARNING
#if (defined __GNUC__)
#define EIGEN_DEPRECATED __attribute__((deprecated))
#elif (defined _MSC_VER)
#define EIGEN_DEPRECATED __declspec(deprecated)
#else
#define EIGEN_DEPRECATED
#endif
#else
#define EIGEN_DEPRECATED
#endif
#if (defined __GNUC__)
#define EIGEN_UNUSED __attribute__((unused))
#else
#define EIGEN_UNUSED
#endif
// Suppresses 'unused variable' warnings.
namespace Eigen {
namespace internal {
template<typename T> void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
#define EIGEN_ASM_COMMENT(X) __asm__("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
#endif
/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
* However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
* so that vectorization doesn't affect binary compatibility.
*
* If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
* vectorized and non-vectorized code.
*/
#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#elif (defined _MSC_VER)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
#elif (defined __SUNPRO_CC)
// FIXME not sure about this one:
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#else
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif
#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
#if EIGEN_ALIGN_STATICALLY
#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
#else
#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
#define EIGEN_USER_ALIGN16
#endif
#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
#define EIGEN_RESTRICT
#endif
#ifndef EIGEN_RESTRICT
#define EIGEN_RESTRICT __restrict
#endif
#ifndef EIGEN_STACK_ALLOCATION_LIMIT
// 131072 == 128 KB
#define EIGEN_STACK_ALLOCATION_LIMIT 131072
#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT
#ifdef EIGEN_MAKING_DOCS
// format used in Eigen's documentation
// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic.
#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
#else
#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
#endif
#endif
// just an empty macro !
#define EIGEN_EMPTY
#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER))
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
template <typename OtherDerived> \
EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
#else
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
{ \
Base::operator=(other); \
return *this; \
}
#endif
/** \internal
* \brief Macro to manually inherit assignment operators.
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
*/
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/**
* Just a side note. Commenting within defines works only by documenting
* behind the object (via '!<'). Comments cannot be multi-line and thus
* we have these extra long lines. What is confusing doxygen over here is
* that we use '\' and basically have a bunch of typedefs with their
* documentation in a single line.
**/
#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would be the corresponding RealScalar. */ \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would be the corresponding RealScalar. */ \
typedef typename Base::PacketScalar PacketScalar; \
typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
typedef typename Eigen::internal::nested<Derived>::type Nested; \
typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
typedef typename Eigen::internal::traits<Derived>::Index Index; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
Flags = Eigen::internal::traits<Derived>::Flags, \
CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
using Base::derived; \
using Base::const_cast_derived;
#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
: ((int)a == 1 || (int)b == 1) ? 1 \
: ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a <= (int)b) ? (int)a : (int)b)
// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
// (between 0 and 3), it is not more than 3.
#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
: ((int)a == 1 || (int)b == 1) ? 1 \
: ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
: ((int)a == Dynamic) ? (int)b \
: ((int)b == Dynamic) ? (int)a \
: ((int)a <= (int)b) ? (int)a : (int)b)
// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a >= (int)b) ? (int)a : (int)b)
#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? Dynamic : int(a)+int(b)
#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
#define EIGEN_IMPLIES(a,b) (!(a) || (b))
#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
(METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
{ \
return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
}
// the expression type of a cwise product
#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
CwiseBinaryOp< \
internal::scalar_product_op< \
typename internal::traits<LHS>::Scalar, \
typename internal::traits<RHS>::Scalar \
>, \
const LHS, \
const RHS \
>
#endif // EIGEN_MACROS_H
|
dzorlu/sdc
|
unscented-kf/src/Eigen/src/Core/util/Macros.h
|
C
|
mit
| 25,454
|
module Fog
module AWS
class AutoScaling
class Real
require 'fog/aws/parsers/auto_scaling/basic'
# Runs the policy you create for your Auto Scaling group in
# put_scaling_policy.
#
# ==== Parameters
# * 'PolicyName'<~String> - The name or PolicyARN of the policy you
# want to run.
# * options<~Hash>:
# * 'AutoScalingGroupName'<~String> - The name or ARN of the Auto
# Scaling group.
# * 'HonorCooldown'<~Boolean> - Set to true if you want Auto Scaling
# to reject this request if the Auto Scaling group is in cooldown.
#
# ==== Returns
# * response<~Excon::Response>:
# * body<~Hash>:
# * 'ResponseMetadata'<~Hash>:
# * 'RequestId'<~String> - Id of request
#
# ==== See Also
# http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_ExecutePolicy.html
#
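# ==== Example (illustrative; assumes an authenticated Fog::AWS::AutoScaling
# connection named `auto_scaling`)
#
#   auto_scaling.execute_policy('scale-up-policy',
#     'AutoScalingGroupName' => 'my-asg',
#     'HonorCooldown' => true)
#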
def execute_policy(policy_name, options = {})
request({
'Action' => 'ExecutePolicy',
'PolicyName' => policy_name,
:parser => Fog::Parsers::AWS::AutoScaling::Basic.new
}.merge!(options))
end
end
class Mock
def execute_policy(policy_name, options = {})
Fog::Mock.not_implemented
end
end
end
end
end
|
jonpstone/portfolio-project-rails-mean-movie-reviews
|
vendor/bundle/ruby/2.3.0/gems/fog-aws-2.0.0/lib/fog/aws/requests/auto_scaling/execute_policy.rb
|
Ruby
|
mit
| 1,413
|
using System;
using System.Collections.Generic;
using System.Text;
namespace UniRx
{
/// <summary>Event kind of CountNotifier.</summary>
public enum CountChangedStatus
{
/// <summary>Count incremented.</summary>
Increment,
/// <summary>Count decremented.</summary>
Decrement,
/// <summary>Count is zero.</summary>
Empty,
/// <summary>Count arrived max.</summary>
Max
}
/// <summary>
/// Notify event of count flag.
/// </summary>
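/// <remarks>
/// Illustrative usage (not part of the original source; assumes UniRx's
/// Subscribe(Action&lt;T&gt;) extension method is available):
/// <code>
/// var notifier = new CountNotifier(max: 10);
/// notifier.Subscribe(status => UnityEngine.Debug.Log(status));
/// var handle = notifier.Increment(3); // notifies Increment, Count == 3
/// handle.Dispose();                   // back to 0: notifies Decrement, then Empty
/// </code>
/// </remarks>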
public class CountNotifier : IObservable<CountChangedStatus>
{
readonly object lockObject = new object();
readonly Subject<CountChangedStatus> statusChanged = new Subject<CountChangedStatus>();
readonly int max;
public int Max { get { return max; } }
public int Count { get; private set; }
/// <summary>
/// Setup max count of signal.
/// </summary>
public CountNotifier(int max = int.MaxValue)
{
if (max <= 0)
{
throw new ArgumentException("max");
}
this.max = max;
}
/// <summary>
/// Increment count and notify status.
/// </summary>
public IDisposable Increment(int incrementCount = 1)
{
if (incrementCount < 0)
{
throw new ArgumentException("incrementCount");
}
lock (lockObject)
{
if (Count == Max) return Disposable.Empty;
else if (incrementCount + Count > Max) Count = Max;
else Count += incrementCount;
statusChanged.OnNext(CountChangedStatus.Increment);
if (Count == Max) statusChanged.OnNext(CountChangedStatus.Max);
return Disposable.Create(() => this.Decrement(incrementCount));
}
}
/// <summary>
/// Decrement count and notify status.
/// </summary>
public void Decrement(int decrementCount = 1)
{
if (decrementCount < 0)
{
throw new ArgumentException("decrementCount");
}
lock (lockObject)
{
if (Count == 0) return;
else if (Count - decrementCount < 0) Count = 0;
else Count -= decrementCount;
statusChanged.OnNext(CountChangedStatus.Decrement);
if (Count == 0) statusChanged.OnNext(CountChangedStatus.Empty);
}
}
/// <summary>
/// Subscribe observer.
/// </summary>
public IDisposable Subscribe(IObserver<CountChangedStatus> observer)
{
return statusChanged.Subscribe(observer);
}
}
}
|
Stormancer/Stormancer-sdk-net-unity
|
src/libs/Unity/UniRx/Notifiers/CountNotifier.cs
|
C#
|
mit
| 2,813
|
/**
* ag-grid - Advanced Data Grid / Data Table supporting Javascript / React / AngularJS / Web Components
* @version v4.2.2
* @link http://www.ag-grid.com/
* @license MIT
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
var context_1 = require("../../context/context");
var context_2 = require("../../context/context");
var gridOptionsWrapper_1 = require("../../gridOptionsWrapper");
var filterManager_1 = require("../../filter/filterManager");
var FilterStage = (function () {
function FilterStage() {
}
FilterStage.prototype.execute = function (rowNode) {
var filterActive;
if (this.gridOptionsWrapper.isEnableServerSideFilter()) {
filterActive = false;
}
else {
filterActive = this.filterManager.isAnyFilterPresent();
}
this.recursivelyFilter(rowNode, filterActive);
};
FilterStage.prototype.recursivelyFilter = function (rowNode, filterActive) {
var _this = this;
// recursively get all children that are groups to also filter
rowNode.childrenAfterGroup.forEach(function (child) {
if (child.group) {
_this.recursivelyFilter(child, filterActive);
}
});
// result of filter for this node
var filterResult;
if (filterActive) {
filterResult = [];
rowNode.childrenAfterGroup.forEach(function (childNode) {
if (childNode.group) {
// a group is included in the result if it has any children of its own.
// by this stage, the child groups are already filtered
if (childNode.childrenAfterFilter.length > 0) {
filterResult.push(childNode);
}
}
else {
// a leaf level node is included if it passes the filter
if (_this.filterManager.doesRowPassFilter(childNode)) {
filterResult.push(childNode);
}
}
});
}
else {
// if not filtering, the result is the original list
filterResult = rowNode.childrenAfterGroup;
}
rowNode.childrenAfterFilter = filterResult;
this.setAllChildrenCount(rowNode);
};
FilterStage.prototype.setAllChildrenCount = function (rowNode) {
var allChildrenCount = 0;
rowNode.childrenAfterFilter.forEach(function (child) {
if (child.group) {
allChildrenCount += child.allChildrenCount;
}
else {
allChildrenCount++;
}
});
rowNode.allChildrenCount = allChildrenCount;
};
__decorate([
context_2.Autowired('gridOptionsWrapper'),
__metadata('design:type', gridOptionsWrapper_1.GridOptionsWrapper)
], FilterStage.prototype, "gridOptionsWrapper", void 0);
__decorate([
context_2.Autowired('filterManager'),
__metadata('design:type', filterManager_1.FilterManager)
], FilterStage.prototype, "filterManager", void 0);
FilterStage = __decorate([
context_1.Bean('filterStage'),
__metadata('design:paramtypes', [])
], FilterStage);
return FilterStage;
})();
exports.FilterStage = FilterStage;
|
joeyparrish/cdnjs
|
ajax/libs/ag-grid/4.2.3/lib/rowControllers/inMemory/filterStage.js
|
JavaScript
|
mit
| 4,042
|
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
global.Dexie = factory();
}(this, function () { 'use strict';
// By default, debug will be true only if platform is a web platform and its page is served from localhost.
// When debug = true, errors' stacks will contain asynchronous long stacks.
var debug = typeof location !== 'undefined' &&
// By default, use debug mode if served from localhost.
/^(http|https):\/\/(localhost|127\.0\.0\.1)/.test(location.href);
function setDebug(value, filter) {
debug = value;
libraryFilter = filter;
}
var libraryFilter = function () {
return true;
};
var NEEDS_THROW_FOR_STACK = !new Error("").stack;
function getErrorWithStack() {
"use strict";
if (NEEDS_THROW_FOR_STACK) try {
// Doing something naughty in strict mode here to trigger a specific error
// that can be explicitly ignored in the debugger's exception settings.
// If we'd just throw new Error() here, IE's debugger's exception settings
// will just consider it as "exception thrown by javascript code" which is
// something you wouldn't want it to ignore.
getErrorWithStack.arguments;
throw new Error(); // Fallback if above line don't throw.
} catch (e) {
return e;
}
return new Error();
}
function prettyStack(exception, numIgnoredFrames) {
var stack = exception.stack;
if (!stack) return "";
numIgnoredFrames = numIgnoredFrames || 0;
if (stack.indexOf(exception.name) === 0) numIgnoredFrames += (exception.name + exception.message).split('\n').length;
return stack.split('\n').slice(numIgnoredFrames).filter(libraryFilter).map(function (frame) {
return "\n" + frame;
}).join('');
}
function nop() {}
function mirror(val) {
return val;
}
function pureFunctionChain(f1, f2) {
// Enables chained events that take ONE argument and return it to the next function in the chain.
// This pattern is used in the hook("reading") event.
if (f1 == null || f1 === mirror) return f2;
return function (val) {
return f2(f1(val));
};
}
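// Illustrative (not from the original source):
//   pureFunctionChain(function (x) { return x + 1; }, function (x) { return x * 2; })(3) === 8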
function callBoth(on1, on2) {
return function () {
on1.apply(this, arguments);
on2.apply(this, arguments);
};
}
function hookCreatingChain(f1, f2) {
// Enables chained events that take several arguments and may modify the first argument by making a modification and then returning the same instance.
// This pattern is used in the hook("creating") event.
if (f1 === nop) return f2;
return function () {
var res = f1.apply(this, arguments);
if (res !== undefined) arguments[0] = res;
var onsuccess = this.onsuccess,
// In case event listener has set this.onsuccess
onerror = this.onerror; // In case event listener has set this.onerror
this.onsuccess = null;
this.onerror = null;
var res2 = f2.apply(this, arguments);
if (onsuccess) this.onsuccess = this.onsuccess ? callBoth(onsuccess, this.onsuccess) : onsuccess;
if (onerror) this.onerror = this.onerror ? callBoth(onerror, this.onerror) : onerror;
return res2 !== undefined ? res2 : res;
};
}
function hookDeletingChain(f1, f2) {
if (f1 === nop) return f2;
return function () {
f1.apply(this, arguments);
var onsuccess = this.onsuccess,
// In case event listener has set this.onsuccess
onerror = this.onerror; // In case event listener has set this.onerror
this.onsuccess = this.onerror = null;
f2.apply(this, arguments);
if (onsuccess) this.onsuccess = this.onsuccess ? callBoth(onsuccess, this.onsuccess) : onsuccess;
if (onerror) this.onerror = this.onerror ? callBoth(onerror, this.onerror) : onerror;
};
}
function hookUpdatingChain(f1, f2) {
if (f1 === nop) return f2;
return function (modifications) {
var res = f1.apply(this, arguments);
extend(modifications, res); // If f1 returns new modifications, extend caller's modifications with the result before calling next in chain.
var onsuccess = this.onsuccess,
// In case event listener has set this.onsuccess
onerror = this.onerror; // In case event listener has set this.onerror
this.onsuccess = null;
this.onerror = null;
var res2 = f2.apply(this, arguments);
if (onsuccess) this.onsuccess = this.onsuccess ? callBoth(onsuccess, this.onsuccess) : onsuccess;
if (onerror) this.onerror = this.onerror ? callBoth(onerror, this.onerror) : onerror;
return res === undefined ? res2 === undefined ? undefined : res2 : extend(res, res2);
};
}
function reverseStoppableEventChain(f1, f2) {
if (f1 === nop) return f2;
return function () {
if (f2.apply(this, arguments) === false) return false;
return f1.apply(this, arguments);
};
}
function promisableChain(f1, f2) {
if (f1 === nop) return f2;
return function () {
var res = f1.apply(this, arguments);
if (res && typeof res.then === 'function') {
var thiz = this,
i = arguments.length,
args = new Array(i);
while (i--) {
args[i] = arguments[i];
}return res.then(function () {
return f2.apply(thiz, args);
});
}
return f2.apply(this, arguments);
};
}
var keys = Object.keys;
var isArray = Array.isArray;
var _global = typeof self !== 'undefined' ? self : typeof window !== 'undefined' ? window : global;
function extend(obj, extension) {
if (typeof extension !== 'object') return obj;
keys(extension).forEach(function (key) {
obj[key] = extension[key];
});
return obj;
}
var getProto = Object.getPrototypeOf;
var _hasOwn = {}.hasOwnProperty;
function hasOwn(obj, prop) {
return _hasOwn.call(obj, prop);
}
function props(proto, extension) {
if (typeof extension === 'function') extension = extension(getProto(proto));
keys(extension).forEach(function (key) {
setProp(proto, key, extension[key]);
});
}
function setProp(obj, prop, functionOrGetSet, options) {
Object.defineProperty(obj, prop, extend(functionOrGetSet && hasOwn(functionOrGetSet, "get") && typeof functionOrGetSet.get === 'function' ? { get: functionOrGetSet.get, set: functionOrGetSet.set, configurable: true } : { value: functionOrGetSet, configurable: true, writable: true }, options));
}
function derive(Child) {
return {
from: function (Parent) {
Child.prototype = Object.create(Parent.prototype);
setProp(Child.prototype, "constructor", Child);
return {
extend: props.bind(null, Child.prototype)
};
}
};
}
var getOwnPropertyDescriptor = Object.getOwnPropertyDescriptor;
function getPropertyDescriptor(obj, prop) {
var pd = getOwnPropertyDescriptor(obj, prop),
proto;
return pd || (proto = getProto(obj)) && getPropertyDescriptor(proto, prop);
}
var _slice = [].slice;
function slice(args, start, end) {
return _slice.call(args, start, end);
}
function override(origFunc, overridedFactory) {
return overridedFactory(origFunc);
}
function doFakeAutoComplete(fn) {
var to = setTimeout(fn, 1000);
clearTimeout(to);
}
function assert(b) {
if (!b) throw new exceptions.Internal("Assertion failed");
}
function asap(fn) {
if (_global.setImmediate) setImmediate(fn);else setTimeout(fn, 0);
}
/** Generate an object (hash map) based on given array.
* @param extractor Function taking an array item and its index and returning an array of 2 items ([key, value]) to
* insert into the resulting object for each item in the array. If this function returns a falsy value, the
* current item won't affect the resulting object.
*/
function arrayToObject(array, extractor) {
return array.reduce(function (result, item, i) {
var nameAndValue = extractor(item, i);
if (nameAndValue) result[nameAndValue[0]] = nameAndValue[1];
return result;
}, {});
}
function trycatcher(fn, reject) {
return function () {
try {
fn.apply(this, arguments);
} catch (e) {
reject(e);
}
};
}
function tryCatch(fn, onerror, args) {
try {
fn.apply(null, args);
} catch (ex) {
onerror && onerror(ex);
}
}
function rejection(err, uncaughtHandler) {
// Get the call stack and return a rejected promise.
var rv = Promise.reject(err);
return uncaughtHandler ? rv.uncaught(uncaughtHandler) : rv;
}
function getByKeyPath(obj, keyPath) {
// http://www.w3.org/TR/IndexedDB/#steps-for-extracting-a-key-from-a-value-using-a-key-path
if (hasOwn(obj, keyPath)) return obj[keyPath]; // This line is moved from last to first for optimization purpose.
if (!keyPath) return obj;
if (typeof keyPath !== 'string') {
var rv = [];
for (var i = 0, l = keyPath.length; i < l; ++i) {
var val = getByKeyPath(obj, keyPath[i]);
rv.push(val);
}
return rv;
}
var period = keyPath.indexOf('.');
if (period !== -1) {
var innerObj = obj[keyPath.substr(0, period)];
return innerObj === undefined ? undefined : getByKeyPath(innerObj, keyPath.substr(period + 1));
}
return undefined;
}
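// Illustrative (not from the original source):
//   getByKeyPath({a: {b: 1}}, "a.b") -> 1
//   getByKeyPath({x: 1, y: 2}, ["x", "y"]) -> [1, 2]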
function setByKeyPath(obj, keyPath, value) {
if (!obj || keyPath === undefined) return;
if ('isFrozen' in Object && Object.isFrozen(obj)) return;
if (typeof keyPath !== 'string' && 'length' in keyPath) {
assert(typeof value !== 'string' && 'length' in value);
for (var i = 0, l = keyPath.length; i < l; ++i) {
setByKeyPath(obj, keyPath[i], value[i]);
}
} else {
var period = keyPath.indexOf('.');
if (period !== -1) {
var currentKeyPath = keyPath.substr(0, period);
var remainingKeyPath = keyPath.substr(period + 1);
if (remainingKeyPath === "") {
if (value === undefined) delete obj[currentKeyPath];else obj[currentKeyPath] = value;
} else {
var innerObj = obj[currentKeyPath];
if (!innerObj) innerObj = obj[currentKeyPath] = {};
setByKeyPath(innerObj, remainingKeyPath, value);
}
} else {
if (value === undefined) delete obj[keyPath];else obj[keyPath] = value;
}
}
}
function delByKeyPath(obj, keyPath) {
if (typeof keyPath === 'string') setByKeyPath(obj, keyPath, undefined);else if ('length' in keyPath) [].map.call(keyPath, function (kp) {
setByKeyPath(obj, kp, undefined);
});
}
function shallowClone(obj) {
var rv = {};
for (var m in obj) {
if (hasOwn(obj, m)) rv[m] = obj[m];
}
return rv;
}
function deepClone(any) {
if (!any || typeof any !== 'object') return any;
var rv;
if (isArray(any)) {
rv = [];
for (var i = 0, l = any.length; i < l; ++i) {
rv.push(deepClone(any[i]));
}
} else if (any instanceof Date) {
rv = new Date();
rv.setTime(any.getTime());
} else {
rv = any.constructor ? Object.create(any.constructor.prototype) : {};
for (var prop in any) {
if (hasOwn(any, prop)) {
rv[prop] = deepClone(any[prop]);
}
}
}
return rv;
}
function getObjectDiff(a, b, rv, prfx) {
// Compares objects a and b and produces a diff object.
rv = rv || {};
prfx = prfx || '';
for (var prop in a) {
if (hasOwn(a, prop)) {
if (!hasOwn(b, prop)) rv[prfx + prop] = undefined; // Property removed
else {
var ap = a[prop],
bp = b[prop];
if (typeof ap === 'object' && typeof bp === 'object') getObjectDiff(ap, bp, rv, prfx + prop + ".");else if (ap !== bp) rv[prfx + prop] = b[prop]; // Primitive value changed
}
}
}for (prop in b) {
if (hasOwn(b, prop) && !hasOwn(a, prop)) {
rv[prfx + prop] = b[prop]; // Property added
}
}return rv;
}
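// Illustrative (not from the original source):
//   getObjectDiff({a: 1, b: 2}, {a: 1, b: 3, c: 4}) -> { b: 3, c: 4 }
//   A property present in a but missing from b appears in the diff with the value undefined.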
// If first argument is iterable or array-like, return it as an array
var iteratorSymbol = typeof Symbol !== 'undefined' && Symbol.iterator;
var getIteratorOf = iteratorSymbol ? function (x) {
var i;
return x != null && (i = x[iteratorSymbol]) && i.apply(x);
} : function () {
return null;
};
var NO_CHAR_ARRAY = {};
// Takes one or several arguments and returns an array based on the following criteria:
// * If several arguments provided, return arguments converted to an array in a way that
// still allows javascript engine to optimize the code.
// * If single argument is an array, return a clone of it.
// * If the this-pointer equals NO_CHAR_ARRAY, don't accept strings as valid iterables; this is a special-case
//   exception to the two bullets below.
// * If single argument is an iterable, convert it to an array and return the resulting array.
// * If single argument is array-like (has length of type number), convert it to an array.
function getArrayOf(arrayLike) {
var i, a, x, it;
if (arguments.length === 1) {
if (isArray(arrayLike)) return arrayLike.slice();
if (this === NO_CHAR_ARRAY && typeof arrayLike === 'string') return [arrayLike];
if (it = getIteratorOf(arrayLike)) {
a = [];
while (x = it.next(), !x.done) {
a.push(x.value);
}return a;
}
if (arrayLike == null) return [arrayLike];
i = arrayLike.length;
if (typeof i === 'number') {
a = new Array(i);
while (i--) {
a[i] = arrayLike[i];
}return a;
}
return [arrayLike];
}
i = arguments.length;
a = new Array(i);
while (i--) {
a[i] = arguments[i];
}return a;
}
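// A few hedged example calls illustrating the criteria listed above (assuming an
// environment with Symbol/Set support for the iterable cases):
//
//   getArrayOf([1, 2, 3]);                   // => [1, 2, 3] (clone of the array)
//   getArrayOf(1, 2, 3);                     // => [1, 2, 3] (several arguments)
//   getArrayOf(new Set([1, 2]));             // => [1, 2]    (iterable)
//   getArrayOf("ab");                        // => ["a", "b"] (string treated as iterable/array-like)
//   getArrayOf.call(NO_CHAR_ARRAY, "ab");    // => ["ab"]    (strings not accepted as iterables)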
var concat = [].concat;
function flatten(a) {
return concat.apply([], a);
}
var dexieErrorNames = ['Modify', 'Bulk', 'OpenFailed', 'VersionChange', 'Schema', 'Upgrade', 'InvalidTable', 'MissingAPI', 'NoSuchDatabase', 'InvalidArgument', 'SubTransaction', 'Unsupported', 'Internal', 'DatabaseClosed', 'IncompatiblePromise'];
var idbDomErrorNames = ['Unknown', 'Constraint', 'Data', 'TransactionInactive', 'ReadOnly', 'Version', 'NotFound', 'InvalidState', 'InvalidAccess', 'Abort', 'Timeout', 'QuotaExceeded', 'Syntax', 'DataClone'];
var errorList = dexieErrorNames.concat(idbDomErrorNames);
var defaultTexts = {
VersionChanged: "Database version changed by other database connection",
DatabaseClosed: "Database has been closed",
Abort: "Transaction aborted",
TransactionInactive: "Transaction has already completed or failed"
};
//
// DexieError - base class of all our exceptions.
//
function DexieError(name, msg) {
// Reason we don't use ES6 classes is because:
// 1. It bloats transpiled code and increases size of minified code.
// 2. It doesn't give us much in this case.
// 3. It would require sub classes to call super(), which
// is not needed when deriving from Error.
this._e = getErrorWithStack();
this.name = name;
this.message = msg;
}
derive(DexieError).from(Error).extend({
stack: {
get: function () {
return this._stack || (this._stack = this.name + ": " + this.message + prettyStack(this._e, 2));
}
},
toString: function () {
return this.name + ": " + this.message;
}
});
function getMultiErrorMessage(msg, failures) {
return msg + ". Errors: " + failures.map(function (f) {
return f.toString();
}).filter(function (v, i, s) {
return s.indexOf(v) === i;
}) // Only unique error strings
.join('\n');
}
//
// ModifyError - thrown in WriteableCollection.modify()
// Specific constructor because it contains members failures and failedKeys.
//
function ModifyError(msg, failures, successCount, failedKeys) {
this._e = getErrorWithStack();
this.failures = failures;
this.failedKeys = failedKeys;
this.successCount = successCount;
}
derive(ModifyError).from(DexieError);
function BulkError(msg, failures) {
this._e = getErrorWithStack();
this.name = "BulkError";
this.failures = failures;
this.message = getMultiErrorMessage(msg, failures);
}
derive(BulkError).from(DexieError);
//
//
// Dynamically generate error names and exception classes based
// on the names in errorList.
//
//
// Map of {ErrorName -> ErrorName + "Error"}
var errnames = errorList.reduce(function (obj, name) {
return obj[name] = name + "Error", obj;
}, {});
// Need an alias for DexieError because we're gonna create subclasses with the same name.
var BaseException = DexieError;
// Map of {ErrorName -> exception constructor}
var exceptions = errorList.reduce(function (obj, name) {
// Let the name be "DexieError" because this name may
// be shown in call stack and when debugging. DexieError is
// the most true name because it derives from DexieError,
// and we cannot change Function.name programatically without
// dynamically create a Function object, which would be considered
// 'eval-evil'.
var fullName = name + "Error";
function DexieError(msgOrInner, inner) {
this._e = getErrorWithStack();
this.name = fullName;
if (!msgOrInner) {
this.message = defaultTexts[name] || fullName;
this.inner = null;
} else if (typeof msgOrInner === 'string') {
this.message = msgOrInner;
this.inner = inner || null;
} else if (typeof msgOrInner === 'object') {
this.message = msgOrInner.name + ' ' + msgOrInner.message;
this.inner = msgOrInner;
}
}
derive(DexieError).from(BaseException);
obj[name] = DexieError;
return obj;
}, {});
// Use ECMASCRIPT standard exceptions where applicable:
exceptions.Syntax = SyntaxError;
exceptions.Type = TypeError;
exceptions.Range = RangeError;
var exceptionMap = idbDomErrorNames.reduce(function (obj, name) {
obj[name + "Error"] = exceptions[name];
return obj;
}, {});
function mapError(domError, message) {
if (!domError || domError instanceof DexieError || domError instanceof TypeError || domError instanceof SyntaxError || !domError.name || !exceptionMap[domError.name]) return domError;
var rv = new exceptionMap[domError.name](message || domError.message, domError);
if ("stack" in domError) {
// Derive stack from inner exception if it has a stack
setProp(rv, "stack", { get: function () {
return this.inner.stack;
} });
}
return rv;
}
var fullNameExceptions = errorList.reduce(function (obj, name) {
if (["Syntax", "Type", "Range"].indexOf(name) === -1) obj[name + "Error"] = exceptions[name];
return obj;
}, {});
fullNameExceptions.ModifyError = ModifyError;
fullNameExceptions.DexieError = DexieError;
fullNameExceptions.BulkError = BulkError;
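// A hedged sketch of how the generated exception classes are typically used; `somePromise`
// is hypothetical, and the name/constructor catch forms are defined on Promise.prototype.catch below:
//
//   throw new exceptions.Schema("Primary key cannot be multi-valued");  // err.name === "SchemaError"
//   somePromise.catch('SchemaError', function (e) { /* catch by error name */ });
//   somePromise.catch(TypeError, function (e) { /* catch by constructor */ });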
function Events(ctx) {
var evs = {};
var rv = function (eventName, subscriber) {
if (subscriber) {
// Subscribe. If additional arguments beyond the subscriber were provided, forward them as well.
var i = arguments.length,
args = new Array(i - 1);
while (--i) {
args[i - 1] = arguments[i];
}evs[eventName].subscribe.apply(null, args);
return ctx;
} else if (typeof eventName === 'string') {
// Return interface allowing to fire or unsubscribe from event
return evs[eventName];
}
};
rv.addEventType = add;
for (var i = 1, l = arguments.length; i < l; ++i) {
add(arguments[i]);
}
return rv;
function add(eventName, chainFunction, defaultFunction) {
if (typeof eventName === 'object') return addConfiguredEvents(eventName);
if (!chainFunction) chainFunction = reverseStoppableEventChain;
if (!defaultFunction) defaultFunction = nop;
var context = {
subscribers: [],
fire: defaultFunction,
subscribe: function (cb) {
if (context.subscribers.indexOf(cb) === -1) {
context.subscribers.push(cb);
context.fire = chainFunction(context.fire, cb);
}
},
unsubscribe: function (cb) {
context.subscribers = context.subscribers.filter(function (fn) {
return fn !== cb;
});
context.fire = context.subscribers.reduce(chainFunction, defaultFunction);
}
};
evs[eventName] = rv[eventName] = context;
return context;
}
function addConfiguredEvents(cfg) {
// events(this, {reading: [functionChain, nop]});
keys(cfg).forEach(function (eventName) {
var args = cfg[eventName];
if (isArray(args)) {
add(eventName, cfg[eventName][0], cfg[eventName][1]);
} else if (args === 'asap') {
// Rather than approaching event subscription using a functional approach, we here do it in a for-loop where each subscriber is executed in its own stack,
// so that any exception that occurs won't disturb the initiator and won't necessarily be caught and forgotten.
var context = add(eventName, mirror, function fire() {
// Optimization-safe cloning of arguments into args.
var i = arguments.length,
args = new Array(i);
while (i--) {
args[i] = arguments[i];
} // Call each subscriber:
context.subscribers.forEach(function (fn) {
asap(function fireEvent() {
fn.apply(null, args);
});
});
});
} else throw new exceptions.InvalidArgument("Invalid event config");
});
}
}
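// A minimal, hypothetical sketch of how Events() is used:
//
//   var ev = Events(null, "opened", { closed: 'asap' });
//   function onOpened() { console.log("opened fired"); }
//   ev("opened", onOpened);          // subscribe (returns the ctx passed to Events())
//   ev.opened.fire();                // invoke all subscribers via the event chain
//   ev.opened.unsubscribe(onOpened); // remove the subscriber again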
//
// Promise Class for Dexie library
//
// I started out writing this Promise class by copying promise-light (https://github.com/taylorhakes/promise-light) by
// https://github.com/taylorhakes - an A+ and ECMASCRIPT 6 compliant Promise implementation.
//
// Modifications were needed to support indexedDB because it won't accept setTimeout()
// (see discussion: https://github.com/promises-aplus/promises-spec/issues/45).
//
// This implementation will not use setTimeout or setImmediate when it's not needed. The behavior is 100% Promise/A+ compliant since
// the caller of new Promise() can be certain that the promise won't be triggered on the lines following construction of the promise.
//
// In previous versions this was fixed by not calling setTimeout when knowing that the resolve() or reject() came from another
// tick. In Dexie v1.4.0, I've rewritten the Promise class entirely. Just some fragments of promise-light are left. I use
// another strategy now that simplifies everything a lot: always execute callbacks in a new tick, but have our own microTick
// engine that is used instead of setImmediate() or setTimeout().
// The Promise class has also been optimized a lot with inspiration from bluebird - to avoid closures as much as possible.
// Also with inspiration from bluebird: asynchronous stacks in debug mode.
//
// Specific non-standard features of this Promise class:
// * Async static context support (Promise.PSD)
// * Promise.follow() method built upon PSD, that allows user to track all promises created from current stack frame
// and below + all promises that those promises creates or awaits.
// * Detect any unhandled promise in a PSD-scope (PSD.onunhandled).
//
// David Fahlander, https://github.com/dfahlander
//
// Just a pointer that only this module knows about.
// Used in Promise constructor to emulate a private constructor.
var INTERNAL = {};
// Async stacks (long stacks) must not grow infinitely.
var LONG_STACKS_CLIP_LIMIT = 100;
var MAX_LONG_STACKS = 20;
var stack_being_generated = false;
/* The default "nextTick" function used only for the very first promise in a promise chain.
As soon as the promise is resolved or rejected, all next tasks will be executed in micro ticks
emulated in this module. For indexedDB compatibility, this means that every method needs to
execute at least one promise before doing an indexedDB operation. Dexie will always call
db.ready().then() for every operation to make sure the indexedDB event is started in an
emulated micro tick.
*/
var schedulePhysicalTick = typeof setImmediate === 'undefined' ?
// No support for setImmediate. No worry, setTimeout is only called
// one time. Every tick that follows will be our emulated micro tick.
// Could have used setTimeout.bind(null, 0, physicalTick) if it weren't for a bug in FF13 and below
function () {
setTimeout(physicalTick, 0);
} :
// setImmediate supported. Modern platform. Also supports Function.bind().
setImmediate.bind(null, physicalTick);
// Configurable through Promise.scheduler.
// Don't export because it would be unsafe to let unknown
// code call it unless they do try..catch within their callback.
// This function can be retrieved through getter of Promise.scheduler though,
// but users must not set Promise.scheduler to a function that throws!
var asap$1 = function (callback, args) {
microtickQueue.push([callback, args]);
if (needsNewPhysicalTick) {
schedulePhysicalTick();
needsNewPhysicalTick = false;
}
};
var isOutsideMicroTick = true;
var needsNewPhysicalTick = true;
var unhandledErrors = [];
var rejectingErrors = [];
var currentFulfiller = null;
var rejectionMapper = mirror;
// Remove in next major when removing error mapping of DOMErrors and DOMExceptions
var globalPSD = {
global: true,
ref: 0,
unhandleds: [],
onunhandled: globalError,
//env: null, // Will be set whenever leaving a scope using wrappers.snapshot()
finalize: function () {
this.unhandleds.forEach(function (uh) {
try {
globalError(uh[0], uh[1]);
} catch (e) {}
});
}
};
var PSD = globalPSD;
var microtickQueue = []; // Callbacks to call in this or next physical tick.
var numScheduledCalls = 0; // Number of listener-calls left to do in this physical tick.
var tickFinalizers = []; // Finalizers to call when there are no more async calls scheduled within current physical tick.
// Wrappers are not being used yet. Their framework is functioning and can be used
// to replace environment during a PSD scope (a.k.a. 'zone').
/* **KEEP** export var wrappers = (() => {
var wrappers = [];
return {
snapshot: () => {
var i = wrappers.length,
result = new Array(i);
while (i--) result[i] = wrappers[i].snapshot();
return result;
},
restore: values => {
var i = wrappers.length;
while (i--) wrappers[i].restore(values[i]);
},
wrap: () => wrappers.map(w => w.wrap()),
add: wrapper => {
wrappers.push(wrapper);
}
};
})();
*/
function Promise(fn) {
if (typeof this !== 'object') throw new TypeError('Promises must be constructed via new');
this._listeners = [];
this.onuncatched = nop; // Deprecate in next major. Not needed. Better to use global error handler.
// A library may set `promise._lib = true;` after promise is created to make resolve() or reject()
// execute the microtask engine implicitly within the call to resolve() or reject().
// To remain A+ compliant, a library must only set `_lib=true` if it can guarantee that the stack
// only contains library code when calling resolve() or reject().
// RULE OF THUMB: ONLY set _lib = true for promises explicitly resolving/rejecting directly from
// global scope (event handler, timer etc)!
this._lib = false;
// Current async scope
var psd = this._PSD = PSD;
if (debug) {
this._stackHolder = getErrorWithStack();
this._prev = null;
this._numPrev = 0; // Number of previous promises (for long stacks)
linkToPreviousPromise(this, currentFulfiller);
}
if (typeof fn !== 'function') {
if (fn !== INTERNAL) throw new TypeError('Not a function');
// Private constructor (INTERNAL, state, value).
// Used internally by Promise.resolve() and Promise.reject().
this._state = arguments[1];
this._value = arguments[2];
if (this._state === false) handleRejection(this, this._value); // Map error, set stack and addPossiblyUnhandledError().
return;
}
this._state = null; // null (=pending), false (=rejected) or true (=resolved)
this._value = null; // error or result
++psd.ref; // Refcounting current scope
executePromiseTask(this, fn);
}
props(Promise.prototype, {
then: function (onFulfilled, onRejected) {
var _this = this;
var rv = new Promise(function (resolve, reject) {
propagateToListener(_this, new Listener(onFulfilled, onRejected, resolve, reject));
});
debug && (!this._prev || this._state === null) && linkToPreviousPromise(rv, this);
return rv;
},
_then: function (onFulfilled, onRejected) {
// A slightly smaller version of then() that doesn't have to create a resulting promise.
propagateToListener(this, new Listener(null, null, onFulfilled, onRejected));
},
catch: function (onRejected) {
if (arguments.length === 1) return this.then(null, onRejected);
// First argument is the Error type to catch
var type = arguments[0],
handler = arguments[1];
return typeof type === 'function' ? this.then(null, function (err) {
return(
// Catching errors by its constructor type (similar to java / c++ / c#)
// Sample: promise.catch(TypeError, function (e) { ... });
err instanceof type ? handler(err) : PromiseReject(err)
);
}) : this.then(null, function (err) {
return(
// Catching errors by the error.name property. Makes sense for indexedDB where error type
// is always DOMError but where e.name tells the actual error type.
// Sample: promise.catch('ConstraintError', function (e) { ... });
err && err.name === type ? handler(err) : PromiseReject(err)
);
});
},
finally: function (onFinally) {
return this.then(function (value) {
onFinally();
return value;
}, function (err) {
onFinally();
return PromiseReject(err);
});
},
// Deprecate in next major. Needed only for db.on.error.
uncaught: function (uncaughtHandler) {
var _this2 = this;
// Be backward compatible and use "onuncatched" as the event name on this.
// Handle multiple subscribers through reverseStoppableEventChain(). If a handler returns `false`, bubbling stops.
this.onuncatched = reverseStoppableEventChain(this.onuncatched, uncaughtHandler);
// In case caller does this on an already rejected promise, assume caller wants to point out the error to this promise and not
// a previous promise. Reason: the previous promise may lack an onuncatched handler.
if (this._state === false && unhandledErrors.indexOf(this) === -1) {
// Replace the unhandled error's destination promise with this one!
unhandledErrors.some(function (p, i, l) {
return p._value === _this2._value && (l[i] = _this2);
});
// Actually, we do this because we need to support db.on.error() correctly during db.open(). If we deprecate db.on.error, we could
// remove this piece of code as well as the onuncatched and uncaught() method.
}
return this;
},
stack: {
get: function () {
if (this._stack) return this._stack;
try {
stack_being_generated = true;
var stacks = getStack(this, [], MAX_LONG_STACKS);
var stack = stacks.join("\nFrom previous: ");
if (this._state !== null) this._stack = stack; // Stack may be updated on reject.
return stack;
} finally {
stack_being_generated = false;
}
}
}
});
function Listener(onFulfilled, onRejected, resolve, reject) {
this.onFulfilled = typeof onFulfilled === 'function' ? onFulfilled : null;
this.onRejected = typeof onRejected === 'function' ? onRejected : null;
this.resolve = resolve;
this.reject = reject;
this.psd = PSD;
}
// Promise Static Properties
props(Promise, {
all: function () {
var values = getArrayOf.apply(null, arguments); // Supports iterables, implicit arguments and array-like.
return new Promise(function (resolve, reject) {
if (values.length === 0) resolve([]);
var remaining = values.length;
values.forEach(function (a, i) {
return Promise.resolve(a).then(function (x) {
values[i] = x;
if (! --remaining) resolve(values);
}, reject);
});
});
},
resolve: function (value) {
if (value && typeof value.then === 'function') return value;
return new Promise(INTERNAL, true, value);
},
reject: PromiseReject,
race: function () {
var values = getArrayOf.apply(null, arguments);
return new Promise(function (resolve, reject) {
values.map(function (value) {
return Promise.resolve(value).then(resolve, reject);
});
});
},
PSD: {
get: function () {
return PSD;
},
set: function (value) {
return PSD = value;
}
},
newPSD: newScope,
usePSD: usePSD,
scheduler: {
get: function () {
return asap$1;
},
set: function (value) {
asap$1 = value;
}
},
rejectionMapper: {
get: function () {
return rejectionMapper;
},
set: function (value) {
rejectionMapper = value;
} // Map reject failures
},
follow: function (fn) {
return new Promise(function (resolve, reject) {
return newScope(function (resolve, reject) {
var psd = PSD;
psd.unhandleds = []; // For unhandled standard- or 3rd party Promises. Checked at psd.finalize()
psd.onunhandled = reject; // Triggered directly on unhandled promises of this library.
psd.finalize = callBoth(function () {
var _this3 = this;
// Unhandled standard or 3rd party promises are put in PSD.unhandleds and
// examined upon scope completion while unhandled rejections in this Promise
// will trigger directly through psd.onunhandled
run_at_end_of_this_or_next_physical_tick(function () {
_this3.unhandleds.length === 0 ? resolve() : reject(_this3.unhandleds[0]);
});
}, psd.finalize);
fn();
}, resolve, reject);
});
},
on: Events(null, { "error": [reverseStoppableEventChain, defaultErrorHandler] // Default to defaultErrorHandler
})
});
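// A hedged usage sketch of the non-standard statics above; the work spawned inside the
// callbacks is hypothetical:
//
//   Promise.newPSD(function () {
//       // Runs in a new async scope (zone). PSD is inherited by every promise
//       // created from this stack frame and below.
//       Promise.PSD.myContextValue = 42;
//   });
//
//   Promise.follow(function () {
//       doSomeAsyncWork(); // hypothetical; may create further promises
//   }).then(function () {
//       // Resolves once every promise created within the scope has settled without
//       // unhandled rejections; rejects with the first unhandled rejection otherwise.
//   });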
/**
* Take a potentially misbehaving resolver function and make sure
* onFulfilled and onRejected are only called once.
*
* Makes no guarantees about asynchrony.
*/
function executePromiseTask(promise, fn) {
// Promise Resolution Procedure:
// https://github.com/promises-aplus/promises-spec#the-promise-resolution-procedure
try {
fn(function (value) {
if (promise._state !== null) return;
if (value === promise) throw new TypeError('A promise cannot be resolved with itself.');
var shouldExecuteTick = promise._lib && beginMicroTickScope();
if (value && typeof value.then === 'function') {
executePromiseTask(promise, function (resolve, reject) {
value instanceof Promise ? value._then(resolve, reject) : value.then(resolve, reject);
});
} else {
promise._state = true;
promise._value = value;
propagateAllListeners(promise);
}
if (shouldExecuteTick) endMicroTickScope();
}, handleRejection.bind(null, promise)); // If Function.bind is not supported. Exception is handled in catch below
} catch (ex) {
handleRejection(promise, ex);
}
}
function handleRejection(promise, reason) {
rejectingErrors.push(reason);
if (promise._state !== null) return;
var shouldExecuteTick = promise._lib && beginMicroTickScope();
reason = rejectionMapper(reason);
promise._state = false;
promise._value = reason;
debug && reason !== null && !reason._promise && typeof reason === 'object' && tryCatch(function () {
var origProp = getPropertyDescriptor(reason, "stack");
reason._promise = promise;
setProp(reason, "stack", {
get: function () {
return stack_being_generated ? origProp && (origProp.get ? origProp.get.apply(reason) : origProp.value) : promise.stack;
}
});
});
// Add the failure to a list of possibly uncaught errors
addPossiblyUnhandledError(promise);
propagateAllListeners(promise);
if (shouldExecuteTick) endMicroTickScope();
}
function propagateAllListeners(promise) {
//debug && linkToPreviousPromise(promise);
var listeners = promise._listeners;
promise._listeners = [];
for (var i = 0, len = listeners.length; i < len; ++i) {
propagateToListener(promise, listeners[i]);
}
var psd = promise._PSD;
--psd.ref || psd.finalize(); // if psd.ref reaches zero, call psd.finalize();
if (numScheduledCalls === 0) {
// If numScheduledCalls is 0, it means that our stack is not in a callback of a scheduled call,
// and that no deferreds were listening to this rejection or success.
// Since there is a risk that our stack can contain application code that may
// do stuff after this code is finished that may generate new calls, we cannot
// call finalizers here.
++numScheduledCalls;
asap$1(function () {
if (--numScheduledCalls === 0) finalizePhysicalTick(); // Will detect unhandled errors
}, []);
}
}
function propagateToListener(promise, listener) {
if (promise._state === null) {
promise._listeners.push(listener);
return;
}
var cb = promise._state ? listener.onFulfilled : listener.onRejected;
if (cb === null) {
// This Listener doesn't have a handler for the event being triggered (onFulfilled or onRejected), so let's forward the event to any eventual listeners on the Promise instance returned by then() or catch()
return (promise._state ? listener.resolve : listener.reject)(promise._value);
}
var psd = listener.psd;
++psd.ref;
++numScheduledCalls;
asap$1(callListener, [cb, promise, listener]);
}
function callListener(cb, promise, listener) {
var outerScope = PSD;
var psd = listener.psd;
try {
if (psd !== outerScope) {
// **KEEP** outerScope.env = wrappers.snapshot(); // Snapshot outerScope's environment.
PSD = psd;
// **KEEP** wrappers.restore(psd.env); // Restore PSD's environment.
}
// Set static variable currentFulfiller to the promise that is being fulfilled,
// so that we connect the chain of promises (for long stacks support)
currentFulfiller = promise;
// Call the callback and resolve our listener with its return value.
var value = promise._value,
ret;
if (promise._state) {
ret = cb(value);
} else {
if (rejectingErrors.length) rejectingErrors = [];
ret = cb(value);
if (rejectingErrors.indexOf(value) === -1) markErrorAsHandled(promise); // Callback didn't do Promise.reject(err) nor reject(err) onto another promise.
}
listener.resolve(ret);
} catch (e) {
// Exception thrown in callback. Reject our listener.
listener.reject(e);
} finally {
// Restore PSD, env and currentFulfiller.
if (psd !== outerScope) {
PSD = outerScope;
// **KEEP** wrappers.restore(outerScope.env); // Restore outerScope's environment
}
currentFulfiller = null;
if (--numScheduledCalls === 0) finalizePhysicalTick();
--psd.ref || psd.finalize();
}
}
function getStack(promise, stacks, limit) {
if (stacks.length === limit) return stacks;
var stack = "";
if (promise._state === false) {
var failure = promise._value,
errorName,
message;
if (failure != null) {
errorName = failure.name || "Error";
message = failure.message || failure;
stack = prettyStack(failure, 0);
} else {
errorName = failure; // If error is undefined or null, show that.
message = "";
}
stacks.push(errorName + (message ? ": " + message : "") + stack);
}
if (debug) {
stack = prettyStack(promise._stackHolder, 2);
if (stack && stacks.indexOf(stack) === -1) stacks.push(stack);
if (promise._prev) getStack(promise._prev, stacks, limit);
}
return stacks;
}
function linkToPreviousPromise(promise, prev) {
// Support long stacks by linking to previous completed promise.
var numPrev = prev ? prev._numPrev + 1 : 0;
if (numPrev < LONG_STACKS_CLIP_LIMIT) {
// Prevent infinite Promise loops from creating an infinitely long, memory-consuming "tail".
promise._prev = prev;
promise._numPrev = numPrev;
}
}
/* The callback to schedule with setImmediate() or setTimeout().
It runs a virtual microtick and executes any callback registered in microtickQueue.
*/
function physicalTick() {
beginMicroTickScope() && endMicroTickScope();
}
function beginMicroTickScope() {
var wasRootExec = isOutsideMicroTick;
isOutsideMicroTick = false;
needsNewPhysicalTick = false;
return wasRootExec;
}
/* Executes micro-ticks without doing try..catch.
This is possible because we only use this internally and
the registered functions are exception-safe (they do try..catch
internally before calling any external method). If registering
functions in the microtickQueue that are not exception-safe, this
would destroy the framework and make it unstable. So we don't export
our asap method.
*/
function endMicroTickScope() {
var callbacks, i, l;
do {
while (microtickQueue.length > 0) {
callbacks = microtickQueue;
microtickQueue = [];
l = callbacks.length;
for (i = 0; i < l; ++i) {
var item = callbacks[i];
item[0].apply(null, item[1]);
}
}
} while (microtickQueue.length > 0);
isOutsideMicroTick = true;
needsNewPhysicalTick = true;
}
function finalizePhysicalTick() {
var unhandledErrs = unhandledErrors;
unhandledErrors = [];
unhandledErrs.forEach(function (p) {
p._PSD.onunhandled.call(null, p._value, p);
});
var finalizers = tickFinalizers.slice(0); // Clone first because finalizer may remove itself from list.
var i = finalizers.length;
while (i) {
finalizers[--i]();
}
}
function run_at_end_of_this_or_next_physical_tick(fn) {
function finalizer() {
fn();
tickFinalizers.splice(tickFinalizers.indexOf(finalizer), 1);
}
tickFinalizers.push(finalizer);
++numScheduledCalls;
asap$1(function () {
if (--numScheduledCalls === 0) finalizePhysicalTick();
}, []);
}
function addPossiblyUnhandledError(promise) {
// Only add to unhandledErrors if not already there. The first one to add to this list
// will be upon the first rejection so that the root cause (first promise in the
// rejection chain) is the one listed.
if (!unhandledErrors.some(function (p) {
return p._value === promise._value;
})) unhandledErrors.push(promise);
}
function markErrorAsHandled(promise) {
// Called when a reject handler is actually being called.
// Search in unhandledErrors for any promise whose _value is this promise's _value (the list
// contains only rejected promises, and only one item per error).
var i = unhandledErrors.length;
while (i) {
if (unhandledErrors[--i]._value === promise._value) {
// Found a promise that failed with this same error object pointer,
// Remove that since there is a listener that actually takes care of it.
unhandledErrors.splice(i, 1);
return;
}
}
}
// By default, log uncaught errors to the console
function defaultErrorHandler(e) {
console.warn('Unhandled rejection: ' + (e.stack || e));
}
function PromiseReject(reason) {
return new Promise(INTERNAL, false, reason);
}
function wrap(fn, errorCatcher) {
var psd = PSD;
return function () {
var wasRootExec = beginMicroTickScope(),
outerScope = PSD;
try {
if (outerScope !== psd) {
// **KEEP** outerScope.env = wrappers.snapshot(); // Snapshot outerScope's environment
PSD = psd;
// **KEEP** wrappers.restore(psd.env); // Restore PSD's environment.
}
return fn.apply(this, arguments);
} catch (e) {
errorCatcher && errorCatcher(e);
} finally {
if (outerScope !== psd) {
PSD = outerScope;
// **KEEP** wrappers.restore(outerScope.env); // Restore outerScope's environment
}
if (wasRootExec) endMicroTickScope();
}
};
}
function newScope(fn, a1, a2, a3) {
var parent = PSD,
psd = Object.create(parent);
psd.parent = parent;
psd.ref = 0;
psd.global = false;
// **KEEP** psd.env = wrappers.wrap(psd);
// unhandleds and onunhandled should not be specifically set here.
// Leave them on parent prototype.
// unhandleds.push(err) will push to parent's prototype
// onunhandled() will call parents onunhandled (with this scope's this-pointer though!)
++parent.ref;
psd.finalize = function () {
--this.parent.ref || this.parent.finalize();
};
var rv = usePSD(psd, fn, a1, a2, a3);
if (psd.ref === 0) psd.finalize();
return rv;
}
function usePSD(psd, fn, a1, a2, a3) {
var outerScope = PSD;
try {
if (psd !== outerScope) {
// **KEEP** outerScope.env = wrappers.snapshot(); // snapshot outerScope's environment.
PSD = psd;
// **KEEP** wrappers.restore(psd.env); // Restore PSD's environment.
}
return fn(a1, a2, a3);
} finally {
if (psd !== outerScope) {
PSD = outerScope;
// **KEEP** wrappers.restore(outerScope.env); // Restore outerScope's environment.
}
}
}
function globalError(err, promise) {
var rv;
try {
rv = promise.onuncatched(err);
} catch (e) {}
if (rv !== false) try {
Promise.on.error.fire(err, promise); // TODO: Deprecated and use same global handler as bluebird.
} catch (e) {}
}
/* **KEEP**
export function wrapPromise(PromiseClass) {
var proto = PromiseClass.prototype;
var origThen = proto.then;
wrappers.add({
snapshot: () => proto.then,
restore: value => {proto.then = value;},
wrap: () => patchedThen
});
function patchedThen (onFulfilled, onRejected) {
var promise = this;
var onFulfilledProxy = wrap(function(value){
var rv = value;
if (onFulfilled) {
rv = onFulfilled(rv);
if (rv && typeof rv.then === 'function') rv.then(); // Intercept that promise as well.
}
--PSD.ref || PSD.finalize();
return rv;
});
var onRejectedProxy = wrap(function(err){
promise._$err = err;
var unhandleds = PSD.unhandleds;
var idx = unhandleds.length,
rv;
while (idx--) if (unhandleds[idx]._$err === err) break;
if (onRejected) {
if (idx !== -1) unhandleds.splice(idx, 1); // Mark as handled.
rv = onRejected(err);
if (rv && typeof rv.then === 'function') rv.then(); // Intercept that promise as well.
} else {
if (idx === -1) unhandleds.push(promise);
rv = PromiseClass.reject(err);
rv._$nointercept = true; // Prohibit eternal loop.
}
--PSD.ref || PSD.finalize();
return rv;
});
if (this._$nointercept) return origThen.apply(this, arguments);
++PSD.ref;
return origThen.call(this, onFulfilledProxy, onRejectedProxy);
}
}
// Global Promise wrapper
if (_global.Promise) wrapPromise(_global.Promise);
*/
doFakeAutoComplete(function () {
// Simplify the job for VS Intellisense. This piece of code is one of the keys to the new marvellous intellisense support in Dexie.
asap$1 = function (fn, args) {
setTimeout(function () {
fn.apply(null, args);
}, 0);
};
});
var maxString = String.fromCharCode(65535);
var maxKey = function () {
try {
IDBKeyRange.only([[]]);return [[]];
} catch (e) {
return maxString;
}
}();
var INVALID_KEY_ARGUMENT = "Invalid key provided. Keys must be of type string, number, Date or Array<string | number | Date>.";
var STRING_EXPECTED = "String expected.";
var connections = [];
var isIEOrEdge = typeof navigator !== 'undefined' && /(MSIE|Trident|Edge)/.test(navigator.userAgent);
var hasIEDeleteObjectStoreBug = isIEOrEdge;
var hangsOnDeleteLargeKeyRange = isIEOrEdge;
var dexieStackFrameFilter = function (frame) {
return !/(dexie\.js|dexie\.min\.js)/.test(frame);
};
setDebug(debug, dexieStackFrameFilter);
function Dexie(dbName, options) {
/// <param name="options" type="Object" optional="true">Specify only if you wich to control which addons that should run on this instance</param>
var deps = Dexie.dependencies;
var opts = extend({
// Default Options
addons: Dexie.addons, // Pick statically registered addons by default
autoOpen: true, // Don't require db.open() explicitly.
indexedDB: deps.indexedDB, // Backend IndexedDB api. Default to IDBShim or browser env.
IDBKeyRange: deps.IDBKeyRange // Backend IDBKeyRange api. Default to IDBShim or browser env.
}, options);
var addons = opts.addons,
autoOpen = opts.autoOpen,
indexedDB = opts.indexedDB,
IDBKeyRange = opts.IDBKeyRange;
var globalSchema = this._dbSchema = {};
var versions = [];
var dbStoreNames = [];
var allTables = {};
///<var type="IDBDatabase" />
var idbdb = null; // Instance of IDBDatabase
var dbOpenError = null;
var isBeingOpened = false;
var openComplete = false;
var READONLY = "readonly",
READWRITE = "readwrite";
var db = this;
var dbReadyResolve,
dbReadyPromise = new Promise(function (resolve) {
dbReadyResolve = resolve;
}),
cancelOpen,
openCanceller = new Promise(function (_, reject) {
cancelOpen = reject;
});
var autoSchema = true;
var hasNativeGetDatabaseNames = !!getNativeGetDatabaseNamesFn(indexedDB),
hasGetAll;
function init() {
// Default subscribers to "versionchange" and "blocked".
// Can be overridden by custom handlers. If custom handlers return false, these default
// behaviours will be prevented.
db.on("versionchange", function (ev) {
// Default behavior for versionchange event is to close database connection.
// Caller can override this behavior by doing db.on("versionchange", function(){ return false; });
// Let's not block the other window from making its delete() or open() call.
// NOTE! This event is never fired in IE,Edge or Safari.
if (ev.newVersion > 0) console.warn('Another connection wants to upgrade database \'' + db.name + '\'. Closing db now to resume the upgrade.');else console.warn('Another connection wants to delete database \'' + db.name + '\'. Closing db now to resume the delete request.');
db.close();
// In many web applications, it would be recommended to force window.reload()
// when this event occurs. To do that, subscribe to the versionchange event
// and call window.location.reload(true) if ev.newVersion > 0 (not a deletion)
// The reason for this is that your current web app obviously has old schema code that needs
// to be updated. Another window got a newer version of the app and needs to upgrade DB but
// your window is blocking it unless we close it here.
});
db.on("blocked", function (ev) {
if (!ev.newVersion || ev.newVersion < ev.oldVersion) console.warn('Dexie.delete(\'' + db.name + '\') was blocked');else console.warn('Upgrade \'' + db.name + '\' blocked by other connection holding version ' + ev.oldVersion / 10);
});
}
//
//
//
// ------------------------- Versioning Framework---------------------------
//
//
//
this.version = function (versionNumber) {
/// <param name="versionNumber" type="Number"></param>
/// <returns type="Version"></returns>
if (idbdb || isBeingOpened) throw new exceptions.Schema("Cannot add version when database is open");
this.verno = Math.max(this.verno, versionNumber);
var versionInstance = versions.filter(function (v) {
return v._cfg.version === versionNumber;
})[0];
if (versionInstance) return versionInstance;
versionInstance = new Version(versionNumber);
versions.push(versionInstance);
versions.sort(lowerVersionFirst);
return versionInstance;
};
function Version(versionNumber) {
this._cfg = {
version: versionNumber,
storesSource: null,
dbschema: {},
tables: {},
contentUpgrade: null
};
this.stores({}); // Derive earlier schemas by default.
}
extend(Version.prototype, {
stores: function (stores) {
/// <summary>
/// Defines the schema for a particular version
/// </summary>
/// <param name="stores" type="Object">
/// Example: <br/>
/// {users: "id++,first,last,&username,*email", <br/>
/// passwords: "id++,&username"}<br/>
/// <br/>
/// Syntax: {Table: "[primaryKey][++],[&][*]index1,[&][*]index2,..."}<br/><br/>
/// Special characters:<br/>
/// "&" means unique key, <br/>
/// "*" means value is multiEntry, <br/>
/// "++" means auto-increment and only applicable for primary key <br/>
/// </param>
this._cfg.storesSource = this._cfg.storesSource ? extend(this._cfg.storesSource, stores) : stores;
// Derive stores from earlier versions if they are not explicitely specified as null or a new syntax.
var storesSpec = {};
versions.forEach(function (version) {
// 'versions' is always sorted by lowest version first.
extend(storesSpec, version._cfg.storesSource);
});
var dbschema = this._cfg.dbschema = {};
this._parseStoresSpec(storesSpec, dbschema);
// Update the latest schema to this version
// Update API
globalSchema = db._dbSchema = dbschema;
removeTablesApi([allTables, db, Transaction.prototype]);
setApiOnPlace([allTables, db, Transaction.prototype, this._cfg.tables], keys(dbschema), READWRITE, dbschema);
dbStoreNames = keys(dbschema);
return this;
},
upgrade: function (upgradeFunction) {
/// <param name="upgradeFunction" optional="true">Function that performs upgrading actions.</param>
var self = this;
fakeAutoComplete(function () {
upgradeFunction(db._createTransaction(READWRITE, keys(self._cfg.dbschema), self._cfg.dbschema)); // BUGBUG: Code completion for the previous version's tables won't appear.
});
this._cfg.contentUpgrade = upgradeFunction;
return this;
},
_parseStoresSpec: function (stores, outSchema) {
keys(stores).forEach(function (tableName) {
if (stores[tableName] !== null) {
var instanceTemplate = {};
var indexes = parseIndexSyntax(stores[tableName]);
var primKey = indexes.shift();
if (primKey.multi) throw new exceptions.Schema("Primary key cannot be multi-valued");
if (primKey.keyPath) setByKeyPath(instanceTemplate, primKey.keyPath, primKey.auto ? 0 : primKey.keyPath);
indexes.forEach(function (idx) {
if (idx.auto) throw new exceptions.Schema("Only primary key can be marked as autoIncrement (++)");
if (!idx.keyPath) throw new exceptions.Schema("Index must have a name and cannot be an empty string");
setByKeyPath(instanceTemplate, idx.keyPath, idx.compound ? idx.keyPath.map(function () {
return "";
}) : "");
});
outSchema[tableName] = new TableSchema(tableName, primKey, indexes, instanceTemplate);
}
});
}
});
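// A hedged sketch of the versioning API that the Version class above enables; database,
// table and field names are hypothetical:
//
//   var db = new Dexie("FriendsDB");
//   db.version(1).stores({ friends: "++id,name,age" });
//   db.version(2).stores({ friends: "++id,name,age,*tags" }).upgrade(function (trans) {
//       return trans.friends.toCollection().modify(function (friend) {
//           friend.tags = []; // initialize the new multiEntry index for existing rows
//       });
//   });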
function runUpgraders(oldVersion, idbtrans, reject) {
var trans = db._createTransaction(READWRITE, dbStoreNames, globalSchema);
trans.create(idbtrans);
trans._completion.catch(reject);
var rejectTransaction = trans._reject.bind(trans);
newScope(function () {
PSD.trans = trans;
if (oldVersion === 0) {
// Create tables:
keys(globalSchema).forEach(function (tableName) {
createTable(idbtrans, tableName, globalSchema[tableName].primKey, globalSchema[tableName].indexes);
});
Promise.follow(function () {
return db.on.populate.fire(trans);
}).catch(rejectTransaction);
} else updateTablesAndIndexes(oldVersion, trans, idbtrans).catch(rejectTransaction);
});
}
function updateTablesAndIndexes(oldVersion, trans, idbtrans) {
// Upgrade version to version, step-by-step from oldest to newest version.
// Each transaction object will contain the table set that was current in that version (but also not-yet-deleted tables from its previous version)
var queue = [];
var oldVersionStruct = versions.filter(function (version) {
return version._cfg.version === oldVersion;
})[0];
if (!oldVersionStruct) throw new exceptions.Upgrade("Dexie specification of currently installed DB version is missing");
globalSchema = db._dbSchema = oldVersionStruct._cfg.dbschema;
var anyContentUpgraderHasRun = false;
var versToRun = versions.filter(function (v) {
return v._cfg.version > oldVersion;
});
versToRun.forEach(function (version) {
/// <param name="version" type="Version"></param>
queue.push(function () {
var oldSchema = globalSchema;
var newSchema = version._cfg.dbschema;
adjustToExistingIndexNames(oldSchema, idbtrans);
adjustToExistingIndexNames(newSchema, idbtrans);
globalSchema = db._dbSchema = newSchema;
var diff = getSchemaDiff(oldSchema, newSchema);
// Add tables
diff.add.forEach(function (tuple) {
createTable(idbtrans, tuple[0], tuple[1].primKey, tuple[1].indexes);
});
// Change tables
diff.change.forEach(function (change) {
if (change.recreate) {
throw new exceptions.Upgrade("Not yet support for changing primary key");
} else {
var store = idbtrans.objectStore(change.name);
// Add indexes
change.add.forEach(function (idx) {
addIndex(store, idx);
});
// Update indexes
change.change.forEach(function (idx) {
store.deleteIndex(idx.name);
addIndex(store, idx);
});
// Delete indexes
change.del.forEach(function (idxName) {
store.deleteIndex(idxName);
});
}
});
if (version._cfg.contentUpgrade) {
anyContentUpgraderHasRun = true;
return Promise.follow(function () {
version._cfg.contentUpgrade(trans);
});
}
});
queue.push(function (idbtrans) {
if (anyContentUpgraderHasRun && !hasIEDeleteObjectStoreBug) {
// Don't delete old tables if the IE bug is present and a content upgrader has run. Leave the tables in the DB for now. This needs to be taken care of.
var newSchema = version._cfg.dbschema;
// Delete old tables
deleteRemovedTables(newSchema, idbtrans);
}
});
});
// Now, create a queue execution engine
function runQueue() {
return queue.length ? Promise.resolve(queue.shift()(trans.idbtrans)).then(runQueue) : Promise.resolve();
}
return runQueue().then(function () {
createMissingTables(globalSchema, idbtrans); // At last, make sure to create any missing tables. (Needed by addons that add stores to DB without specifying version)
});
}
function getSchemaDiff(oldSchema, newSchema) {
var diff = {
del: [], // Array of table names
add: [], // Array of [tableName, newDefinition]
change: [] // Array of {name: tableName, recreate: newDefinition, del: delIndexNames, add: newIndexDefs, change: changedIndexDefs}
};
for (var table in oldSchema) {
if (!newSchema[table]) diff.del.push(table);
}
for (table in newSchema) {
var oldDef = oldSchema[table],
newDef = newSchema[table];
if (!oldDef) {
diff.add.push([table, newDef]);
} else {
var change = {
name: table,
def: newDef,
recreate: false,
del: [],
add: [],
change: []
};
if (oldDef.primKey.src !== newDef.primKey.src) {
// Primary key has changed. Remove and re-add table.
change.recreate = true;
diff.change.push(change);
} else {
// Same primary key. Just find out what differs:
var oldIndexes = oldDef.idxByName;
var newIndexes = newDef.idxByName;
for (var idxName in oldIndexes) {
if (!newIndexes[idxName]) change.del.push(idxName);
}
for (idxName in newIndexes) {
var oldIdx = oldIndexes[idxName],
newIdx = newIndexes[idxName];
if (!oldIdx) change.add.push(newIdx);else if (oldIdx.src !== newIdx.src) change.change.push(newIdx);
}
if (change.del.length > 0 || change.add.length > 0 || change.change.length > 0) {
diff.change.push(change);
}
}
}
}
return diff;
}
function createTable(idbtrans, tableName, primKey, indexes) {
/// <param name="idbtrans" type="IDBTransaction"></param>
var store = idbtrans.db.createObjectStore(tableName, primKey.keyPath ? { keyPath: primKey.keyPath, autoIncrement: primKey.auto } : { autoIncrement: primKey.auto });
indexes.forEach(function (idx) {
addIndex(store, idx);
});
return store;
}
function createMissingTables(newSchema, idbtrans) {
keys(newSchema).forEach(function (tableName) {
if (!idbtrans.db.objectStoreNames.contains(tableName)) {
createTable(idbtrans, tableName, newSchema[tableName].primKey, newSchema[tableName].indexes);
}
});
}
function deleteRemovedTables(newSchema, idbtrans) {
for (var i = 0; i < idbtrans.db.objectStoreNames.length; ++i) {
var storeName = idbtrans.db.objectStoreNames[i];
if (newSchema[storeName] == null) {
idbtrans.db.deleteObjectStore(storeName);
}
}
}
function addIndex(store, idx) {
store.createIndex(idx.name, idx.keyPath, { unique: idx.unique, multiEntry: idx.multi });
}
function dbUncaught(err) {
return db.on.error.fire(err);
}
//
//
// Dexie Protected API
//
//
this._allTables = allTables;
this._tableFactory = function createTable(mode, tableSchema) {
/// <param name="tableSchema" type="TableSchema"></param>
if (mode === READONLY) return new Table(tableSchema.name, tableSchema, Collection);else return new WriteableTable(tableSchema.name, tableSchema);
};
this._createTransaction = function (mode, storeNames, dbschema, parentTransaction) {
return new Transaction(mode, storeNames, dbschema, parentTransaction);
};
/* Generate a temporary transaction when db operations are done outside a transaction scope.
*/
function tempTransaction(mode, storeNames, fn) {
// Last argument is "writeLocked". But this doesn't apply to one-shot direct db operations, so we ignore it.
if (!openComplete && !PSD.letThrough) {
if (!isBeingOpened) {
if (!autoOpen) return rejection(new exceptions.DatabaseClosed(), dbUncaught);
db.open().catch(nop); // Open in background. If it fails, it will be caught by the final promise anyway.
}
return dbReadyPromise.then(function () {
return tempTransaction(mode, storeNames, fn);
});
} else {
var trans = db._createTransaction(mode, storeNames, globalSchema);
return trans._promise(mode, function (resolve, reject) {
newScope(function () {
// OPTIMIZATION POSSIBLE? newScope() not needed because it's already done in _promise.
PSD.trans = trans;
fn(resolve, reject, trans);
});
}).then(function (result) {
// Instead of resolving value directly, wait with resolving it until transaction has completed.
// Otherwise the data would not be in the DB if requesting it in the then() operation.
// Specifically, to ensure that the following expression will work:
//
// db.friends.put({name: "Arne"}).then(function () {
// db.friends.where("name").equals("Arne").count(function(count) {
// assert (count === 1);
// });
// });
//
return trans._completion.then(function () {
return result;
});
}); /*.catch(err => { // Don't do this as of now. It would affect bulk- and modify methods in a way that could be more intuitive. But wait! Maybe change in next major.
trans._reject(err);
return rejection(err);
});*/
}
}
this._whenReady = function (fn) {
return new Promise(fake || openComplete || PSD.letThrough ? fn : function (resolve, reject) {
if (!isBeingOpened) {
if (!autoOpen) {
reject(new exceptions.DatabaseClosed());
return;
}
db.open().catch(nop); // Open in background. If it fails, it will be caught by the final promise anyway.
}
dbReadyPromise.then(function () {
fn(resolve, reject);
});
}).uncaught(dbUncaught);
};
//
//
//
//
// Dexie API
//
//
//
this.verno = 0;
this.open = function () {
if (isBeingOpened || idbdb) return dbReadyPromise.then(function () {
return dbOpenError ? rejection(dbOpenError, dbUncaught) : db;
});
debug && (openCanceller._stackHolder = getErrorWithStack()); // Let stacks point to when open() was called rather than where new Dexie() was called.
isBeingOpened = true;
dbOpenError = null;
openComplete = false;
// Function pointers to call when the core opening process completes.
var resolveDbReady = dbReadyResolve,
// upgradeTransaction to abort on failure.
upgradeTransaction = null;
return Promise.race([openCanceller, new Promise(function (resolve, reject) {
doFakeAutoComplete(function () {
return resolve();
});
// Make sure caller has specified at least one version
if (versions.length > 0) autoSchema = false;
// Multiplying db.verno by 10 is needed to work around an upgrading bug in IE:
// IE fails when deleting an objectStore after reading from it.
// A future version of Dexie.js will stop over at an intermediate version to work around this.
// At that point, we want to be backward compatible. Could have multiplied by 2, but by using 10 it is easier to map the number to the real version number.
// If no API, throw!
if (!indexedDB) throw new exceptions.MissingAPI("indexedDB API not found. If using IE10+, make sure to run your code on a server URL " + "(not locally). If using old Safari versions, make sure to include indexedDB polyfill.");
var req = autoSchema ? indexedDB.open(dbName) : indexedDB.open(dbName, Math.round(db.verno * 10));
if (!req) throw new exceptions.MissingAPI("IndexedDB API not available"); // May happen in Safari private mode, see https://github.com/dfahlander/Dexie.js/issues/134
req.onerror = wrap(eventRejectHandler(reject));
req.onblocked = wrap(fireOnBlocked);
req.onupgradeneeded = wrap(function (e) {
upgradeTransaction = req.transaction;
if (autoSchema && !db._allowEmptyDB) {
// Unless an addon has specified db._allowEmptyDB, lets make the call fail.
// Caller did not specify a version or schema. Doing that is only acceptable for opening already existing databases.
// If onupgradeneeded is called it means database did not exist. Reject the open() promise and make sure that we
// do not create a new database by accident here.
req.onerror = preventDefault; // Prohibit onabort error from firing before we're done!
upgradeTransaction.abort(); // Abort transaction (would hope that this would make the DB disappear but it doesn't.)
// Close database and delete it.
req.result.close();
var delreq = indexedDB.deleteDatabase(dbName); // The upgrade transaction is atomic, and javascript is single threaded - meaning that there is no risk that we delete someone else's database here!
delreq.onsuccess = delreq.onerror = wrap(function () {
reject(new exceptions.NoSuchDatabase('Database ' + dbName + ' doesnt exist'));
});
} else {
upgradeTransaction.onerror = wrap(eventRejectHandler(reject));
var oldVer = e.oldVersion > Math.pow(2, 62) ? 0 : e.oldVersion; // Safari 8 fix.
runUpgraders(oldVer / 10, upgradeTransaction, reject, req);
}
}, reject);
req.onsuccess = wrap(function () {
// Core opening procedure complete. Now let's just record some stuff.
upgradeTransaction = null;
idbdb = req.result;
connections.push(db); // Used for emulating versionchange event on IE/Edge/Safari.
if (autoSchema) readGlobalSchema();else if (idbdb.objectStoreNames.length > 0) {
try {
adjustToExistingIndexNames(globalSchema, idbdb.transaction(safariMultiStoreFix(idbdb.objectStoreNames), READONLY));
} catch (e) {
// Safari may bail out if there is more than one store name. However, this shouldn't be a showstopper. Issue #120.
}
}
idbdb.onversionchange = wrap(function (ev) {
db._vcFired = true; // detect implementations that do not support versionchange (IE/Edge/Safari)
db.on("versionchange").fire(ev);
});
if (!hasNativeGetDatabaseNames) {
// Update localStorage with list of database names
globalDatabaseList(function (databaseNames) {
if (databaseNames.indexOf(dbName) === -1) return databaseNames.push(dbName);
});
}
resolve();
}, reject);
})]).then(function () {
// Before finally resolving the dbReadyPromise and this promise,
// call and await all on('ready') subscribers:
// Dexie.vip() makes subscribers able to use the database while being opened.
// This is a must since these subscribers take part of the opening procedure.
return Dexie.vip(db.on.ready.fire);
}).then(function () {
// Resolve the db.open() with the db instance.
isBeingOpened = false;
return db;
}).catch(function (err) {
try {
// Did we fail within onupgradeneeded? Make sure to abort the upgrade transaction so it doesn't commit.
upgradeTransaction && upgradeTransaction.abort();
} catch (e) {}
isBeingOpened = false; // Set before calling db.close() so that it doesn't reject openCanceller again (leads to an unhandled rejection event).
db.close(); // Closes and resets idbdb, removes connections, resets dbReadyPromise and openCanceller so that a later db.open() is fresh.
// A call to db.close() may have made on-ready subscribers fail. Use dbOpenError if set, since err could be a follow-up error on that.
dbOpenError = err; // Record the error. It will be used to reject further promises of db operations.
return rejection(dbOpenError, dbUncaught); // dbUncaught will make sure any error that happened in any operation before will now bubble to db.on.error() thanks to the special handling in Promise.uncaught().
}).finally(function () {
openComplete = true;
resolveDbReady(); // dbReadyPromise is resolved no matter if open() rejects or resolved. It's just to wake up waiters.
});
};
this.close = function () {
var idx = connections.indexOf(db);
if (idx >= 0) connections.splice(idx, 1);
if (idbdb) {
try {
idbdb.close();
} catch (e) {}
idbdb = null;
}
autoOpen = false;
dbOpenError = new exceptions.DatabaseClosed();
if (isBeingOpened) cancelOpen(dbOpenError);
// Reset dbReadyPromise promise:
dbReadyPromise = new Promise(function (resolve) {
dbReadyResolve = resolve;
});
openCanceller = new Promise(function (_, reject) {
cancelOpen = reject;
});
};
this.delete = function () {
var hasArguments = arguments.length > 0;
return new Promise(function (resolve, reject) {
if (hasArguments) throw new exceptions.InvalidArgument("Arguments not allowed in db.delete()");
if (isBeingOpened) {
dbReadyPromise.then(doDelete);
} else {
doDelete();
}
function doDelete() {
db.close();
var req = indexedDB.deleteDatabase(dbName);
req.onsuccess = wrap(function () {
if (!hasNativeGetDatabaseNames) {
globalDatabaseList(function (databaseNames) {
var pos = databaseNames.indexOf(dbName);
if (pos >= 0) return databaseNames.splice(pos, 1);
});
}
resolve();
});
req.onerror = wrap(eventRejectHandler(reject));
req.onblocked = fireOnBlocked;
}
}).uncaught(dbUncaught);
};
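// A brief, hedged sketch of the open/close/delete API defined above:
//
//   db.open().then(function (db) {
//       // database is open (also happens automatically when autoOpen is true)
//   }).catch(function (err) {
//       console.error("Open failed: " + (err.stack || err));
//   });
//   db.close();                 // close; a later db.open() is required again
//   db.delete().then(function () { console.log("database deleted"); });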
this.backendDB = function () {
return idbdb;
};
this.isOpen = function () {
return idbdb !== null;
};
this.hasFailed = function () {
return dbOpenError !== null;
};
this.dynamicallyOpened = function () {
return autoSchema;
};
//
// Properties
//
this.name = dbName;
// db.tables - an array of all Table instances.
setProp(this, "tables", {
get: function () {
/// <returns type="Array" elementType="WriteableTable" />
return keys(allTables).map(function (name) {
return allTables[name];
});
}
});
//
// Events
//
this.on = Events(this, "error", "populate", "blocked", "versionchange", { ready: [promisableChain, nop] });
this.on.ready.subscribe = override(this.on.ready.subscribe, function (subscribe) {
return function (subscriber, bSticky) {
Dexie.vip(function () {
subscribe(subscriber);
if (!bSticky) subscribe(function unsubscribe() {
db.on.ready.unsubscribe(subscriber);
db.on.ready.unsubscribe(unsubscribe);
});
});
};
});
fakeAutoComplete(function () {
db.on("populate").fire(db._createTransaction(READWRITE, dbStoreNames, globalSchema));
db.on("error").fire(new Error());
});
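// A hedged sketch of how the events wired up above are typically consumed; the
// "friends" table and the handler bodies are hypothetical:
//
//   db.on("populate", function () {
//       db.friends.add({ name: "Josephine", age: 21 }); // seed data when the DB is first created
//   });
//   db.on("ready", function () { console.log("db is ready"); }); // unsubscribed after first fire unless bSticky
//   db.on("versionchange", function (ev) { return false; });     // prevent the default close-on-versionchange behaviour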
this.transaction = function (mode, tableInstances, scopeFunc) {
/// <summary>
///
/// </summary>
/// <param name="mode" type="String">"r" for readonly, or "rw" for readwrite</param>
/// <param name="tableInstances">Table instance, Array of Table instances, String or String Array of object stores to include in the transaction</param>
/// <param name="scopeFunc" type="Function">Function to execute with transaction</param>
// Let table arguments be all arguments between mode and last argument.
var i = arguments.length;
if (i < 2) throw new exceptions.InvalidArgument("Too few arguments");
// Prevent optimization killer (https://github.com/petkaantonov/bluebird/wiki/Optimization-killers#32-leaking-arguments)
// and clone arguments except the first one into local var 'args'.
var args = new Array(i - 1);
while (--i) {
args[i - 1] = arguments[i];
} // Let scopeFunc be the last argument and pop it so that args now only contain the table arguments.
scopeFunc = args.pop();
var tables = flatten(args); // Support using array as middle argument, or a mix of arrays and non-arrays.
var parentTransaction = PSD.trans;
// Check if the parent transaction is bound to this db instance, and if the caller wants to reuse it
if (!parentTransaction || parentTransaction.db !== db || mode.indexOf('!') !== -1) parentTransaction = null;
var onlyIfCompatible = mode.indexOf('?') !== -1;
mode = mode.replace('!', '').replace('?', ''); // OK. Will change arguments[0] as well but we won't touch arguments henceforth.
try {
//
// Get storeNames from arguments. Either through given table instances, or through given table names.
//
var storeNames = tables.map(function (table) {
var storeName = table instanceof Table ? table.name : table;
if (typeof storeName !== 'string') throw new TypeError("Invalid table argument to Dexie.transaction(). Only Table or String are allowed");
return storeName;
});
//
// Resolve mode. Allow shortcuts "r" and "rw".
//
if (mode == "r" || mode == READONLY) mode = READONLY;else if (mode == "rw" || mode == READWRITE) mode = READWRITE;else throw new exceptions.InvalidArgument("Invalid transaction mode: " + mode);
if (parentTransaction) {
// Basic checks
if (parentTransaction.mode === READONLY && mode === READWRITE) {
if (onlyIfCompatible) {
// Spawn new transaction instead.
parentTransaction = null;
} else throw new exceptions.SubTransaction("Cannot enter a sub-transaction with READWRITE mode when parent transaction is READONLY");
}
if (parentTransaction) {
storeNames.forEach(function (storeName) {
if (!hasOwn(parentTransaction.tables, storeName)) {
if (onlyIfCompatible) {
// Spawn new transaction instead.
parentTransaction = null;
} else throw new exceptions.SubTransaction("Table " + storeName + " not included in parent transaction.");
}
});
}
}
} catch (e) {
return parentTransaction ? parentTransaction._promise(null, function (_, reject) {
reject(e);
}) : rejection(e, dbUncaught);
}
// If this is a sub-transaction, lock the parent and then launch the sub-transaction.
return parentTransaction ? parentTransaction._promise(mode, enterTransactionScope, "lock") : db._whenReady(enterTransactionScope);
function enterTransactionScope(resolve) {
var parentPSD = PSD;
resolve(Promise.resolve().then(function () {
return newScope(function () {
// Keep a pointer to last non-transactional PSD to use if someone calls Dexie.ignoreTransaction().
PSD.transless = PSD.transless || parentPSD;
// Our transaction.
//return new Promise((resolve, reject) => {
var trans = db._createTransaction(mode, storeNames, globalSchema, parentTransaction);
// Let the transaction instance be part of a Promise-specific data (PSD) value.
PSD.trans = trans;
if (parentTransaction) {
// Emulate transaction commit awareness for inner transaction (must 'commit' when the inner transaction has no more operations ongoing)
trans.idbtrans = parentTransaction.idbtrans;
} else {
trans.create(); // Create the backend transaction so that complete() or error() will trigger even if no operation is made upon it.
}
// Provide arguments to the scope function (for backward compatibility)
var tableArgs = storeNames.map(function (name) {
return trans.tables[name];
});
tableArgs.push(trans);
var returnValue;
return Promise.follow(function () {
// Finally, call the scope function with our table and transaction arguments.
returnValue = scopeFunc.apply(trans, tableArgs); // NOTE: returnValue is used in trans.on.complete() not as a returnValue to this func.
if (returnValue) {
if (typeof returnValue.next === 'function' && typeof returnValue.throw === 'function') {
// scopeFunc returned an iterator with throw-support. Handle yield as await.
returnValue = awaitIterator(returnValue);
} else if (typeof returnValue.then === 'function' && !hasOwn(returnValue, '_PSD')) {
throw new exceptions.IncompatiblePromise("Incompatible Promise returned from transaction scope (read more at http://tinyurl.com/znyqjqc). Transaction scope: " + scopeFunc.toString());
}
}
}).uncaught(dbUncaught).then(function () {
                        if (parentTransaction) trans._resolve(); // Sub-transactions don't react to idbtrans.oncomplete. We must trigger completion ourselves.
return trans._completion; // Even if WE believe everything is fine. Await IDBTransaction's oncomplete or onerror as well.
}).then(function () {
return returnValue;
}).catch(function (e) {
//reject(e);
                        trans._reject(e); // Yes, the then-handlers above may not have been called because of an unhandled rejection in scopeFunc!
return rejection(e);
});
//});
});
}));
}
};
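    // A minimal usage sketch of db.transaction(), assuming this Dexie instance is `db` and has a
    // 'friends' table; the table name and data are illustrative only, not part of the library.
    //
    //   db.transaction('rw', db.friends, function () {
    //       // Table operations inside this scope run within the same transaction (via PSD.trans).
    //       return db.friends.add({ name: 'Foo', age: 42 });
    //   }).then(function () {
    //       console.log('Transaction committed');
    //   }).catch(function (err) {
    //       console.error('Transaction failed or was aborted', err);
    //   });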
this.table = function (tableName) {
/// <returns type="WriteableTable"></returns>
if (fake && autoSchema) return new WriteableTable(tableName);
if (!hasOwn(allTables, tableName)) {
throw new exceptions.InvalidTable('Table ' + tableName + ' does not exist');
}
return allTables[tableName];
};
//
//
//
// Table Class
//
//
//
function Table(name, tableSchema, collClass) {
/// <param name="name" type="String"></param>
this.name = name;
this.schema = tableSchema;
this.hook = allTables[name] ? allTables[name].hook : Events(null, {
"creating": [hookCreatingChain, nop],
"reading": [pureFunctionChain, mirror],
"updating": [hookUpdatingChain, nop],
"deleting": [hookDeletingChain, nop]
});
this._collClass = collClass || Collection;
}
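    // A minimal sketch of subscribing to a table hook, assuming a db instance with a 'friends'
    // table; the key-generation strategy is illustrative only.
    //
    //   db.friends.hook('creating', function (primKey, obj, trans) {
    //       // Returning a key here is used when no primary key was supplied (see add()/bulkAdd()).
    //       if (primKey === undefined) return Math.random().toString(36).slice(2);
    //   });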
props(Table.prototype, {
//
// Table Protected Methods
//
_trans: function getTransaction(mode, fn, writeLocked) {
var trans = PSD.trans;
return trans && trans.db === db ? trans._promise(mode, fn, writeLocked) : tempTransaction(mode, [this.name], fn);
},
_idbstore: function getIDBObjectStore(mode, fn, writeLocked) {
if (fake) return new Promise(fn); // Simplify the work for Intellisense/Code completion.
var trans = PSD.trans,
tableName = this.name;
function supplyIdbStore(resolve, reject, trans) {
fn(resolve, reject, trans.idbtrans.objectStore(tableName), trans);
}
return trans && trans.db === db ? trans._promise(mode, supplyIdbStore, writeLocked) : tempTransaction(mode, [this.name], supplyIdbStore);
},
//
// Table Public Methods
//
get: function (key, cb) {
var self = this;
return this._idbstore(READONLY, function (resolve, reject, idbstore) {
fake && resolve(self.schema.instanceTemplate);
var req = idbstore.get(key);
req.onerror = eventRejectHandler(reject);
req.onsuccess = function () {
resolve(self.hook.reading.fire(req.result));
};
}).then(cb);
},
where: function (indexName) {
return new WhereClause(this, indexName);
},
count: function (cb) {
return this.toCollection().count(cb);
},
offset: function (offset) {
return this.toCollection().offset(offset);
},
limit: function (numRows) {
return this.toCollection().limit(numRows);
},
reverse: function () {
return this.toCollection().reverse();
},
filter: function (filterFunction) {
return this.toCollection().and(filterFunction);
},
each: function (fn) {
return this.toCollection().each(fn);
},
toArray: function (cb) {
return this.toCollection().toArray(cb);
},
orderBy: function (index) {
return new this._collClass(new WhereClause(this, index));
},
toCollection: function () {
return new this._collClass(new WhereClause(this));
},
mapToClass: function (constructor, structure) {
/// <summary>
/// Map table to a javascript constructor function. Objects returned from the database will be instances of this class, making
            /// it possible to use the instanceof operator as well as to extend the class using constructor.prototype.method = function(){...}.
/// </summary>
/// <param name="constructor">Constructor function representing the class.</param>
/// <param name="structure" optional="true">Helps IDE code completion by knowing the members that objects contain and not just the indexes. Also
            /// indicates what type each member has. Example: {name: String, emailAddresses: [String], password}</param>
this.schema.mappedClass = constructor;
var instanceTemplate = Object.create(constructor.prototype);
if (structure) {
                // structure and instanceTemplate are for IDE code completion only, while constructor.prototype is for actual inheritance.
applyStructure(instanceTemplate, structure);
}
this.schema.instanceTemplate = instanceTemplate;
// Now, subscribe to the when("reading") event to make all objects that come out from this table inherit from given class
            // no matter which method is used for reading (Table.get() or Table.where(...)... )
var readHook = function (obj) {
if (!obj) return obj; // No valid object. (Value is null). Return as is.
// Create a new object that derives from constructor:
var res = Object.create(constructor.prototype);
// Clone members:
for (var m in obj) {
if (hasOwn(obj, m)) res[m] = obj[m];
}return res;
};
if (this.schema.readHook) {
this.hook.reading.unsubscribe(this.schema.readHook);
}
this.schema.readHook = readHook;
this.hook("reading", readHook);
return constructor;
},
defineClass: function (structure) {
/// <summary>
            /// Define all members of the class that represents the table. This will help code completion when objects are read from the database
/// as well as making it possible to extend the prototype of the returned constructor function.
/// </summary>
/// <param name="structure">Helps IDE code completion by knowing the members that objects contain and not just the indexes. Also
            /// indicates what type each member has. Example: {name: String, emailAddresses: [String], properties: {shoeSize: Number}}</param>
return this.mapToClass(Dexie.defineClass(structure), structure);
}
});
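    // A minimal sketch of Table.mapToClass()/defineClass(), assuming a db instance with a
    // 'friends' table; the Friend constructor and members are illustrative only.
    //
    //   function Friend() {}
    //   Friend.prototype.save = function () { return db.friends.put(this); };
    //   db.friends.mapToClass(Friend, { name: String, age: Number });
    //   db.friends.get(1).then(function (friend) {
    //       // The reading hook recreates the prototype chain, so (friend instanceof Friend) is true.
    //   });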
//
//
//
// WriteableTable Class (extends Table)
//
//
//
function WriteableTable(name, tableSchema, collClass) {
Table.call(this, name, tableSchema, collClass || WriteableCollection);
}
function BulkErrorHandlerCatchAll(errorList, done, supportHooks) {
return (supportHooks ? hookedEventRejectHandler : eventRejectHandler)(function (e) {
errorList.push(e);
done && done();
});
}
function bulkDelete(idbstore, trans, keysOrTuples, hasDeleteHook, deletingHook) {
// If hasDeleteHook, keysOrTuples must be an array of tuples: [[key1, value2],[key2,value2],...],
// else keysOrTuples must be just an array of keys: [key1, key2, ...].
return new Promise(function (resolve, reject) {
var len = keysOrTuples.length,
lastItem = len - 1;
if (len === 0) return resolve();
if (!hasDeleteHook) {
for (var i = 0; i < len; ++i) {
var req = idbstore.delete(keysOrTuples[i]);
req.onerror = wrap(eventRejectHandler(reject));
if (i === lastItem) req.onsuccess = wrap(function () {
return resolve();
});
}
} else {
var hookCtx,
errorHandler = hookedEventRejectHandler(reject),
successHandler = hookedEventSuccessHandler(null);
tryCatch(function () {
for (var i = 0; i < len; ++i) {
hookCtx = { onsuccess: null, onerror: null };
var tuple = keysOrTuples[i];
deletingHook.call(hookCtx, tuple[0], tuple[1], trans);
var req = idbstore.delete(tuple[0]);
req._hookCtx = hookCtx;
req.onerror = errorHandler;
if (i === lastItem) req.onsuccess = hookedEventSuccessHandler(resolve);else req.onsuccess = successHandler;
}
}, function (err) {
hookCtx.onerror && hookCtx.onerror(err);
throw err;
});
}
}).uncaught(dbUncaught);
}
derive(WriteableTable).from(Table).extend({
bulkDelete: function (keys) {
if (this.hook.deleting.fire === nop) {
return this._idbstore(READWRITE, function (resolve, reject, idbstore, trans) {
resolve(bulkDelete(idbstore, trans, keys, false, nop));
});
} else {
return this.where(':id').anyOf(keys).delete().then(function () {}); // Resolve with undefined.
}
},
bulkPut: function (objects, keys) {
var _this = this;
return this._idbstore(READWRITE, function (resolve, reject, idbstore) {
if (!idbstore.keyPath && !_this.schema.primKey.auto && !keys) throw new exceptions.InvalidArgument("bulkPut() with non-inbound keys requires keys array in second argument");
if (idbstore.keyPath && keys) throw new exceptions.InvalidArgument("bulkPut(): keys argument invalid on tables with inbound keys");
if (keys && keys.length !== objects.length) throw new exceptions.InvalidArgument("Arguments objects and keys must have the same length");
if (objects.length === 0) return resolve(); // Caller provided empty list.
var done = function (result) {
if (errorList.length === 0) resolve(result);else reject(new BulkError(_this.name + '.bulkPut(): ' + errorList.length + ' of ' + numObjs + ' operations failed', errorList));
};
var req,
errorList = [],
errorHandler,
numObjs = objects.length,
table = _this;
if (_this.hook.creating.fire === nop && _this.hook.updating.fire === nop) {
//
// Standard Bulk (no 'creating' or 'updating' hooks to care about)
//
errorHandler = BulkErrorHandlerCatchAll(errorList);
for (var i = 0, l = objects.length; i < l; ++i) {
req = keys ? idbstore.put(objects[i], keys[i]) : idbstore.put(objects[i]);
req.onerror = errorHandler;
}
// Only need to catch success or error on the last operation
// according to the IDB spec.
req.onerror = BulkErrorHandlerCatchAll(errorList, done);
req.onsuccess = eventSuccessHandler(done);
} else {
var effectiveKeys = keys || idbstore.keyPath && objects.map(function (o) {
return getByKeyPath(o, idbstore.keyPath);
});
// Generate map of {[key]: object}
var objectLookup = effectiveKeys && arrayToObject(effectiveKeys, function (key, i) {
return key != null && [key, objects[i]];
});
var promise = !effectiveKeys ?
// Auto-incremented key-less objects only without any keys argument.
table.bulkAdd(objects) :
// Keys provided. Either as inbound in provided objects, or as a keys argument.
// Begin with updating those that exists in DB:
table.where(':id').anyOf(effectiveKeys.filter(function (key) {
return key != null;
})).modify(function () {
this.value = objectLookup[this.primKey];
objectLookup[this.primKey] = null; // Mark as "don't add this"
}).catch(ModifyError, function (e) {
errorList = e.failures; // No need to concat here. These are the first errors added.
}).then(function () {
                        // Now, let's examine which items didn't exist so we can add them:
var objsToAdd = [],
keysToAdd = keys && [];
// Iterate backwards. Why? Because if same key was used twice, just add the last one.
for (var i = effectiveKeys.length - 1; i >= 0; --i) {
var key = effectiveKeys[i];
if (key == null || objectLookup[key]) {
objsToAdd.push(objects[i]);
keys && keysToAdd.push(key);
                                if (key != null) objectLookup[key] = null; // Mark as "don't add again"
}
}
// The items are in reverse order so reverse them before adding.
// Could be important in order to get auto-incremented keys the way the caller
// would expect. Could have used unshift instead of push()/reverse(),
// but: http://jsperf.com/unshift-vs-reverse
objsToAdd.reverse();
keys && keysToAdd.reverse();
return table.bulkAdd(objsToAdd, keysToAdd);
}).then(function (lastAddedKey) {
// Resolve with key of the last object in given arguments to bulkPut():
var lastEffectiveKey = effectiveKeys[effectiveKeys.length - 1]; // Key was provided.
return lastEffectiveKey != null ? lastEffectiveKey : lastAddedKey;
});
promise.then(done).catch(BulkError, function (e) {
// Concat failure from ModifyError and reject using our 'done' method.
errorList = errorList.concat(e.failures);
done();
}).catch(reject);
}
}, "locked"); // If called from transaction scope, lock transaction til all steps are done.
},
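        // A minimal sketch of bulkPut(), assuming a 'friends' table with an inbound primary key
        // 'id'; the data is illustrative only. Existing keys are overwritten, new keys are added.
        //
        //   db.friends.bulkPut([{ id: 1, name: 'Foo' }, { id: 2, name: 'Bar' }])
        //     .then(function (lastKey) { console.log('Last put key:', lastKey); })
        //     .catch(function (e) {
        //         // On partial failure, e is a BulkError carrying e.failures (see done() above).
        //         console.error(e);
        //     });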
bulkAdd: function (objects, keys) {
var self = this,
creatingHook = this.hook.creating.fire;
return this._idbstore(READWRITE, function (resolve, reject, idbstore, trans) {
if (!idbstore.keyPath && !self.schema.primKey.auto && !keys) throw new exceptions.InvalidArgument("bulkAdd() with non-inbound keys requires keys array in second argument");
if (idbstore.keyPath && keys) throw new exceptions.InvalidArgument("bulkAdd(): keys argument invalid on tables with inbound keys");
if (keys && keys.length !== objects.length) throw new exceptions.InvalidArgument("Arguments objects and keys must have the same length");
if (objects.length === 0) return resolve(); // Caller provided empty list.
function done(result) {
if (errorList.length === 0) resolve(result);else reject(new BulkError(self.name + '.bulkAdd(): ' + errorList.length + ' of ' + numObjs + ' operations failed', errorList));
}
var req,
errorList = [],
errorHandler,
successHandler,
numObjs = objects.length;
if (creatingHook !== nop) {
//
// There are subscribers to hook('creating')
// Must behave as documented.
//
var keyPath = idbstore.keyPath,
hookCtx;
errorHandler = BulkErrorHandlerCatchAll(errorList, null, true);
successHandler = hookedEventSuccessHandler(null);
tryCatch(function () {
for (var i = 0, l = objects.length; i < l; ++i) {
hookCtx = { onerror: null, onsuccess: null };
var key = keys && keys[i];
var obj = objects[i],
effectiveKey = keys ? key : keyPath ? getByKeyPath(obj, keyPath) : undefined,
keyToUse = creatingHook.call(hookCtx, effectiveKey, obj, trans);
if (effectiveKey == null && keyToUse != null) {
if (keyPath) {
obj = deepClone(obj);
setByKeyPath(obj, keyPath, keyToUse);
} else {
key = keyToUse;
}
}
req = key != null ? idbstore.add(obj, key) : idbstore.add(obj);
req._hookCtx = hookCtx;
if (i < l - 1) {
req.onerror = errorHandler;
if (hookCtx.onsuccess) req.onsuccess = successHandler;
}
}
}, function (err) {
hookCtx.onerror && hookCtx.onerror(err);
throw err;
});
req.onerror = BulkErrorHandlerCatchAll(errorList, done, true);
req.onsuccess = hookedEventSuccessHandler(done);
} else {
//
// Standard Bulk (no 'creating' hook to care about)
//
errorHandler = BulkErrorHandlerCatchAll(errorList);
for (var i = 0, l = objects.length; i < l; ++i) {
req = keys ? idbstore.add(objects[i], keys[i]) : idbstore.add(objects[i]);
req.onerror = errorHandler;
}
// Only need to catch success or error on the last operation
// according to the IDB spec.
req.onerror = BulkErrorHandlerCatchAll(errorList, done);
req.onsuccess = eventSuccessHandler(done);
}
});
},
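        // A minimal sketch of bulkAdd() with auto-incremented keys, assuming a 'logs' table
        // declared with an auto primary key; the data is illustrative only.
        //
        //   db.logs.bulkAdd([{ msg: 'a' }, { msg: 'b' }, { msg: 'c' }]).then(function (lastKey) {
        //       // Resolves with the key generated for the last added object.
        //       console.log('Last added key:', lastKey);
        //   });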
add: function (obj, key) {
/// <summary>
/// Add an object to the database. In case an object with same primary key already exists, the object will not be added.
/// </summary>
/// <param name="obj" type="Object">A javascript object to insert</param>
/// <param name="key" optional="true">Primary key</param>
var creatingHook = this.hook.creating.fire;
return this._idbstore(READWRITE, function (resolve, reject, idbstore, trans) {
var hookCtx = { onsuccess: null, onerror: null };
if (creatingHook !== nop) {
var effectiveKey = key != null ? key : idbstore.keyPath ? getByKeyPath(obj, idbstore.keyPath) : undefined;
var keyToUse = creatingHook.call(hookCtx, effectiveKey, obj, trans); // Allow subscribers to when("creating") to generate the key.
if (effectiveKey == null && keyToUse != null) {
// Using "==" and "!=" to check for either null or undefined!
if (idbstore.keyPath) setByKeyPath(obj, idbstore.keyPath, keyToUse);else key = keyToUse;
}
}
try {
var req = key != null ? idbstore.add(obj, key) : idbstore.add(obj);
req._hookCtx = hookCtx;
req.onerror = hookedEventRejectHandler(reject);
req.onsuccess = hookedEventSuccessHandler(function (result) {
// TODO: Remove these two lines in next major release (2.0?)
                        // It's not good practice to have side effects on provided parameters
var keyPath = idbstore.keyPath;
if (keyPath) setByKeyPath(obj, keyPath, result);
resolve(result);
});
} catch (e) {
if (hookCtx.onerror) hookCtx.onerror(e);
throw e;
}
});
},
put: function (obj, key) {
/// <summary>
            /// Add an object to the database, but in case an object with the same primary key already exists, the existing one will get updated.
/// </summary>
/// <param name="obj" type="Object">A javascript object to insert or update</param>
/// <param name="key" optional="true">Primary key</param>
var self = this,
creatingHook = this.hook.creating.fire,
updatingHook = this.hook.updating.fire;
if (creatingHook !== nop || updatingHook !== nop) {
//
                // People listen to when("creating") or when("updating") events!
                // We must know whether the put operation results in a CREATE or an UPDATE.
//
return this._trans(READWRITE, function (resolve, reject, trans) {
// Since key is optional, make sure we get it from obj if not provided
var effectiveKey = key !== undefined ? key : self.schema.primKey.keyPath && getByKeyPath(obj, self.schema.primKey.keyPath);
if (effectiveKey == null) {
// "== null" means checking for either null or undefined.
// No primary key. Must use add().
trans.tables[self.name].add(obj).then(resolve, reject);
} else {
                        // Primary key exists. Lock the transaction and try modifying the existing object. If nothing is modified, call add().
                        trans._lock(); // Needed because the operation is split into modify() and add().
                        // Clone obj before this async call. If the caller modifies obj on the line after put(), the IDB spec requires that it should not affect the operation.
obj = deepClone(obj);
trans.tables[self.name].where(":id").equals(effectiveKey).modify(function () {
                            // Replace existing value with our object
// CRUD event firing handled in WriteableCollection.modify()
this.value = obj;
}).then(function (count) {
if (count === 0) {
// Object's key was not found. Add the object instead.
// CRUD event firing will be done in add()
return trans.tables[self.name].add(obj, key); // Resolving with another Promise. Returned Promise will then resolve with the new key.
} else {
return effectiveKey; // Resolve with the provided key.
}
}).finally(function () {
trans._unlock();
}).then(resolve, reject);
}
});
} else {
// Use the standard IDB put() method.
return this._idbstore(READWRITE, function (resolve, reject, idbstore) {
var req = key !== undefined ? idbstore.put(obj, key) : idbstore.put(obj);
req.onerror = eventRejectHandler(reject);
req.onsuccess = function (ev) {
var keyPath = idbstore.keyPath;
if (keyPath) setByKeyPath(obj, keyPath, ev.target.result);
resolve(req.result);
};
});
}
},
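        // A minimal sketch contrasting add() and put(), assuming a 'friends' table with inbound
        // key 'id'; the values are illustrative only.
        //
        //   db.friends.add({ id: 1, name: 'Foo' });  // Rejects if key 1 already exists.
        //   db.friends.put({ id: 1, name: 'Bar' });  // Creates or replaces the object with key 1.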
'delete': function (key) {
/// <param name="key">Primary key of the object to delete</param>
if (this.hook.deleting.subscribers.length) {
                // People listen to the when("deleting") event. Must implement delete using WriteableCollection.delete() that will
// call the CRUD event. Only WriteableCollection.delete() will know whether an object was actually deleted.
return this.where(":id").equals(key).delete();
} else {
// No one listens. Use standard IDB delete() method.
return this._idbstore(READWRITE, function (resolve, reject, idbstore) {
var req = idbstore.delete(key);
req.onerror = eventRejectHandler(reject);
req.onsuccess = function () {
resolve(req.result);
};
});
}
},
clear: function () {
if (this.hook.deleting.subscribers.length) {
                // People listen to the when("deleting") event. Must implement delete using WriteableCollection.delete() that will
                // call the CRUD event. Only WriteableCollection.delete() will know which objects are actually deleted.
return this.toCollection().delete();
} else {
return this._idbstore(READWRITE, function (resolve, reject, idbstore) {
var req = idbstore.clear();
req.onerror = eventRejectHandler(reject);
req.onsuccess = function () {
resolve(req.result);
};
});
}
},
update: function (keyOrObject, modifications) {
if (typeof modifications !== 'object' || isArray(modifications)) throw new exceptions.InvalidArgument("Modifications must be an object.");
if (typeof keyOrObject === 'object' && !isArray(keyOrObject)) {
// object to modify. Also modify given object with the modifications:
keys(modifications).forEach(function (keyPath) {
setByKeyPath(keyOrObject, keyPath, modifications[keyPath]);
});
var key = getByKeyPath(keyOrObject, this.schema.primKey.keyPath);
if (key === undefined) return rejection(new exceptions.InvalidArgument("Given object does not contain its primary key"), dbUncaught);
return this.where(":id").equals(key).modify(modifications);
} else {
// key to modify
return this.where(":id").equals(keyOrObject).modify(modifications);
}
}
});
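    // A minimal sketch of WriteableTable.update(), assuming a 'friends' table keyed on 'id' with
    // a nested 'address' object; values are illustrative only. Resolves with the number of
    // updated objects (0 or 1).
    //
    //   db.friends.update(1, { 'address.city': 'Stockholm' }).then(function (updated) {
    //       if (!updated) console.log('No friend with id 1 was found');
    //   });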
//
//
//
// Transaction Class
//
//
//
function Transaction(mode, storeNames, dbschema, parent) {
var _this2 = this;
/// <summary>
        /// Transaction class. Represents a database transaction. All operations on the db go through a Transaction.
/// </summary>
/// <param name="mode" type="String">Any of "readwrite" or "readonly"</param>
/// <param name="storeNames" type="Array">Array of table names to operate on</param>
this.db = db;
this.mode = mode;
this.storeNames = storeNames;
this.idbtrans = null;
this.on = Events(this, "complete", "error", "abort");
this.parent = parent || null;
this.active = true;
this._tables = null;
this._reculock = 0;
this._blockedFuncs = [];
this._psd = null;
this._dbschema = dbschema;
this._resolve = null;
this._reject = null;
this._completion = new Promise(function (resolve, reject) {
_this2._resolve = resolve;
_this2._reject = reject;
}).uncaught(dbUncaught);
this._completion.then(function () {
_this2.on.complete.fire();
}, function (e) {
_this2.on.error.fire(e);
_this2.parent ? _this2.parent._reject(e) : _this2.active && _this2.idbtrans && _this2.idbtrans.abort();
_this2.active = false;
return rejection(e); // Indicate we actually DO NOT catch this error.
});
}
props(Transaction.prototype, {
//
        // Transaction Protected Methods (not required by API users, but needed internally and possibly by Dexie extensions)
//
_lock: function () {
            assert(!PSD.global); // Locking and unlocking requires being within a PSD scope.
            // Temporarily set all requests into a pending queue if they are called before the database is ready.
++this._reculock; // Recursive read/write lock pattern using PSD (Promise Specific Data) instead of TLS (Thread Local Storage)
if (this._reculock === 1 && !PSD.global) PSD.lockOwnerFor = this;
return this;
},
_unlock: function () {
            assert(!PSD.global); // Locking and unlocking requires being within a PSD scope.
if (--this._reculock === 0) {
if (!PSD.global) PSD.lockOwnerFor = null;
while (this._blockedFuncs.length > 0 && !this._locked()) {
var fn = this._blockedFuncs.shift();
try {
fn();
} catch (e) {}
}
}
return this;
},
_locked: function () {
// Checks if any write-lock is applied on this transaction.
// To simplify the Dexie API for extension implementations, we support recursive locks.
// This is accomplished by using "Promise Specific Data" (PSD).
// PSD data is bound to a Promise and any child Promise emitted through then() or resolve( new Promise() ).
            // PSD is local to code executing on top of the call stacks of any code executed by Promise():
// * callback given to the Promise() constructor (function (resolve, reject){...})
// * callbacks given to then()/catch()/finally() methods (function (value){...})
            // If creating a new independent Promise instance from within a Promise call stack, the new Promise will derive the PSD from the call stack of the parent Promise.
// Derivation is done so that the inner PSD __proto__ points to the outer PSD.
// PSD.lockOwnerFor will point to current transaction object if the currently executing PSD scope owns the lock.
return this._reculock && PSD.lockOwnerFor !== this;
},
create: function (idbtrans) {
var _this3 = this;
assert(!this.idbtrans);
if (!idbtrans && !idbdb) {
switch (dbOpenError && dbOpenError.name) {
case "DatabaseClosedError":
                        // Errors where it makes no difference whether they were caused by the user operation or an earlier call to db.open()
throw new exceptions.DatabaseClosed(dbOpenError);
case "MissingAPIError":
                        // Errors where it makes no difference whether they were caused by the user operation or an earlier call to db.open()
throw new exceptions.MissingAPI(dbOpenError.message, dbOpenError);
default:
// Make it clear that the user operation was not what caused the error - the error had occurred earlier on db.open()!
throw new exceptions.OpenFailed(dbOpenError);
}
}
if (!this.active) throw new exceptions.TransactionInactive();
assert(this._completion._state === null);
idbtrans = this.idbtrans = idbtrans || idbdb.transaction(safariMultiStoreFix(this.storeNames), this.mode);
idbtrans.onerror = wrap(function (ev) {
preventDefault(ev); // Prohibit default bubbling to window.error
_this3._reject(idbtrans.error);
});
idbtrans.onabort = wrap(function (ev) {
preventDefault(ev);
_this3.active && _this3._reject(new exceptions.Abort());
_this3.active = false;
_this3.on("abort").fire(ev);
});
idbtrans.oncomplete = wrap(function () {
_this3.active = false;
_this3._resolve();
});
return this;
},
_promise: function (mode, fn, bWriteLock) {
var self = this;
return newScope(function () {
var p;
// Read lock always
if (!self._locked()) {
p = self.active ? new Promise(function (resolve, reject) {
if (mode === READWRITE && self.mode !== READWRITE) throw new exceptions.ReadOnly("Transaction is readonly");
if (!self.idbtrans && mode) self.create();
if (bWriteLock) self._lock(); // Write lock if write operation is requested
fn(resolve, reject, self);
}) : rejection(new exceptions.TransactionInactive());
if (self.active && bWriteLock) p.finally(function () {
self._unlock();
});
} else {
// Transaction is write-locked. Wait for mutex.
p = new Promise(function (resolve, reject) {
self._blockedFuncs.push(function () {
self._promise(mode, fn, bWriteLock).then(resolve, reject);
});
});
}
p._lib = true;
return p.uncaught(dbUncaught);
});
},
//
// Transaction Public Properties and Methods
//
abort: function () {
this.active && this._reject(new exceptions.Abort());
this.active = false;
},
// Deprecate:
tables: {
get: function () {
if (this._tables) return this._tables;
return this._tables = arrayToObject(this.storeNames, function (name) {
return [name, allTables[name]];
});
}
},
// Deprecate:
complete: function (cb) {
return this.on("complete", cb);
},
// Deprecate:
error: function (cb) {
return this.on("error", cb);
},
// Deprecate
table: function (name) {
if (this.storeNames.indexOf(name) === -1) throw new exceptions.InvalidTable("Table " + name + " not in transaction");
return allTables[name];
}
});
//
//
//
// WhereClause
//
//
//
function WhereClause(table, index, orCollection) {
/// <param name="table" type="Table"></param>
/// <param name="index" type="String" optional="true"></param>
/// <param name="orCollection" type="Collection" optional="true"></param>
this._ctx = {
table: table,
index: index === ":id" ? null : index,
collClass: table._collClass,
or: orCollection
};
}
props(WhereClause.prototype, function () {
// WhereClause private methods
function fail(collectionOrWhereClause, err, T) {
var collection = collectionOrWhereClause instanceof WhereClause ? new collectionOrWhereClause._ctx.collClass(collectionOrWhereClause) : collectionOrWhereClause;
collection._ctx.error = T ? new T(err) : new TypeError(err);
return collection;
}
function emptyCollection(whereClause) {
return new whereClause._ctx.collClass(whereClause, function () {
return IDBKeyRange.only("");
}).limit(0);
}
function upperFactory(dir) {
return dir === "next" ? function (s) {
return s.toUpperCase();
} : function (s) {
return s.toLowerCase();
};
}
function lowerFactory(dir) {
return dir === "next" ? function (s) {
return s.toLowerCase();
} : function (s) {
return s.toUpperCase();
};
}
function nextCasing(key, lowerKey, upperNeedle, lowerNeedle, cmp, dir) {
var length = Math.min(key.length, lowerNeedle.length);
var llp = -1;
for (var i = 0; i < length; ++i) {
var lwrKeyChar = lowerKey[i];
if (lwrKeyChar !== lowerNeedle[i]) {
if (cmp(key[i], upperNeedle[i]) < 0) return key.substr(0, i) + upperNeedle[i] + upperNeedle.substr(i + 1);
if (cmp(key[i], lowerNeedle[i]) < 0) return key.substr(0, i) + lowerNeedle[i] + upperNeedle.substr(i + 1);
if (llp >= 0) return key.substr(0, llp) + lowerKey[llp] + upperNeedle.substr(llp + 1);
return null;
}
if (cmp(key[i], lwrKeyChar) < 0) llp = i;
}
if (length < lowerNeedle.length && dir === "next") return key + upperNeedle.substr(key.length);
if (length < key.length && dir === "prev") return key.substr(0, upperNeedle.length);
return llp < 0 ? null : key.substr(0, llp) + lowerNeedle[llp] + upperNeedle.substr(llp + 1);
}
function addIgnoreCaseAlgorithm(whereClause, match, needles, suffix) {
/// <param name="needles" type="Array" elementType="String"></param>
var upper,
lower,
compare,
upperNeedles,
lowerNeedles,
direction,
nextKeySuffix,
needlesLen = needles.length;
if (!needles.every(function (s) {
return typeof s === 'string';
})) {
return fail(whereClause, STRING_EXPECTED);
}
function initDirection(dir) {
upper = upperFactory(dir);
lower = lowerFactory(dir);
compare = dir === "next" ? simpleCompare : simpleCompareReverse;
var needleBounds = needles.map(function (needle) {
return { lower: lower(needle), upper: upper(needle) };
}).sort(function (a, b) {
return compare(a.lower, b.lower);
});
upperNeedles = needleBounds.map(function (nb) {
return nb.upper;
});
lowerNeedles = needleBounds.map(function (nb) {
return nb.lower;
});
direction = dir;
nextKeySuffix = dir === "next" ? "" : suffix;
}
initDirection("next");
var c = new whereClause._ctx.collClass(whereClause, function () {
return IDBKeyRange.bound(upperNeedles[0], lowerNeedles[needlesLen - 1] + suffix);
});
c._ondirectionchange = function (direction) {
                // This event only occurs before the filter is called the first time.
initDirection(direction);
};
var firstPossibleNeedle = 0;
c._addAlgorithm(function (cursor, advance, resolve) {
/// <param name="cursor" type="IDBCursor"></param>
/// <param name="advance" type="Function"></param>
/// <param name="resolve" type="Function"></param>
var key = cursor.key;
if (typeof key !== 'string') return false;
var lowerKey = lower(key);
if (match(lowerKey, lowerNeedles, firstPossibleNeedle)) {
return true;
} else {
var lowestPossibleCasing = null;
for (var i = firstPossibleNeedle; i < needlesLen; ++i) {
var casing = nextCasing(key, lowerKey, upperNeedles[i], lowerNeedles[i], compare, direction);
if (casing === null && lowestPossibleCasing === null) firstPossibleNeedle = i + 1;else if (lowestPossibleCasing === null || compare(lowestPossibleCasing, casing) > 0) {
lowestPossibleCasing = casing;
}
}
if (lowestPossibleCasing !== null) {
advance(function () {
cursor.continue(lowestPossibleCasing + nextKeySuffix);
});
} else {
advance(resolve);
}
return false;
}
});
return c;
}
//
// WhereClause public methods
//
return {
between: function (lower, upper, includeLower, includeUpper) {
/// <summary>
/// Filter out records whose where-field lays between given lower and upper values. Applies to Strings, Numbers and Dates.
/// </summary>
/// <param name="lower"></param>
/// <param name="upper"></param>
/// <param name="includeLower" optional="true">Whether items that equals lower should be included. Default true.</param>
/// <param name="includeUpper" optional="true">Whether items that equals upper should be included. Default false.</param>
/// <returns type="Collection"></returns>
includeLower = includeLower !== false; // Default to true
includeUpper = includeUpper === true; // Default to false
try {
                    if (cmp(lower, upper) > 0 || cmp(lower, upper) === 0 && (includeLower || includeUpper) && !(includeLower && includeUpper)) return emptyCollection(this); // Workaround for the W3C specification requirement that DataError must be thrown if lower > upper. The natural result would be to return an empty collection.
return new this._ctx.collClass(this, function () {
return IDBKeyRange.bound(lower, upper, !includeLower, !includeUpper);
});
} catch (e) {
return fail(this, INVALID_KEY_ARGUMENT);
}
},
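            // A minimal sketch of between(), assuming a 'friends' table with an 'age' index;
            // the bounds are illustrative only. Per the defaults documented above, the lower
            // bound is inclusive and the upper bound exclusive.
            //
            //   db.friends.where('age').between(18, 65).toArray(function (matches) {
            //       console.log(matches.length + ' friends aged 18 to 64');
            //   });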
equals: function (value) {
return new this._ctx.collClass(this, function () {
return IDBKeyRange.only(value);
});
},
above: function (value) {
return new this._ctx.collClass(this, function () {
return IDBKeyRange.lowerBound(value, true);
});
},
aboveOrEqual: function (value) {
return new this._ctx.collClass(this, function () {
return IDBKeyRange.lowerBound(value);
});
},
below: function (value) {
return new this._ctx.collClass(this, function () {
return IDBKeyRange.upperBound(value, true);
});
},
belowOrEqual: function (value) {
return new this._ctx.collClass(this, function () {
return IDBKeyRange.upperBound(value);
});
},
startsWith: function (str) {
/// <param name="str" type="String"></param>
if (typeof str !== 'string') return fail(this, STRING_EXPECTED);
return this.between(str, str + maxString, true, true);
},
startsWithIgnoreCase: function (str) {
/// <param name="str" type="String"></param>
if (str === "") return this.startsWith(str);
return addIgnoreCaseAlgorithm(this, function (x, a) {
return x.indexOf(a[0]) === 0;
}, [str], maxString);
},
equalsIgnoreCase: function (str) {
/// <param name="str" type="String"></param>
return addIgnoreCaseAlgorithm(this, function (x, a) {
return x === a[0];
}, [str], "");
},
anyOfIgnoreCase: function () {
var set = getArrayOf.apply(NO_CHAR_ARRAY, arguments);
if (set.length === 0) return emptyCollection(this);
return addIgnoreCaseAlgorithm(this, function (x, a) {
return a.indexOf(x) !== -1;
}, set, "");
},
startsWithAnyOfIgnoreCase: function () {
var set = getArrayOf.apply(NO_CHAR_ARRAY, arguments);
if (set.length === 0) return emptyCollection(this);
return addIgnoreCaseAlgorithm(this, function (x, a) {
return a.some(function (n) {
return x.indexOf(n) === 0;
});
}, set, maxString);
},
anyOf: function () {
var set = getArrayOf.apply(NO_CHAR_ARRAY, arguments);
var compare = ascending;
try {
set.sort(compare);
} catch (e) {
return fail(this, INVALID_KEY_ARGUMENT);
}
if (set.length === 0) return emptyCollection(this);
var c = new this._ctx.collClass(this, function () {
return IDBKeyRange.bound(set[0], set[set.length - 1]);
});
c._ondirectionchange = function (direction) {
compare = direction === "next" ? ascending : descending;
set.sort(compare);
};
var i = 0;
c._addAlgorithm(function (cursor, advance, resolve) {
var key = cursor.key;
while (compare(key, set[i]) > 0) {
// The cursor has passed beyond this key. Check next.
++i;
if (i === set.length) {
// There is no next. Stop searching.
advance(resolve);
return false;
}
}
if (compare(key, set[i]) === 0) {
// The current cursor value should be included and we should continue a single step in case next item has the same key or possibly our next key in set.
return true;
} else {
// cursor.key not yet at set[i]. Forward cursor to the next key to hunt for.
advance(function () {
cursor.continue(set[i]);
});
return false;
}
});
return c;
},
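            // A minimal sketch of anyOf(), assuming a 'friends' table with a 'name' index; the
            // values are illustrative only. Keys are visited in sorted order with a single cursor,
            // as implemented above.
            //
            //   db.friends.where('name').anyOf('Foo', 'Bar', 'Baz').toArray(function (hits) {
            //       console.log('Found ' + hits.length + ' matching friends');
            //   });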
notEqual: function (value) {
return this.inAnyRange([[-Infinity, value], [value, maxKey]], { includeLowers: false, includeUppers: false });
},
noneOf: function () {
var set = getArrayOf.apply(NO_CHAR_ARRAY, arguments);
if (set.length === 0) return new this._ctx.collClass(this); // Return entire collection.
try {
set.sort(ascending);
} catch (e) {
return fail(this, INVALID_KEY_ARGUMENT);
}
// Transform ["a","b","c"] to a set of ranges for between/above/below: [[-Infinity,"a"], ["a","b"], ["b","c"], ["c",maxKey]]
var ranges = set.reduce(function (res, val) {
return res ? res.concat([[res[res.length - 1][1], val]]) : [[-Infinity, val]];
}, null);
ranges.push([set[set.length - 1], maxKey]);
return this.inAnyRange(ranges, { includeLowers: false, includeUppers: false });
},
            /** Filter out values within the given set of ranges.
             * Example: give children and elders a rebate of 50%:
*
* db.friends.where('age').inAnyRange([[0,18],[65,Infinity]]).modify({Rebate: 1/2});
*
* @param {(string|number|Date|Array)[][]} ranges
* @param {{includeLowers: boolean, includeUppers: boolean}} options
*/
inAnyRange: function (ranges, options) {
var ctx = this._ctx;
if (ranges.length === 0) return emptyCollection(this);
if (!ranges.every(function (range) {
return range[0] !== undefined && range[1] !== undefined && ascending(range[0], range[1]) <= 0;
})) {
return fail(this, "First argument to inAnyRange() must be an Array of two-value Arrays [lower,upper] where upper must not be lower than lower", exceptions.InvalidArgument);
}
var includeLowers = !options || options.includeLowers !== false; // Default to true
var includeUppers = options && options.includeUppers === true; // Default to false
function addRange(ranges, newRange) {
for (var i = 0, l = ranges.length; i < l; ++i) {
var range = ranges[i];
if (cmp(newRange[0], range[1]) < 0 && cmp(newRange[1], range[0]) > 0) {
range[0] = min(range[0], newRange[0]);
range[1] = max(range[1], newRange[1]);
break;
}
}
if (i === l) ranges.push(newRange);
return ranges;
}
var sortDirection = ascending;
function rangeSorter(a, b) {
return sortDirection(a[0], b[0]);
}
// Join overlapping ranges
var set;
try {
set = ranges.reduce(addRange, []);
set.sort(rangeSorter);
} catch (ex) {
return fail(this, INVALID_KEY_ARGUMENT);
}
var i = 0;
var keyIsBeyondCurrentEntry = includeUppers ? function (key) {
return ascending(key, set[i][1]) > 0;
} : function (key) {
return ascending(key, set[i][1]) >= 0;
};
var keyIsBeforeCurrentEntry = includeLowers ? function (key) {
return descending(key, set[i][0]) > 0;
} : function (key) {
return descending(key, set[i][0]) >= 0;
};
function keyWithinCurrentRange(key) {
return !keyIsBeyondCurrentEntry(key) && !keyIsBeforeCurrentEntry(key);
}
var checkKey = keyIsBeyondCurrentEntry;
var c = new ctx.collClass(this, function () {
return IDBKeyRange.bound(set[0][0], set[set.length - 1][1], !includeLowers, !includeUppers);
});
c._ondirectionchange = function (direction) {
if (direction === "next") {
checkKey = keyIsBeyondCurrentEntry;
sortDirection = ascending;
} else {
checkKey = keyIsBeforeCurrentEntry;
sortDirection = descending;
}
set.sort(rangeSorter);
};
c._addAlgorithm(function (cursor, advance, resolve) {
var key = cursor.key;
while (checkKey(key)) {
// The cursor has passed beyond this key. Check next.
++i;
if (i === set.length) {
// There is no next. Stop searching.
advance(resolve);
return false;
}
}
if (keyWithinCurrentRange(key)) {
// The current cursor value should be included and we should continue a single step in case next item has the same key or possibly our next key in set.
return true;
} else if (cmp(key, set[i][1]) === 0 || cmp(key, set[i][0]) === 0) {
// includeUpper or includeLower is false so keyWithinCurrentRange() returns false even though we are at range border.
// Continue to next key but don't include this one.
return false;
} else {
// cursor.key not yet at set[i]. Forward cursor to the next key to hunt for.
advance(function () {
if (sortDirection === ascending) cursor.continue(set[i][0]);else cursor.continue(set[i][1]);
});
return false;
}
});
return c;
},
startsWithAnyOf: function () {
var set = getArrayOf.apply(NO_CHAR_ARRAY, arguments);
if (!set.every(function (s) {
return typeof s === 'string';
})) {
return fail(this, "startsWithAnyOf() only works with strings");
}
if (set.length === 0) return emptyCollection(this);
return this.inAnyRange(set.map(function (str) {
return [str, str + maxString];
}));
}
};
});
//
//
//
// Collection Class
//
//
//
function Collection(whereClause, keyRangeGenerator) {
/// <summary>
///
/// </summary>
/// <param name="whereClause" type="WhereClause">Where clause instance</param>
/// <param name="keyRangeGenerator" value="function(){ return IDBKeyRange.bound(0,1);}" optional="true"></param>
var keyRange = null,
error = null;
if (keyRangeGenerator) try {
keyRange = keyRangeGenerator();
} catch (ex) {
error = ex;
}
var whereCtx = whereClause._ctx,
table = whereCtx.table;
this._ctx = {
table: table,
index: whereCtx.index,
isPrimKey: !whereCtx.index || table.schema.primKey.keyPath && whereCtx.index === table.schema.primKey.name,
range: keyRange,
keysOnly: false,
dir: "next",
unique: "",
algorithm: null,
filter: null,
replayFilter: null,
justLimit: true, // True if a replayFilter is just a filter that performs a "limit" operation (or none at all)
isMatch: null,
offset: 0,
limit: Infinity,
error: error, // If set, any promise must be rejected with this error
or: whereCtx.or,
valueMapper: table.hook.reading.fire
};
}
function isPlainKeyRange(ctx, ignoreLimitFilter) {
return !(ctx.filter || ctx.algorithm || ctx.or) && (ignoreLimitFilter ? ctx.justLimit : !ctx.replayFilter);
}
props(Collection.prototype, function () {
//
// Collection Private Functions
//
function addFilter(ctx, fn) {
ctx.filter = combine(ctx.filter, fn);
}
function addReplayFilter(ctx, factory, isLimitFilter) {
var curr = ctx.replayFilter;
ctx.replayFilter = curr ? function () {
return combine(curr(), factory());
} : factory;
ctx.justLimit = isLimitFilter && !curr;
}
function addMatchFilter(ctx, fn) {
ctx.isMatch = combine(ctx.isMatch, fn);
}
/** @param ctx {
* isPrimKey: boolean,
* table: Table,
* index: string
* }
* @param store IDBObjectStore
**/
function getIndexOrStore(ctx, store) {
if (ctx.isPrimKey) return store;
var indexSpec = ctx.table.schema.idxByName[ctx.index];
if (!indexSpec) throw new exceptions.Schema("KeyPath " + ctx.index + " on object store " + store.name + " is not indexed");
return store.index(indexSpec.name);
}
/** @param ctx {
* isPrimKey: boolean,
* table: Table,
* index: string,
* keysOnly: boolean,
* range?: IDBKeyRange,
* dir: "next" | "prev"
* }
*/
function openCursor(ctx, store) {
var idxOrStore = getIndexOrStore(ctx, store);
return ctx.keysOnly && 'openKeyCursor' in idxOrStore ? idxOrStore.openKeyCursor(ctx.range || null, ctx.dir + ctx.unique) : idxOrStore.openCursor(ctx.range || null, ctx.dir + ctx.unique);
}
function iter(ctx, fn, resolve, reject, idbstore) {
var filter = ctx.replayFilter ? combine(ctx.filter, ctx.replayFilter()) : ctx.filter;
if (!ctx.or) {
iterate(openCursor(ctx, idbstore), combine(ctx.algorithm, filter), fn, resolve, reject, !ctx.keysOnly && ctx.valueMapper);
} else (function () {
var set = {};
var resolved = 0;
function resolveboth() {
                    if (++resolved === 2) resolve(); // Looks like we only support OR between at most 2 expressions, but there is no limit because we recurse.
}
function union(item, cursor, advance) {
if (!filter || filter(cursor, advance, resolveboth, reject)) {
var key = cursor.primaryKey.toString(); // Converts any Date to String, String to String, Number to String and Array to comma-separated string
if (!hasOwn(set, key)) {
set[key] = true;
fn(item, cursor, advance);
}
}
}
ctx.or._iterate(union, resolveboth, reject, idbstore);
iterate(openCursor(ctx, idbstore), ctx.algorithm, union, resolveboth, reject, !ctx.keysOnly && ctx.valueMapper);
})();
}
function getInstanceTemplate(ctx) {
return ctx.table.schema.instanceTemplate;
}
return {
//
// Collection Protected Functions
//
_read: function (fn, cb) {
var ctx = this._ctx;
if (ctx.error) return ctx.table._trans(null, function rejector(resolve, reject) {
reject(ctx.error);
});else return ctx.table._idbstore(READONLY, fn).then(cb);
},
_write: function (fn) {
var ctx = this._ctx;
if (ctx.error) return ctx.table._trans(null, function rejector(resolve, reject) {
reject(ctx.error);
                });else return ctx.table._idbstore(READWRITE, fn, "locked"); // When doing write operations on collections, always lock the operation so that upcoming operations get queued.
},
_addAlgorithm: function (fn) {
var ctx = this._ctx;
ctx.algorithm = combine(ctx.algorithm, fn);
},
_iterate: function (fn, resolve, reject, idbstore) {
return iter(this._ctx, fn, resolve, reject, idbstore);
},
clone: function (props) {
var rv = Object.create(this.constructor.prototype),
ctx = Object.create(this._ctx);
if (props) extend(ctx, props);
rv._ctx = ctx;
return rv;
},
raw: function () {
this._ctx.valueMapper = null;
return this;
},
//
// Collection Public methods
//
each: function (fn) {
var ctx = this._ctx;
if (fake) {
var item = getInstanceTemplate(ctx),
primKeyPath = ctx.table.schema.primKey.keyPath,
key = getByKeyPath(item, ctx.index ? ctx.table.schema.idxByName[ctx.index].keyPath : primKeyPath),
primaryKey = getByKeyPath(item, primKeyPath);
fn(item, { key: key, primaryKey: primaryKey });
}
return this._read(function (resolve, reject, idbstore) {
iter(ctx, fn, resolve, reject, idbstore);
});
},
count: function (cb) {
if (fake) return Promise.resolve(0).then(cb);
var ctx = this._ctx;
if (isPlainKeyRange(ctx, true)) {
                    // This is a plain key range. We can use the count() method of the index.
return this._read(function (resolve, reject, idbstore) {
var idx = getIndexOrStore(ctx, idbstore);
var req = ctx.range ? idx.count(ctx.range) : idx.count();
req.onerror = eventRejectHandler(reject);
req.onsuccess = function (e) {
resolve(Math.min(e.target.result, ctx.limit));
};
}, cb);
} else {
// Algorithms, filters or expressions are applied. Need to count manually.
var count = 0;
return this._read(function (resolve, reject, idbstore) {
iter(ctx, function () {
++count;return false;
}, function () {
resolve(count);
}, reject, idbstore);
}, cb);
}
},
sortBy: function (keyPath, cb) {
/// <param name="keyPath" type="String"></param>
var parts = keyPath.split('.').reverse(),
lastPart = parts[0],
lastIndex = parts.length - 1;
function getval(obj, i) {
if (i) return getval(obj[parts[i]], i - 1);
return obj[lastPart];
}
var order = this._ctx.dir === "next" ? 1 : -1;
function sorter(a, b) {
var aVal = getval(a, lastIndex),
bVal = getval(b, lastIndex);
return aVal < bVal ? -order : aVal > bVal ? order : 0;
}
return this.toArray(function (a) {
return a.sort(sorter);
}).then(cb);
},
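            // A minimal sketch of sortBy(), assuming a 'friends' table with an 'age' index and a
            // nested 'address' object; names are illustrative only. Sorting happens in memory on
            // the fetched array, so nested key paths work even without an index on them.
            //
            //   db.friends.where('age').above(18).sortBy('address.city').then(function (sorted) {
            //       console.log(sorted.map(function (f) { return f.name; }));
            //   });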
toArray: function (cb) {
var ctx = this._ctx;
return this._read(function (resolve, reject, idbstore) {
fake && resolve([getInstanceTemplate(ctx)]);
if (hasGetAll && ctx.dir === 'next' && isPlainKeyRange(ctx, true) && ctx.limit > 0) {
                        // Special optimization if we can use IDBObjectStore.getAll() or
                        // IDBIndex.getAll():
var readingHook = ctx.table.hook.reading.fire;
var idxOrStore = getIndexOrStore(ctx, idbstore);
var req = ctx.limit < Infinity ? idxOrStore.getAll(ctx.range, ctx.limit) : idxOrStore.getAll(ctx.range);
req.onerror = eventRejectHandler(reject);
req.onsuccess = readingHook === mirror ? eventSuccessHandler(resolve) : wrap(eventSuccessHandler(function (res) {
resolve(res.map(readingHook));
}));
} else {
// Getting array through a cursor.
var a = [];
iter(ctx, function (item) {
a.push(item);
}, function arrayComplete() {
resolve(a);
}, reject, idbstore);
}
}, cb);
},
offset: function (offset) {
var ctx = this._ctx;
if (offset <= 0) return this;
ctx.offset += offset; // For count()
if (isPlainKeyRange(ctx)) {
addReplayFilter(ctx, function () {
var offsetLeft = offset;
return function (cursor, advance) {
if (offsetLeft === 0) return true;
if (offsetLeft === 1) {
--offsetLeft;return false;
}
advance(function () {
cursor.advance(offsetLeft);
offsetLeft = 0;
});
return false;
};
});
} else {
addReplayFilter(ctx, function () {
var offsetLeft = offset;
return function () {
return --offsetLeft < 0;
};
});
}
return this;
},
limit: function (numRows) {
this._ctx.limit = Math.min(this._ctx.limit, numRows); // For count()
addReplayFilter(this._ctx, function () {
var rowsLeft = numRows;
return function (cursor, advance, resolve) {
if (--rowsLeft <= 0) advance(resolve); // Stop after this item has been included
                        return rowsLeft >= 0; // If rowsLeft is now below 0, return false because 0 was passed as numRows initially. Otherwise we wouldn't come here.
};
}, true);
return this;
},
until: function (filterFunction, bIncludeStopEntry) {
var ctx = this._ctx;
fake && filterFunction(getInstanceTemplate(ctx));
addFilter(this._ctx, function (cursor, advance, resolve) {
if (filterFunction(cursor.value)) {
advance(resolve);
return bIncludeStopEntry;
} else {
return true;
}
});
return this;
},
first: function (cb) {
return this.limit(1).toArray(function (a) {
return a[0];
}).then(cb);
},
last: function (cb) {
return this.reverse().first(cb);
},
filter: function (filterFunction) {
                /// <param name="filterFunction" type="Function">function(val){return true/false}</param>
fake && filterFunction(getInstanceTemplate(this._ctx));
addFilter(this._ctx, function (cursor) {
return filterFunction(cursor.value);
});
                // Match filters are not used in Dexie.js itself but can be used by 3rd-party libraries to test a
// collection for a match without querying DB. Used by Dexie.Observable.
addMatchFilter(this._ctx, filterFunction);
return this;
},
and: function (filterFunction) {
return this.filter(filterFunction);
},
or: function (indexName) {
return new WhereClause(this._ctx.table, indexName, this);
},
reverse: function () {
this._ctx.dir = this._ctx.dir === "prev" ? "next" : "prev";
if (this._ondirectionchange) this._ondirectionchange(this._ctx.dir);
return this;
},
desc: function () {
return this.reverse();
},
eachKey: function (cb) {
var ctx = this._ctx;
ctx.keysOnly = !ctx.isMatch;
return this.each(function (val, cursor) {
cb(cursor.key, cursor);
});
},
eachUniqueKey: function (cb) {
this._ctx.unique = "unique";
return this.eachKey(cb);
},
eachPrimaryKey: function (cb) {
var ctx = this._ctx;
ctx.keysOnly = !ctx.isMatch;
return this.each(function (val, cursor) {
cb(cursor.primaryKey, cursor);
});
},
keys: function (cb) {
var ctx = this._ctx;
ctx.keysOnly = !ctx.isMatch;
var a = [];
return this.each(function (item, cursor) {
a.push(cursor.key);
}).then(function () {
return a;
}).then(cb);
},
primaryKeys: function (cb) {
var ctx = this._ctx;
if (hasGetAll && ctx.dir === 'next' && isPlainKeyRange(ctx, true) && ctx.limit > 0) {
                    // Special optimization if we can use IDBObjectStore.getAllKeys() or
                    // IDBIndex.getAllKeys():
return this._read(function (resolve, reject, idbstore) {
var idxOrStore = getIndexOrStore(ctx, idbstore);
var req = ctx.limit < Infinity ? idxOrStore.getAllKeys(ctx.range, ctx.limit) : idxOrStore.getAllKeys(ctx.range);
req.onerror = eventRejectHandler(reject);
req.onsuccess = eventSuccessHandler(resolve);
}).then(cb);
}
ctx.keysOnly = !ctx.isMatch;
var a = [];
return this.each(function (item, cursor) {
a.push(cursor.primaryKey);
}).then(function () {
return a;
}).then(cb);
},
uniqueKeys: function (cb) {
this._ctx.unique = "unique";
return this.keys(cb);
},
firstKey: function (cb) {
return this.limit(1).keys(function (a) {
return a[0];
}).then(cb);
},
lastKey: function (cb) {
return this.reverse().firstKey(cb);
},
distinct: function () {
var ctx = this._ctx,
idx = ctx.index && ctx.table.schema.idxByName[ctx.index];
                if (!idx || !idx.multi) return this; // distinct() only makes a difference on multiEntry indexes.
var set = {};
addFilter(this._ctx, function (cursor) {
var strKey = cursor.primaryKey.toString(); // Converts any Date to String, String to String, Number to String and Array to comma-separated string
var found = hasOwn(set, strKey);
set[strKey] = true;
return !found;
});
return this;
}
};
});
//
//
// WriteableCollection Class
//
//
function WriteableCollection() {
Collection.apply(this, arguments);
}
derive(WriteableCollection).from(Collection).extend({
//
// WriteableCollection Public Methods
//
modify: function (changes) {
var self = this,
ctx = this._ctx,
hook = ctx.table.hook,
updatingHook = hook.updating.fire,
deletingHook = hook.deleting.fire;
fake && typeof changes === 'function' && changes.call({ value: ctx.table.schema.instanceTemplate }, ctx.table.schema.instanceTemplate);
return this._write(function (resolve, reject, idbstore, trans) {
var modifyer;
if (typeof changes === 'function') {
                    // Changes is a function that may update, add or delete properties, or even request deletion of the object itself (delete this.value)
if (updatingHook === nop && deletingHook === nop) {
                        // No one cares about what is being changed. Just let the modifier function be the given argument as is.
modifyer = changes;
} else {
// People want to know exactly what is being modified or deleted.
// Let modifyer be a proxy function that finds out what changes the caller is actually doing
// and call the hooks accordingly!
modifyer = function (item) {
                            var origItem = deepClone(item); // Clone the item first so we can compare later.
                            if (changes.call(this, item, this) === false) return false; // Call the real modifyer function (If it explicitly returns false, it means it doesn't want to modify anything on this object)
if (!hasOwn(this, "value")) {
// The real modifyer function requests a deletion of the object. Inform the deletingHook that a deletion is taking place.
deletingHook.call(this, this.primKey, item, trans);
} else {
// No deletion. Check what was changed
var objectDiff = getObjectDiff(origItem, this.value);
var additionalChanges = updatingHook.call(this, objectDiff, this.primKey, origItem, trans);
if (additionalChanges) {
                                    // Hook wants to apply additional modifications. Make sure to fulfill the will of the hook.
item = this.value;
keys(additionalChanges).forEach(function (keyPath) {
setByKeyPath(item, keyPath, additionalChanges[keyPath]); // Adding {keyPath: undefined} means that the keyPath should be deleted. Handled by setByKeyPath
});
}
}
};
}
} else if (updatingHook === nop) {
// changes is a set of {keyPath: value} and no one is listening to the updating hook.
var keyPaths = keys(changes);
var numKeys = keyPaths.length;
modifyer = function (item) {
var anythingModified = false;
for (var i = 0; i < numKeys; ++i) {
var keyPath = keyPaths[i],
val = changes[keyPath];
if (getByKeyPath(item, keyPath) !== val) {
setByKeyPath(item, keyPath, val); // Adding {keyPath: undefined} means that the keyPath should be deleted. Handled by setByKeyPath
anythingModified = true;
}
}
return anythingModified;
};
} else {
// changes is a set of {keyPath: value} and people are listening to the updating hook so we need to call it and
// allow it to add additional modifications to make.
var origChanges = changes;
changes = shallowClone(origChanges); // Let's work with a clone of the changes keyPath/value set so that we can restore it in case a hook extends it.
modifyer = function (item) {
var anythingModified = false;
var additionalChanges = updatingHook.call(this, changes, this.primKey, deepClone(item), trans);
if (additionalChanges) extend(changes, additionalChanges);
keys(changes).forEach(function (keyPath) {
var val = changes[keyPath];
if (getByKeyPath(item, keyPath) !== val) {
setByKeyPath(item, keyPath, val);
anythingModified = true;
}
});
if (additionalChanges) changes = shallowClone(origChanges); // Restore original changes for next iteration
return anythingModified;
};
}
var count = 0;
var successCount = 0;
var iterationComplete = false;
var failures = [];
var failKeys = [];
var currentKey = null;
function modifyItem(item, cursor) {
currentKey = cursor.primaryKey;
var thisContext = {
primKey: cursor.primaryKey,
value: item,
onsuccess: null,
onerror: null
};
function onerror(e) {
failures.push(e);
failKeys.push(thisContext.primKey);
checkFinished();
return true; // Catch these errors and let a final rejection decide whether or not to abort entire transaction
}
if (modifyer.call(thisContext, item, thisContext) !== false) {
                        // If a callback explicitly returns false, do not perform the update!
var bDelete = !hasOwn(thisContext, "value");
++count;
tryCatch(function () {
var req = bDelete ? cursor.delete() : cursor.update(thisContext.value);
req._hookCtx = thisContext;
req.onerror = hookedEventRejectHandler(onerror);
req.onsuccess = hookedEventSuccessHandler(function () {
++successCount;
checkFinished();
});
}, onerror);
} else if (thisContext.onsuccess) {
// Hook will expect either onerror or onsuccess to always be called!
thisContext.onsuccess(thisContext.value);
}
}
function doReject(e) {
if (e) {
failures.push(e);
failKeys.push(currentKey);
}
return reject(new ModifyError("Error modifying one or more objects", failures, successCount, failKeys));
}
function checkFinished() {
if (iterationComplete && successCount + failures.length === count) {
if (failures.length > 0) doReject();else resolve(successCount);
}
}
self.clone().raw()._iterate(modifyItem, function () {
iterationComplete = true;
checkFinished();
}, doReject, idbstore);
});
},
'delete': function () {
var _this4 = this;
var ctx = this._ctx,
range = ctx.range,
deletingHook = ctx.table.hook.deleting.fire,
hasDeleteHook = deletingHook !== nop;
if (!hasDeleteHook && isPlainKeyRange(ctx) && (ctx.isPrimKey && !hangsOnDeleteLargeKeyRange || !range)) // if no range, we'll use clear().
{
// May use IDBObjectStore.delete(IDBKeyRange) in this case (Issue #208)
// For Chromium, this is the most optimized version.
// For IE/Edge, this could hang the indexedDB engine and make the operating system unstable
// (https://gist.github.com/dfahlander/5a39328f029de18222cf2125d56c38f7)
return this._write(function (resolve, reject, idbstore) {
// Our API contract is to return a count of deleted items, so we have to count() before delete().
var onerror = eventRejectHandler(reject),
countReq = range ? idbstore.count(range) : idbstore.count();
countReq.onerror = onerror;
countReq.onsuccess = function () {
var count = countReq.result;
tryCatch(function () {
var delReq = range ? idbstore.delete(range) : idbstore.clear();
delReq.onerror = onerror;
delReq.onsuccess = function () {
return resolve(count);
};
}, function (err) {
return reject(err);
});
};
});
}
// Default version to use when collection is not a vanilla IDBKeyRange on the primary key.
// Divide into chunks to not starve RAM.
// If there is a delete hook, we will have to collect not just keys but also objects, so it will use
// more memory and need a lower chunk size.
var CHUNKSIZE = hasDeleteHook ? 2000 : 10000;
return this._write(function (resolve, reject, idbstore, trans) {
var totalCount = 0;
// Clone collection and change its table and set a limit of CHUNKSIZE on the cloned Collection instance.
var collection = _this4.clone({
keysOnly: !ctx.isMatch && !hasDeleteHook }) // load just keys (unless filter() or and() or deleteHook has subscribers)
.distinct() // In case multiEntry is used, never delete same key twice because resulting count
// would become larger than actual delete count.
.limit(CHUNKSIZE).raw(); // Don't filter through reading-hooks (like mapped classes etc)
var keysOrTuples = [];
// We're going to process as many chunks as are needed.
// Use recursion of nextChunk function:
var nextChunk = function () {
return collection.each(hasDeleteHook ? function (val, cursor) {
// Somebody subscribes to hook('deleting'). Collect all primary keys and their values,
// so that the hook can be called with its values in bulkDelete().
keysOrTuples.push([cursor.primaryKey, cursor.value]);
} : function (val, cursor) {
// No one subscribes to hook('deleting'). Collect only primary keys:
keysOrTuples.push(cursor.primaryKey);
}).then(function () {
// Chromium deletes faster when doing it in sort order.
hasDeleteHook ? keysOrTuples.sort(function (a, b) {
return ascending(a[0], b[0]);
}) : keysOrTuples.sort(ascending);
return bulkDelete(idbstore, trans, keysOrTuples, hasDeleteHook, deletingHook);
}).then(function () {
var count = keysOrTuples.length;
totalCount += count;
keysOrTuples = [];
return count < CHUNKSIZE ? totalCount : nextChunk();
});
};
resolve(nextChunk());
});
}
});
//
//
//
// ------------------------- Help functions ---------------------------
//
//
//
function lowerVersionFirst(a, b) {
return a._cfg.version - b._cfg.version;
}
function setApiOnPlace(objs, tableNames, mode, dbschema) {
tableNames.forEach(function (tableName) {
var tableInstance = db._tableFactory(mode, dbschema[tableName]);
objs.forEach(function (obj) {
tableName in obj || (obj[tableName] = tableInstance);
});
});
}
function removeTablesApi(objs) {
objs.forEach(function (obj) {
for (var key in obj) {
if (obj[key] instanceof Table) delete obj[key];
}
});
}
function iterate(req, filter, fn, resolve, reject, valueMapper) {
// Apply valueMapper (hook('reading') or mapped class)
var mappedFn = valueMapper ? function (x, c, a) {
return fn(valueMapper(x), c, a);
} : fn;
// Wrap fn with PSD and microtick stuff from Promise.
var wrappedFn = wrap(mappedFn, reject);
if (!req.onerror) req.onerror = eventRejectHandler(reject);
if (filter) {
req.onsuccess = trycatcher(function filter_record() {
var cursor = req.result;
if (cursor) {
var c = function () {
cursor.continue();
};
if (filter(cursor, function (advancer) {
c = advancer;
}, resolve, reject)) wrappedFn(cursor.value, cursor, function (advancer) {
c = advancer;
});
c();
} else {
resolve();
}
}, reject);
} else {
req.onsuccess = trycatcher(function filter_record() {
var cursor = req.result;
if (cursor) {
var c = function () {
cursor.continue();
};
wrappedFn(cursor.value, cursor, function (advancer) {
c = advancer;
});
c();
} else {
resolve();
}
}, reject);
}
}
function parseIndexSyntax(indexes) {
/// <param name="indexes" type="String"></param>
/// <returns type="Array" elementType="IndexSpec"></returns>
var rv = [];
indexes.split(',').forEach(function (index) {
index = index.trim();
var name = index.replace(/([&*]|\+\+)/g, ""); // Remove "&", "++" and "*"
// Let keyPath of "[a+b]" be ["a","b"]:
var keyPath = /^\[/.test(name) ? name.match(/^\[(.*)\]$/)[1].split('+') : name;
rv.push(new IndexSpec(name, keyPath || null, /\&/.test(index), /\*/.test(index), /\+\+/.test(index), isArray(keyPath), /\./.test(index)));
});
return rv;
}
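// Illustrative example (added for clarity, not part of the original source): given the schema
// string "++id,name,&email,*tags,[first+last]", parseIndexSyntax would yield IndexSpecs for an
// auto-incremented "id", a plain "name" index, a unique "email" index, a multiEntry "tags" index
// and a compound index with keyPath ["first","last"].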
function cmp(key1, key2) {
return indexedDB.cmp(key1, key2);
}
function min(a, b) {
return cmp(a, b) < 0 ? a : b;
}
function max(a, b) {
return cmp(a, b) > 0 ? a : b;
}
function ascending(a, b) {
return indexedDB.cmp(a, b);
}
function descending(a, b) {
return indexedDB.cmp(b, a);
}
function simpleCompare(a, b) {
return a < b ? -1 : a === b ? 0 : 1;
}
function simpleCompareReverse(a, b) {
return a > b ? -1 : a === b ? 0 : 1;
}
function combine(filter1, filter2) {
return filter1 ? filter2 ? function () {
return filter1.apply(this, arguments) && filter2.apply(this, arguments);
} : filter1 : filter2;
}
function readGlobalSchema() {
db.verno = idbdb.version / 10;
db._dbSchema = globalSchema = {};
dbStoreNames = slice(idbdb.objectStoreNames, 0);
if (dbStoreNames.length === 0) return; // Database contains no stores.
var trans = idbdb.transaction(safariMultiStoreFix(dbStoreNames), 'readonly');
dbStoreNames.forEach(function (storeName) {
var store = trans.objectStore(storeName),
keyPath = store.keyPath,
dotted = keyPath && typeof keyPath === 'string' && keyPath.indexOf('.') !== -1;
var primKey = new IndexSpec(keyPath, keyPath || "", false, false, !!store.autoIncrement, keyPath && typeof keyPath !== 'string', dotted);
var indexes = [];
for (var j = 0; j < store.indexNames.length; ++j) {
var idbindex = store.index(store.indexNames[j]);
keyPath = idbindex.keyPath;
dotted = keyPath && typeof keyPath === 'string' && keyPath.indexOf('.') !== -1;
var index = new IndexSpec(idbindex.name, keyPath, !!idbindex.unique, !!idbindex.multiEntry, false, keyPath && typeof keyPath !== 'string', dotted);
indexes.push(index);
}
globalSchema[storeName] = new TableSchema(storeName, primKey, indexes, {});
});
setApiOnPlace([allTables, Transaction.prototype], keys(globalSchema), READWRITE, globalSchema);
}
function adjustToExistingIndexNames(schema, idbtrans) {
/// <summary>
/// Issue #30 Problem with existing db - adjust to existing index names when migrating from non-dexie db
/// </summary>
/// <param name="schema" type="Object">Map between name and TableSchema</param>
/// <param name="idbtrans" type="IDBTransaction"></param>
var storeNames = idbtrans.db.objectStoreNames;
for (var i = 0; i < storeNames.length; ++i) {
var storeName = storeNames[i];
var store = idbtrans.objectStore(storeName);
hasGetAll = 'getAll' in store;
for (var j = 0; j < store.indexNames.length; ++j) {
var indexName = store.indexNames[j];
var keyPath = store.index(indexName).keyPath;
var dexieName = typeof keyPath === 'string' ? keyPath : "[" + slice(keyPath).join('+') + "]";
if (schema[storeName]) {
var indexSpec = schema[storeName].idxByName[dexieName];
if (indexSpec) indexSpec.name = indexName;
}
}
}
}
function fireOnBlocked(ev) {
db.on("blocked").fire(ev);
// Workaround (not fully) for missing "versionchange" event in IE, Edge and Safari:
connections.filter(function (c) {
return c.name === db.name && c !== db && !c._vcFired;
}).map(function (c) {
return c.on("versionchange").fire(ev);
});
}
extend(this, {
Collection: Collection,
Table: Table,
Transaction: Transaction,
Version: Version,
WhereClause: WhereClause,
WriteableCollection: WriteableCollection,
WriteableTable: WriteableTable
});
init();
addons.forEach(function (fn) {
fn(db);
});
}
var fakeAutoComplete = function () {}; // Will never be changed. We just fake for the IDE that we change it (see doFakeAutoComplete())
var fake = false; // Will never be changed. We just fake for the IDE that we change it (see doFakeAutoComplete())
function parseType(type) {
if (typeof type === 'function') {
return new type();
} else if (isArray(type)) {
return [parseType(type[0])];
} else if (type && typeof type === 'object') {
var rv = {};
applyStructure(rv, type);
return rv;
} else {
return type;
}
}
function applyStructure(obj, structure) {
keys(structure).forEach(function (member) {
var value = parseType(structure[member]);
obj[member] = value;
});
return obj;
}
function eventSuccessHandler(done) {
return function (ev) {
done(ev.target.result);
};
}
function hookedEventSuccessHandler(resolve) {
// wrap() is needed when calling hooks because of the rare scenario where:
// * hook does a db operation that fails immediately (IDB throws exception)
// For calling db operations on correct transaction, wrap makes sure to set PSD correctly.
// wrap() will also execute in a virtual tick.
// * If not wrapped in a virtual tick, a direct exception will launch a new physical tick.
// * If this was the last event in the bulk, the promise will resolve after a physical tick
// and the transaction will have committed already.
// If no hook, the virtual tick will be executed in the reject()/resolve of the final promise,
// because it is always marked with _lib = true when created using Transaction._promise().
return wrap(function (event) {
var req = event.target,
result = req.result,
ctx = req._hookCtx,
// Contains the hook success handler. Put here instead of closure to boost performance.
hookSuccessHandler = ctx && ctx.onsuccess;
hookSuccessHandler && hookSuccessHandler(result);
resolve && resolve(result);
}, resolve);
}
function eventRejectHandler(reject) {
return function (event) {
preventDefault(event);
reject(event.target.error);
return false;
};
}
function hookedEventRejectHandler(reject) {
return wrap(function (event) {
// See comment on hookedEventSuccessHandler() why wrap() is needed only when supporting hooks.
var req = event.target,
err = req.error,
ctx = req._hookCtx,
// Contains the hook error handler. Put here instead of closure to boost performance.
hookErrorHandler = ctx && ctx.onerror;
hookErrorHandler && hookErrorHandler(err);
preventDefault(event);
reject(err);
return false;
});
}
function preventDefault(event) {
if (event.stopPropagation) // IndexedDBShim doesn't support this on Safari 8 and below.
event.stopPropagation();
if (event.preventDefault) // IndexedDBShim doesn't support this on Safari 8 and below.
event.preventDefault();
}
function globalDatabaseList(cb) {
var val,
localStorage = Dexie.dependencies.localStorage;
if (!localStorage) return cb([]); // Envs without localStorage support
try {
val = JSON.parse(localStorage.getItem('Dexie.DatabaseNames') || "[]");
} catch (e) {
val = [];
}
if (cb(val)) {
localStorage.setItem('Dexie.DatabaseNames', JSON.stringify(val));
}
}
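// Note (added for clarity): the database-name registry above is a plain JSON array kept under the
// localStorage key 'Dexie.DatabaseNames', e.g. ["friendsdb","logdb"] (illustrative names); the
// callback may mutate the array and return true to have the mutated list written back.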
function awaitIterator(iterator) {
var callNext = function (result) {
return iterator.next(result);
},
doThrow = function (error) {
return iterator.throw(error);
},
onSuccess = step(callNext),
onError = step(doThrow);
function step(getNext) {
return function (val) {
var next = getNext(val),
value = next.value;
return next.done ? value : !value || typeof value.then !== 'function' ? isArray(value) ? Promise.all(value).then(onSuccess, onError) : onSuccess(value) : value.then(onSuccess, onError);
};
}
return step(callNext)();
}
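// Illustrative sketch (added for clarity, not part of the original source): awaitIterator drives a
// generator by resolving each yielded promise (or array of promises) and feeding the result back in.
//
//   awaitIterator(function* () { var a = yield Promise.resolve(1); return a + 1; }())
//
// returns a promise that eventually resolves to 2.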
//
// IndexSpec struct
//
function IndexSpec(name, keyPath, unique, multi, auto, compound, dotted) {
/// <param name="name" type="String"></param>
/// <param name="keyPath" type="String"></param>
/// <param name="unique" type="Boolean"></param>
/// <param name="multi" type="Boolean"></param>
/// <param name="auto" type="Boolean"></param>
/// <param name="compound" type="Boolean"></param>
/// <param name="dotted" type="Boolean"></param>
this.name = name;
this.keyPath = keyPath;
this.unique = unique;
this.multi = multi;
this.auto = auto;
this.compound = compound;
this.dotted = dotted;
var keyPathSrc = typeof keyPath === 'string' ? keyPath : keyPath && '[' + [].join.call(keyPath, '+') + ']';
this.src = (unique ? '&' : '') + (multi ? '*' : '') + (auto ? "++" : "") + keyPathSrc;
}
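// For example (illustrative only): a multiEntry index over "tags" gets src "*tags", while a unique
// index over the compound keyPath ["first","last"] gets src "&[first+last]".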
//
// TableSchema struct
//
function TableSchema(name, primKey, indexes, instanceTemplate) {
/// <param name="name" type="String"></param>
/// <param name="primKey" type="IndexSpec"></param>
/// <param name="indexes" type="Array" elementType="IndexSpec"></param>
/// <param name="instanceTemplate" type="Object"></param>
this.name = name;
this.primKey = primKey || new IndexSpec();
this.indexes = indexes || [new IndexSpec()];
this.instanceTemplate = instanceTemplate;
this.mappedClass = null;
this.idxByName = arrayToObject(indexes, function (index) {
return [index.name, index];
});
}
//
// Static delete() method.
//
Dexie.delete = function (databaseName) {
var db = new Dexie(databaseName),
promise = db.delete();
promise.onblocked = function (fn) {
db.on("blocked", fn);
return this;
};
return promise;
};
//
// Static exists() method.
//
Dexie.exists = function (name) {
return new Dexie(name).open().then(function (db) {
db.close();
return true;
}).catch(Dexie.NoSuchDatabaseError, function () {
return false;
});
};
//
// Static method for retrieving a list of all existing databases at current host.
//
Dexie.getDatabaseNames = function (cb) {
return new Promise(function (resolve, reject) {
var getDatabaseNames = getNativeGetDatabaseNamesFn(indexedDB);
if (getDatabaseNames) {
// In case getDatabaseNames() becomes standard, let's prepare to support it:
var req = getDatabaseNames();
req.onsuccess = function (event) {
resolve(slice(event.target.result, 0)); // Convert DOMStringList to Array<String>
};
req.onerror = eventRejectHandler(reject);
} else {
globalDatabaseList(function (val) {
resolve(val);
return false;
});
}
}).then(cb);
};
Dexie.defineClass = function (structure) {
/// <summary>
/// Create a JavaScript constructor based on the given template describing which properties to expect in the class.
/// Any property that is a constructor function will act as a type. So {name: String} will be equal to {name: new String()}.
/// </summary>
/// <param name="structure">Helps IDE code completion by describing the members that objects contain, not just the indexes. It also
/// tells what type each member has. Example: {name: String, emailAddresses: [String], properties: {shoeSize: Number}}</param>
// Default constructor able to copy given properties into this object.
function Class(properties) {
/// <param name="properties" type="Object" optional="true">Properties to initialize object with.
/// </param>
properties ? extend(this, properties) : fake && applyStructure(this, structure);
}
return Class;
};
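// Illustrative usage sketch (added for clarity, not part of the original source; the "friends"
// table name is an assumption):
//
//   var Friend = Dexie.defineClass({name: String, shoeSize: Number});
//   Friend.prototype.log = function () { console.log(this.name); };
//   db.friends.mapToClass(Friend); // assuming a "friends" table exists in the schema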
Dexie.applyStructure = applyStructure;
Dexie.ignoreTransaction = function (scopeFunc) {
// In case caller is within a transaction but needs to create a separate transaction.
// Example of usage:
//
// Let's say we have a logger function in our app. Other application-logic should be unaware of the
// logger function and not need to include the 'logentries' table in all transactions it performs.
// The logging should always be done in a separate transaction and not be dependent on the current
// running transaction context. Then you could use Dexie.ignoreTransaction() to run code that starts a new transaction.
//
// Dexie.ignoreTransaction(function() {
// db.logentries.add(newLogEntry);
// });
//
// Unless using Dexie.ignoreTransaction(), the above example would try to reuse the current transaction
// in current Promise-scope.
//
// An alternative to Dexie.ignoreTransaction() would be setImmediate() or setTimeout(). The reason we still provide an
// API for this is that:
// 1) The intention of writing the statement could be unclear if using setImmediate() or setTimeout().
// 2) setTimeout() would wait unnecessarily before firing. This is however not the case with setImmediate().
// 3) setImmediate() is not supported in the ES standard.
// 4) You might want to keep other PSD state that was set in a parent PSD, such as PSD.letThrough.
return PSD.trans ? usePSD(PSD.transless, scopeFunc) : // Use the closest parent that was non-transactional.
scopeFunc(); // No need to change scope because there is no ongoing transaction.
};
Dexie.vip = function (fn) {
// To be used by subscribers to the on('ready') event.
// This will let caller through to access DB even when it is blocked while the db.ready() subscribers are firing.
// This would have worked automatically if we were certain that the Provider was using Dexie.Promise for all asynchronous operations. The promise PSD
// from the provider.connect() call would then be derived all the way to when provider would call localDatabase.applyChanges(). But since
// the provider more likely is using non-promise async APIs or other thenable implementations, we cannot assume that.
// Note that this method is only useful for on('ready') subscribers that return a Promise from the event. If not using vip()
// the database could deadlock since it won't open until the returned Promise is resolved, and any non-VIPed operation started by
// the caller will not resolve until database is opened.
return newScope(function () {
PSD.letThrough = true; // Make sure we are let through if the db is still blocked because on('ready') is firing.
return fn();
});
};
Dexie.async = function (generatorFn) {
return function () {
try {
var rv = awaitIterator(generatorFn.apply(this, arguments));
if (!rv || typeof rv.then !== 'function') return Promise.resolve(rv);
return rv;
} catch (e) {
return rejection(e);
}
};
};
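// Illustrative usage sketch (added for clarity, not part of the original source; the table and
// field names are assumptions):
//
//   var getFriend = Dexie.async(function* (name) {
//     return yield db.friends.where('name').equals(name).first();
//   });
//   getFriend('Ingrid').then(function (friend) { console.log(friend); });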
Dexie.spawn = function (generatorFn, args, thiz) {
try {
var rv = awaitIterator(generatorFn.apply(thiz, args || []));
if (!rv || typeof rv.then !== 'function') return Promise.resolve(rv);
return rv;
} catch (e) {
return rejection(e);
}
};
// Dexie.currentTransaction property. Only applicable for transactions entered using the new "transact()" method.
setProp(Dexie, "currentTransaction", {
get: function () {
/// <returns type="Transaction"></returns>
return PSD.trans || null;
}
});
function safariMultiStoreFix(storeNames) {
return storeNames.length === 1 ? storeNames[0] : storeNames;
}
// Export our Promise implementation since it can be handy as a standalone Promise implementation
Dexie.Promise = Promise;
// Dexie.debug property:
// Dexie.debug = false
// Dexie.debug = true
// Dexie.debug = "dexie" - don't hide dexie's stack frames.
setProp(Dexie, "debug", {
get: function () {
return debug;
},
set: function (value) {
setDebug(value, value === 'dexie' ? function () {
return true;
} : dexieStackFrameFilter);
}
});
Promise.rejectionMapper = mapError;
// Export our derive/extend/override methodology
Dexie.derive = derive;
Dexie.extend = extend;
Dexie.props = props;
Dexie.override = override;
// Export our Events() function - can be handy as a toolkit
Dexie.Events = Dexie.events = Events; // Backward compatible lowercase version.
// Utilities
Dexie.getByKeyPath = getByKeyPath;
Dexie.setByKeyPath = setByKeyPath;
Dexie.delByKeyPath = delByKeyPath;
Dexie.shallowClone = shallowClone;
Dexie.deepClone = deepClone;
Dexie.addons = [];
Dexie.fakeAutoComplete = fakeAutoComplete;
Dexie.asap = asap;
Dexie.maxKey = maxKey;
Dexie.connections = connections;
// Export Error classes
extend(Dexie, fullNameExceptions); // Dexie.XXXError = class XXXError {...};
Dexie.MultiModifyError = Dexie.ModifyError; // Backward compatibility 0.9.8
Dexie.errnames = errnames;
// Export other static classes
Dexie.IndexSpec = IndexSpec;
Dexie.TableSchema = TableSchema;
//
// Dependencies
//
// These will automatically work in browsers with indexedDB support, or where an indexedDB polyfill has been included.
//
// In node.js, however, these properties must be set "manually" before instantiating a new Dexie(). For node.js, you need to require indexeddb-js or similar and then set these deps.
//
var idbshim = _global.idbModules && _global.idbModules.shimIndexedDB ? _global.idbModules : {};
Dexie.dependencies = {
// Required:
indexedDB: idbshim.shimIndexedDB || _global.indexedDB || _global.mozIndexedDB || _global.webkitIndexedDB || _global.msIndexedDB,
IDBKeyRange: idbshim.IDBKeyRange || _global.IDBKeyRange || _global.webkitIDBKeyRange
};
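// Illustrative node.js setup (a sketch only; the exact shim API is an assumption, see the comment
// above about indexeddb-js or similar):
//
//   var idb = require('indexeddb-js'); // or another indexedDB implementation for node
//   Dexie.dependencies.indexedDB = idb.indexedDB;     // property names depend on the chosen shim
//   Dexie.dependencies.IDBKeyRange = idb.IDBKeyRange;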
tryCatch(function () {
// Optional dependencies
// localStorage
Dexie.dependencies.localStorage = (typeof chrome !== "undefined" && chrome !== null ? chrome.storage : void 0) != null ? null : _global.localStorage;
});
// API Version Number: Type Number. Make sure to always set a version number that compares correctly. Example: 0.9, 0.91, 0.92, 1.0, 1.01, 1.1, 1.2, 1.21, etc.
Dexie.semVer = "1.4.0-beta2";
Dexie.version = Dexie.semVer.split('.').map(function (n) {
return parseInt(n);
}).reduce(function (p, c, i) {
return p + c / Math.pow(10, i * 2);
});
function getNativeGetDatabaseNamesFn(indexedDB) {
var fn = indexedDB && (indexedDB.getDatabaseNames || indexedDB.webkitGetDatabaseNames);
return fn && fn.bind(indexedDB);
}
// Fool IDE to improve autocomplete. Tested with Visual Studio 2013 and 2015.
doFakeAutoComplete(function () {
Dexie.fakeAutoComplete = fakeAutoComplete = doFakeAutoComplete;
Dexie.fake = fake = true;
});
// https://github.com/dfahlander/Dexie.js/issues/186
// typescript compiler tsc in mode ts-->es5 & commonJS, will expect require() to return
// x.default. Workaround: Set Dexie.default = Dexie.
Dexie.default = Dexie;
return Dexie;
}));
//# sourceMappingURL=dexie.js.map
dakshshah96/cdnjs | ajax/libs/dexie/1.4.0-beta2/dexie.js | JavaScript | mit | 207,257
/**
* angular-permission-ng
* Extension module of angular-permission for access control within angular-route
* @version v3.1.0 - 2016-05-15
* @link https://github.com/Narzerus/angular-permission
* @author Rafael Vidaurre <narzerus@gmail.com> (http://www.rafaelvidaurre.com), Blazej Krysiak
* <blazej.krysiak@gmail.com>
* @license MIT License, http://www.opensource.org/licenses/MIT
*/
(function (module) {
'use strict';
/**
* @namespace permission.ng
*/
run.$inject = ['$rootScope', '$location', 'TransitionProperties', 'TransitionEvents', 'Authorization', 'PermissionMap'];
function run($rootScope, $location, TransitionProperties, TransitionEvents, Authorization, PermissionMap) {
'ngInject';
/**
* State transition interceptor
*/
$rootScope.$on('$routeChangeStart', function (event, next, current) {
if (areSetRoutePermissions() && !TransitionEvents.areEventsDefaultPrevented()) {
setTransitionProperties();
TransitionEvents.broadcastPermissionStartEvent();
var permissionMap = new PermissionMap({
only: next.$$route.data.permissions.only,
except: next.$$route.data.permissions.except,
redirectTo: next.$$route.data.permissions.redirectTo
});
Authorization
.authorize(permissionMap)
.then(function () {
handleAuthorizedState();
})
.catch(function (rejectedPermission) {
event.preventDefault();
handleUnauthorizedState(rejectedPermission, permissionMap);
});
}
/**
* Checks if route has set permissions restrictions
* @method
* @private
*
* @returns {boolean}
*/
function areSetRoutePermissions() {
return angular.isDefined(next.$$route.data) && angular.isDefined(next.$$route.data.permissions);
}
/**
* Updates values of `TransitionProperties` holder object
* @method
* @private
*/
function setTransitionProperties() {
TransitionProperties.next = next;
TransitionProperties.current = current;
}
/**
* Handles redirection for authorized access
* @method
* @private
*/
function handleAuthorizedState() {
TransitionEvents.broadcastPermissionAcceptedEvent();
}
/**
* Handles redirection for unauthorized access
* @method
* @private
*
* @param rejectedPermission {String} Rejected access right
* @param permissionMap {permission.PermissionMap} State permission map
*/
function handleUnauthorizedState(rejectedPermission, permissionMap) {
TransitionEvents.broadcastPermissionDeniedEvent();
permissionMap
.resolveRedirectState(rejectedPermission)
.then(function (redirect) {
$location.path(redirect.state).replace();
});
}
});
}
var ngPermission = angular
.module('permission.ng', ['permission', 'ngRoute'])
.run(run).name;
module.exports = ngPermission; // ngPermission already holds the module name string
}(module || {}));
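// Illustrative route configuration this module reacts to (a sketch, not part of the original
// source; the route, template and permission names are assumptions):
//
//   $routeProvider.when('/admin', {
//     templateUrl: 'admin.html',
//     data: {
//       permissions: {
//         only: ['ADMIN'],
//         redirectTo: 'login'
//       }
//     }
//   });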
(function () {
'use strict';
/**
* Service responsible for managing and emitting events
* @name permission.ng.TransitionEvents
*
* @extends {permission.TransitionEvents}
*
* @param $delegate {Object} Parent instance being extended
* @param $rootScope {Object} Top-level angular scope
* @param TransitionProperties {permission.TransitionProperties} Helper storing transition parameters
* @param TransitionEventNames {permission.ng.TransitionEventNames} Constant storing event names
*/
TransitionEvents.$inject = ['$delegate', '$rootScope', 'TransitionProperties', 'TransitionEventNames'];
function TransitionEvents($delegate, $rootScope, TransitionProperties, TransitionEventNames) {
'ngInject';
$delegate.areEventsDefaultPrevented = areEventsDefaultPrevented;
$delegate.broadcastPermissionStartEvent = broadcastPermissionStartEvent;
$delegate.broadcastPermissionAcceptedEvent = broadcastPermissionAcceptedEvent;
$delegate.broadcastPermissionDeniedEvent = broadcastPermissionDeniedEvent;
/**
* Checks if state events are not prevented by default
* @methodOf permission.ng.TransitionEvents
*
* @returns {boolean}
*/
function areEventsDefaultPrevented() {
return isRouteChangePermissionStartDefaultPrevented();
}
/**
* Broadcasts "$routeChangePermissionStart" event from $rootScope
* @methodOf permission.ng.TransitionEvents
*/
function broadcastPermissionStartEvent() {
$rootScope.$broadcast(TransitionEventNames.permissionStart, TransitionProperties.next);
}
/**
* Broadcasts "$routeChangePermissionAccepted" event from $rootScope
* @methodOf permission.ng.TransitionEvents
*/
function broadcastPermissionAcceptedEvent() {
$rootScope.$broadcast(TransitionEventNames.permissionAccepted, TransitionProperties.next);
}
/**
* Broadcasts "$routeChangePermissionDenied" event from $rootScope
* @methodOf permission.ng.TransitionEvents
*/
function broadcastPermissionDeniedEvent() {
$rootScope.$broadcast(TransitionEventNames.permissionDenied, TransitionProperties.next);
}
/**
* Checks if event $routeChangePermissionStart hasn't been disabled by default
* @methodOf permission.ng.TransitionEvents
* @private
*
* @returns {boolean}
*/
function isRouteChangePermissionStartDefaultPrevented() {
return $rootScope.$broadcast(TransitionEventNames.permissionStart, TransitionProperties.next).defaultPrevented;
}
return $delegate;
}
angular
.module('permission.ng')
.decorator('TransitionEvents', TransitionEvents);
}());
(function () {
'use strict';
/**
* Constant storing event names for ng-route
* @name permission.ng.TransitionEventNames
*
* @type {Object.<String,Object>}
*
* @property permissionStart {String} Event name called when started checking for permissions
* @property permissionAccepted {String} Event name called when authorized
* @property permissionDenied {String} Event name called when unauthorized
*/
var TransitionEventNames = {
permissionStart: '$routeChangePermissionStart',
permissionAccepted: '$routeChangePermissionAccepted',
permissionDenied: '$routeChangePermissionDenied'
};
angular
.module('permission.ng')
.value('TransitionEventNames', TransitionEventNames);
}());
hare1039/cdnjs | ajax/libs/angular-permission/3.1.0/angular-permission-ng.js | JavaScript | mit | 6,553
/*
* WPA Supplicant - RSN PMKSA cache
* Copyright (c) 2004-2008, Jouni Malinen <j@w1.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Alternatively, this software may be distributed under the terms of BSD
* license.
*
* See README and COPYING for more details.
*/
#include "includes.h"
#include "common.h"
#include "wpa.h"
#include "eloop.h"
#include "crypto/sha1.h"
#include "crypto/sha256.h"
#include "wpa_i.h"
#include "eapol_supp/eapol_supp_sm.h"
#include "pmksa_cache.h"
#if defined(IEEE8021X_EAPOL) && !defined(CONFIG_NO_WPA2)
static const int pmksa_cache_max_entries = 32;
struct rsn_pmksa_cache {
struct rsn_pmksa_cache_entry *pmksa; /* PMKSA cache */
int pmksa_count; /* number of entries in PMKSA cache */
struct wpa_sm *sm; /* TODO: get rid of this reference(?) */
void (*free_cb)(struct rsn_pmksa_cache_entry *entry, void *ctx,
int replace);
void *ctx;
};
/**
* rsn_pmkid - Calculate PMK identifier
* @pmk: Pairwise master key
* @pmk_len: Length of pmk in bytes
* @aa: Authenticator address
* @spa: Supplicant address
* @use_sha256: Whether to use SHA256-based KDF
*
* IEEE Std 802.11i-2004 - 8.5.1.2 Pairwise key hierarchy
* PMKID = HMAC-SHA1-128(PMK, "PMK Name" || AA || SPA)
*/
static void rsn_pmkid(const u8 *pmk, size_t pmk_len, const u8 *aa,
const u8 *spa, u8 *pmkid, int use_sha256)
{
char *title = "PMK Name";
const u8 *addr[3];
const size_t len[3] = { 8, ETH_ALEN, ETH_ALEN };
unsigned char hash[SHA256_MAC_LEN];
addr[0] = (u8 *) title;
addr[1] = aa;
addr[2] = spa;
#ifdef CONFIG_IEEE80211W
if (use_sha256)
hmac_sha256_vector(pmk, pmk_len, 3, addr, len, hash);
else
#endif /* CONFIG_IEEE80211W */
hmac_sha1_vector(pmk, pmk_len, 3, addr, len, hash);
os_memcpy(pmkid, hash, PMKID_LEN);
}
static void pmksa_cache_set_expiration(struct rsn_pmksa_cache *pmksa);
static void _pmksa_cache_free_entry(struct rsn_pmksa_cache_entry *entry)
{
os_free(entry);
}
static void pmksa_cache_free_entry(struct rsn_pmksa_cache *pmksa,
struct rsn_pmksa_cache_entry *entry,
int replace)
{
pmksa->pmksa_count--;
pmksa->free_cb(entry, pmksa->ctx, replace);
_pmksa_cache_free_entry(entry);
}
static void pmksa_cache_expire(void *eloop_ctx, void *timeout_ctx)
{
struct rsn_pmksa_cache *pmksa = eloop_ctx;
struct os_time now;
os_get_time(&now);
while (pmksa->pmksa && pmksa->pmksa->expiration <= now.sec) {
struct rsn_pmksa_cache_entry *entry = pmksa->pmksa;
pmksa->pmksa = entry->next;
wpa_printf(MSG_DEBUG, "RSN: expired PMKSA cache entry for "
MACSTR, MAC2STR(entry->aa));
pmksa_cache_free_entry(pmksa, entry, 0);
}
pmksa_cache_set_expiration(pmksa);
}
static void pmksa_cache_reauth(void *eloop_ctx, void *timeout_ctx)
{
struct rsn_pmksa_cache *pmksa = eloop_ctx;
pmksa->sm->cur_pmksa = NULL;
eapol_sm_request_reauth(pmksa->sm->eapol);
}
static void pmksa_cache_set_expiration(struct rsn_pmksa_cache *pmksa)
{
int sec;
struct rsn_pmksa_cache_entry *entry;
struct os_time now;
eloop_cancel_timeout(pmksa_cache_expire, pmksa, NULL);
eloop_cancel_timeout(pmksa_cache_reauth, pmksa, NULL);
if (pmksa->pmksa == NULL)
return;
os_get_time(&now);
sec = pmksa->pmksa->expiration - now.sec;
if (sec < 0)
sec = 0;
eloop_register_timeout(sec + 1, 0, pmksa_cache_expire, pmksa, NULL);
entry = pmksa->sm->cur_pmksa ? pmksa->sm->cur_pmksa :
pmksa_cache_get(pmksa, pmksa->sm->bssid, NULL);
if (entry) {
sec = pmksa->pmksa->reauth_time - now.sec;
if (sec < 0)
sec = 0;
eloop_register_timeout(sec, 0, pmksa_cache_reauth, pmksa,
NULL);
}
}
/**
* pmksa_cache_add - Add a PMKSA cache entry
* @pmksa: Pointer to PMKSA cache data from pmksa_cache_init()
* @pmk: The new pairwise master key
* @pmk_len: PMK length in bytes, usually PMK_LEN (32)
* @aa: Authenticator address
* @spa: Supplicant address
* @network_ctx: Network configuration context for this PMK
* @akmp: WPA_KEY_MGMT_* used in key derivation
* Returns: Pointer to the added PMKSA cache entry or %NULL on error
*
* This function creates a PMKSA entry for a new PMK and adds it to the PMKSA
* cache. If an old entry is already in the cache for the same Authenticator,
* this entry will be replaced with the new entry. PMKID will be calculated
* based on the PMK and the driver interface is notified of the new PMKID.
*/
struct rsn_pmksa_cache_entry *
pmksa_cache_add(struct rsn_pmksa_cache *pmksa, const u8 *pmk, size_t pmk_len,
const u8 *aa, const u8 *spa, void *network_ctx, int akmp)
{
struct rsn_pmksa_cache_entry *entry, *pos, *prev;
struct os_time now;
if (pmksa->sm->proto != WPA_PROTO_RSN || pmk_len > PMK_LEN)
return NULL;
entry = os_zalloc(sizeof(*entry));
if (entry == NULL)
return NULL;
os_memcpy(entry->pmk, pmk, pmk_len);
entry->pmk_len = pmk_len;
rsn_pmkid(pmk, pmk_len, aa, spa, entry->pmkid,
wpa_key_mgmt_sha256(akmp));
os_get_time(&now);
entry->expiration = now.sec + pmksa->sm->dot11RSNAConfigPMKLifetime;
entry->reauth_time = now.sec + pmksa->sm->dot11RSNAConfigPMKLifetime *
pmksa->sm->dot11RSNAConfigPMKReauthThreshold / 100;
entry->akmp = akmp;
os_memcpy(entry->aa, aa, ETH_ALEN);
entry->network_ctx = network_ctx;
/* Replace an old entry for the same Authenticator (if found) with the
* new entry */
pos = pmksa->pmksa;
prev = NULL;
while (pos) {
if (os_memcmp(aa, pos->aa, ETH_ALEN) == 0) {
if (pos->pmk_len == pmk_len &&
os_memcmp(pos->pmk, pmk, pmk_len) == 0 &&
os_memcmp(pos->pmkid, entry->pmkid, PMKID_LEN) ==
0) {
wpa_printf(MSG_DEBUG, "WPA: reusing previous "
"PMKSA entry");
os_free(entry);
return pos;
}
if (prev == NULL)
pmksa->pmksa = pos->next;
else
prev->next = pos->next;
if (pos == pmksa->sm->cur_pmksa) {
/* We are about to replace the current PMKSA
* cache entry. This happens when the PMKSA
* caching attempt fails, so we don't want to
* force pmksa_cache_free_entry() to disconnect
* at this point. Let's just make sure the old
* PMKSA cache entry will not be used in the
* future.
*/
wpa_printf(MSG_DEBUG, "RSN: replacing current "
"PMKSA entry");
pmksa->sm->cur_pmksa = NULL;
}
wpa_printf(MSG_DEBUG, "RSN: Replace PMKSA entry for "
"the current AP");
pmksa_cache_free_entry(pmksa, pos, 1);
break;
}
prev = pos;
pos = pos->next;
}
if (pmksa->pmksa_count >= pmksa_cache_max_entries && pmksa->pmksa) {
/* Remove the oldest entry to make room for the new entry */
pos = pmksa->pmksa;
pmksa->pmksa = pos->next;
wpa_printf(MSG_DEBUG, "RSN: removed the oldest PMKSA cache "
"entry (for " MACSTR ") to make room for new one",
MAC2STR(pos->aa));
wpa_sm_remove_pmkid(pmksa->sm, pos->aa, pos->pmkid);
pmksa_cache_free_entry(pmksa, pos, 0);
}
/* Add the new entry; order by expiration time */
pos = pmksa->pmksa;
prev = NULL;
while (pos) {
if (pos->expiration > entry->expiration)
break;
prev = pos;
pos = pos->next;
}
if (prev == NULL) {
entry->next = pmksa->pmksa;
pmksa->pmksa = entry;
pmksa_cache_set_expiration(pmksa);
} else {
entry->next = prev->next;
prev->next = entry;
}
pmksa->pmksa_count++;
wpa_printf(MSG_DEBUG, "RSN: added PMKSA cache entry for " MACSTR,
MAC2STR(entry->aa));
wpa_sm_add_pmkid(pmksa->sm, entry->aa, entry->pmkid);
return entry;
}
/**
* pmksa_cache_deinit - Free all entries in PMKSA cache
* @pmksa: Pointer to PMKSA cache data from pmksa_cache_init()
*/
void pmksa_cache_deinit(struct rsn_pmksa_cache *pmksa)
{
struct rsn_pmksa_cache_entry *entry, *prev;
if (pmksa == NULL)
return;
entry = pmksa->pmksa;
pmksa->pmksa = NULL;
while (entry) {
prev = entry;
entry = entry->next;
os_free(prev);
}
pmksa_cache_set_expiration(pmksa);
os_free(pmksa);
}
/**
* pmksa_cache_get - Fetch a PMKSA cache entry
* @pmksa: Pointer to PMKSA cache data from pmksa_cache_init()
* @aa: Authenticator address or %NULL to match any
* @pmkid: PMKID or %NULL to match any
* Returns: Pointer to PMKSA cache entry or %NULL if no match was found
*/
struct rsn_pmksa_cache_entry * pmksa_cache_get(struct rsn_pmksa_cache *pmksa,
const u8 *aa, const u8 *pmkid)
{
struct rsn_pmksa_cache_entry *entry = pmksa->pmksa;
while (entry) {
if ((aa == NULL || os_memcmp(entry->aa, aa, ETH_ALEN) == 0) &&
(pmkid == NULL ||
os_memcmp(entry->pmkid, pmkid, PMKID_LEN) == 0))
return entry;
entry = entry->next;
}
return NULL;
}
/**
* pmksa_cache_notify_reconfig - Reconfiguration notification for PMKSA cache
* @pmksa: Pointer to PMKSA cache data from pmksa_cache_init()
*
* Clear references to old data structures when wpa_supplicant is reconfigured.
*/
void pmksa_cache_notify_reconfig(struct rsn_pmksa_cache *pmksa)
{
struct rsn_pmksa_cache_entry *entry = pmksa->pmksa;
while (entry) {
entry->network_ctx = NULL;
entry = entry->next;
}
}
static struct rsn_pmksa_cache_entry *
pmksa_cache_clone_entry(struct rsn_pmksa_cache *pmksa,
const struct rsn_pmksa_cache_entry *old_entry,
const u8 *aa)
{
struct rsn_pmksa_cache_entry *new_entry;
new_entry = pmksa_cache_add(pmksa, old_entry->pmk, old_entry->pmk_len,
aa, pmksa->sm->own_addr,
old_entry->network_ctx, old_entry->akmp);
if (new_entry == NULL)
return NULL;
/* TODO: reorder entries based on expiration time? */
new_entry->expiration = old_entry->expiration;
new_entry->opportunistic = 1;
return new_entry;
}
/**
* pmksa_cache_get_opportunistic - Try to get an opportunistic PMKSA entry
* @pmksa: Pointer to PMKSA cache data from pmksa_cache_init()
* @network_ctx: Network configuration context
* @aa: Authenticator address for the new AP
* Returns: Pointer to a new PMKSA cache entry or %NULL if not available
*
* Try to create a new PMKSA cache entry opportunistically by guessing that the
* new AP is sharing the same PMK as another AP that has the same SSID and already
* has an entry in the PMKSA cache.
*/
struct rsn_pmksa_cache_entry *
pmksa_cache_get_opportunistic(struct rsn_pmksa_cache *pmksa, void *network_ctx,
const u8 *aa)
{
struct rsn_pmksa_cache_entry *entry = pmksa->pmksa;
if (network_ctx == NULL)
return NULL;
while (entry) {
if (entry->network_ctx == network_ctx) {
entry = pmksa_cache_clone_entry(pmksa, entry, aa);
if (entry) {
wpa_printf(MSG_DEBUG, "RSN: added "
"opportunistic PMKSA cache entry "
"for " MACSTR, MAC2STR(aa));
}
return entry;
}
entry = entry->next;
}
return NULL;
}
/**
* pmksa_cache_get_current - Get the current used PMKSA entry
* @sm: Pointer to WPA state machine data from wpa_sm_init()
* Returns: Pointer to the current PMKSA cache entry or %NULL if not available
*/
struct rsn_pmksa_cache_entry * pmksa_cache_get_current(struct wpa_sm *sm)
{
if (sm == NULL)
return NULL;
return sm->cur_pmksa;
}
/**
* pmksa_cache_clear_current - Clear the current PMKSA entry selection
* @sm: Pointer to WPA state machine data from wpa_sm_init()
*/
void pmksa_cache_clear_current(struct wpa_sm *sm)
{
if (sm == NULL)
return;
sm->cur_pmksa = NULL;
}
/**
* pmksa_cache_set_current - Set the current PMKSA entry selection
* @sm: Pointer to WPA state machine data from wpa_sm_init()
* @pmkid: PMKID for selecting PMKSA or %NULL if not used
* @bssid: BSSID for PMKSA or %NULL if not used
* @network_ctx: Network configuration context
* @try_opportunistic: Whether to allow opportunistic PMKSA caching
* Returns: 0 if PMKSA was found or -1 if no matching entry was found
*/
int pmksa_cache_set_current(struct wpa_sm *sm, const u8 *pmkid,
const u8 *bssid, void *network_ctx,
int try_opportunistic)
{
struct rsn_pmksa_cache *pmksa = sm->pmksa;
sm->cur_pmksa = NULL;
if (pmkid)
sm->cur_pmksa = pmksa_cache_get(pmksa, NULL, pmkid);
if (sm->cur_pmksa == NULL && bssid)
sm->cur_pmksa = pmksa_cache_get(pmksa, bssid, NULL);
if (sm->cur_pmksa == NULL && try_opportunistic && bssid)
sm->cur_pmksa = pmksa_cache_get_opportunistic(pmksa,
network_ctx,
bssid);
if (sm->cur_pmksa) {
wpa_hexdump(MSG_DEBUG, "RSN: PMKID",
sm->cur_pmksa->pmkid, PMKID_LEN);
return 0;
}
return -1;
}
/**
* pmksa_cache_list - Dump text list of entries in PMKSA cache
* @sm: Pointer to WPA state machine data from wpa_sm_init()
* @buf: Buffer for the list
* @len: Length of the buffer
* Returns: number of bytes written to buffer
*
* This function is used to generate a text format representation of the
* current PMKSA cache contents for the ctrl_iface PMKSA command.
*/
int pmksa_cache_list(struct wpa_sm *sm, char *buf, size_t len)
{
int i, ret;
char *pos = buf;
struct rsn_pmksa_cache_entry *entry;
struct os_time now;
os_get_time(&now);
ret = os_snprintf(pos, buf + len - pos,
"Index / AA / PMKID / expiration (in seconds) / "
"opportunistic\n");
if (ret < 0 || ret >= buf + len - pos)
return pos - buf;
pos += ret;
i = 0;
entry = sm->pmksa->pmksa;
while (entry) {
i++;
ret = os_snprintf(pos, buf + len - pos, "%d " MACSTR " ",
i, MAC2STR(entry->aa));
if (ret < 0 || ret >= buf + len - pos)
return pos - buf;
pos += ret;
pos += wpa_snprintf_hex(pos, buf + len - pos, entry->pmkid,
PMKID_LEN);
ret = os_snprintf(pos, buf + len - pos, " %d %d\n",
(int) (entry->expiration - now.sec),
entry->opportunistic);
if (ret < 0 || ret >= buf + len - pos)
return pos - buf;
pos += ret;
entry = entry->next;
}
return pos - buf;
}
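/*
 * Illustrative ctrl_iface output produced by pmksa_cache_list() (the values below are made up,
 * following the format strings used above):
 *
 *   Index / AA / PMKID / expiration (in seconds) / opportunistic
 *   1 02:00:00:00:01:00 0123456789abcdef0123456789abcdef 42800 0
 */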
/**
* pmksa_cache_init - Initialize PMKSA cache
* @free_cb: Callback function to be called when a PMKSA cache entry is freed
* @ctx: Context pointer for free_cb function
* @sm: Pointer to WPA state machine data from wpa_sm_init()
* Returns: Pointer to PMKSA cache data or %NULL on failure
*/
struct rsn_pmksa_cache *
pmksa_cache_init(void (*free_cb)(struct rsn_pmksa_cache_entry *entry,
void *ctx, int replace),
void *ctx, struct wpa_sm *sm)
{
struct rsn_pmksa_cache *pmksa;
pmksa = os_zalloc(sizeof(*pmksa));
if (pmksa) {
pmksa->free_cb = free_cb;
pmksa->ctx = ctx;
pmksa->sm = sm;
}
return pmksa;
}
#endif /* IEEE8021X_EAPOL and !CONFIG_NO_WPA2 */
AOKP/external_wpa_supplicant_6 | wpa_supplicant/src/rsn_supp/pmksa_cache.c | C | gpl-2.0 | 14,321
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en_GB" xml:lang="en_GB">
<head>
<title>COM_ADMIN_HELP_EXTENSIONS_EXTENSION_MANAGER_DISCOVER</title>
<link href="css/help.css" rel="stylesheet" type="text/css" />
<meta name="copyright" content="Copyright (C) 2005 - 2011 Open Source Matters. All rights reserved." />
<meta name="license" content="GNU General Public License version 2 or later; see LICENSE.txt" />
</head>
<body>
<h1>Extension Manager: Discover</h1>
<p>This page is not available offline. Please use the <a href="http://help.joomla.org/proxy/index.php?option=com_help&keyref=Help16:Extensions_Extension_Manager_Discover">online version</a>.</p>
</body>
</html>
heqiaoliu/Viral-Dark-Matter | tmp/install_4f20924cbb0a1/administrator/help/en-GB/Extensions_Extension_Manager_Discover.html | HTML | gpl-2.0 | 801
// 2003-05-01 Petur Runolfsson <peturr02@ru.is>
// Copyright (C) 2002-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// 27.6.1.5 - Template class basic_iostream
// NB: This file is for testing iostream with NO OTHER INCLUDES.
#include <iostream>
// libstdc++/3647
void test07()
{
// Should not block.
std::wcout << std::wcin.rdbuf()->in_avail() << std::endl;
}
int main()
{
test07();
return 0;
}
evaautomation/gcc-linaro | libstdc++-v3/testsuite/27_io/objects/wchar_t/3647.cc | C++ | gpl-2.0 | 1,106
// { dg-options "-std=gnu++11" }
// { dg-do compile }
// Copyright (C) 2011-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// NB: This file is for testing type_traits with NO OTHER INCLUDES.
#include <type_traits>
namespace std
{
typedef short test_type;
template struct is_fundamental<test_type>;
}
evaautomation/gcc-linaro | libstdc++-v3/testsuite/20_util/is_fundamental/requirements/explicit_instantiation.cc | C++ | gpl-2.0 | 1,002
all: libradius.a
clean:
rm -f *~ *.o *.d *.gcno *.gcda *.gcov libradius.a
install:
@echo Nothing to be made.
include ../lib.rules
CFLAGS += -DCONFIG_IPV6
LIB_OBJS= \
radius.o \
radius_client.o \
radius_das.o \
radius_server.o
libradius.a: $(LIB_OBJS)
$(AR) crT $@ $?
-include $(OBJS:%.o=%.d)
anoidgit/padavan | trunk/user/wpa_supplicant/src/radius/Makefile | Makefile | gpl-3.0 | 307
require 'abstract_unit'
class SanitizerTest < Test::Unit::TestCase
def setup
@sanitizer = nil # used by assert_sanitizer
end
def test_strip_tags
sanitizer = HTML::FullSanitizer.new
assert_equal("<<<bad html", sanitizer.sanitize("<<<bad html"))
assert_equal("<<", sanitizer.sanitize("<<<bad html>"))
assert_equal("Dont touch me", sanitizer.sanitize("Dont touch me"))
assert_equal("This is a test.", sanitizer.sanitize("<p>This <u>is<u> a <a href='test.html'><strong>test</strong></a>.</p>"))
assert_equal("Weirdos", sanitizer.sanitize("Wei<<a>a onclick='alert(document.cookie);'</a>/>rdos"))
assert_equal("This is a test.", sanitizer.sanitize("This is a test."))
assert_equal(
%{This is a test.\n\n\nIt no longer contains any HTML.\n}, sanitizer.sanitize(
%{<title>This is <b>a <a href="" target="_blank">test</a></b>.</title>\n\n<!-- it has a comment -->\n\n<p>It no <b>longer <strong>contains <em>any <strike>HTML</strike></em>.</strong></b></p>\n}))
assert_equal "This has a here.", sanitizer.sanitize("This has a <!-- comment --> here.")
assert_equal "This has a here.", sanitizer.sanitize("This has a <![CDATA[<section>]]> here.")
assert_equal "This has an unclosed ", sanitizer.sanitize("This has an unclosed <![CDATA[<section>]] here...")
[nil, '', ' '].each { |blank| assert_equal blank, sanitizer.sanitize(blank) }
end
def test_strip_links
sanitizer = HTML::LinkSanitizer.new
assert_equal "Dont touch me", sanitizer.sanitize("Dont touch me")
assert_equal "on my mind\nall day long", sanitizer.sanitize("<a href='almost'>on my mind</a>\n<A href='almost'>all day long</A>")
assert_equal "0wn3d", sanitizer.sanitize("<a href='http://www.rubyonrails.com/'><a href='http://www.rubyonrails.com/' onlclick='steal()'>0wn3d</a></a>")
assert_equal "Magic", sanitizer.sanitize("<a href='http://www.rubyonrails.com/'>Mag<a href='http://www.ruby-lang.org/'>ic")
assert_equal "FrrFox", sanitizer.sanitize("<href onlclick='steal()'>FrrFox</a></href>")
assert_equal "My mind\nall <b>day</b> long", sanitizer.sanitize("<a href='almost'>My mind</a>\n<A href='almost'>all <b>day</b> long</A>")
assert_equal "all <b>day</b> long", sanitizer.sanitize("<<a>a href='hello'>all <b>day</b> long<</A>/a>")
assert_equal "<a<a", sanitizer.sanitize("<a<a")
end
def test_sanitize_form
assert_sanitized "<form action=\"/foo/bar\" method=\"post\"><input></form>", ''
end
def test_sanitize_plaintext
raw = "<plaintext><span>foo</span></plaintext>"
assert_sanitized raw, "<span>foo</span>"
end
def test_sanitize_script
assert_sanitized "a b c<script language=\"Javascript\">blah blah blah</script>d e f", "a b cd e f"
end
# FIXME: expected output below is known to be imperfect
def test_sanitize_js_handlers
raw = %{onthis="do that" <a href="#" onclick="hello" name="foo" onbogus="remove me">hello</a>}
assert_sanitized raw, %{onthis="do that" <a name="foo" href="#">hello</a>}
end
def test_sanitize_javascript_href
raw = %{href="javascript:bang" <a href="javascript:bang" name="hello">foo</a>, <span href="javascript:bang">bar</span>}
assert_sanitized raw, %{href="javascript:bang" <a name="hello">foo</a>, <span>bar</span>}
end
def test_sanitize_image_src
raw = %{src="javascript:bang" <img src="javascript:bang" width="5">foo</img>, <span src="javascript:bang">bar</span>}
assert_sanitized raw, %{src="javascript:bang" <img width="5">foo</img>, <span>bar</span>}
end
HTML::WhiteListSanitizer.allowed_tags.each do |tag_name|
define_method "test_should_allow_#{tag_name}_tag" do
assert_sanitized "start <#{tag_name} title=\"1\" onclick=\"foo\">foo <bad>bar</bad> baz</#{tag_name}> end", %(start <#{tag_name} title="1">foo bar baz</#{tag_name}> end)
end
end
def test_should_allow_anchors
assert_sanitized %(<a href="foo" onclick="bar"><script>baz</script></a>), %(<a href="foo"></a>)
end
# RFC 3986, sec 4.2
def test_allow_colons_in_path_component
assert_sanitized("<a href=\"./this:that\">foo</a>")
end
%w(src width height alt).each do |img_attr|
define_method "test_should_allow_image_#{img_attr}_attribute" do
assert_sanitized %(<img #{img_attr}="foo" onclick="bar" />), %(<img #{img_attr}="foo" />)
end
end
def test_should_handle_non_html
assert_sanitized 'abc'
end
def test_should_handle_blank_text
assert_sanitized nil
assert_sanitized ''
end
def test_should_allow_custom_tags
text = "<u>foo</u>"
sanitizer = HTML::WhiteListSanitizer.new
assert_equal(text, sanitizer.sanitize(text, :tags => %w(u)))
end
def test_should_allow_only_custom_tags
text = "<u>foo</u> with <i>bar</i>"
sanitizer = HTML::WhiteListSanitizer.new
assert_equal("<u>foo</u> with bar", sanitizer.sanitize(text, :tags => %w(u)))
end
def test_should_allow_custom_tags_with_attributes
text = %(<blockquote cite="http://example.com/">foo</blockquote>)
sanitizer = HTML::WhiteListSanitizer.new
assert_equal(text, sanitizer.sanitize(text))
end
def test_should_allow_custom_tags_with_custom_attributes
text = %(<blockquote foo="bar">Lorem ipsum</blockquote>)
sanitizer = HTML::WhiteListSanitizer.new
assert_equal(text, sanitizer.sanitize(text, :attributes => ['foo']))
end
[%w(img src), %w(a href)].each do |(tag, attr)|
define_method "test_should_strip_#{attr}_attribute_in_#{tag}_with_bad_protocols" do
assert_sanitized %(<#{tag} #{attr}="javascript:bang" title="1">boo</#{tag}>), %(<#{tag} title="1">boo</#{tag}>)
end
end
def test_should_flag_bad_protocols
sanitizer = HTML::WhiteListSanitizer.new
%w(about chrome data disk hcp help javascript livescript lynxcgi lynxexec ms-help ms-its mhtml mocha opera res resource shell vbscript view-source vnd.ms.radio wysiwyg).each do |proto|
assert sanitizer.send(:contains_bad_protocols?, 'src', "#{proto}://bad")
end
end
def test_should_accept_good_protocols
sanitizer = HTML::WhiteListSanitizer.new
HTML::WhiteListSanitizer.allowed_protocols.each do |proto|
assert !sanitizer.send(:contains_bad_protocols?, 'src', "#{proto}://good")
end
end
def test_should_reject_hex_codes_in_protocol
assert_sanitized %(<a href="%6A%61%76%61%73%63%72%69%70%74%3A%61%6C%65%72%74%28%22%58%53%53%22%29">1</a>), "<a>1</a>"
assert @sanitizer.send(:contains_bad_protocols?, 'src', "%6A%61%76%61%73%63%72%69%70%74%3A%61%6C%65%72%74%28%22%58%53%53%22%29")
end
def test_should_block_script_tag
assert_sanitized %(<SCRIPT\nSRC=http://ha.ckers.org/xss.js></SCRIPT>), ""
end
[%(<IMG SRC="javascript:alert('XSS');">),
%(<IMG SRC=javascript:alert('XSS')>),
%(<IMG SRC=JaVaScRiPt:alert('XSS')>),
%(<IMG """><SCRIPT>alert("XSS")</SCRIPT>">),
%(<IMG SRC=javascript:alert("XSS")>),
%(<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>),
%(<IMG SRC=javascript:alert('XSS')>),
%(<IMG SRC=javascript:alert('XSS')>),
%(<IMG SRC=javascript:alert('XSS')>),
%(<IMG SRC="jav\tascript:alert('XSS');">),
%(<IMG SRC="jav	ascript:alert('XSS');">),
%(<IMG SRC="jav
ascript:alert('XSS');">),
%(<IMG SRC="jav
ascript:alert('XSS');">),
%(<IMG SRC="  javascript:alert('XSS');">),
%(<IMG SRC=`javascript:alert("RSnake says, 'XSS'")`>)].each_with_index do |img_hack, i|
define_method "test_should_not_fall_for_xss_image_hack_#{i+1}" do
assert_sanitized img_hack, "<img>"
end
end
def test_should_sanitize_tag_broken_up_by_null
assert_sanitized %(<SCR\0IPT>alert(\"XSS\")</SCR\0IPT>), "alert(\"XSS\")"
end
def test_should_sanitize_invalid_script_tag
assert_sanitized %(<SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT>), ""
end
def test_should_sanitize_script_tag_with_multiple_open_brackets
assert_sanitized %(<<SCRIPT>alert("XSS");//<</SCRIPT>), "<"
assert_sanitized %(<iframe src=http://ha.ckers.org/scriptlet.html\n<a), %(<a)
end
def test_should_sanitize_unclosed_script
assert_sanitized %(<SCRIPT SRC=http://ha.ckers.org/xss.js?<B>), "<b>"
end
def test_should_sanitize_half_open_scripts
assert_sanitized %(<IMG SRC="javascript:alert('XSS')"), "<img>"
end
def test_should_not_fall_for_ridiculous_hack
img_hack = %(<IMG\nSRC\n=\n"\nj\na\nv\na\ns\nc\nr\ni\np\nt\n:\na\nl\ne\nr\nt\n(\n'\nX\nS\nS\n'\n)\n"\n>)
assert_sanitized img_hack, "<img>"
end
# FIXME: expected output below is known to be imperfect
def test_should_sanitize_attributes
assert_sanitized %(<SPAN title="'><script>alert()</script>">blah</SPAN>), %(<span title="'><script>alert()</script>">blah</span>)
end
def test_should_sanitize_illegal_style_properties
raw = %(display:block; position:absolute; left:0; top:0; width:100%; height:100%; z-index:1; background-color:black; background-image:url(http://www.ragingplatypus.com/i/cam-full.jpg); background-x:center; background-y:center; background-repeat:repeat;)
expected = %(display: block; width: 100%; height: 100%; background-color: black; background-image: ; background-x: center; background-y: center;)
assert_equal expected, sanitize_css(raw)
end
def test_should_sanitize_with_trailing_space
raw = "display:block; "
expected = "display: block;"
assert_equal expected, sanitize_css(raw)
end
def test_should_sanitize_xul_style_attributes
raw = %(-moz-binding:url('http://ha.ckers.org/xssmoz.xml#xss'))
assert_equal '', sanitize_css(raw)
end
def test_should_sanitize_invalid_tag_names
assert_sanitized(%(a b c<script/XSS src="http://ha.ckers.org/xss.js"></script>d e f), "a b cd e f")
end
def test_should_sanitize_non_alpha_and_non_digit_characters_in_tags
assert_sanitized('<a onclick!#$%&()*~+-_.,:;?@[/|\]^`=alert("XSS")>foo</a>', "<a>foo</a>")
end
def test_should_sanitize_invalid_tag_names_in_single_tags
assert_sanitized('<img/src="http://ha.ckers.org/xss.js"/>', "<img />")
end
def test_should_sanitize_img_dynsrc_lowsrc
assert_sanitized(%(<img lowsrc="javascript:alert('XSS')" />), "<img />")
end
def test_should_sanitize_div_background_image_unicode_encoded
raw = %(background-image:\0075\0072\006C\0028'\006a\0061\0076\0061\0073\0063\0072\0069\0070\0074\003a\0061\006c\0065\0072\0074\0028.1027\0058.1053\0053\0027\0029'\0029)
assert_equal '', sanitize_css(raw)
end
def test_should_sanitize_div_style_expression
raw = %(width: expression(alert('XSS'));)
assert_equal '', sanitize_css(raw)
end
def test_should_sanitize_img_vbscript
assert_sanitized %(<img src='vbscript:msgbox("XSS")' />), '<img />'
end
def test_should_sanitize_cdata_section
assert_sanitized "<![CDATA[<span>section</span>]]>", "<![CDATA[<span>section</span>]]>"
end
def test_should_sanitize_unterminated_cdata_section
assert_sanitized "<![CDATA[<span>neverending...", "<![CDATA[<span>neverending...]]>"
end
def test_should_not_mangle_urls_with_ampersand
assert_sanitized %{<a href=\"http://www.domain.com?var1=1&var2=2\">my link</a>}
end
protected
def assert_sanitized(input, expected = nil)
@sanitizer ||= HTML::WhiteListSanitizer.new
if input
assert_dom_equal expected || input, @sanitizer.sanitize(input)
else
assert_nil @sanitizer.sanitize(input)
end
end
def sanitize_css(input)
(@sanitizer ||= HTML::WhiteListSanitizer.new).sanitize_css(input)
end
end
javan/rreset | vendor/rails/actionpack/test/controller/html-scanner/sanitizer_test.rb | Ruby | gpl-3.0 | 11,929
# OS X Setup Guide
This guide will walk you through running the new Go-based [Docker registry](https://github.com/docker/distribution) on your local OS X machine.
## Checkout the Docker Distribution source tree
```
mkdir -p $GOPATH/src/github.com/docker
git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution
cd $GOPATH/src/github.com/docker/distribution
```
## Build the registry binary
```
GOPATH=$PWD/Godeps/_workspace:$GOPATH make binaries
sudo cp bin/registry /usr/local/libexec/registry
```
## Setup
Copy the registry configuration file into place:
```
mkdir /Users/Shared/Registry
cp docs/osx/config.yml /Users/Shared/Registry/config.yml
```
## Running the Docker Registry under launchd
Copy the Docker registry plist into place:
```
plutil -lint docs/osx/com.docker.registry.plist
cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/
chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
```
Start the Docker registry:
```
launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
```
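To confirm the registry is up, check that launchd loaded the job and hit the registry's HTTP API. The URL below assumes `config.yml` keeps the registry on its default `localhost:5000` listener; adjust it if your configuration differs:
```
launchctl list | grep com.docker.registry
curl -v http://localhost:5000/v2/
```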
### Restarting the docker registry service
```
launchctl stop com.docker.registry
launchctl start com.docker.registry
```
### Unloading the docker registry service
```
launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
```
|
qiniu/distribution
|
docs/osx-setup-guide.md
|
Markdown
|
apache-2.0
| 1,309
|
package org.sakaiproject.section.api.coursemanagement;
import java.sql.Time;
public interface Meeting {
/**
* Gets the location where this CourseSection meets.
* @return
*/
public String getLocation();
/**
* Whether the CourseSection meets on Mondays.
*
* @return
*/
public boolean isMonday();
/**
* Whether the CourseSection meets on Tuesdays.
*
* @return
*/
public boolean isTuesday();
/**
* Whether the CourseSection meets on Wednesdays.
*
* @return
*/
public boolean isWednesday();
/**
* Whether the CourseSection meets on Thursdays.
*
* @return
*/
public boolean isThursday();
/**
* Whether the CourseSection meets on Fridays.
*
* @return
*/
public boolean isFriday();
/**
* Whether the CourseSection meets on Saturdays.
*
* @return
*/
public boolean isSaturday();
/**
* Whether the CourseSection meets on Sundays.
*
* @return
*/
public boolean isSunday();
/**
* Gets the time of day that this CourseSection's meeting(s) start.
*
* @return
*/
public Time getStartTime();
/**
* Gets the time of day that this CourseSection's meeting(s) end.
*
* @return
*/
public Time getEndTime();
/**
* Indicates whether this meeting has no information. Should return true if there are
* no meeting times and a null location.
*
* @return
*/
public boolean isEmpty();
}
|
marktriggs/nyu-sakai-10.4
|
edu-services/sections-service/sections-api/src/java/org/sakaiproject/section/api/coursemanagement/Meeting.java
|
Java
|
apache-2.0
| 1,405
|
/*version.h
=========*/
#define MAJOR_VERSION "1"
#define MINOR_VERSION "7"
|
jspaleta/SuperDARN_MSI_ROS
|
linux/home/radar/ros.3.6/usr/codebase/superdarn/src.bin/os/cp/themisscan.1.7/version.h
|
C
|
mit
| 81
|
Imports System
Imports System.Reflection
Imports System.Runtime.InteropServices
' General Information about an assembly is controlled through the following
' set of attributes. Change these attribute values to modify the information
' associated with an assembly.
' Review the values of the assembly attributes
<Assembly: AssemblyTitle("WeatherStationVB")>
<Assembly: AssemblyDescription("")>
<Assembly: AssemblyCompany("")>
<Assembly: AssemblyProduct("WeatherStationVB")>
<Assembly: AssemblyCopyright("Copyright © 2015")>
<Assembly: AssemblyTrademark("")>
' Version information for an assembly consists of the following four values:
'
' Major Version
' Minor Version
' Build Number
' Revision
'
' You can specify all the values or you can default the Build and Revision Numbers
' by using the '*' as shown below:
' <Assembly: AssemblyVersion("1.0.*")>
<Assembly: AssemblyVersion("1.0.0.0")>
<Assembly: AssemblyFileVersion("1.0.0.0")>
<Assembly: ComVisible(False)>
|
sndnvaps/ms-iot_samples
|
WeatherStation/VB/WeatherStationVB/My Project/AssemblyInfo.vb
|
Visual Basic
|
mit
| 998
|
/*!
* numbro.js
* version : 1.8.1
* author : Företagsplatsen AB
* license : MIT
* http://www.foretagsplatsen.se
*/
(function () {
'use strict';
/************************************
Constants
************************************/
var numbro,
VERSION = '1.8.1',
binarySuffixes = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
decimalSuffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'],
bytes = {
general: { scale: 1024, suffixes: decimalSuffixes, marker: 'bd' },
binary: { scale: 1024, suffixes: binarySuffixes, marker: 'b' },
decimal: { scale: 1000, suffixes: decimalSuffixes, marker: 'd' }
},
// general must be before the others because it reuses their characters!
byteFormatOrder = [ bytes.general, bytes.binary, bytes.decimal ],
// internal storage for culture config files
cultures = {},
// Todo: Remove in 2.0.0
languages = cultures,
currentCulture = 'en-US',
zeroFormat = null,
defaultFormat = '0,0',
defaultCurrencyFormat = '0$',
// check for nodeJS
hasModule = (typeof module !== 'undefined' && module.exports),
// default culture
enUS = {
delimiters: {
thousands: ',',
decimal: '.'
},
abbreviations: {
thousand: 'k',
million: 'm',
billion: 'b',
trillion: 't'
},
ordinal: function(number) {
var b = number % 10;
return (~~(number % 100 / 10) === 1) ? 'th' :
(b === 1) ? 'st' :
(b === 2) ? 'nd' :
(b === 3) ? 'rd' : 'th';
},
currency: {
symbol: '$',
position: 'prefix'
},
defaults: {
currencyFormat: ',0000 a'
},
formats: {
fourDigits: '0000 a',
fullWithTwoDecimals: '$ ,0.00',
fullWithTwoDecimalsNoCurrency: ',0.00'
}
};
/************************************
Constructors
************************************/
// Numbro prototype object
function Numbro(number) {
this._value = number;
}
function zeroes(count) {
var i, ret = '';
for (i = 0; i < count; i++) {
ret += '0';
}
return ret;
}
/**
* Implementation of toFixed() for numbers with exponents
* This function may return negative representations for zero values e.g. "-0.0"
*/
function toFixedLargeSmall(value, precision) {
var mantissa,
beforeDec,
afterDec,
exponent,
prefix,
endStr,
zerosStr,
str;
str = value.toString();
mantissa = str.split('e')[0];
exponent = str.split('e')[1];
beforeDec = mantissa.split('.')[0];
afterDec = mantissa.split('.')[1] || '';
if (+exponent > 0) {
// exponent is positive - add zeros after the numbers
str = beforeDec + afterDec + zeroes(exponent - afterDec.length);
} else {
// exponent is negative
if (+beforeDec < 0) {
prefix = '-0';
} else {
prefix = '0';
}
// tack on the decimal point if needed
if (precision > 0) {
prefix += '.';
}
zerosStr = zeroes((-1 * exponent) - 1);
// substring off the end to satisfy the precision
endStr = (zerosStr + Math.abs(beforeDec) + afterDec).substr(0, precision);
str = prefix + endStr;
}
// only add precision 0's if the exponent is positive
if (+exponent > 0 && precision > 0) {
str += '.' + zeroes(precision);
}
return str;
}
/**
* Implementation of toFixed() that treats floats more like decimals
*
* Fixes binary rounding issues (eg. (0.615).toFixed(2) === '0.61') that present
* problems for accounting- and finance-related software.
*
* Also removes negative signs for zero-formatted numbers. e.g. -0.01 w/ precision 1 -> 0.0
*/
function toFixed(value, precision, roundingFunction, optionals) {
var power = Math.pow(10, precision),
optionalsRegExp,
output;
if (value.toString().indexOf('e') > -1) {
// toFixed returns scientific notation for numbers above 1e21 and below 1e-7
output = toFixedLargeSmall(value, precision);
// remove the leading negative sign if it exists and should not be present (e.g. -0.00)
if (output.charAt(0) === '-' && +output >= 0) {
output = output.substr(1); // chop off the '-'
}
}
else {
// Multiply up by precision, round accurately, then divide and use native toFixed():
output = (roundingFunction(value + 'e+' + precision) / power).toFixed(precision);
}
if (optionals) {
optionalsRegExp = new RegExp('0{1,' + optionals + '}$');
output = output.replace(optionalsRegExp, '');
}
return output;
}
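// Example (illustrative): the decimal-style rounding above avoids the native
// binary-rounding surprise described in the doc comment, e.g.
// (0.615).toFixed(2) === '0.61' while toFixed(0.615, 2, Math.round) === '0.62'.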
/************************************
Formatting
************************************/
// determine what type of formatting we need to do
function formatNumbro(n, format, roundingFunction) {
var output,
escapedFormat = format.replace(/\{[^\{\}]*\}/g, '');
// figure out what kind of format we are dealing with
if (escapedFormat.indexOf('$') > -1) { // currency!!!!!
output = formatCurrency(n, format, roundingFunction);
} else if (escapedFormat.indexOf('%') > -1) { // percentage
output = formatPercentage(n, format, roundingFunction);
} else if (escapedFormat.indexOf(':') > -1) { // time
output = formatTime(n, format);
} else { // plain ol' numbers or bytes
output = formatNumber(n._value, format, roundingFunction);
}
// return string
return output;
}
// revert to number
function unformatNumbro(n, string) {
var stringOriginal = string,
thousandRegExp,
millionRegExp,
billionRegExp,
trillionRegExp,
bytesMultiplier = false,
power;
if (string.indexOf(':') > -1) {
n._value = unformatTime(string);
} else {
if (string === zeroFormat) {
n._value = 0;
} else {
if (cultures[currentCulture].delimiters.decimal !== '.') {
string = string.replace(/\./g, '').replace(cultures[currentCulture].delimiters.decimal, '.');
}
// see if abbreviations are there so that we can multiply to the correct number
thousandRegExp = new RegExp('[^a-zA-Z]' + cultures[currentCulture].abbreviations.thousand +
'(?:\\)|(\\' + cultures[currentCulture].currency.symbol + ')?(?:\\))?)?$');
millionRegExp = new RegExp('[^a-zA-Z]' + cultures[currentCulture].abbreviations.million +
'(?:\\)|(\\' + cultures[currentCulture].currency.symbol + ')?(?:\\))?)?$');
billionRegExp = new RegExp('[^a-zA-Z]' + cultures[currentCulture].abbreviations.billion +
'(?:\\)|(\\' + cultures[currentCulture].currency.symbol + ')?(?:\\))?)?$');
trillionRegExp = new RegExp('[^a-zA-Z]' + cultures[currentCulture].abbreviations.trillion +
'(?:\\)|(\\' + cultures[currentCulture].currency.symbol + ')?(?:\\))?)?$');
// see if bytes are there so that we can multiply to the correct number
for (power = 1; power < binarySuffixes.length && !bytesMultiplier; ++power) {
if (string.indexOf(binarySuffixes[power]) > -1) {
bytesMultiplier = Math.pow(1024, power);
} else if (string.indexOf(decimalSuffixes[power]) > -1) {
bytesMultiplier = Math.pow(1000, power);
}
}
var str = string.replace(/[^0-9\.]+/g, '');
if (str === '') {
// An empty string is not a number.
n._value = NaN;
} else {
// do some math to create our number
n._value = ((bytesMultiplier) ? bytesMultiplier : 1) *
((stringOriginal.match(thousandRegExp)) ? Math.pow(10, 3) : 1) *
((stringOriginal.match(millionRegExp)) ? Math.pow(10, 6) : 1) *
((stringOriginal.match(billionRegExp)) ? Math.pow(10, 9) : 1) *
((stringOriginal.match(trillionRegExp)) ? Math.pow(10, 12) : 1) *
((string.indexOf('%') > -1) ? 0.01 : 1) *
(((string.split('-').length +
Math.min(string.split('(').length - 1, string.split(')').length - 1)) % 2) ? 1 : -1) *
Number(str);
// round if we are talking about bytes
n._value = (bytesMultiplier) ? Math.ceil(n._value) : n._value;
}
}
}
return n._value;
}
function formatCurrency(n, originalFormat, roundingFunction) {
var format = originalFormat,
symbolIndex = format.indexOf('$'),
openParenIndex = format.indexOf('('),
plusSignIndex = format.indexOf('+'),
minusSignIndex = format.indexOf('-'),
space = '',
decimalSeparator = '',
spliceIndex,
output;
if(format.indexOf('$') === -1){
// Use defaults instead of the format provided
if (cultures[currentCulture].currency.position === 'infix') {
decimalSeparator = cultures[currentCulture].currency.symbol;
if (cultures[currentCulture].currency.spaceSeparated) {
decimalSeparator = ' ' + decimalSeparator + ' ';
}
} else if (cultures[currentCulture].currency.spaceSeparated) {
space = ' ';
}
} else {
// check for space before or after currency
if (format.indexOf(' $') > -1) {
space = ' ';
format = format.replace(' $', '');
} else if (format.indexOf('$ ') > -1) {
space = ' ';
format = format.replace('$ ', '');
} else {
format = format.replace('$', '');
}
}
// Format The Number
output = formatNumber(n._value, format, roundingFunction, decimalSeparator);
if (originalFormat.indexOf('$') === -1) {
// Use defaults instead of the format provided
switch (cultures[currentCulture].currency.position) {
case 'postfix':
if (output.indexOf(')') > -1) {
output = output.split('');
output.splice(-1, 0, space + cultures[currentCulture].currency.symbol);
output = output.join('');
} else {
output = output + space + cultures[currentCulture].currency.symbol;
}
break;
case 'infix':
break;
case 'prefix':
if (output.indexOf('(') > -1 || output.indexOf('-') > -1) {
output = output.split('');
spliceIndex = Math.max(openParenIndex, minusSignIndex) + 1;
output.splice(spliceIndex, 0, cultures[currentCulture].currency.symbol + space);
output = output.join('');
} else {
output = cultures[currentCulture].currency.symbol + space + output;
}
break;
default:
throw Error('Currency position should be among ["prefix", "infix", "postfix"]');
}
} else {
// position the symbol
if (symbolIndex <= 1) {
if (output.indexOf('(') > -1 || output.indexOf('+') > -1 || output.indexOf('-') > -1) {
output = output.split('');
spliceIndex = 1;
if (symbolIndex < openParenIndex || symbolIndex < plusSignIndex || symbolIndex < minusSignIndex) {
// the symbol appears before the "(", "+" or "-"
spliceIndex = 0;
}
output.splice(spliceIndex, 0, cultures[currentCulture].currency.symbol + space);
output = output.join('');
} else {
output = cultures[currentCulture].currency.symbol + space + output;
}
} else {
if (output.indexOf(')') > -1) {
output = output.split('');
output.splice(-1, 0, space + cultures[currentCulture].currency.symbol);
output = output.join('');
} else {
output = output + space + cultures[currentCulture].currency.symbol;
}
}
}
return output;
}
function formatPercentage(n, format, roundingFunction) {
var space = '',
output,
value = n._value * 100;
// check for space before %
if (format.indexOf(' %') > -1) {
space = ' ';
format = format.replace(' %', '');
} else {
format = format.replace('%', '');
}
output = formatNumber(value, format, roundingFunction);
if (output.indexOf(')') > -1) {
output = output.split('');
output.splice(-1, 0, space + '%');
output = output.join('');
} else {
output = output + space + '%';
}
return output;
}
function formatTime(n) {
var hours = Math.floor(n._value / 60 / 60),
minutes = Math.floor((n._value - (hours * 60 * 60)) / 60),
seconds = Math.round(n._value - (hours * 60 * 60) - (minutes * 60));
return hours + ':' +
((minutes < 10) ? '0' + minutes : minutes) + ':' +
((seconds < 10) ? '0' + seconds : seconds);
}
function unformatTime(string) {
var timeArray = string.split(':'),
seconds = 0;
// turn hours and minutes into seconds and add them all up
if (timeArray.length === 3) {
// hours
seconds = seconds + (Number(timeArray[0]) * 60 * 60);
// minutes
seconds = seconds + (Number(timeArray[1]) * 60);
// seconds
seconds = seconds + Number(timeArray[2]);
} else if (timeArray.length === 2) {
// minutes
seconds = seconds + (Number(timeArray[0]) * 60);
// seconds
seconds = seconds + Number(timeArray[1]);
}
return Number(seconds);
}
function formatByteUnits (value, suffixes, scale) {
var suffix = suffixes[0],
power,
min,
max,
abs = Math.abs(value);
if (abs >= scale) {
for (power = 1; power < suffixes.length; ++power) {
min = Math.pow(scale, power);
max = Math.pow(scale, power + 1);
if (abs >= min && abs < max) {
suffix = suffixes[power];
value = value / min;
break;
}
}
// values greater than or equal to [scale] YB never set the suffix
if (suffix === suffixes[0]) {
value = value / Math.pow(scale, suffixes.length - 1);
suffix = suffixes[suffixes.length - 1];
}
}
return { value: value, suffix: suffix };
}
function formatNumber (value, format, roundingFunction, sep) {
var negP = false,
signed = false,
optDec = false,
abbr = '',
abbrK = false, // force abbreviation to thousands
abbrM = false, // force abbreviation to millions
abbrB = false, // force abbreviation to billions
abbrT = false, // force abbreviation to trillions
abbrForce = false, // force abbreviation
bytes = '',
byteFormat,
units,
ord = '',
abs = Math.abs(value),
totalLength,
length,
minimumPrecision,
pow,
w,
intPrecision,
precision,
prefix,
postfix,
thousands,
d = '',
forcedNeg = false,
neg = false,
indexOpenP,
size,
indexMinus,
paren = '',
minlen,
i;
// check if number is zero and a custom zero format has been set
if (value === 0 && zeroFormat !== null) {
return zeroFormat;
}
if (!isFinite(value)) {
return '' + value;
}
if (format.indexOf('{') === 0) {
var end = format.indexOf('}');
if (end === -1) {
throw Error('Format should also contain a "}"');
}
prefix = format.slice(1, end);
format = format.slice(end + 1);
} else {
prefix = '';
}
if (format.indexOf('}') === format.length - 1) {
var start = format.indexOf('{');
if (start === -1) {
throw Error('Format should also contain a "{"');
}
postfix = format.slice(start + 1, -1);
format = format.slice(0, start + 1);
} else {
postfix = '';
}
// check for min length
var info;
if (format.indexOf('.') === -1) {
info = format.match(/([0-9]+).*/);
} else {
info = format.match(/([0-9]+)\..*/);
}
minlen = info === null ? -1 : info[1].length;
// see if we should use parentheses for negative number or if we should prefix with a sign
// if both are present we default to parentheses
if (format.indexOf('-') !== -1) {
forcedNeg = true;
}
if (format.indexOf('(') > -1) {
negP = true;
format = format.slice(1, -1);
} else if (format.indexOf('+') > -1) {
signed = true;
format = format.replace(/\+/g, '');
}
// see if abbreviation is wanted
if (format.indexOf('a') > -1) {
intPrecision = format.split('.')[0].match(/[0-9]+/g) || ['0'];
intPrecision = parseInt(intPrecision[0], 10);
// check if abbreviation is specified
abbrK = format.indexOf('aK') >= 0;
abbrM = format.indexOf('aM') >= 0;
abbrB = format.indexOf('aB') >= 0;
abbrT = format.indexOf('aT') >= 0;
abbrForce = abbrK || abbrM || abbrB || abbrT;
// check for space before abbreviation
if (format.indexOf(' a') > -1) {
abbr = ' ';
format = format.replace(' a', '');
} else {
format = format.replace('a', '');
}
totalLength = Math.floor(Math.log(abs) / Math.LN10) + 1;
minimumPrecision = totalLength % 3;
minimumPrecision = minimumPrecision === 0 ? 3 : minimumPrecision;
if (intPrecision && abs !== 0) {
length = Math.floor(Math.log(abs) / Math.LN10) + 1 - intPrecision;
pow = 3 * ~~((Math.min(intPrecision, totalLength) - minimumPrecision) / 3);
abs = abs / Math.pow(10, pow);
if (format.indexOf('.') === -1 && intPrecision > 3) {
format += '[.]';
size = length === 0 ? 0 : 3 * ~~(length / 3) - length;
size = size < 0 ? size + 3 : size;
format += zeroes(size);
}
}
if (Math.floor(Math.log(Math.abs(value)) / Math.LN10) + 1 !== intPrecision) {
if (abs >= Math.pow(10, 12) && !abbrForce || abbrT) {
// trillion
abbr = abbr + cultures[currentCulture].abbreviations.trillion;
value = value / Math.pow(10, 12);
} else if (abs < Math.pow(10, 12) && abs >= Math.pow(10, 9) && !abbrForce || abbrB) {
// billion
abbr = abbr + cultures[currentCulture].abbreviations.billion;
value = value / Math.pow(10, 9);
} else if (abs < Math.pow(10, 9) && abs >= Math.pow(10, 6) && !abbrForce || abbrM) {
// million
abbr = abbr + cultures[currentCulture].abbreviations.million;
value = value / Math.pow(10, 6);
} else if (abs < Math.pow(10, 6) && abs >= Math.pow(10, 3) && !abbrForce || abbrK) {
// thousand
abbr = abbr + cultures[currentCulture].abbreviations.thousand;
value = value / Math.pow(10, 3);
}
}
}
// see if we are formatting
// binary-decimal bytes (1024 MB), binary bytes (1024 MiB), or decimal bytes (1000 MB)
for (i = 0; i < byteFormatOrder.length; ++i) {
byteFormat = byteFormatOrder[i];
if (format.indexOf(byteFormat.marker) > -1) {
// check for space before
if (format.indexOf(' ' + byteFormat.marker) >-1) {
bytes = ' ';
}
// remove the marker (with the space if it had one)
format = format.replace(bytes + byteFormat.marker, '');
units = formatByteUnits(value, byteFormat.suffixes, byteFormat.scale);
value = units.value;
bytes = bytes + units.suffix;
break;
}
}
// see if ordinal is wanted
if (format.indexOf('o') > -1) {
// check for space before
if (format.indexOf(' o') > -1) {
ord = ' ';
format = format.replace(' o', '');
} else {
format = format.replace('o', '');
}
if (cultures[currentCulture].ordinal) {
ord = ord + cultures[currentCulture].ordinal(value);
}
}
if (format.indexOf('[.]') > -1) {
optDec = true;
format = format.replace('[.]', '.');
}
w = value.toString().split('.')[0];
precision = format.split('.')[1];
thousands = format.indexOf(',');
if (precision) {
if (precision.indexOf('*') !== -1) {
d = toFixed(value, value.toString().split('.')[1].length, roundingFunction);
} else {
if (precision.indexOf('[') > -1) {
precision = precision.replace(']', '');
precision = precision.split('[');
d = toFixed(value, (precision[0].length + precision[1].length), roundingFunction,
precision[1].length);
} else {
d = toFixed(value, precision.length, roundingFunction);
}
}
w = d.split('.')[0];
if (d.split('.')[1].length) {
var p = sep ? abbr + sep : cultures[currentCulture].delimiters.decimal;
d = p + d.split('.')[1];
} else {
d = '';
}
if (optDec && Number(d.slice(1)) === 0) {
d = '';
}
} else {
w = toFixed(value, 0, roundingFunction);
}
// format number
if (w.indexOf('-') > -1) {
w = w.slice(1);
neg = true;
}
if (w.length < minlen) {
w = zeroes(minlen - w.length) + w;
}
if (thousands > -1) {
w = w.toString().replace(/(\d)(?=(\d{3})+(?!\d))/g, '$1' +
cultures[currentCulture].delimiters.thousands);
}
if (format.indexOf('.') === 0) {
w = '';
}
indexOpenP = format.indexOf('(');
indexMinus = format.indexOf('-');
if (indexOpenP < indexMinus) {
paren = ((negP && neg) ? '(' : '') + (((forcedNeg && neg) || (!negP && neg)) ? '-' : '');
} else {
paren = (((forcedNeg && neg) || (!negP && neg)) ? '-' : '') + ((negP && neg) ? '(' : '');
}
return prefix +
paren + ((!neg && signed && value !== 0) ? '+' : '') +
w + d +
((ord) ? ord : '') +
((abbr && !sep) ? abbr : '') +
((bytes) ? bytes : '') +
((negP && neg) ? ')' : '') +
postfix;
}
/************************************
Top Level Functions
************************************/
numbro = function(input) {
if (numbro.isNumbro(input)) {
input = input.value();
} else if (input === 0 || typeof input === 'undefined') {
input = 0;
} else if (!Number(input)) {
input = numbro.fn.unformat(input);
}
return new Numbro(Number(input));
};
// version number
numbro.version = VERSION;
// compare numbro object
numbro.isNumbro = function(obj) {
return obj instanceof Numbro;
};
/**
* This function allows the user to set a new language with a fallback if
* the language does not exist. If no fallback language is provided,
* it falls back to English.
*
* @deprecated Since version 1.6.0. It will be deleted in version 2.0.
* `setCulture` should be used instead.
*/
numbro.setLanguage = function(newLanguage, fallbackLanguage) {
console.warn('`setLanguage` is deprecated since version 1.6.0. Use `setCulture` instead');
var key = newLanguage,
prefix = newLanguage.split('-')[0],
matchingLanguage = null;
if (!languages[key]) {
Object.keys(languages).forEach(function(language) {
if (!matchingLanguage && language.split('-')[0] === prefix) {
matchingLanguage = language;
}
});
key = matchingLanguage || fallbackLanguage || 'en-US';
}
chooseCulture(key);
};
/**
* This function allows the user to set a new culture with a fallback if
* the culture does not exist. If no fallback culture is provided,
* it falls back to "en-US".
*/
numbro.setCulture = function(newCulture, fallbackCulture) {
var key = newCulture,
suffix = newCulture.split('-')[1],
matchingCulture = null;
if (!cultures[key]) {
if (suffix) {
Object.keys(cultures).forEach(function(language) {
if (!matchingCulture && language.split('-')[1] === suffix) {
matchingCulture = language;
}
});
}
key = matchingCulture || fallbackCulture || 'en-US';
}
chooseCulture(key);
};
/**
* This function will load languages and then set the global language. If
* no arguments are passed in, it will simply return the current global
* language key.
*
* @deprecated Since version 1.6.0. It will be deleted in version 2.0.
* `culture` should be used instead.
*/
numbro.language = function(key, values) {
console.warn('`language` is deprecated since version 1.6.0. Use `culture` instead');
if (!key) {
return currentCulture;
}
if (key && !values) {
if (!languages[key]) {
throw new Error('Unknown language : ' + key);
}
chooseCulture(key);
}
if (values || !languages[key]) {
setCulture(key, values);
}
return numbro;
};
/**
* This function will load cultures and then set the global culture. If
* no arguments are passed in, it will simply return the current global
* culture code.
*/
numbro.culture = function(code, values) {
if (!code) {
return currentCulture;
}
if (code && !values) {
if (!cultures[code]) {
throw new Error('Unknown culture : ' + code);
}
chooseCulture(code);
}
if (values || !cultures[code]) {
setCulture(code, values);
}
return numbro;
};
/**
* This function provides access to the loaded language data. If
* no arguments are passed in, it will simply return the current
* global language object.
*
* @deprecated Since version 1.6.0. It will be deleted in version 2.0.
* `culture` should be used instead.
*/
numbro.languageData = function(key) {
console.warn('`languageData` is deprecated since version 1.6.0. Use `cultureData` instead');
if (!key) {
return languages[currentCulture];
}
if (!languages[key]) {
throw new Error('Unknown language : ' + key);
}
return languages[key];
};
/**
* This function provides access to the loaded culture data. If
* no arguments are passed in, it will simply return the current
* global culture object.
*/
numbro.cultureData = function(code) {
if (!code) {
return cultures[currentCulture];
}
if (!cultures[code]) {
throw new Error('Unknown culture : ' + code);
}
return cultures[code];
};
numbro.culture('en-US', enUS);
/**
* @deprecated Since version 1.6.0. It will be deleted in version 2.0.
* `cultures` should be used instead.
*/
numbro.languages = function() {
console.warn('`languages` is deprecated since version 1.6.0. Use `cultures` instead');
return languages;
};
numbro.cultures = function() {
return cultures;
};
numbro.zeroFormat = function(format) {
zeroFormat = typeof(format) === 'string' ? format : null;
};
numbro.defaultFormat = function(format) {
defaultFormat = typeof(format) === 'string' ? format : '0.0';
};
numbro.defaultCurrencyFormat = function (format) {
defaultCurrencyFormat = typeof(format) === 'string' ? format : '0$';
};
numbro.validate = function(val, culture) {
var _decimalSep,
_thousandSep,
_currSymbol,
_valArray,
_abbrObj,
_thousandRegEx,
cultureData,
temp;
//coerce val to string
if (typeof val !== 'string') {
val += '';
if (console.warn) {
console.warn('Numbro.js: Value is not a string. It has been coerced to: ', val);
}
}
//trim whitespaces from either sides
val = val.trim();
//replace the initial '+' or '-' sign if present
val = val.replace(/^[+-]?/, '');
//if val is just digits return true
if ( !! val.match(/^\d+$/)) {
return true;
}
//if val is empty return false
if (val === '') {
return false;
}
//get the decimal and thousands separator from numbro.cultureData
try {
//check if the culture is understood by numbro. if not, default it to current culture
cultureData = numbro.cultureData(culture);
} catch (e) {
cultureData = numbro.cultureData(numbro.culture());
}
//setup the delimiters and currency symbol based on culture
_currSymbol = cultureData.currency.symbol;
_abbrObj = cultureData.abbreviations;
_decimalSep = cultureData.delimiters.decimal;
if (cultureData.delimiters.thousands === '.') {
_thousandSep = '\\.';
} else {
_thousandSep = cultureData.delimiters.thousands;
}
// validating currency symbol
temp = val.match(/^[^\d\.\,]+/);
if (temp !== null) {
val = val.substr(1);
if (temp[0] !== _currSymbol) {
return false;
}
}
//validating abbreviation symbol
temp = val.match(/[^\d]+$/);
if (temp !== null) {
val = val.slice(0, -1);
if (temp[0] !== _abbrObj.thousand && temp[0] !== _abbrObj.million &&
temp[0] !== _abbrObj.billion && temp[0] !== _abbrObj.trillion) {
return false;
}
}
_thousandRegEx = new RegExp(_thousandSep + '{2}');
if (!val.match(/[^\d.,]/g)) {
_valArray = val.split(_decimalSep);
if (_valArray.length > 2) {
return false;
} else {
if (_valArray.length < 2) {
return ( !! _valArray[0].match(/^\d+.*\d$/) && !_valArray[0].match(_thousandRegEx));
} else {
if (_valArray[0] === '') {
// for values without leading zero eg. .984
return (!_valArray[0].match(_thousandRegEx) &&
!!_valArray[1].match(/^\d+$/));
} else if (_valArray[0].length === 1) {
return ( !! _valArray[0].match(/^\d+$/) &&
!_valArray[0].match(_thousandRegEx) &&
!! _valArray[1].match(/^\d+$/));
} else {
return ( !! _valArray[0].match(/^\d+.*\d$/) &&
!_valArray[0].match(_thousandRegEx) &&
!! _valArray[1].match(/^\d+$/));
}
}
}
}
return false;
};
/**
* @deprecated Since version 1.6.0. It will be deleted in version 2.0.
* `loadCulturesInNode` should be used instead.
*/
numbro.loadLanguagesInNode = function() {
console.warn('`loadLanguagesInNode` is deprecated since version 1.6.0. Use `loadCulturesInNode` instead');
numbro.loadCulturesInNode();
};
numbro.loadCulturesInNode = function() {
// TODO: Rename the folder in 2.0.0
var cultures = require('./languages');
for(var langLocaleCode in cultures) {
if(langLocaleCode) {
numbro.culture(langLocaleCode, cultures[langLocaleCode]);
}
}
};
/************************************
Helpers
************************************/
function setCulture(code, values) {
cultures[code] = values;
}
function chooseCulture(code) {
currentCulture = code;
var defaults = cultures[code].defaults;
if (defaults && defaults.format) {
numbro.defaultFormat(defaults.format);
}
if (defaults && defaults.currencyFormat) {
numbro.defaultCurrencyFormat(defaults.currencyFormat);
}
}
function inNodejsRuntime() {
return (typeof process !== 'undefined') &&
(process.browser === undefined) &&
(process.title.indexOf('node') === 0 || process.title === 'grunt' || process.title === 'gulp') &&
(typeof require !== 'undefined');
}
/************************************
Floating-point helpers
************************************/
// The floating-point helper functions and implementation
// borrows heavily from sinful.js: http://guipn.github.io/sinful.js/
/**
* Array.prototype.reduce for browsers that don't support it
* https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/Reduce#Compatibility
*/
if ('function' !== typeof Array.prototype.reduce) {
Array.prototype.reduce = function(callback, optInitialValue) {
if (null === this || 'undefined' === typeof this) {
// At the moment all modern browsers, that support strict mode, have
// native implementation of Array.prototype.reduce. For instance, IE8
// does not support strict mode, so this check is actually useless.
throw new TypeError('Array.prototype.reduce called on null or undefined');
}
if ('function' !== typeof callback) {
throw new TypeError(callback + ' is not a function');
}
var index,
value,
length = this.length >>> 0,
isValueSet = false;
if (1 < arguments.length) {
value = optInitialValue;
isValueSet = true;
}
for (index = 0; length > index; ++index) {
if (this.hasOwnProperty(index)) {
if (isValueSet) {
value = callback(value, this[index], index, this);
} else {
value = this[index];
isValueSet = true;
}
}
}
if (!isValueSet) {
throw new TypeError('Reduce of empty array with no initial value');
}
return value;
};
}
/**
* Computes the multiplier necessary to make x >= 1,
* effectively eliminating miscalculations caused by
* finite precision.
*/
function multiplier(x) {
var parts = x.toString().split('.');
if (parts.length < 2) {
return 1;
}
return Math.pow(10, parts[1].length);
}
/**
* Given a variable number of arguments, returns the maximum
* multiplier that must be used to normalize an operation involving
* all of them.
*/
function correctionFactor() {
var args = Array.prototype.slice.call(arguments);
return args.reduce(function(prev, next) {
var mp = multiplier(prev),
mn = multiplier(next);
return mp > mn ? mp : mn;
}, -Infinity);
}
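// Example (illustrative): for 0.1 and 0.2 the correction factor is 10, so the
// add() method below computes (0.1 * 10 + 0.2 * 10) / 10 === 0.3 rather than
// the raw float result 0.1 + 0.2 === 0.30000000000000004.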
/************************************
Numbro Prototype
************************************/
numbro.fn = Numbro.prototype = {
clone: function() {
return numbro(this);
},
format: function(inputString, roundingFunction) {
return formatNumbro(this,
inputString ? inputString : defaultFormat,
(roundingFunction !== undefined) ? roundingFunction : Math.round
);
},
formatCurrency: function(inputString, roundingFunction) {
return formatCurrency(this,
inputString ? inputString : defaultCurrencyFormat,
(roundingFunction !== undefined) ? roundingFunction : Math.round
);
},
unformat: function(inputString) {
if (typeof inputString === 'number') {
return inputString;
} else if (typeof inputString === 'string') {
var result = unformatNumbro(this, inputString);
// Any unparseable string (represented as NaN in the result) is
// converted into undefined.
return isNaN(result) ? undefined : result;
} else {
return undefined;
}
},
binaryByteUnits: function() {
return formatByteUnits(this._value, bytes.binary.suffixes, bytes.binary.scale).suffix;
},
byteUnits: function() {
return formatByteUnits(this._value, bytes.general.suffixes, bytes.general.scale).suffix;
},
decimalByteUnits: function() {
return formatByteUnits(this._value, bytes.decimal.suffixes, bytes.decimal.scale).suffix;
},
value: function() {
return this._value;
},
valueOf: function() {
return this._value;
},
set: function(value) {
this._value = Number(value);
return this;
},
add: function(value) {
var corrFactor = correctionFactor.call(null, this._value, value);
function cback(accum, curr) {
return accum + corrFactor * curr;
}
this._value = [this._value, value].reduce(cback, 0) / corrFactor;
return this;
},
subtract: function(value) {
var corrFactor = correctionFactor.call(null, this._value, value);
function cback(accum, curr) {
return accum - corrFactor * curr;
}
this._value = [value].reduce(cback, this._value * corrFactor) / corrFactor;
return this;
},
multiply: function(value) {
function cback(accum, curr) {
var corrFactor = correctionFactor(accum, curr),
result = accum * corrFactor;
result *= curr * corrFactor;
result /= corrFactor * corrFactor;
return result;
}
this._value = [this._value, value].reduce(cback, 1);
return this;
},
divide: function(value) {
function cback(accum, curr) {
var corrFactor = correctionFactor(accum, curr);
return (accum * corrFactor) / (curr * corrFactor);
}
this._value = [this._value, value].reduce(cback);
return this;
},
difference: function(value) {
return Math.abs(numbro(this._value).subtract(value).value());
}
};
/************************************
Exposing Numbro
************************************/
if (inNodejsRuntime()) {
//Todo: Rename the folder in 2.0.0
numbro.loadCulturesInNode();
}
// CommonJS module is defined
if (hasModule) {
module.exports = numbro;
} else {
/*global ender:false */
if (typeof ender === 'undefined') {
// here, `this` means `window` in the browser, or `global` on the server
// add `numbro` as a global object via a string identifier,
// for Closure Compiler 'advanced' mode
this.numbro = numbro;
}
/*global define:false */
if (typeof define === 'function' && define.amd) {
define([], function() {
return numbro;
});
}
}
}.call(typeof window === 'undefined' ? this : window));
|
PaulZadorozhniy/handsontable
|
dist/numbro/numbro.js
|
JavaScript
|
mit
| 43,505
|
/*!
* Angular Material Design
* https://github.com/angular/material
* @license MIT
* v1.0.1
*/
.md-dialog-is-showing {
max-height: 100%; }
.md-dialog-container {
display: -webkit-flex;
display: -ms-flexbox;
display: flex;
-webkit-justify-content: center;
-ms-flex-pack: center;
justify-content: center;
-webkit-align-items: center;
-ms-flex-align: center;
align-items: center;
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 80;
overflow: hidden; }
md-dialog {
opacity: 0;
min-width: 240px;
max-width: 80%;
max-height: 80%;
position: relative;
overflow: auto;
box-shadow: 0px 7px 8px -4px rgba(0, 0, 0, 0.2), 0px 13px 19px 2px rgba(0, 0, 0, 0.14), 0px 5px 24px 4px rgba(0, 0, 0, 0.12);
display: -webkit-flex;
display: -ms-flexbox;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
flex-direction: column; }
md-dialog.md-transition-in {
opacity: 1;
transition: all 0.4s cubic-bezier(0.25, 0.8, 0.25, 1);
-webkit-transform: translate3d(0, 0, 0) scale(1);
transform: translate3d(0, 0, 0) scale(1); }
md-dialog.md-transition-out {
opacity: 0;
transition: all 0.4s cubic-bezier(0.25, 0.8, 0.25, 1);
-webkit-transform: translate3d(0, 100%, 0) scale(0.2);
transform: translate3d(0, 100%, 0) scale(0.2); }
md-dialog > form {
display: -webkit-flex;
display: -ms-flexbox;
display: flex;
-webkit-flex-direction: column;
-ms-flex-direction: column;
flex-direction: column;
overflow: auto; }
md-dialog .md-dialog-content {
padding: 24px; }
md-dialog md-dialog-content {
-webkit-order: 1;
-ms-flex-order: 1;
order: 1;
-webkit-flex-direction: column;
-ms-flex-direction: column;
flex-direction: column;
overflow: auto;
-webkit-overflow-scrolling: touch; }
md-dialog md-dialog-content:not([layout=row]) > *:first-child:not(.md-subheader) {
margin-top: 0; }
md-dialog md-dialog-content:focus {
outline: none; }
md-dialog md-dialog-content .md-subheader {
margin: 0; }
md-dialog md-dialog-content .md-subheader.sticky-clone {
box-shadow: 0 2px 4px 0 rgba(0, 0, 0, 0.16); }
md-dialog md-dialog-content.sticky-container {
padding: 0; }
md-dialog md-dialog-content.sticky-container > div {
padding: 24px;
padding-top: 0; }
md-dialog md-dialog-content .md-dialog-content-body {
width: 100%; }
md-dialog .md-actions, md-dialog md-dialog-actions {
display: -webkit-flex;
display: -ms-flexbox;
display: flex;
-webkit-order: 2;
-ms-flex-order: 2;
order: 2;
box-sizing: border-box;
-webkit-align-items: center;
-ms-flex-align: center;
align-items: center;
-webkit-justify-content: flex-end;
-ms-flex-pack: end;
justify-content: flex-end;
margin-bottom: 0;
padding-right: 8px;
padding-left: 16px;
min-height: 52px;
overflow: hidden; }
md-dialog .md-actions .md-button, md-dialog md-dialog-actions .md-button {
margin-bottom: 8px;
margin-left: 8px;
margin-right: 0;
margin-top: 8px; }
md-dialog.md-content-overflow .md-actions, md-dialog.md-content-overflow md-dialog-actions {
border-top-width: 1px;
border-top-style: solid; }
@media screen and (-ms-high-contrast: active) {
md-dialog {
border: 1px solid #fff; } }
@media (max-width: 959px) {
md-dialog.md-dialog-fullscreen {
min-height: 100%;
min-width: 100%;
border-radius: 0; } }
|
ac-adekunle/secondlead
|
vendor/assets/javascripts/angular-material/modules/js/dialog/dialog.css
|
CSS
|
mit
| 3,672
|
#include <sstream>
#include <algorithm>
#include <iostream>
#include <cstring>
#include "node.hpp"
#include "constants.hpp"
#include "error.hpp"
#include "sass_values.h"
#ifdef _WIN32
#include <stdlib.h>
#define realpath(N,R) _fullpath((R),(N),_MAX_PATH)
#endif
namespace Sass {
using namespace std;
using namespace Constants;
using std::strlen;
using std::strcpy;
// ------------------------------------------------------------------------
// Node method implementations
// ------------------------------------------------------------------------
void Node::flatten()
{
switch (type())
{
case block:
case mixin_call:
case mixin_content:
case root:
case if_directive:
case for_through_directive:
case for_to_directive:
case each_directive:
case while_directive:
break;
default:
return;
}
// size can change during flattening, so we need to call size() on each pass
for (size_t i = 0; i < size(); ++i) {
switch (at(i).type())
{
case mixin_call:
case mixin_content:
case block:
case if_directive:
case for_through_directive:
case for_to_directive:
case each_directive:
case while_directive: {
Node expn(at(i));
if (expn.has_expansions()) expn.flatten();
ip_->has_statements |= expn.has_statements();
ip_->has_comments |= expn.has_comments();
ip_->has_blocks |= expn.has_blocks();
ip_->has_expansions |= expn.has_expansions();
// TO DO: make this more efficient -- replace with a dummy node instead of erasing
ip_->children.erase(begin() + i);
insert(begin() + i, expn.begin(), expn.end());
// skip over what we just spliced in
i += expn.size() - 1;
} break;
default: {
} break;
}
}
}
string Node::unquote() const
{
switch (type())
{
case string_constant:
case identifier: {
return token().unquote();
} break;
default: {
// do nothing; fall though to the rest
} break;
}
string intermediate(to_string());
if (!intermediate.empty() && (intermediate[0] == '"' || intermediate[0] == '\'')) {
return intermediate.substr(1, intermediate.length() - 2);
}
else {
return intermediate;
}
}
string Node::debug_info_path() const
{
char* c_abs_path = realpath( path().c_str(), NULL);
string abs_path(c_abs_path);
free(c_abs_path); // realpath(..., NULL) allocates with malloc, so release with free
return abs_path;
}
bool Node::operator==(Node rhs) const
{
Type t = type(), u = rhs.type();
// if ((t == identifier || t == string_constant || t == string_schema || t == concatenation) &&
// (u == identifier || u == string_constant || u == string_schema || u == concatenation)) {
// return unquote() == rhs.unquote();
// }
if (is_string() && rhs.is_string()) {
return unquote() == rhs.unquote();
}
else if (t != u) {
return false;
}
switch (t)
{
case list:
case expression:
case term:
case numeric_color: {
if (size() != rhs.size()) return false;
if ((t == list) && (is_comma_separated() != rhs.is_comma_separated())) return false;
for (size_t i = 0, L = size(); i < L; ++i) {
if (at(i) == rhs[i]) continue;
else return false;
}
return true;
} break;
case variable:
case identifier:
case uri:
case textual_percentage:
case textual_dimension:
case textual_number:
case textual_hex:
case string_constant: {
return token().unquote() == rhs.token().unquote();
} break;
case number:
case numeric_percentage: {
return numeric_value() == rhs.numeric_value();
} break;
case numeric_dimension: {
if (unit() == rhs.unit()) {
return numeric_value() == rhs.numeric_value();
}
else {
return false;
}
} break;
case boolean: {
return boolean_value() == rhs.boolean_value();
} break;
case selector: {
if (has_children() && rhs.has_children() && (size() == rhs.size())) {
for (size_t i = 0, S = size(); i < S; ++i) {
if (at(i) == rhs[i]) continue;
else return false;
}
return true;
}
else {
return false;
}
} break;
case simple_selector: {
if (token() == rhs.token()) return true;
} break;
default: {
return false;
} break;
}
return false;
}
bool Node::operator!=(Node rhs) const
{ return !(*this == rhs); }
bool Node::operator<(Node rhs) const
{
Type lhs_type = type();
Type rhs_type = rhs.type();
// comparing atomic numbers
if ((lhs_type == number && rhs_type == number) ||
(lhs_type == numeric_percentage && rhs_type == numeric_percentage)) {
return numeric_value() < rhs.numeric_value();
}
// comparing numbers with units
else if (lhs_type == numeric_dimension && rhs_type == numeric_dimension) {
if (unit() == rhs.unit()) {
return numeric_value() < rhs.numeric_value();
}
else {
throw Error(Error::evaluation, path(), line(), "incompatible units");
}
}
// comparing colors
else if (lhs_type == numeric_color && rhs_type == numeric_color) {
return lexicographical_compare(begin(), end(), rhs.begin(), rhs.end());
}
// comparing identifiers and strings (treat them as comparable)
else if ((is_string() && rhs.is_string()) ||
(lhs_type == value && rhs_type == value)) {
return unquote() < rhs.unquote();
}
// else if ((lhs_type == identifier || lhs_type == string_constant || lhs_type == value) &&
// (rhs_type == identifier || lhs_type == string_constant || rhs_type == value)) {
// return token().unquote() < rhs.token().unquote();
// }
// COMPARING SELECTORS -- IMPORTANT FOR ORDERING AND NORMALIZING
else if ((type() >= selector_group && type() <=selector_schema) &&
(rhs.type() >= selector_group && rhs.type() <=selector_schema)) {
// if they're not the same kind, just compare type tags
if (type() != rhs.type()) return type() < rhs.type();
// otherwise we have to do more work
switch (type())
{
case simple_selector:
case pseudo: {
return token() < rhs.token();
} break;
// assumes selectors are normalized by the time they're compared
case selector:
case simple_selector_sequence:
case attribute_selector:
case functional_pseudo:
case pseudo_negation: {
return lexicographical_compare(begin(), end(), rhs.begin(), rhs.end());
} break;
default: {
return false;
} break;
}
}
// END OF SELECTOR COMPARISON
// catch-all
else {
throw Error(Error::evaluation, path(), line(), "incomparable types");
}
}
bool Node::operator<=(Node rhs) const
{ return *this < rhs || *this == rhs; }
bool Node::operator>(Node rhs) const
{ return !(*this <= rhs); }
bool Node::operator>=(Node rhs) const
{ return !(*this < rhs); }
// Converting nodes to C-structs, for C callbacks. Allocates memory with
// malloc in the case of strings and lists. (Strings don't strictly need to
// be malloc'ed at this point, but it's better to do it for consistency,
// because clients of the C-structs will need to malloc their own strings too.)
Sass_Value Node::to_c_val()
{
Sass_Value v;
switch (type())
{
case boolean: {
v.boolean.tag = SASS_BOOLEAN;
v.boolean.value = boolean_value();
} break;
case number: {
v.number.tag = SASS_NUMBER;
v.number.value = numeric_value();
} break;
case numeric_percentage: {
v.percentage.tag = SASS_PERCENTAGE;
v.percentage.value = numeric_value();
} break;
case numeric_dimension: {
v.dimension.tag = SASS_DIMENSION;
v.dimension.value = numeric_value();
v.dimension.unit = strdup(string(unit().begin, unit().end - unit().begin).c_str());
} break;
case numeric_color: {
v.color.tag = SASS_COLOR;
v.color.r = at(0).numeric_value();
v.color.g = at(1).numeric_value();
v.color.b = at(2).numeric_value();
v.color.a = at(3).numeric_value();
} break;
case list: {
v.list.tag = SASS_LIST;
v.list.separator = is_comma_separated() ? SASS_COMMA : SASS_SPACE;
size_t S = size();
v.list.length = S;
v.list.values = (Sass_Value*) malloc(sizeof(Sass_Value)*S);
Sass_Value* values = v.list.values;
for (size_t i = 0; i < S; ++i) {
values[i] = at(i).to_c_val();
}
} break;
default: { // should only be string-like things at this point
v.string.tag = SASS_STRING;
v.string.value = strdup(to_string().c_str());
} break;
}
return v;
}
// ------------------------------------------------------------------------
// Token method implementations
// ------------------------------------------------------------------------
string Token::unquote() const
{
string result;
const char* p = begin;
if (*begin == '\'' || *begin == '"') {
++p;
while (p < end) {
if (*p == '\\') {
switch (*(++p)) {
case 'n': result += '\n'; break;
case 't': result += '\t'; break;
case 'b': result += '\b'; break;
case 'r': result += '\r'; break;
case 'f': result += '\f'; break;
case 'v': result += '\v'; break;
case 'a': result += '\a'; break;
case '\\': result += '\\'; break;
default: result += *p; break;
}
}
else if (p == end - 1) {
return result;
}
else {
result += *p;
}
++p;
}
return result;
}
else {
while (p < end) {
result += *(p++);
}
return result;
}
}
void Token::unquote_to_stream(std::stringstream& buf) const
{
const char* p = begin;
if (*begin == '\'' || *begin == '"') {
++p;
while (p < end) {
if (*p == '\\') {
switch (*(++p)) {
case 'n': buf << '\n'; break;
case 't': buf << '\t'; break;
case 'b': buf << '\b'; break;
case 'r': buf << '\r'; break;
case 'f': buf << '\f'; break;
case 'v': buf << '\v'; break;
case 'a': buf << '\a'; break;
case '\\': buf << '\\'; break;
default: buf << *p; break;
}
}
else if (p == end - 1) {
return;
}
else {
buf << *p;
}
++p;
}
return;
}
else {
while (p < end) {
buf << *(p++);
}
return;
}
}
bool Token::operator<(const Token& rhs) const
{
const char* first1 = begin;
const char* last1 = end;
const char* first2 = rhs.begin;
const char* last2 = rhs.end;
while (first1!=last1)
{
if (first2 == last2 || *first2 < *first1) return false;
else if (*first1 < *first2) return true;
++first1; ++first2;
}
return (first2 != last2);
}
bool Token::operator==(const Token& rhs) const
{
if (length() != rhs.length()) return false;
if ((begin[0] == '"' || begin[0] == '\'') &&
(rhs.begin[0] == '"' || rhs.begin[0] == '\''))
{ return unquote() == rhs.unquote(); }
const char* p = begin;
const char* q = rhs.begin;
for (; p < end; ++p, ++q) if (*p != *q) return false;
return true;
}
// ------------------------------------------------------------------------
// Node_Impl method implementations
// ------------------------------------------------------------------------
double Node_Impl::numeric_value()
{
switch (type)
{
case Node::number:
case Node::numeric_percentage:
return value.numeric;
case Node::numeric_dimension:
return value.dimension.numeric;
default:
break;
// throw an exception?
}
// if you reach this point, you've got a logic error somewhere
return 0;
}
Token Node_Impl::unit()
{
switch (type)
{
case Node::numeric_percentage: {
return Token::make(percent_str);
} break;
case Node::numeric_dimension: {
return value.dimension.unit;
} break;
default: break;
}
return Token::make(empty_str);
}
}
|
Voxer/blog
|
wp-content/themes/voxer/node_modules/node-sass/libsass/old/node.cpp
|
C++
|
gpl-2.0
| 12,909
|
/*
* Just-In-Time compiler for BPF filters on MIPS
*
* Copyright (c) 2014 Imagination Technologies Ltd.
* Author: Markos Chandras <markos.chandras@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*/
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>
#include "bpf_jit.h"
/* ABI
* r_skb_hl SKB header length
* r_data SKB data pointer
* r_off Offset
* r_A BPF register A
* r_X BPF register X
* r_skb *skb
* r_M *scratch memory
* r_skb_len SKB length
*
* On entry (*bpf_func)(*skb, *filter)
* a0 = MIPS_R_A0 = skb;
* a1 = MIPS_R_A1 = filter;
*
* Stack
* ...
* M[15]
* M[14]
* M[13]
* ...
* M[0] <-- r_M
* saved reg k-1
* saved reg k-2
* ...
* saved reg 0 <-- r_sp
* <no argument area>
*
* Packet layout
*
* <--------------------- len ------------------------>
* <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
* ----------------------------------------------------
* | skb->data |
* ----------------------------------------------------
*/
#define ptr typeof(unsigned long)
#define SCRATCH_OFF(k) (4 * (k))
/* JIT flags */
#define SEEN_CALL (1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
#define SEEN_OFF SEEN_SREG(2)
#define SEEN_A SEEN_SREG(3)
#define SEEN_X SEEN_SREG(4)
#define SEEN_SKB SEEN_SREG(5)
#define SEEN_MEM SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
/* Arguments used by JIT */
#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
/**
* struct jit_ctx - JIT context
* @skf: The sk_filter
* @prologue_bytes: Number of bytes for prologue
* @idx: Instruction index
* @flags: JIT flags
* @offsets: Instruction offsets
* @target: Memory location for the compiled filter
*/
struct jit_ctx {
const struct bpf_prog *skf;
unsigned int prologue_bytes;
u32 idx;
u32 flags;
u32 *offsets;
u32 *target;
};
static inline int optimize_div(u32 *k)
{
/* power of 2 divides can be implemented with right shift */
if (!(*k & (*k-1))) {
*k = ilog2(*k);
return 1;
}
return 0;
}
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...) \
do { \
if ((ctx)->target != NULL) { \
u32 *p = &(ctx)->target[ctx->idx]; \
uasm_i_##func(&p, ##__VA_ARGS__); \
} \
(ctx)->idx++; \
} while (0)
/*
* Similar to emit_instr but it must be used when we need to emit
* 32-bit or 64-bit instructions
*/
#define emit_long_instr(ctx, func, ...) \
do { \
if ((ctx)->target != NULL) { \
u32 *p = &(ctx)->target[ctx->idx]; \
UASM_i_##func(&p, ##__VA_ARGS__); \
} \
(ctx)->idx++; \
} while (0)
/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
return !(imm >= SBIT(15) || imm < -SBIT(15));
}
static inline void emit_addu(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, addu, dst, src1, src2);
}
static inline void emit_nop(struct jit_ctx *ctx)
{
emit_instr(ctx, nop);
}
/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
if (ctx->target != NULL) {
/* addiu can only handle s16 */
if (!is_range16(imm)) {
u32 *p = &ctx->target[ctx->idx];
uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
p = &ctx->target[ctx->idx + 1];
uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
} else {
u32 *p = &ctx->target[ctx->idx];
uasm_i_addiu(&p, dst, r_zero, imm);
}
}
ctx->idx++;
if (!is_range16(imm))
ctx->idx++;
}
static inline void emit_or(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, or, dst, src1, src2);
}
static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
struct jit_ctx *ctx)
{
if (imm >= BIT(16)) {
emit_load_imm(r_tmp, imm, ctx);
emit_or(dst, src, r_tmp, ctx);
} else {
emit_instr(ctx, ori, dst, src, imm);
}
}
static inline void emit_daddiu(unsigned int dst, unsigned int src,
int imm, struct jit_ctx *ctx)
{
/*
* Only used for stack, so the imm is relatively small
* and it fits in 15-bits
*/
emit_instr(ctx, daddiu, dst, src, imm);
}
static inline void emit_addiu(unsigned int dst, unsigned int src,
u32 imm, struct jit_ctx *ctx)
{
if (!is_range16(imm)) {
emit_load_imm(r_tmp, imm, ctx);
emit_addu(dst, r_tmp, src, ctx);
} else {
emit_instr(ctx, addiu, dst, src, imm);
}
}
static inline void emit_and(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, and, dst, src1, src2);
}
static inline void emit_andi(unsigned int dst, unsigned int src,
u32 imm, struct jit_ctx *ctx)
{
/* If imm does not fit in u16 then load it to register */
if (imm >= BIT(16)) {
emit_load_imm(r_tmp, imm, ctx);
emit_and(dst, src, r_tmp, ctx);
} else {
emit_instr(ctx, andi, dst, src, imm);
}
}
static inline void emit_xor(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, xor, dst, src1, src2);
}
static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
/* If imm does not fit in u16 then load it to register */
if (imm >= BIT(16)) {
emit_load_imm(r_tmp, imm, ctx);
emit_xor(dst, src, r_tmp, ctx);
} else {
emit_instr(ctx, xori, dst, src, imm);
}
}
static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}
static inline void emit_subu(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, subu, dst, src1, src2);
}
static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
emit_subu(reg, r_zero, reg, ctx);
}
static inline void emit_sllv(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
emit_instr(ctx, sllv, dst, src, sa);
}
static inline void emit_sll(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
/* sa is 5-bits long */
if (sa >= BIT(5))
/* Shifting >= 32 results in zero */
emit_jit_reg_move(dst, r_zero, ctx);
else
emit_instr(ctx, sll, dst, src, sa);
}
static inline void emit_srlv(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
emit_instr(ctx, srlv, dst, src, sa);
}
static inline void emit_srl(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
/* sa is 5-bits long */
if (sa >= BIT(5))
/* Shifting >= 32 results in zero */
emit_jit_reg_move(dst, r_zero, ctx);
else
emit_instr(ctx, srl, dst, src, sa);
}
static inline void emit_slt(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, slt, dst, src1, src2);
}
static inline void emit_sltu(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, sltu, dst, src1, src2);
}
static inline void emit_sltiu(unsigned dst, unsigned int src,
unsigned int imm, struct jit_ctx *ctx)
{
/* 16 bit immediate */
if (!is_range16((s32)imm)) {
emit_load_imm(r_tmp, imm, ctx);
emit_sltu(dst, src, r_tmp, ctx);
} else {
emit_instr(ctx, sltiu, dst, src, imm);
}
}
/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
unsigned int offset,
struct jit_ctx *ctx)
{
emit_long_instr(ctx, SW, reg, offset, base);
}
static inline void emit_store(ptr reg, ptr base, unsigned int offset,
struct jit_ctx *ctx)
{
emit_instr(ctx, sw, reg, offset, base);
}
static inline void emit_load_stack_reg(ptr reg, ptr base,
unsigned int offset,
struct jit_ctx *ctx)
{
emit_long_instr(ctx, LW, reg, offset, base);
}
static inline void emit_load(unsigned int reg, unsigned int base,
unsigned int offset, struct jit_ctx *ctx)
{
emit_instr(ctx, lw, reg, offset, base);
}
static inline void emit_load_byte(unsigned int reg, unsigned int base,
unsigned int offset, struct jit_ctx *ctx)
{
emit_instr(ctx, lb, reg, offset, base);
}
static inline void emit_half_load(unsigned int reg, unsigned int base,
unsigned int offset, struct jit_ctx *ctx)
{
emit_instr(ctx, lh, reg, offset, base);
}
static inline void emit_mul(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
emit_instr(ctx, mul, dst, src1, src2);
}
static inline void emit_div(unsigned int dst, unsigned int src,
struct jit_ctx *ctx)
{
if (ctx->target != NULL) {
u32 *p = &ctx->target[ctx->idx];
uasm_i_divu(&p, dst, src);
p = &ctx->target[ctx->idx + 1];
uasm_i_mflo(&p, dst);
}
ctx->idx += 2; /* 2 insts */
}
static inline void emit_mod(unsigned int dst, unsigned int src,
struct jit_ctx *ctx)
{
if (ctx->target != NULL) {
u32 *p = &ctx->target[ctx->idx];
uasm_i_divu(&p, dst, src);
p = &ctx->target[ctx->idx + 1];
uasm_i_mfhi(&p, dst);
}
ctx->idx += 2; /* 2 insts */
}
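/*
 * On MIPS32, divu leaves the quotient in the LO register and the
 * remainder in HI, so emit_div() above reads LO with mflo while
 * emit_mod() reads HI with mfhi after the same divu.
 */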
static inline void emit_dsll(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
emit_instr(ctx, dsll, dst, src, sa);
}
static inline void emit_dsrl32(unsigned int dst, unsigned int src,
unsigned int sa, struct jit_ctx *ctx)
{
emit_instr(ctx, dsrl32, dst, src, sa);
}
static inline void emit_wsbh(unsigned int dst, unsigned int src,
struct jit_ctx *ctx)
{
emit_instr(ctx, wsbh, dst, src);
}
/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
int imm, struct jit_ctx *ctx)
{
/* src contains the base addr of the 32/64-bit pointer */
emit_long_instr(ctx, LW, dst, imm, src);
}
/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
} else {
emit_load_imm(reg, imm, ctx);
}
}
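/*
 * Illustrative arithmetic for the 64-bit path above: the upper 32 bits
 * of the address are loaded first, then two dsll-by-16/ori pairs merge
 * in bits 31..16 and bits 15..0, i.e.
 *   reg = ((((imm >> 32) << 16) | ((imm >> 16) & 0xffff)) << 16)
 *         | (imm & 0xffff)
 * which reassembles the full 64-bit function pointer.
 */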
/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
emit_long_instr(ctx, ADDU, dst, src, r_zero);
}
/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
emit_addu(dst, src, r_zero, ctx);
}
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
if (ctx->target == NULL)
return 0;
/*
* We want a pc-relative branch. We only do forward branches
* so tgt is always after pc. tgt is the instruction offset
* we want to jump to.
* Branch on MIPS:
* I: target_offset <- sign_extend(offset)
* I+1: PC += target_offset (delay slot)
*
* ctx->idx currently points to the branch instruction
* but the offset is added to the delay slot so we need
* to subtract 4.
*/
return ctx->offsets[tgt] -
(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
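/*
 * Worked example (illustrative): if the first pass recorded
 * ctx->offsets[tgt] == 64 and the branch being emitted sits at
 * body-relative byte 40 (ctx->idx * 4 - ctx->prologue_bytes == 40),
 * b_imm() returns 64 - 40 - 4 = 20, the byte offset applied from the
 * delay slot.
 */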
static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
unsigned int imm, struct jit_ctx *ctx)
{
if (ctx->target != NULL) {
u32 *p = &ctx->target[ctx->idx];
switch (cond) {
case MIPS_COND_EQ:
uasm_i_beq(&p, reg1, reg2, imm);
break;
case MIPS_COND_NE:
uasm_i_bne(&p, reg1, reg2, imm);
break;
case MIPS_COND_ALL:
uasm_i_b(&p, imm);
break;
default:
pr_warn("%s: Unhandled branch conditional: %d\n",
__func__, cond);
}
}
ctx->idx++;
}
static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}
static inline void emit_jalr(unsigned int link, unsigned int reg,
struct jit_ctx *ctx)
{
emit_instr(ctx, jalr, link, reg);
}
static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
emit_instr(ctx, jr, reg);
}
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
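/*
 * Example (illustrative): align_sp(20) returns 24 on 32-bit kernels
 * (8-byte alignment) and 32 on 64-bit kernels (16-byte alignment).
 */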
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
int i = 0, real_off = 0;
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
while (tmp_flags) {
if ((sflags >> i) & 0x1) {
emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
ctx);
real_off += SZREG;
}
i++;
tmp_flags >>= 1;
}
/* save return address */
if (ctx->flags & SEEN_CALL) {
emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
real_off += SZREG;
}
/* Setup r_M leaving the alignment gap if necessary */
if (ctx->flags & SEEN_MEM) {
if (real_off % (SZREG * 2))
real_off += SZREG;
emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
}
}
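/*
 * Example (illustrative): if only s0 and s2 were marked as seen,
 * sflags == 0b101, so s0 is stored at sp + 0 and s2 at sp + SZREG;
 * real_off only advances for registers that are actually saved.
 */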
static void restore_bpf_jit_regs(struct jit_ctx *ctx,
unsigned int offset)
{
int i, real_off = 0;
u32 sflags, tmp_flags;
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is a bitmap */
i = 0;
while (tmp_flags) {
if ((sflags >> i) & 0x1) {
emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
ctx);
real_off += SZREG;
}
i++;
tmp_flags >>= 1;
}
/* restore return address */
if (ctx->flags & SEEN_CALL)
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scratch memory */
emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
int sp_off = 0;
/* How many s* regs do we need to preserve? */
sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
if (ctx->flags & SEEN_MEM)
sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
if (ctx->flags & SEEN_CALL)
sp_off += SZREG; /* Space for our ra register */
return sp_off;
}
static void build_prologue(struct jit_ctx *ctx)
{
int sp_off;
/* Calculate the total offset for the stack pointer */
sp_off = get_stack_depth(ctx);
save_bpf_jit_regs(ctx, sp_off);
if (ctx->flags & SEEN_SKB)
emit_reg_move(r_skb, MIPS_R_A0, ctx);
if (ctx->flags & SEEN_SKB_DATA) {
/* Load packet length */
emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
ctx);
emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
ctx);
/* Load the data pointer */
emit_load_ptr(r_skb_data, r_skb,
offsetof(struct sk_buff, data), ctx);
/* Load the header length */
emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
}
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
/* Do not leak kernel data to userspace */
if (bpf_needs_clear_a(&ctx->skf->insns[0]))
emit_jit_reg_move(r_A, r_zero, ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
unsigned int sp_off;
/* Calculate the total offset for the stack pointer */
sp_off = get_stack_depth(ctx);
restore_bpf_jit_regs(ctx, sp_off);
/* Return */
emit_jr(r_ra, ctx);
emit_nop(ctx);
}
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
func##_positive)
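/*
 * CHOOSE_LOAD_FUNC picks the skb-load helper variant from the constant
 * offset K: a non-negative K uses func##_positive, a negative K at or
 * above SKF_LL_OFF uses func##_negative, and anything below SKF_LL_OFF
 * falls back to the plain helper.
 */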
static int build_body(struct jit_ctx *ctx)
{
const struct bpf_prog *prog = ctx->skf;
const struct sock_filter *inst;
unsigned int i, off, condt;
u32 k, b_off __maybe_unused;
u8 (*sk_load_func)(unsigned long *skb, int offset);
for (i = 0; i < prog->len; i++) {
u16 code;
inst = &(prog->insns[i]);
pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
__func__, inst->code, inst->jt, inst->jf, inst->k);
k = inst->k;
code = bpf_anc_helper(inst);
if (ctx->target == NULL)
ctx->offsets[i] = ctx->idx * 4;
switch (code) {
case BPF_LD | BPF_IMM:
/* A <- k ==> li r_A, k */
ctx->flags |= SEEN_A;
emit_load_imm(r_A, k, ctx);
break;
case BPF_LD | BPF_W | BPF_LEN:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
/* A <- len ==> lw r_A, offset(skb) */
ctx->flags |= SEEN_SKB | SEEN_A;
off = offsetof(struct sk_buff, len);
emit_load(r_A, r_skb, off, ctx);
break;
case BPF_LD | BPF_MEM:
/* A <- M[k] ==> lw r_A, offset(M) */
ctx->flags |= SEEN_MEM | SEEN_A;
emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
break;
case BPF_LD | BPF_W | BPF_ABS:
/* A <- P[k:4] */
sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
goto load;
case BPF_LD | BPF_H | BPF_ABS:
/* A <- P[k:2] */
sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
goto load;
case BPF_LD | BPF_B | BPF_ABS:
/* A <- P[k:1] */
sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
emit_load_imm(r_off, k, ctx);
load_common:
ctx->flags |= SEEN_CALL | SEEN_OFF |
SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
emit_load_func(r_s0, (ptr)sk_load_func, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx);
emit_jalr(MIPS_R_RA, r_s0, ctx);
/* Load second argument to delay slot */
emit_reg_move(MIPS_R_A1, r_off, ctx);
/* Check the error value */
emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
ctx);
/* Load return register on DS for failures */
emit_reg_move(r_ret, r_zero, ctx);
/* Return with error */
emit_b(b_imm(prog->len, ctx), ctx);
emit_nop(ctx);
break;
case BPF_LD | BPF_W | BPF_IND:
/* A <- P[X + k:4] */
sk_load_func = sk_load_word;
goto load_ind;
case BPF_LD | BPF_H | BPF_IND:
/* A <- P[X + k:2] */
sk_load_func = sk_load_half;
goto load_ind;
case BPF_LD | BPF_B | BPF_IND:
/* A <- P[X + k:1] */
sk_load_func = sk_load_byte;
load_ind:
ctx->flags |= SEEN_OFF | SEEN_X;
emit_addiu(r_off, r_X, k, ctx);
goto load_common;
case BPF_LDX | BPF_IMM:
/* X <- k */
ctx->flags |= SEEN_X;
emit_load_imm(r_X, k, ctx);
break;
case BPF_LDX | BPF_MEM:
/* X <- M[k] */
ctx->flags |= SEEN_X | SEEN_MEM;
emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
break;
case BPF_LDX | BPF_W | BPF_LEN:
/* X <- len */
ctx->flags |= SEEN_X | SEEN_SKB;
off = offsetof(struct sk_buff, len);
emit_load(r_X, r_skb, off, ctx);
break;
case BPF_LDX | BPF_B | BPF_MSH:
/* X <- 4 * (P[k:1] & 0xf) */
ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
/* Load offset to a1 */
emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
/*
* This may emit two instructions so it may not fit
* in the delay slot. So use a0 in the delay slot.
*/
emit_load_imm(MIPS_R_A1, k, ctx);
emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */
emit_bcond(MIPS_COND_NE, r_ret, 0,
b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_zero, ctx);
/* We are good */
/* X <- P[k:1] & 0xf */
emit_andi(r_X, r_A, 0xf, ctx);
/* X << 2 */
emit_b(b_imm(i + 1, ctx), ctx);
emit_sll(r_X, r_X, 2, ctx); /* delay slot */
break;
case BPF_ST:
/* M[k] <- A */
ctx->flags |= SEEN_MEM | SEEN_A;
emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
break;
case BPF_STX:
/* M[k] <- X */
ctx->flags |= SEEN_MEM | SEEN_X;
emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
break;
case BPF_ALU | BPF_ADD | BPF_K:
/* A += K */
ctx->flags |= SEEN_A;
emit_addiu(r_A, r_A, k, ctx);
break;
case BPF_ALU | BPF_ADD | BPF_X:
/* A += X */
ctx->flags |= SEEN_A | SEEN_X;
emit_addu(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
/* A -= K */
ctx->flags |= SEEN_A;
emit_addiu(r_A, r_A, -k, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
/* A -= X */
ctx->flags |= SEEN_A | SEEN_X;
emit_subu(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
/* A *= K */
/* Load K to scratch register before MUL */
ctx->flags |= SEEN_A;
emit_load_imm(r_s0, k, ctx);
emit_mul(r_A, r_A, r_s0, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
/* A *= X */
ctx->flags |= SEEN_A | SEEN_X;
emit_mul(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
/* A /= k */
if (k == 1)
break;
if (optimize_div(&k)) {
ctx->flags |= SEEN_A;
emit_srl(r_A, r_A, k, ctx);
break;
}
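/*
 * optimize_div() is defined elsewhere in this file (not shown in this
 * excerpt); judging by its use here it detects a power-of-two divisor
 * and rewrites k to the shift amount so the division above becomes a
 * single srl, while other divisors fall through to the divu below.
 */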
ctx->flags |= SEEN_A;
emit_load_imm(r_s0, k, ctx);
emit_div(r_A, r_s0, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
/* A %= k */
if (k == 1) {
ctx->flags |= SEEN_A;
emit_jit_reg_move(r_A, r_zero, ctx);
} else {
ctx->flags |= SEEN_A;
emit_load_imm(r_s0, k, ctx);
emit_mod(r_A, r_s0, ctx);
}
break;
case BPF_ALU | BPF_DIV | BPF_X:
/* A /= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero,
b_imm(prog->len, ctx), ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
/* A %= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
emit_bcond(MIPS_COND_EQ, r_X, r_zero,
b_imm(prog->len, ctx), ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
/* A |= K */
ctx->flags |= SEEN_A;
emit_ori(r_A, r_A, k, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
/* A |= X */
ctx->flags |= SEEN_A | SEEN_X;
emit_or(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
/* A ^= k */
ctx->flags |= SEEN_A;
emit_xori(r_A, r_A, k, ctx);
break;
case BPF_ANC | SKF_AD_ALU_XOR_X:
case BPF_ALU | BPF_XOR | BPF_X:
/* A ^= X */
ctx->flags |= SEEN_A;
emit_xor(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
/* A &= K */
ctx->flags |= SEEN_A;
emit_andi(r_A, r_A, k, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
/* A &= X */
ctx->flags |= SEEN_A | SEEN_X;
emit_and(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
/* A <<= K */
ctx->flags |= SEEN_A;
emit_sll(r_A, r_A, k, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
/* A <<= X */
ctx->flags |= SEEN_A | SEEN_X;
emit_sllv(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
/* A >>= K */
ctx->flags |= SEEN_A;
emit_srl(r_A, r_A, k, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
ctx->flags |= SEEN_A | SEEN_X;
emit_srlv(r_A, r_A, r_X, ctx);
break;
case BPF_ALU | BPF_NEG:
/* A = -A */
ctx->flags |= SEEN_A;
emit_neg(r_A, ctx);
break;
case BPF_JMP | BPF_JA:
/* pc += K */
emit_b(b_imm(i + k + 1, ctx), ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
/* pc += ( A == K ) ? pc->jt : pc->jf */
condt = MIPS_COND_EQ | MIPS_COND_K;
goto jmp_cmp;
case BPF_JMP | BPF_JEQ | BPF_X:
ctx->flags |= SEEN_X;
/* pc += ( A == X ) ? pc->jt : pc->jf */
condt = MIPS_COND_EQ | MIPS_COND_X;
goto jmp_cmp;
case BPF_JMP | BPF_JGE | BPF_K:
/* pc += ( A >= K ) ? pc->jt : pc->jf */
condt = MIPS_COND_GE | MIPS_COND_K;
goto jmp_cmp;
case BPF_JMP | BPF_JGE | BPF_X:
ctx->flags |= SEEN_X;
/* pc += ( A >= X ) ? pc->jt : pc->jf */
condt = MIPS_COND_GE | MIPS_COND_X;
goto jmp_cmp;
case BPF_JMP | BPF_JGT | BPF_K:
/* pc += ( A > K ) ? pc->jt : pc->jf */
condt = MIPS_COND_GT | MIPS_COND_K;
goto jmp_cmp;
case BPF_JMP | BPF_JGT | BPF_X:
ctx->flags |= SEEN_X;
/* pc += ( A > X ) ? pc->jt : pc->jf */
condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
/* Greater or Equal */
if ((condt & MIPS_COND_GE) ||
(condt & MIPS_COND_GT)) {
if (condt & MIPS_COND_K) { /* K */
ctx->flags |= SEEN_A;
emit_sltiu(r_s0, r_A, k, ctx);
} else { /* X */
ctx->flags |= SEEN_A |
SEEN_X;
emit_sltu(r_s0, r_A, r_X, ctx);
}
/* A < (K|X) ? r_scratch = 1 */
b_off = b_imm(i + inst->jf + 1, ctx);
emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
ctx);
emit_nop(ctx);
/* A > (K|X) ? scratch = 0 */
if (condt & MIPS_COND_GT) {
/* Checking for equality */
ctx->flags |= SEEN_A | SEEN_X;
if (condt & MIPS_COND_K)
emit_load_imm(r_s0, k, ctx);
else
emit_jit_reg_move(r_s0, r_X,
ctx);
b_off = b_imm(i + inst->jf + 1, ctx);
emit_bcond(MIPS_COND_EQ, r_A, r_s0,
b_off, ctx);
emit_nop(ctx);
/* Finally, A > K|X */
b_off = b_imm(i + inst->jt + 1, ctx);
emit_b(b_off, ctx);
emit_nop(ctx);
} else {
/* A >= (K|X) so jump */
b_off = b_imm(i + inst->jt + 1, ctx);
emit_b(b_off, ctx);
emit_nop(ctx);
}
} else {
/* A == K|X */
if (condt & MIPS_COND_K) { /* K */
ctx->flags |= SEEN_A;
emit_load_imm(r_s0, k, ctx);
/* jump true */
b_off = b_imm(i + inst->jt + 1, ctx);
emit_bcond(MIPS_COND_EQ, r_A, r_s0,
b_off, ctx);
emit_nop(ctx);
/* jump false */
b_off = b_imm(i + inst->jf + 1,
ctx);
emit_bcond(MIPS_COND_NE, r_A, r_s0,
b_off, ctx);
emit_nop(ctx);
} else { /* X */
/* jump true */
ctx->flags |= SEEN_A | SEEN_X;
b_off = b_imm(i + inst->jt + 1,
ctx);
emit_bcond(MIPS_COND_EQ, r_A, r_X,
b_off, ctx);
emit_nop(ctx);
/* jump false */
b_off = b_imm(i + inst->jf + 1, ctx);
emit_bcond(MIPS_COND_NE, r_A, r_X,
b_off, ctx);
emit_nop(ctx);
}
}
break;
case BPF_JMP | BPF_JSET | BPF_K:
ctx->flags |= SEEN_A;
/* pc += (A & K) ? pc -> jt : pc -> jf */
emit_load_imm(r_s1, k, ctx);
emit_and(r_s0, r_A, r_s1, ctx);
/* jump true */
b_off = b_imm(i + inst->jt + 1, ctx);
emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
emit_nop(ctx);
/* jump false */
b_off = b_imm(i + inst->jf + 1, ctx);
emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JSET | BPF_X:
ctx->flags |= SEEN_X | SEEN_A;
/* pc += (A & X) ? pc -> jt : pc -> jf */
emit_and(r_s0, r_A, r_X, ctx);
/* jump true */
b_off = b_imm(i + inst->jt + 1, ctx);
emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
emit_nop(ctx);
/* jump false */
b_off = b_imm(i + inst->jf + 1, ctx);
emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_RET | BPF_A:
ctx->flags |= SEEN_A;
if (i != prog->len - 1)
/*
* If this is not the last instruction
* then jump to the epilogue
*/
emit_b(b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break;
case BPF_RET | BPF_K:
/*
* It can emit two instructions so it does not fit in
* the delay slot.
*/
emit_load_imm(r_ret, k, ctx);
if (i != prog->len - 1) {
/*
* If this is not the last instruction
* then jump to the epilogue
*/
emit_b(b_imm(prog->len, ctx), ctx);
emit_nop(ctx);
}
break;
case BPF_MISC | BPF_TAX:
/* X = A */
ctx->flags |= SEEN_X | SEEN_A;
emit_jit_reg_move(r_X, r_A, ctx);
break;
case BPF_MISC | BPF_TXA:
/* A = X */
ctx->flags |= SEEN_A | SEEN_X;
emit_jit_reg_move(r_A, r_X, ctx);
break;
/* AUX */
case BPF_ANC | SKF_AD_PROTOCOL:
/* A = ntohs(skb->protocol) */
ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
protocol) != 2);
off = offsetof(struct sk_buff, protocol);
emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
/* This needs little endian fixup */
if (cpu_has_wsbh) {
/* R2 and later have the wsbh instruction */
emit_wsbh(r_A, r_A, ctx);
} else {
/* Get first byte */
emit_andi(r_tmp_imm, r_A, 0xff, ctx);
/* Shift it */
emit_sll(r_tmp, r_tmp_imm, 8, ctx);
/* Get second byte */
emit_srl(r_tmp_imm, r_A, 8, ctx);
emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
/* Put everything together in r_A */
emit_or(r_A, r_tmp, r_tmp_imm, ctx);
}
#endif
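/*
 * Illustrative example: skb->protocol for IPv4 holds 0x0800 in network
 * byte order; a little-endian lh reads it as 0x0008, and the wsbh (or
 * the manual shift/or fallback) restores 0x0800 in r_A.
 */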
break;
case BPF_ANC | SKF_AD_CPU:
ctx->flags |= SEEN_A | SEEN_OFF;
/* A = current_thread_info()->cpu */
BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
cpu) != 4);
off = offsetof(struct thread_info, cpu);
/* $28/gp points to the thread_info struct */
emit_load(r_A, 28, off, ctx);
break;
case BPF_ANC | SKF_AD_IFINDEX:
/* A = skb->dev->ifindex */
ctx->flags |= SEEN_SKB | SEEN_A;
off = offsetof(struct sk_buff, dev);
/* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */
emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_zero, ctx);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
ifindex) != 4);
off = offsetof(struct net_device, ifindex);
emit_load(r_A, r_s0, off, ctx);
break;
case BPF_ANC | SKF_AD_MARK:
ctx->flags |= SEEN_SKB | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
off = offsetof(struct sk_buff, mark);
emit_load(r_A, r_skb, off, ctx);
break;
case BPF_ANC | SKF_AD_RXHASH:
ctx->flags |= SEEN_SKB | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
off = offsetof(struct sk_buff, hash);
emit_load(r_A, r_skb, off, ctx);
break;
case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
ctx->flags |= SEEN_SKB | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
emit_half_load(r_s0, r_skb, off, ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
} else {
emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
/* return 1 if present */
emit_sltu(r_A, r_zero, r_A, ctx);
}
break;
case BPF_ANC | SKF_AD_PKTTYPE:
ctx->flags |= SEEN_SKB;
emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
/* Keep only the last 3 bits */
emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
/* Get the actual packet type to the lower 3 bits */
emit_srl(r_A, r_A, 5, ctx);
#endif
break;
case BPF_ANC | SKF_AD_QUEUE:
ctx->flags |= SEEN_SKB | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
queue_mapping) != 2);
BUILD_BUG_ON(offsetof(struct sk_buff,
queue_mapping) > 0xff);
off = offsetof(struct sk_buff, queue_mapping);
emit_half_load(r_A, r_skb, off, ctx);
break;
default:
pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
inst->code);
return -1;
}
}
/* compute offsets only during the first pass */
if (ctx->target == NULL)
ctx->offsets[i] = ctx->idx * 4;
return 0;
}
int bpf_jit_enable __read_mostly;
void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
unsigned int alloc_size, tmp_idx;
if (!bpf_jit_enable)
return;
memset(&ctx, 0, sizeof(ctx));
ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
ctx.skf = fp;
if (build_body(&ctx))
goto out;
tmp_idx = ctx.idx;
build_prologue(&ctx);
ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
/* just to complete the ctx.idx count */
build_epilogue(&ctx);
alloc_size = 4 * ctx.idx;
ctx.target = module_alloc(alloc_size);
if (ctx.target == NULL)
goto out;
/* Clean it */
memset(ctx.target, 0, alloc_size);
ctx.idx = 0;
/* Generate the actual JIT code */
build_prologue(&ctx);
build_body(&ctx);
build_epilogue(&ctx);
/* Update the icache */
flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
if (bpf_jit_enable > 1)
/* Dump JIT code */
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
fp->bpf_func = (void *)ctx.target;
fp->jited = 1;
out:
kfree(ctx.offsets);
}
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
| sfumato77/Kernel-4.8_Android-x86_BayTrail | arch/mips/net/bpf_jit.c | C | gpl-2.0 | 32,413 |
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#ifndef GOB_SOUND_INFOGRAMES_H
#define GOB_SOUND_INFOGRAMES_H
#include "audio/mixer.h"
#include "audio/mods/infogrames.h"
namespace Gob {
class Infogrames {
public:
Infogrames(Audio::Mixer &mixer);
~Infogrames();
bool loadInstruments(const char *fileName);
bool loadSong(const char *fileName);
void play();
void stop();
private:
Audio::Mixer *_mixer;
Audio::Infogrames::Instruments *_instruments;
Audio::Infogrames *_song;
Audio::SoundHandle _handle;
void clearInstruments();
void clearSong();
bool loadInst(const char *fileName);
};
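// Typical use (illustrative): construct an Infogrames player with the
// engine's Audio::Mixer, call loadInstruments() and loadSong() with the
// resource file names, then play(); stop() halts playback.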
} // End of namespace Gob
#endif // GOB_SOUND_INFOGRAMES_H
| blorente/scummvm | engines/gob/sound/infogrames.h | C | gpl-2.0 | 1,566 |
\documentclass[12pt]{article}
\begin{document}
\begin{eqnarray*}
E^{ZBL}_{ij} & = & \frac{1}{4\pi\epsilon_0} \frac{Z_i Z_j \,e^2}{r_{ij}} \phi(r_{ij}/a)+ S(r_{ij})\\
a & = & \frac{0.46850}{Z_{i}^{0.23} + Z_{j}^{0.23}}\\
\phi(x) & = & 0.18175e^{-3.19980x} + 0.50986e^{-0.94229x} + 0.28022e^{-0.40290x} + 0.02817e^{-0.20162x}\\
\end{eqnarray*}
\end{document}
| ovilab/atomify-lammps | libs/lammps/doc/src/Eqs/pair_zbl.tex | TeX | gpl-3.0 | 394 |
/*
* IBM PowerPC Virtual I/O Infrastructure Support.
*
* Copyright (c) 2003,2008 IBM Corp.
* Dave Engebretsen engebret@us.ibm.com
* Santiago Leon santil@us.ibm.com
* Hollis Blanchard <hollisb@us.ibm.com>
* Stephen Rothwell
* Robert Jennings <rcjenn@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = "vio",
.type = "",
.dev.init_name = "vio",
.dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
* vio_cmo_pool - A pool of IO memory for CMO use
*
* @size: The size of the pool in bytes
* @free: The amount of free memory in the pool
*/
struct vio_cmo_pool {
size_t size;
size_t free;
};
/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100
/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
/**
* vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
*
* @viodev: struct vio_dev pointer
* @list: pointer to other devices on bus that are being tracked
*/
struct vio_cmo_dev_entry {
struct vio_dev *viodev;
struct list_head list;
};
/**
* vio_cmo - VIO bus accounting structure for CMO entitlement
*
* @lock: spinlock for entire structure
* @balance_q: work queue for balancing system entitlement
* @device_list: list of CMO-enabled devices requiring entitlement
* @entitled: total system entitlement in bytes
* @reserve: pool of memory from which devices reserve entitlement, incl. spare
* @excess: pool of excess entitlement not needed for device reserves or spare
* @spare: IO memory for device hotplug functionality
* @min: minimum necessary for system operation
* @desired: desired memory for system operation
* @curr: bytes currently allocated
* @high: high water mark for IO data usage
*/
struct vio_cmo {
spinlock_t lock;
struct delayed_work balance_q;
struct list_head device_list;
size_t entitled;
struct vio_cmo_pool reserve;
struct vio_cmo_pool excess;
size_t spare;
size_t min;
size_t desired;
size_t curr;
size_t high;
} vio_cmo;
/**
* vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
*/
static int vio_cmo_num_OF_devs(void)
{
struct device_node *node_vroot;
int count = 0;
/*
* Count the number of vdevice entries with an
* ibm,my-dma-window OF property
*/
node_vroot = of_find_node_by_name(NULL, "vdevice");
if (node_vroot) {
struct device_node *of_node;
struct property *prop;
for_each_child_of_node(node_vroot, of_node) {
prop = of_find_property(of_node, "ibm,my-dma-window",
NULL);
if (prop)
count++;
}
}
of_node_put(node_vroot);
return count;
}
/**
* vio_cmo_alloc - allocate IO memory for CMO-enabled devices
*
* @viodev: VIO device requesting IO memory
* @size: size of allocation requested
*
* Allocations come from memory reserved for the devices and any excess
* IO memory available to all devices. The spare pool used to service
* hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
* made available.
*
* Return codes:
* 0 for successful allocation and -ENOMEM for a failure
*/
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
unsigned long flags;
size_t reserve_free = 0;
size_t excess_free = 0;
int ret = -ENOMEM;
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Determine the amount of free entitlement available in reserve */
if (viodev->cmo.entitled > viodev->cmo.allocated)
reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
/* If spare is not fulfilled, the excess pool can not be used. */
if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
excess_free = vio_cmo.excess.free;
/* The request can be satisfied */
if ((reserve_free + excess_free) >= size) {
vio_cmo.curr += size;
if (vio_cmo.curr > vio_cmo.high)
vio_cmo.high = vio_cmo.curr;
viodev->cmo.allocated += size;
size -= min(reserve_free, size);
vio_cmo.excess.free -= size;
ret = 0;
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return ret;
}
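/*
 * Worked example (illustrative): with reserve_free == 3 bytes of unused
 * device entitlement and excess_free == 5, a request of size 6 succeeds;
 * size is first reduced by min(reserve_free, size) == 3 and the
 * remaining 3 bytes are taken from vio_cmo.excess.free.
 */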
/**
* vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
* @viodev: VIO device freeing IO memory
* @size: size of deallocation
*
* IO memory is freed by the device back to the correct memory pools.
* The spare pool is replenished first from either memory pool, then
* the reserve pool is used to reduce device entitlement, the excess
* pool is used to increase the reserve pool toward the desired entitlement
* target, and then the remaining memory is returned to the pools.
*
*/
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
unsigned long flags;
size_t spare_needed = 0;
size_t excess_freed = 0;
size_t reserve_freed = size;
size_t tmp;
int balance = 0;
spin_lock_irqsave(&vio_cmo.lock, flags);
vio_cmo.curr -= size;
/* Amount of memory freed from the excess pool */
if (viodev->cmo.allocated > viodev->cmo.entitled) {
excess_freed = min(reserve_freed, (viodev->cmo.allocated -
viodev->cmo.entitled));
reserve_freed -= excess_freed;
}
/* Remove allocation from device */
viodev->cmo.allocated -= (reserve_freed + excess_freed);
/* Spare is a subset of the reserve pool, replenish it first. */
spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
/*
* Replenish the spare in the reserve pool from the excess pool.
* This moves entitlement into the reserve pool.
*/
if (spare_needed && excess_freed) {
tmp = min(excess_freed, spare_needed);
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
vio_cmo.spare += tmp;
excess_freed -= tmp;
spare_needed -= tmp;
balance = 1;
}
/*
* Replenish the spare in the reserve pool from the reserve pool.
* This removes entitlement from the device down to VIO_CMO_MIN_ENT,
* if needed, and gives it to the spare pool. The amount of used
* memory in this pool does not change.
*/
if (spare_needed && reserve_freed) {
tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
reserve_freed -= tmp;
spare_needed -= tmp;
balance = 1;
}
/*
* Increase the reserve pool until the desired allocation is met.
* Move an allocation freed from the excess pool into the reserve
* pool and schedule a balance operation.
*/
if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
excess_freed -= tmp;
balance = 1;
}
/* Return memory from the excess pool to that pool */
if (excess_freed)
vio_cmo.excess.free += excess_freed;
if (balance)
schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
* vio_cmo_entitlement_update - Manage system entitlement changes
*
* @new_entitlement: new system entitlement to attempt to accommodate
*
* Increases in entitlement will be used to fulfill the spare entitlement
* and the rest is given to the excess pool. Decreases, if they are
* possible, come from the excess pool and from unused device entitlement
*
* Returns: 0 on success, -ENOMEM when change can not be made
*/
int vio_cmo_entitlement_update(size_t new_entitlement)
{
struct vio_dev *viodev;
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t avail, delta, tmp;
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Entitlement increases */
if (new_entitlement > vio_cmo.entitled) {
delta = new_entitlement - vio_cmo.entitled;
/* Fulfill spare allocation */
if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
vio_cmo.spare += tmp;
vio_cmo.reserve.size += tmp;
delta -= tmp;
}
/* Remaining new allocation goes to the excess pool */
vio_cmo.entitled += delta;
vio_cmo.excess.size += delta;
vio_cmo.excess.free += delta;
goto out;
}
/* Entitlement decreases */
delta = vio_cmo.entitled - new_entitlement;
avail = vio_cmo.excess.free;
/*
* Need to check how much unused entitlement each device can
* sacrifice to fulfill entitlement change.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
if (avail >= delta)
break;
viodev = dev_ent->viodev;
if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
(viodev->cmo.entitled > VIO_CMO_MIN_ENT))
avail += viodev->cmo.entitled -
max_t(size_t, viodev->cmo.allocated,
VIO_CMO_MIN_ENT);
}
if (delta <= avail) {
vio_cmo.entitled -= delta;
/* Take entitlement from the excess pool first */
tmp = min(vio_cmo.excess.free, delta);
vio_cmo.excess.size -= tmp;
vio_cmo.excess.free -= tmp;
delta -= tmp;
/*
* Remove all but VIO_CMO_MIN_ENT bytes from devices
* until entitlement change is served
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
if (!delta)
break;
viodev = dev_ent->viodev;
tmp = 0;
if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
(viodev->cmo.entitled > VIO_CMO_MIN_ENT))
tmp = viodev->cmo.entitled -
max_t(size_t, viodev->cmo.allocated,
VIO_CMO_MIN_ENT);
viodev->cmo.entitled -= min(tmp, delta);
delta -= min(tmp, delta);
}
} else {
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return -ENOMEM;
}
out:
schedule_delayed_work(&vio_cmo.balance_q, 0);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return 0;
}
/**
* vio_cmo_balance - Balance entitlement among devices
*
* @work: work queue structure for this operation
*
* Any system entitlement above the minimum needed for devices, or
* already allocated to devices, can be distributed to the devices.
* The list of devices is iterated through to recalculate the desired
* entitlement level and to determine how much entitlement above the
* minimum entitlement is allocated to devices.
*
* Small chunks of the available entitlement are given to devices until
* their requirements are fulfilled or there is no entitlement left to give.
* Upon completion sizes of the reserve and excess pools are calculated.
*
* The system minimum entitlement level is also recalculated here.
* Entitlement will be reserved for devices even after vio_bus_remove to
* accommodate reloading the driver. The OF tree is walked to count the
* number of devices present and this will remove entitlement for devices
* that have actually left the system after vio_bus_remove has been called.
*/
static void vio_cmo_balance(struct work_struct *work)
{
struct vio_cmo *cmo;
struct vio_dev *viodev;
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t avail = 0, level, chunk, need;
int devcount = 0, fulfilled;
cmo = container_of(work, struct vio_cmo, balance_q.work);
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Calculate minimum entitlement and fulfill spare */
cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
BUG_ON(cmo->min > cmo->entitled);
cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
cmo->min += cmo->spare;
cmo->desired = cmo->min;
/*
* Determine how much entitlement is available and reset device
* entitlements
*/
avail = cmo->entitled - cmo->spare;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
devcount++;
viodev->cmo.entitled = VIO_CMO_MIN_ENT;
cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
}
/*
* Having provided each device with the minimum entitlement, loop
* over the devices portioning out the remaining entitlement
* until there is nothing left.
*/
level = VIO_CMO_MIN_ENT;
while (avail) {
fulfilled = 0;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
if (viodev->cmo.desired <= level) {
fulfilled++;
continue;
}
/*
* Give the device up to VIO_CMO_BALANCE_CHUNK
* bytes of entitlement, but do not exceed the
* desired level of entitlement for the device.
*/
chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
chunk = min(chunk, (viodev->cmo.desired -
viodev->cmo.entitled));
viodev->cmo.entitled += chunk;
/*
* If the memory for this entitlement increase was
* already allocated to the device it does not come
* from the available pool being portioned out.
*/
need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
max(viodev->cmo.allocated, level);
avail -= need;
}
if (fulfilled == devcount)
break;
level += VIO_CMO_BALANCE_CHUNK;
}
/* Calculate new reserve and excess pool sizes */
cmo->reserve.size = cmo->min;
cmo->excess.free = 0;
cmo->excess.size = 0;
need = 0;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
/* Calculated reserve size above the minimum entitlement */
if (viodev->cmo.entitled)
cmo->reserve.size += (viodev->cmo.entitled -
VIO_CMO_MIN_ENT);
/* Calculated used excess entitlement */
if (viodev->cmo.allocated > viodev->cmo.entitled)
need += viodev->cmo.allocated - viodev->cmo.entitled;
}
cmo->excess.size = cmo->entitled - cmo->reserve.size;
cmo->excess.free = cmo->excess.size - need;
cancel_delayed_work(to_delayed_work(work));
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
atomic_inc(&viodev->cmo.allocs_failed);
return NULL;
}
ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
return ret;
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
dma_addr_t ret = DMA_ERROR_CODE;
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
if (unlikely(dma_mapping_error(dev, ret))) {
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
return ret;
}
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct scatterlist *sgl;
int ret, count = 0;
size_t alloc_size = 0;
for (sgl = sglist; count < nelems; count++, sgl++)
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
if (vio_cmo_alloc(viodev, alloc_size)) {
atomic_inc(&viodev->cmo.allocs_failed);
return 0;
}
ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
if (unlikely(!ret)) {
vio_cmo_dealloc(viodev, alloc_size);
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
return ret;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct scatterlist *sgl;
size_t alloc_size = 0;
int count = 0;
for (sgl = sglist; count < nelems; count++, sgl++)
alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
vio_cmo_dealloc(viodev, alloc_size);
}
static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
return dma_iommu_ops.dma_supported(dev, mask);
}
static u64 vio_dma_get_required_mask(struct device *dev)
{
return dma_iommu_ops.get_required_mask(dev);
}
struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
.mmap = dma_direct_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported,
.get_required_mask = vio_dma_get_required_mask,
};
/**
* vio_cmo_set_dev_desired - Set desired entitlement for a device
*
* @viodev: struct vio_dev for device to alter
* @desired: new desired entitlement level in bytes
*
* For use by devices to request a change to their entitlement at runtime or
* through sysfs. The desired entitlement level is changed and a balancing
* of system resources is scheduled to run in the future.
*/
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
unsigned long flags;
struct vio_cmo_dev_entry *dev_ent;
int found = 0;
if (!firmware_has_feature(FW_FEATURE_CMO))
return;
spin_lock_irqsave(&vio_cmo.lock, flags);
if (desired < VIO_CMO_MIN_ENT)
desired = VIO_CMO_MIN_ENT;
/*
* Changes will not be made for devices not in the device list.
* If it is not in the device list, then no driver is loaded
* for the device and it can not receive entitlement.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
if (viodev == dev_ent->viodev) {
found = 1;
break;
}
if (!found) {
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return;
}
/* Increase/decrease in desired device entitlement */
if (desired >= viodev->cmo.desired) {
/* Just bump the bus and device values prior to a balance */
vio_cmo.desired += desired - viodev->cmo.desired;
viodev->cmo.desired = desired;
} else {
/* Decrease bus and device values for desired entitlement */
vio_cmo.desired -= viodev->cmo.desired - desired;
viodev->cmo.desired = desired;
/*
* If less entitlement is desired than current entitlement, move
* any reserve memory in the change region to the excess pool.
*/
if (viodev->cmo.entitled > desired) {
vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
vio_cmo.excess.size += viodev->cmo.entitled - desired;
/*
* If entitlement moving from the reserve pool to the
* excess pool is currently unused, add to the excess
* free counter.
*/
if (viodev->cmo.allocated < viodev->cmo.entitled)
vio_cmo.excess.free += viodev->cmo.entitled -
max(viodev->cmo.allocated, desired);
viodev->cmo.entitled = desired;
}
}
schedule_delayed_work(&vio_cmo.balance_q, 0);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
* vio_cmo_bus_probe - Handle CMO specific bus probe activities
*
* @viodev - Pointer to struct vio_dev for device
*
* Determine the device's IO memory entitlement needs, attempting
* to satisfy the system minimum entitlement at first and scheduling
* a balance operation to take care of the rest at a later time.
*
* Returns: 0 on success, -EINVAL when device doesn't support CMO, and
* -ENOMEM when entitlement is not available for device or
* device entry.
*
*/
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
struct device *dev = &viodev->dev;
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
if (of_get_property(viodev->dev.of_node,
"ibm,my-dma-window", NULL))
dma_capable = true;
break;
case PFO:
dma_capable = false;
break;
default:
dev_warn(dev, "unknown device family: %d\n", viodev->family);
BUG();
break;
}
/* Configure entitlement for the device. */
if (dma_capable) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
dev_err(dev, "%s: device driver does not support CMO\n",
__func__);
return -EINVAL;
}
viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
viodev->cmo.desired = VIO_CMO_MIN_ENT;
size = VIO_CMO_MIN_ENT;
dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
GFP_KERNEL);
if (!dev_ent)
return -ENOMEM;
dev_ent->viodev = viodev;
spin_lock_irqsave(&vio_cmo.lock, flags);
list_add(&dev_ent->list, &vio_cmo.device_list);
} else {
viodev->cmo.desired = 0;
size = 0;
spin_lock_irqsave(&vio_cmo.lock, flags);
}
/*
* If the needs for vio_cmo.min have not changed since they
* were last set, the number of devices in the OF tree has
* been constant and the IO memory for this is already in
* the reserve pool.
*/
if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
VIO_CMO_MIN_ENT)) {
/* Update desired entitlement if device requires it */
if (size)
vio_cmo.desired += (viodev->cmo.desired -
VIO_CMO_MIN_ENT);
} else {
size_t tmp;
tmp = vio_cmo.spare + vio_cmo.excess.free;
if (tmp < size) {
dev_err(dev, "%s: insufficient free "
"entitlement to add device. "
"Need %lu, have %lu\n", __func__,
size, (vio_cmo.spare + tmp));
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return -ENOMEM;
}
/* Use excess pool first to fulfill request */
tmp = min(size, vio_cmo.excess.free);
vio_cmo.excess.free -= tmp;
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
/* Use spare if excess pool was insufficient */
vio_cmo.spare -= size - tmp;
/* Update bus accounting */
vio_cmo.min += size;
vio_cmo.desired += viodev->cmo.desired;
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return 0;
}
/**
* vio_cmo_bus_remove - Handle CMO specific bus removal activities
*
* @viodev - Pointer to struct vio_dev for device
*
* Remove the device from the cmo device list. The minimum entitlement
* will be reserved for the device as long as it is in the system. The
* rest of the entitlement the device had been allocated will be returned
* to the system.
*/
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t tmp;
spin_lock_irqsave(&vio_cmo.lock, flags);
if (viodev->cmo.allocated) {
dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
"allocated after remove operation.\n",
__func__, viodev->cmo.allocated);
BUG();
}
/*
* Remove the device from the device list being maintained for
* CMO enabled devices.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
if (viodev == dev_ent->viodev) {
list_del(&dev_ent->list);
kfree(dev_ent);
break;
}
/*
* Devices may not require any entitlement and they do not need
* to be processed. Otherwise, return the device's entitlement
* back to the pools.
*/
if (viodev->cmo.entitled) {
/*
* This device has not yet left the OF tree; its
* minimum entitlement remains in vio_cmo.min and
* vio_cmo.desired
*/
vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
/*
* Save min allocation for device in reserve as long
* as it exists in OF tree as determined by later
* balance operation
*/
viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
/* Replenish spare from freed reserve pool */
if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
vio_cmo.spare));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
}
/* Remaining reserve goes to excess pool */
vio_cmo.excess.size += viodev->cmo.entitled;
vio_cmo.excess.free += viodev->cmo.entitled;
vio_cmo.reserve.size -= viodev->cmo.entitled;
/*
* Until the device is removed it will keep a
* minimum entitlement; this will guarantee that
* a module unload/load will result in a success.
*/
viodev->cmo.entitled = VIO_CMO_MIN_ENT;
viodev->cmo.desired = VIO_CMO_MIN_ENT;
atomic_set(&viodev->cmo.allocs_failed, 0);
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
* vio_cmo_bus_init - CMO entitlement initialization at bus init time
*
* Set up the reserve and excess entitlement pools based on available
* system entitlement and the number of devices in the OF tree that
* require entitlement in the reserve pool.
*/
static void vio_cmo_bus_init(void)
{
struct hvcall_mpp_data mpp_data;
int err;
memset(&vio_cmo, 0, sizeof(struct vio_cmo));
spin_lock_init(&vio_cmo.lock);
INIT_LIST_HEAD(&vio_cmo.device_list);
INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
/* Get current system entitlement */
err = h_get_mpp(&mpp_data);
/*
* On failure, continue with entitlement set to 0, will panic()
* later when spare is reserved.
*/
if (err != H_SUCCESS) {
printk(KERN_ERR "%s: unable to determine system IO "\
"entitlement. (%d)\n", __func__, err);
vio_cmo.entitled = 0;
} else {
vio_cmo.entitled = mpp_data.entitled_mem;
}
/* Set reservation and check against entitlement */
vio_cmo.spare = VIO_CMO_MIN_ENT;
vio_cmo.reserve.size = vio_cmo.spare;
vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
VIO_CMO_MIN_ENT);
if (vio_cmo.reserve.size > vio_cmo.entitled) {
printk(KERN_ERR "%s: insufficient system entitlement\n",
__func__);
panic("%s: Insufficient system entitlement", __func__);
}
/* Set the remaining accounting variables */
vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
vio_cmo.excess.free = vio_cmo.excess.size;
vio_cmo.min = vio_cmo.reserve.size;
vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */
#define viodev_cmo_rd_attr(name) \
static ssize_t viodev_cmo_##name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
}
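/*
 * For example, viodev_cmo_rd_attr(desired) below expands to
 * viodev_cmo_desired_show(), which prints the device's cmo.desired
 * value; the macro is instantiated for desired, entitled and allocated.
 */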
static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vio_dev *viodev = to_vio_dev(dev);
return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}
static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct vio_dev *viodev = to_vio_dev(dev);
atomic_set(&viodev->cmo.allocs_failed, 0);
return count;
}
static ssize_t viodev_cmo_desired_set(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct vio_dev *viodev = to_vio_dev(dev);
size_t new_desired;
int ret;
ret = strict_strtoul(buf, 10, &new_desired);
if (ret)
return ret;
vio_cmo_set_dev_desired(viodev, new_desired);
return count;
}
viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
__ATTR_RO(modalias),
__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viodev_cmo_desired_show, viodev_cmo_desired_set),
__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
__ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
__ATTR_NULL
};
/* sysfs bus functions and data structures for CMO */
#define viobus_cmo_rd_attr(name) \
static ssize_t \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name); \
}
#define viobus_cmo_pool_rd_attr(name, var) \
static ssize_t \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name.var); \
}
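/*
 * Likewise, viobus_cmo_rd_attr(spare) expands to viobus_cmo_spare_show()
 * reporting vio_cmo.spare, and viobus_cmo_pool_rd_attr(excess, free)
 * expands to viobus_cmo_excess_pool_show_free() reporting
 * vio_cmo.excess.free.
 */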
static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
size_t count)
{
unsigned long flags;
spin_lock_irqsave(&vio_cmo.lock, flags);
vio_cmo.high = vio_cmo.curr;
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return count;
}
viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);
static struct bus_attribute vio_cmo_bus_attrs[] = {
__ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
__ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
__ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
__ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
__ATTR(cmo_spare, S_IRUGO, viobus_cmo_spare_show, NULL),
__ATTR(cmo_min, S_IRUGO, viobus_cmo_min_show, NULL),
__ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
__ATTR(cmo_curr, S_IRUGO, viobus_cmo_curr_show, NULL),
__ATTR(cmo_high, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viobus_cmo_high_show, viobus_cmo_high_reset),
__ATTR_NULL
};
static void vio_cmo_sysfs_init(void)
{
vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
* Platform Facilities Option (PFO) support
*/
/**
* vio_h_cop_sync - Perform a synchronous PFO co-processor operation
*
* @vdev - Pointer to a struct vio_dev for device
* @op - Pointer to a struct vio_pfo_op for the operation parameters
*
* Calls the hypervisor to synchronously perform the PFO operation
* described in @op. In the case of a busy response from the hypervisor,
* the operation will be re-submitted indefinitely unless a non-zero timeout
* is specified or an error occurs. The timeout places a limit on when to
* stop re-submitting an operation; the total time can be exceeded if an
* operation is in progress.
*
* On return, op->hcall_err is set to the return value of the
* last h_cop_op call or it will be 0 if an error not involving the h_call
* was encountered.
*
* Returns:
* 0 on success,
* -EINVAL if the h_call fails due to an invalid parameter,
* -E2BIG if the h_call can not be performed synchronously,
* -EBUSY if a timeout is specified and has elapsed,
* -EACCES if the memory area for data/status has been rescinded, or
* -EPERM if a hardware fault has been indicated
*/
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
struct device *dev = &vdev->dev;
unsigned long deadline = 0;
long hret = 0;
int ret = 0;
if (op->timeout)
deadline = jiffies + msecs_to_jiffies(op->timeout);
while (true) {
hret = plpar_hcall_norets(H_COP, op->flags,
vdev->resource_id,
op->in, op->inlen, op->out,
op->outlen, op->csbcpb);
if (hret == H_SUCCESS ||
(hret != H_NOT_ENOUGH_RESOURCES &&
hret != H_BUSY && hret != H_RESOURCE) ||
(op->timeout && time_after(deadline, jiffies)))
break;
dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
}
switch (hret) {
case H_SUCCESS:
ret = 0;
break;
case H_OP_MODE:
case H_TOO_BIG:
ret = -E2BIG;
break;
case H_RESCINDED:
ret = -EACCES;
break;
case H_HARDWARE:
ret = -EPERM;
break;
case H_NOT_ENOUGH_RESOURCES:
case H_RESOURCE:
case H_BUSY:
ret = -EBUSY;
break;
default:
ret = -EINVAL;
break;
}
if (ret)
dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
__func__, ret, hret);
op->hcall_err = hret;
return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
const __be32 *dma_window;
struct iommu_table *tbl;
unsigned long offset, size;
dma_window = of_get_property(dev->dev.of_node,
"ibm,my-dma-window", NULL);
if (!dma_window)
return NULL;
tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return NULL;
of_parse_dma_window(dev->dev.of_node, dma_window,
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
/* offset for VIO should always be 0 */
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
tbl->it_blocksize = 16;
return iommu_init_table(tbl, -1);
}
/**
* vio_match_device: - Tell if a VIO device has a matching
* VIO device id structure.
* @ids: array of VIO device id structures to search in
* @dev: the VIO device structure to match against
*
* Used by a driver to check whether a VIO device present in the
* system is in its list of supported devices. Returns the matching
* vio_device_id structure or NULL if there is no match.
*/
static const struct vio_device_id *vio_match_device(
const struct vio_device_id *ids, const struct vio_dev *dev)
{
while (ids->type[0] != '\0') {
if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
of_device_is_compatible(dev->dev.of_node,
ids->compat))
return ids;
ids++;
}
return NULL;
}
/*
* Convert from struct device to struct vio_dev and pass to driver.
* dev->driver has already been set by generic code because vio_bus_match
* succeeded.
*/
static int vio_bus_probe(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
int error = -ENODEV;
if (!viodrv->probe)
return error;
id = vio_match_device(viodrv->id_table, viodev);
if (id) {
memset(&viodev->cmo, 0, sizeof(viodev->cmo));
if (firmware_has_feature(FW_FEATURE_CMO)) {
error = vio_cmo_bus_probe(viodev);
if (error)
return error;
}
error = viodrv->probe(viodev, id);
if (error && firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
}
return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
struct device *devptr;
int ret = 1;
/*
* Hold a reference to the device after the remove function is called
* to allow for CMO accounting cleanup for the device.
*/
devptr = get_device(dev);
if (viodrv->remove)
ret = viodrv->remove(viodev);
if (!ret && firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
put_device(devptr);
return ret;
}
/**
* vio_register_driver: - Register a new vio driver
* @viodrv: The vio_driver structure to be registered.
*/
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */
viodrv->driver.name = viodrv->name;
viodrv->driver.pm = viodrv->pm;
viodrv->driver.bus = &vio_bus_type;
viodrv->driver.owner = owner;
viodrv->driver.mod_name = mod_name;
return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
/**
* vio_unregister_driver - Remove registration of vio driver.
* @viodrv: The vio_driver struct to be removed from registration
*/
void vio_unregister_driver(struct vio_driver *viodrv)
{
driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl)
iommu_free_table(tbl, of_node_full_name(dev->of_node));
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
/**
* vio_register_device_node: - Register a new vio device.
* @of_node: The OF node for this device.
*
* Creates and initializes a vio_dev structure from the data in
* of_node and adds it to the list of virtual devices.
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
struct device_node *parent_node;
const __be32 *prop;
enum vio_dev_family family;
const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
/*
* Determine if this node is under the /vdevice node or under the
* /ibm,platform-facilities node. This decides the device's family.
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
family = PFO;
else if (!strcmp(parent_node->full_name, "/vdevice"))
family = VDEVICE;
else {
pr_warn("%s: parent(%s) of %s not recognized.\n",
__func__,
parent_node->full_name,
of_node_name);
of_node_put(parent_node);
return NULL;
}
of_node_put(parent_node);
} else {
pr_warn("%s: could not determine the parent of node %s.\n",
__func__, of_node_name);
return NULL;
}
if (family == PFO) {
if (of_get_property(of_node, "interrupt-controller", NULL)) {
pr_debug("%s: Skipping the interrupt controller %s.\n",
__func__, of_node_name);
return NULL;
}
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
if (viodev == NULL) {
pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
}
/* we need the 'device_type' property, in order to match with drivers */
viodev->family = family;
if (viodev->family == VDEVICE) {
unsigned int unit_address;
if (of_node->type != NULL)
viodev->type = of_node->type;
else {
pr_warn("%s: node %s is missing the 'device_type' "
"property.\n", __func__, of_node_name);
goto out;
}
prop = of_get_property(of_node, "reg", NULL);
if (prop == NULL) {
pr_warn("%s: node %s missing 'reg'\n",
__func__, of_node_name);
goto out;
}
unit_address = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%x", unit_address);
viodev->irq = irq_of_parse_and_map(of_node, 0);
viodev->unit_address = unit_address;
} else {
/* PFO devices need their resource_id for submitting COP_OPs
* This is an optional field for devices, but is required when
* performing synchronous ops */
prop = of_get_property(of_node, "ibm,resource-id", NULL);
if (prop != NULL)
viodev->resource_id = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%s", of_node_name);
viodev->type = of_node_name;
viodev->irq = 0;
}
viodev->name = of_node->name;
viodev->dev.of_node = of_node_get(of_node);
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
else
set_dma_ops(&viodev->dev, &dma_iommu_ops);
set_iommu_table_base(&viodev->dev,
vio_build_iommu_table(viodev));
/* needed to ensure proper operation of coherent allocations
* later, in case driver doesn't set it explicitly */
dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n",
__func__, dev_name(&viodev->dev));
put_device(&viodev->dev);
return NULL;
}
return viodev;
out: /* Use this exit point for any return prior to device_register */
kfree(viodev);
return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/*
* vio_bus_scan_register_devices - Scan OF and register each child device
* @root_name - OF node name for the root of the subtree to search.
* This must be non-NULL
*
* Starting from the root node provided, register the device node for
* each child beneath the root.
*/
static void vio_bus_scan_register_devices(char *root_name)
{
struct device_node *node_root, *node_child;
if (!root_name)
return;
node_root = of_find_node_by_name(NULL, root_name);
if (node_root) {
/*
* Create struct vio_devices for each virtual device in
* the device tree. Drivers will associate with them later.
*/
node_child = of_get_next_child(node_root, NULL);
while (node_child) {
vio_register_device_node(node_child);
node_child = of_get_next_child(node_root, node_child);
}
of_node_put(node_root);
}
}
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
static int __init vio_bus_init(void)
{
int err;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_sysfs_init();
err = bus_register(&vio_bus_type);
if (err) {
printk(KERN_ERR "failed to register VIO bus\n");
return err;
}
/*
* The fake parent of all vio devices, just to give us
* a nice directory
*/
err = device_register(&vio_bus_device.dev);
if (err) {
printk(KERN_WARNING "%s: device_register returned %i\n",
__func__, err);
return err;
}
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
return 0;
}
postcore_initcall(vio_bus_init);
static int __init vio_device_init(void)
{
vio_bus_scan_register_devices("vdevice");
vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
device_initcall(vio_device_init);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_node *of_node = dev->of_node;
return sprintf(buf, "%s\n", of_node_full_name(of_node));
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct device_node *dn;
const char *cp;
dn = dev->of_node;
if (!dn) {
strcpy(buf, "\n");
return strlen(buf);
}
cp = of_get_property(dn, "compatible", NULL);
if (!cp) {
strcpy(buf, "\n");
return strlen(buf);
}
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
__ATTR_RO(modalias),
__ATTR_NULL
};
void vio_unregister_device(struct vio_dev *viodev)
{
device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *ids = vio_drv->id_table;
return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct device_node *dn;
const char *cp;
dn = dev->of_node;
if (!dn)
return -ENODEV;
cp = of_get_property(dn, "compatible", NULL);
if (!cp)
return -ENODEV;
add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
return 0;
}
struct bus_type vio_bus_type = {
.name = "vio",
.dev_attrs = vio_dev_attrs,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
};
/**
* vio_get_attribute: - get attribute for virtual device
* @vdev: The vio device to get property.
* @which: The property/attribute to be extracted.
* @length: Pointer to length of returned data size (unused if NULL).
*
* Calls prom.c's of_get_property() to return the value of the
* attribute specified by @which
*/
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
* kobject name
*/
static struct vio_dev *vio_find_name(const char *name)
{
struct device *found;
found = bus_find_device_by_name(&vio_bus_type, NULL, name);
if (!found)
return NULL;
return to_vio_dev(found);
}
/**
* vio_find_node - find an already-registered vio_dev
* @vnode: device_node of the virtual device we're looking for
*/
struct vio_dev *vio_find_node(struct device_node *vnode)
{
char kobj_name[20];
struct device_node *vnode_parent;
const char *dev_type;
vnode_parent = of_get_parent(vnode);
if (!vnode_parent)
return NULL;
dev_type = of_get_property(vnode_parent, "device_type", NULL);
of_node_put(vnode_parent);
if (!dev_type)
return NULL;
/* construct the kobject name from the device node */
if (!strcmp(dev_type, "vdevice")) {
const __be32 *prop;
prop = of_get_property(vnode, "reg", NULL);
if (!prop)
return NULL;
snprintf(kobj_name, sizeof(kobj_name), "%x",
(uint32_t)of_read_number(prop, 1));
} else if (!strcmp(dev_type, "ibm,platform-facilities"))
snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
else
return NULL;
return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);
int vio_disable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */
| BlastarIndia/Blastarix | blastarix-3.12.7/arch/powerpc/kernel/vio.c | C | gpl-3.0 | 48,587 |
/* Common hooks for Motorola MCore.
Copyright (C) 1993-2015 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "common/common-target.h"
#include "common/common-target-def.h"
/* What options are we going to default to specific settings when
-O* happens; the user can subsequently override these settings.
Omitting the frame pointer is a very good idea on the MCore.
Scheduling isn't worth anything on the current MCore implementation. */
static const struct default_options mcore_option_optimization_table[] =
{
{ OPT_LEVELS_1_PLUS, OPT_ffunction_cse, NULL, 0 },
{ OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
{ OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
{ OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
{ OPT_LEVELS_ALL, OPT_fschedule_insns2, NULL, 0 },
{ OPT_LEVELS_SIZE, OPT_mhardlit, NULL, 0 },
{ OPT_LEVELS_NONE, 0, NULL, 0 }
};
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE mcore_option_optimization_table
#undef TARGET_EXCEPT_UNWIND_INFO
#define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
| KubaKaszycki/kubux | gcc/gcc/common/config/mcore/mcore-common.c | C | gpl-3.0 | 1,992 |
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
gTestsubsuite = 'LexicalConventions';
| sam/htmlunit-rhino-fork | testsrc/tests/js1_5/LexicalConventions/shell.js | JavaScript | mpl-2.0 | 244 |
---
layout: article
title: "Otimização de imagens"
description: "As imagens geralmente são responsáveis pela maior parte dos bytes transferidos em uma página da Web e muitas vezes ocupam uma parcela significativa do espaço visual. Como resultado, a otimização de imagens pode muitas vezes resultar em uma grande economia de bytes e melhor desempenho para seu site. Se menos bytes precisarem ser transferidos pelo navegador, haverá menor concorrência pela largura de banda do cliente e, consequentemente, o conteúdo relevante poderá ser transferido e exibido mais rapidamente na tela."
introduction: "Em geral, as imagens são responsáveis pela maior parte dos bytes transferidos em uma página da Web e frequentemente ocupam uma parcela significativa do espaço visual. Como resultado, a otimização de imagens pode muitas vezes resultar em uma grande economia de bytes e melhor desempenho para seu site. Se menos bytes precisarem ser transferidos pelo navegador, haverá menor concorrência pela largura de banda do cliente e, consequentemente, o conteúdo relevante poderá ser transferido e exibido mais rapidamente na tela."
article:
written_on: 2014-05-07
updated_on: 2014-05-10
order: 3
collection: optimizing-content-efficiency
authors:
- ilyagrigorik
key-takeaways:
  replace:
    - Eliminate unnecessary image resources
    - Leverage CSS3 effects where possible
    - Use web fonts instead of encoding text in images
  vector-raster:
    - Vector images are ideal for images made of geometric shapes
    - Vector images are zoom and resolution independent
    - Raster images should be used for complex scenes with lots of detail and irregular shapes
  hidpi:
    - High-resolution screens have multiple device pixels per CSS pixel
    - High-resolution images require a significantly higher number of pixels and bytes
    - Image optimization techniques are the same regardless of resolution
  optimizing-vector:
    - SVG is an XML-based image format
    - SVG files should be minified to reduce their size
    - SVG files should be compressed with GZIP
  optimizing-raster:
    - A raster image is a grid of pixels
    - Each pixel encodes color and transparency information
    - Image compressors use a variety of techniques to reduce the number of required bits per pixel and thereby reduce the file size of the image
  lossless-lossy:
    - "Due to how our eyes work, images are great candidates for lossy compression"
    - "Image optimization is a function of lossy and lossless compression"
    - "Differences between image formats come down to which lossy and lossless algorithms they use and how they are applied"
    - "There is no single best format or `quality setting` for all images: each combination of image content and compressor produces a different result"
  formats:
    - "Start by selecting the right universal format: GIF, PNG, or JPEG"
    - "Experiment and select the best settings for each format: quality, palette size, and so on"
    - Consider adding WebP and JPEG XR variants for modern clients
  scaled-images:
    - Delivering scaled assets is one of the simplest and most effective optimizations
    - Pay close attention to large assets, as they result in high overhead
    - Reduce the number of unnecessary pixels by scaling images to their display size
notes:
  decompressed:
    - "In addition, regardless of the image format used to transfer the data from server to client, when the image is decoded by the browser each pixel always occupies 4 bytes of memory. This can be an important constraint for large images and for devices that do not have much memory available, such as older mobile devices."
  artifacts:
    - "Left to right (PNG): 32-bit (16 million colors), 7-bit (128 colors), 5-bit (32 colors). Complex scenes with gradual color transitions (gradients, sky, and so on) require larger palettes to avoid visual artifacts such as the pixelated sky in the 5-bit asset. On the other hand, if the image only uses a few colors, a large palette is simply a waste of bits."
  quality:
    - "Quality levels of different image formats are not directly comparable because of differences in the algorithms used to encode the image: a JPEG at 90% quality will look quite different from a WebP at 90% quality. In fact, even quality levels within the same image format can produce visibly different results depending on the compressor implementation."
  resized:
    - 'Hovering over the image element in Chrome DevTools reveals both the "natural" and the "display" size of the image asset. In the example above, the 300 x 260 pixel image is transferred, but it is then resized (to 245 x 212) on the client when it is displayed.'
---
{% wrap content%}
<style>
img, video, object {
max-width: 100%;
}
img.center {
display: block;
margin-left: auto;
margin-right: auto;
}
</style>
{% include modules/toc.liquid %}
Image optimization is both an art and a science: an art because there is no one definitive answer for how best to compress an individual image, and a science because there are well-developed techniques and algorithms that can significantly reduce its size. Finding the optimal settings for your image requires careful analysis along many dimensions: format capabilities, content of the encoded data, quality, pixel dimensions, and more.
## Eliminating and replacing images
{% include modules/takeaway.liquid list=page.key-takeaways.replace %}
The first question you should ask yourself is whether an image is actually required to achieve the effect you are after. Good design is simple and always yields the best performance. The best optimization strategy is to eliminate image assets wherever possible, because they often consume many bytes relative to HTML, CSS, JavaScript, and other assets. That said, a well-placed image can communicate more than a thousand words, so it is up to you to find the balance.
Next, consider whether there is an alternative technology that could deliver the desired result more efficiently:
* **CSS effects** (gradients, shadows, and so on) and CSS animations can be used to produce resolution-independent assets that look sharp at every zoom level, often at a fraction of the bytes of an image file.
* **Web fonts** enable the use of beautiful typefaces while preserving the ability to select, search, and resize text, which is a significant usability improvement.
Think twice before encoding text in an image. Great typography is critical to good design, branding, and readability, but text in images delivers a poor user experience: it cannot be selected, searched, zoomed, or accessed, and it does not render well on high-DPI devices. Using web fonts requires its [own set of optimizations](https://www.igvita.com/2014/01/31/optimizing-web-font-rendering-performance/), but it addresses all of these concerns and is always a better choice for displaying text.
## Vector vs. raster images
{% include modules/takeaway.liquid list=page.key-takeaways.vector-raster %}
Once you have decided that an image is the best way to achieve the desired effect, you need to select the right format:
<div class="clear">
<div class="g--half">
<b>Vetor</b>
<img class="center" src="images/vector-zoom.png" alt="Imagem vetorial ampliada">
</div>
<div class="g--half g--last">
<b>Varredura</b>
<img src="images/raster-zoom.png" alt="Imagem de varredura ampliada">
</div>
</div>
* [Vector graphics](http://pt.wikipedia.org/wiki/Desenho_vetorial) use lines, points, and polygons to represent an image.
* [Raster graphics](http://pt.wikipedia.org/wiki/Raster) represent an image by encoding individual values for each pixel within a rectangular grid.
Each format has its pros and cons. Vector formats are ideal for images made of simple geometric shapes (for example, logos, text, icons, and so on) and deliver sharp results at every resolution and zoom setting, which makes them a perfect fit for high-resolution screens and assets that need to be displayed at varying sizes.
However, vector formats fall short when the scene is complex (for example, a photo): the amount of SVG markup required to describe all the shapes can be prohibitively high, and the output may still not look `photorealistic`. In that case, it is better to use a raster image format such as GIF, PNG, or JPEG, or one of the newer formats such as JPEG XR or WebP.
Raster images are not resolution or zoom independent: when you scale them up they become jagged and blurry. As a result, to deliver the best possible experience to your users you may need to save multiple versions of the same raster image at various resolutions.
## Implications of high-resolution screens
{% include modules/takeaway.liquid list=page.key-takeaways.hidpi %}
When we talk about image pixels we need to distinguish between two kinds of pixels: CSS pixels and device (physical) pixels. A single CSS pixel may contain multiple device pixels; that is, a single CSS pixel may correspond directly to a single device pixel or may be backed by several of them. What is the point? The more device pixels there are, the finer the detail of the content shown on screen.
<img src="images/css-vs-device-pixels.png" class="center" alt="CSS vs. device pixels">
High-DPI (HiDPI) screens produce beautiful results, but there is an obvious trade-off: image assets need more detail to take advantage of the higher device pixel counts. The good news is that vector images are ideally suited for this task, since they can be rendered at any resolution with sharp results; rendering the finer detail may cost more processing, but the underlying asset is the same and is resolution independent.
On the other hand, raster images pose a much bigger challenge because they encode image data on a per-pixel basis. Hence, the larger the number of pixels, the larger the file size of a raster image. As an example, consider the difference for a photo asset displayed at 100 x 100 (CSS) pixels:
<table class="table-3">
<colgroup><col span="1"><col span="1"><col span="1"></colgroup>
<thead>
<tr>
<th>Resolução da tela</th>
<th>Total de pixels</th>
<th>Tamanho do arquivo descompactado (4 bytes por pixel)</th>
</tr>
</thead>
<tbody>
<tr>
<td data-th="resolução">1 x</td>
<td data-th="total de pixels">100 x 100 = 10.000</td>
<td data-th="tamanho do arquivo">40.000 bytes</td>
</tr>
<tr>
<td data-th="resolução">2 x</td>
<td data-th="total de pixels">100 x 100 x 4 = 40.000</td>
<td data-th="tamanho do arquivo">160.000 bytes</td>
</tr>
<tr>
<td data-th="resolução">3 x</td>
<td data-th="total de pixels">100 x 100 x 9 = 90.000</td>
<td data-th="tamanho do arquivo">360.000 bytes</td>
</tr>
</tbody>
</table>
When the resolution of the physical screen doubles, the total number of pixels quadruples: twice the horizontal pixels times twice the vertical pixels. Hence, a `2x` screen does not double but quadruples the number of required pixels.
What does this mean in practice? High-resolution screens can display beautiful images, but they also require high-resolution images. Prefer vector images whenever possible, since they are resolution independent and always deliver sharp results, and if a raster image is required, deliver and optimize multiple variants of each image. More on that below.
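To put numbers on that growth, here is a minimal Python sketch that reproduces the table above for a hypothetical 100 x 100 CSS-pixel photo at different device pixel ratios:
{% highlight python %}
# Sketch: uncompressed memory cost of a 100 x 100 CSS-pixel image
# at different device pixel ratios (4 bytes per decoded pixel).
CSS_WIDTH, CSS_HEIGHT = 100, 100
BYTES_PER_PIXEL = 4

for dpr in (1, 2, 3):
    device_pixels = (CSS_WIDTH * dpr) * (CSS_HEIGHT * dpr)
    size_bytes = device_pixels * BYTES_PER_PIXEL
    print(f"{dpr}x screen: {device_pixels:,} pixels, {size_bytes:,} bytes uncompressed")
{% endhighlight %}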
## Optimizing vector images
{% include modules/takeaway.liquid list=page.key-takeaways.optimizing-vector %}
All modern browsers support SVG (Scalable Vector Graphics), an XML-based image format for two-dimensional graphics: you can embed the SVG markup directly in the page or reference it as an external resource. In turn, an SVG file can be created by most vector drawing software, or by hand in your favorite text editor.
{% highlight xml %}
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 17.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.2" baseProfile="tiny" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
x="0px" y="0px" viewBox="0 0 612 792" xml:space="preserve">
<g id="XMLID_1_">
<g>
<circle fill="red" stroke="black" stroke-width="2" stroke-miterlimit="10" cx="50" cy="50" r="40"/>
</g>
</g>
</svg>
{% endhighlight %}
The example above renders a simple circle with a black outline and a red background, and was exported from Adobe Illustrator. As you can tell, it contains a lot of metadata, such as layer information, comments, and XML namespaces, that is often unnecessary for rendering the asset in the browser. As a result, it is a good idea to always minify your SVG files with a tool such as [svgo](https://github.com/svg/svgo).
Case in point: here svgo reduces the Illustrator-generated SVG file by 58%, taking it from 470 down to 199 bytes. Further, because SVG is an XML-based format, you can also apply GZIP compression to reduce its transfer size; make sure your server is configured to compress SVG assets.
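As a rough sketch of that second step, and assuming a hypothetical minified `circle.svg` file on disk, the Python standard library is enough to estimate how much GZIP saves on top of minification:
{% highlight python %}
import gzip
from pathlib import Path

# Hypothetical minified SVG produced by svgo; any small XML file will do.
svg_bytes = Path("circle.svg").read_bytes()
compressed = gzip.compress(svg_bytes)

print(f"original: {len(svg_bytes)} bytes")
print(f"gzipped:  {len(compressed)} bytes")
print(f"savings:  {100 * (1 - len(compressed) / len(svg_bytes)):.1f}%")
{% endhighlight %}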
## Optimizing raster images
{% include modules/takeaway.liquid list=page.key-takeaways.optimizing-raster %}
A raster image is simply a two-dimensional grid of individual `pixels`; a 100 x 100 pixel image, for example, is a sequence of 10,000 pixels. In turn, each pixel stores [RGBA](http://en.wikipedia.org/wiki/RGBA_color_space) values: (R) red channel, (G) green channel, (B) blue channel, and (A) alpha (transparency) channel.
Internally, the browser allocates 256 values (shades) for each channel, which translates into 8 bits per channel (2^8 = 256) and 4 bytes per pixel (4 channels x 8 bits = 32 bits = 4 bytes). As a result, the file size is easy to compute once we know the dimensions of the grid:
* a 100 x 100 pixel image is composed of 10,000 pixels
* 10,000 pixels x 4 bytes = 40,000 bytes
* 40,000 bytes / 1024 = 39 KB
^
{% include modules/remember.liquid title="Note" list=page.notes.decompressed %}
<table class="table-3">
<colgroup><col span="1"><col span="1"><col span="1"></colgroup>
<thead>
<tr>
<th>Dimensões</th>
<th>Pixels</th>
<th>Tamanho do arquivo</th>
</tr>
</thead>
<tbody>
<tr>
<td data-th="dimensões">100 x 100</td>
<td data-th="pixels">10.000</td>
<td data-th="tamanho do arquivo">39 KB</td>
</tr>
<tr>
<td data-th="dimensões">200 x 200</td>
<td data-th="pixels">40.000</td>
<td data-th="tamanho do arquivo">156 KB</td>
</tr>
<tr>
<td data-th="dimensões">300 x 300</td>
<td data-th="pixels">90.000</td>
<td data-th="tamanho do arquivo">351 KB</td>
</tr>
<tr>
<td data-th="dimensões">500 x 500</td>
<td data-th="pixels">250.000</td>
<td data-th="tamanho do arquivo">977 KB</td>
</tr>
<tr>
<td data-th="dimensões">800 x 800</td>
<td data-th="pixels">640.000</td>
<td data-th="tamanho do arquivo">2.500 KB</td>
</tr>
</tbody>
</table>
39 KB may not seem like much for a 100 x 100 pixel image, but the number grows quickly for larger images, making image assets slow and expensive to download. So far, however, we have only described the `uncompressed` image format. What can we do to reduce the image file size?
One simple strategy is to reduce the `bit depth` of the image from 8 bits per channel to a smaller color palette: 8 bits per channel gives us 256 values per channel and 16,777,216 (256^3) colors in total. What if we reduced the palette to 256 colors? We would then need only 8 bits in total for the RGB channels, immediately saving 2 bytes per pixel: a 50% compression saving over the original 4-bytes-per-pixel format.
<img src="images/artifacts.png" class="center" alt="Artefatos de compactação">
{% include modules/remember.liquid title="Note" list=page.notes.artifacts %}
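The palette trade-off is easy to quantify. A minimal sketch, using a hypothetical 500 x 500 image, compares the raw storage cost of truecolor pixels against an 8-bit palettized version of the same image:
{% highlight python %}
# Sketch: storage cost of a 500 x 500 image at different bit depths.
width, height = 500, 500
pixels = width * height

truecolor_bytes = pixels * 4          # 8 bits per RGBA channel = 32 bits per pixel
palette_bytes = pixels * 1 + 256 * 3  # 8-bit palette index + a 256-entry RGB palette

print(f"truecolor (32 bpp): {truecolor_bytes / 1024:.0f} KB")
print(f"256-color palette:  {palette_bytes / 1024:.0f} KB")
{% endhighlight %}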
Next, once we have optimized the data stored in individual pixels, we can look at nearby pixels as well: many images, and photos in particular, contain many adjacent pixels with similar colors (for example, the sky or repeating textures). Using this to its advantage, the compressor can apply [delta encoding](http://en.wikipedia.org/wiki/Delta_encoding): instead of storing the individual values of each pixel, it stores the difference between adjacent pixels. If the adjacent pixels are identical, the delta is `zero` and only a single bit needs to be stored. And we do not have to stop there...
The human eye has different levels of sensitivity to different colors, so color encoding can be optimized by shrinking or growing the palette to exclude or include those colors.
`Adjacent` pixels form a two-dimensional grid, which means each pixel has several neighbors; this fact can be used to improve delta encoding further.
Instead of looking only at each pixel's immediate neighbors, we can look at larger blocks of nearby pixels and encode different blocks with different settings. And so on...
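As a toy sketch of the idea (real codecs are far more sophisticated), delta encoding of a single row of pixel values might look like this:
{% highlight python %}
# Toy delta encoding of one row of 8-bit luminance values.
row = [120, 121, 121, 122, 125, 125, 125, 124]

def delta_encode(values):
    # Store the first value, then only the difference to the previous pixel.
    return [values[0]] + [b - a for a, b in zip(values, values[1:])]

def delta_decode(deltas):
    out = [deltas[0]]
    for d in deltas[1:]:
        out.append(out[-1] + d)
    return out

encoded = delta_encode(row)
print(encoded)                       # [120, 1, 0, 1, 3, 0, 0, -1] - mostly tiny numbers
assert delta_decode(encoded) == row  # round-trips back to the original row
{% endhighlight %}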
As you can tell, image optimization gets complicated (or fun, depending on your perspective) rather quickly, and it is an active area of both academic and commercial research. Images account for a lot of bytes, so there is a lot of value in developing better compression techniques. If you are curious to learn more, see the [Wikipedia page](http://pt.wikipedia.org/wiki/Compactação_de_imagens) or the [WebP compression techniques document](https://developers.google.com/speed/webp/docs/compression) for a hands-on example.
All of which is great, but also rather academic: how does it help us optimize the images on our pages? Our job is not to invent new compression techniques, but it is important to understand the problem: RGBA pixels, bit depth, and the various optimization techniques. All of this is essential to understand and keep in mind before we dive into a discussion of the various raster image formats.
## Lossless vs. lossy compression
{% include modules/takeaway.liquid list=page.key-takeaways.lossless-lossy %}
For certain types of data, such as the source code of a page or an executable file, it is essential that the compressor does not alter or lose any of the original information: a single missing or wrong bit could completely change the meaning of the file's contents or, worse, make it entirely invalid. For other types of data, such as images, audio, and video, it may be perfectly acceptable to deliver an `approximate` representation of the original data.
In fact, because of how our vision works, it is often fine to discard some information about each pixel in order to reduce the file size. For example, since we are not equally sensitive to all colors, some colors can be encoded with fewer bits. As a result, a typical image optimization pipeline consists of two high-level steps:
1. The image is processed with a [lossy](http://en.wikipedia.org/wiki/Lossy_compression) filter that eliminates some pixel data
1. The image is processed with a [lossless](http://en.wikipedia.org/wiki/Lossless_compression) filter that compresses the pixel data
**The first step is optional, and the exact algorithm depends on the specific image format, but it is important to understand that any image can go through a lossy compression step to reduce its size.** In fact, the difference between the various image formats, such as GIF, PNG, JPEG, and others, lies in the combination of algorithms they use (or omit) when applying the lossy and lossless steps.
What is the optimal configuration of lossy and lossless optimization? The answer depends on the image contents and on your own criteria, such as the trade-off between file size and the artifacts introduced by lossy compression: in some cases you may want to skip lossy optimization to communicate intricate detail in full fidelity, and in others you may want to apply aggressive lossy optimization to reduce the file size of the asset. This is where context and your own judgment come into play; there is no universal setting.
<img src="images/save-for-web.png" class="center" alt="Save for web">
For example, when using a lossy format such as JPEG, the compressor typically exposes a customizable `quality` setting (such as the quality slider provided by the `Save for Web` function in Adobe Photoshop), usually a number between 1 and 100 that controls how the lossy and lossless algorithms behave internally. For best results, experiment with several quality settings for your images, and do not be afraid to dial the quality down; the visual results are often very good and the file-size reduction can be substantial.
{% include modules/remember.liquid title="Note" list=page.notes.quality %}
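If you prefer to script that experiment, a minimal sketch, assuming the Pillow imaging library is installed and a hypothetical `photo.png` source file exists, re-encodes one image at several JPEG quality settings so the sizes can be compared:
{% highlight python %}
from io import BytesIO
from PIL import Image  # Pillow: pip install pillow

img = Image.open("photo.png").convert("RGB")  # JPEG has no alpha channel

for quality in (95, 85, 75, 60):
    buf = BytesIO()
    img.save(buf, format="JPEG", quality=quality)
    print(f"quality={quality}: {buf.tell() / 1024:.1f} KB")
{% endhighlight %}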
## Selecting the right image format
{% include modules/takeaway.liquid list=page.key-takeaways.formats %}
Besides differing in their lossy and lossless compression algorithms, image formats also support different features, such as transparency (alpha) channels and animation. As a result, choosing the `right format` for a particular image is a combination of the desired visual results and the functional requirements.
<table class="table-4">
<colgroup><col span="1"><col span="1"><col span="1"><col span="1"></colgroup>
<thead>
<tr>
<th>Formato</th>
<th>Transparência</th>
<th>Animação</th>
<th>Navegador</th>
</tr>
</thead>
<tbody>
<tr>
<td data-th="formato"><a href="http://pt.wikipedia.org/wiki/Graphics_Interchange_Format">GIF</a></td>
<td data-th="transparência">Sim</td>
<td data-th="animação">Sim</td>
<td data-th="navegador">Todos</td>
</tr>
<tr>
<td data-th="formato"><a href="http://pt.wikipedia.org/wiki/PNG">PNG</a></td>
<td data-th="transparência">Sim</td>
<td data-th="animação">Não</td>
<td data-th="navegador">Todos</td>
</tr>
<tr>
<td data-th="formato"><a href="http://pt.wikipedia.org/wiki/Joint_Photographic_Experts_Group">JPEG</a></td>
<td data-th="transparência">Não</td>
<td data-th="animação">Não</td>
<td data-th="navegador">Todos</td>
</tr>
<tr>
<td data-th="formato"><a href="http://pt.wikipedia.org/wiki/JPEG_XR">JPEG XR</a></td>
<td data-th="transparência">Sim</td>
<td data-th="animação">Sim</td>
<td data-th="navegador">IE</td>
</tr>
<tr>
<td data-th="formato"><a href="http://pt.wikipedia.org/wiki/WebP">WebP</a></td>
<td data-th="transparência">Sim</td>
<td data-th="animação">Sim</td>
<td data-th="navegador">Google Chrome, Opera, Android</td>
</tr>
</tbody>
</table>
There are three universally supported image formats: GIF, PNG, and JPEG. In addition, some browsers also support newer formats such as WebP and JPEG XR, which offer better overall compression and more features. So, which format should you use?
<img src="images/format-tree.png" class="center" alt="Format selection decision tree">
1. **Do you need animation? If so, GIF is the only universal choice.**
    * GIF limits the color palette to at most 256 colors, which makes it a poor choice for most images. Moreover, PNG-8 offers better compression for images with small palettes. As a result, GIF is only the right answer when animation is required.
1. **Do you need to preserve fine detail at the highest possible resolution? Use PNG.**
    * PNG does not apply any lossy compression algorithms beyond the choice of the color palette size. As a result, it produces the highest-quality image, but at file sizes considerably larger than other formats. Use it judiciously.
    * If the asset consists of graphics made of geometric shapes, consider converting it to a vector format (SVG).
    * If the asset contains text, stop and reconsider. Text embedded in images cannot be selected, searched, or zoomed. If you need a custom look (for branding or other reasons), use web fonts instead.
1. **Are you optimizing a photo, a screenshot, or a similar image asset? Use JPEG.**
    * JPEG uses a combination of lossy and lossless optimization to reduce the file size of the asset. Try several JPEG quality levels to find the best quality-versus-size trade-off for your asset.
Finally, once you have determined the optimal image format and settings for each of your assets, consider adding an extra variant encoded in WebP and JPEG XR. Both are new formats and are not (yet) universally supported by all browsers, but they can still deliver significant savings for newer clients: on average, WebP delivers a [30% reduction in file size](https://developers.google.com/speed/webp/docs/webp_study) compared with an equivalent JPEG image.
Because WebP and JPEG XR are not universally supported, you need extra application or server logic to serve the appropriate asset:
* Some CDNs offer WebP and JPEG XR delivery as a service.
* Some open-source tools (such as PageSpeed for Apache or Nginx) automate the optimization, conversion, and delivery of qualifying assets.
* You can add application logic to detect the client, check which formats it supports, and serve the best available image format.
Finally, if you use a WebView to render content in your native application, you control the client entirely and can use WebP exclusively. Facebook, Google+, and many others use WebP to serve all of the images in their applications; the savings are definitely worth it. To learn more about WebP, see the Google I/O 2013 presentation [WebP: Deploying Faster, Smaller, and More Beautiful Images](https://www.youtube.com/watch?v=pS8udLMOOaE).
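Browsers that support WebP advertise it in the HTTP `Accept` request header, so the server-side check can be as small as the following sketch (the helper name and file names are hypothetical, not part of any particular framework):
{% highlight python %}
def pick_image_variant(accept_header: str, base_name: str) -> str:
    """Return the best image variant for the client's Accept header.

    Browsers that support WebP send something like:
        Accept: image/webp,image/*,*/*;q=0.8
    """
    if "image/webp" in accept_header:
        return f"{base_name}.webp"
    return f"{base_name}.jpg"  # universally supported fallback

print(pick_image_variant("image/webp,image/*,*/*;q=0.8", "hero"))  # hero.webp
print(pick_image_variant("image/png,image/*;q=0.8", "hero"))       # hero.jpg
{% endhighlight %}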
## Tools and parameter tuning
There is no perfect image format, tool, or set of optimization parameters that applies to all images. For the best results you need to pick the format and its settings based on the contents of the image and on your visual and other technical requirements.
<table class="table-2">
<colgroup><col span="1"><col span="1"></colgroup>
<thead>
<tr>
<th>Ferramenta</th>
<th>Descrição</th>
</tr>
</thead>
<tbody>
<tr>
<td data-th="ferramenta"><a href="http://www.lcdf.org/gifsicle/">gifsicle</a></td>
<td data-th="descrição">cria e otimiza imagens GIF</td>
</tr>
<tr>
<td data-th="ferramenta"><a href="http://jpegclub.org/jpegtran/">jpegtran</a></td>
<td data-th="descrição">otimiza imagens JPEG</td>
</tr>
<tr>
<td data-th="ferramenta"><a href="http://optipng.sourceforge.net/">optipng</a></td>
<td data-th="descrição">otimização de PNG sem perdas</td>
</tr>
<tr>
<td data-th="ferramenta"><a href="http://pngquant.org/">pngquant</a></td>
<td data-th="descrição">otimização de PNG com perdas</td>
</tr>
</tbody>
</table>
Do not be afraid to experiment with the parameters of each compressor: dial down the quality, look at the result, and repeat. Once you find a good set of settings, you can apply it to other similar images on your site, but do not assume that all images should be compressed with the same settings.
## Delivering scaled image assets
{% include modules/takeaway.liquid list=page.key-takeaways.scaled-images %}
Image optimization boils down to two criteria: optimizing the number of bytes used to encode each image pixel, and optimizing the total number of pixels. The file size of an image is simply the total number of pixels multiplied by the number of bytes used to encode each pixel. Nothing more, nothing less.
As a result, one of the simplest and most effective optimization techniques is to make sure we never ship more pixels than are needed to display the asset at its intended size in the browser. Sounds simple, right? Yet many pages fail this test for most of their image assets: they typically ship larger assets and rely on the browser to rescale them, displaying them at a lower resolution and burning extra CPU cycles in the process.
<img src="images/resized-image.png" class="center" alt="Resized image">
{% include modules/remember.liquid title="Note" list=page.notes.resized %}
The overhead of shipping unnecessary pixels just so the browser can rescale the image for us is a missed opportunity to reduce and optimize the total number of bytes needed to render the page. Also, note that the resize overhead is not just a function of how many pixels are shaved off each dimension, but also of the natural size of the image.
<table class="table-3">
<colgroup><col span="1"><col span="1"><col span="1"></colgroup>
<thead>
<tr>
<th>Tamanho natural</th>
<th>Tamanho de exibição</th>
<th>Pixels desnecessários</th>
</tr>
</thead>
<tbody>
<tr>
<td data-th="natural">110 x 110</td>
<td data-th="exibição">100 x 100</td>
<td data-th="sobrecarga">110 x 110 - 100 x 100 = 2100</td>
</tr>
<tr>
<td data-th="natural">410 x 410</td>
<td data-th="exibição">400 x 400</td>
<td data-th="sobrecarga">410 x 410 - 400 x 400 = 8.100</td>
</tr>
<tr>
<td data-th="natural">810 x 810</td>
<td data-th="exibição">800 x 800</td>
<td data-th="sobrecarga">810 x 810 - 800 x 800 = 16.100</td>
</tr>
</tbody>
</table>
In all three cases above, the display size is `just 10 pixels smaller` in each dimension than the natural size of the image, yet the number of extra pixels we would have to encode and transfer grows quickly with the natural size. Hence, while you may not be able to guarantee that every asset is served at its exact display size, **you should ensure that the number of unnecessary pixels is minimal, and that large assets in particular are delivered as close as possible to their display size.**
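If assets are produced in a build step, resizing them ahead of time is straightforward. A minimal sketch, assuming Pillow and a hypothetical `hero-original.png` source file, downscales an image to its intended display size before it is ever shipped:
{% highlight python %}
from PIL import Image  # Pillow: pip install pillow

DISPLAY_SIZE = (400, 400)  # the size the page will actually render the image at

img = Image.open("hero-original.png")
img.thumbnail(DISPLAY_SIZE)          # downscale in place, preserving aspect ratio
img.save("hero-400.png", optimize=True)
print(f"shipped size: {img.size}")
{% endhighlight %}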
## Image optimization checklist
Image optimization is both an art and a science: an art because there is no one definitive answer for how best to compress an individual image, and a science because there are well-developed techniques and algorithms that can significantly reduce its size.
Some tips and techniques to keep in mind as you work on optimizing your images:
* **Prefer vector formats:** vector images are resolution and scale independent, which makes them a perfect fit for today's wide variety of devices and high-resolution screens.
* **Minify and compress SVG assets:** the XML markup produced by most drawing applications often contains unnecessary metadata that can be removed; also make sure your servers are configured to apply GZIP compression to SVG assets.
* **Pick the best raster image format:** determine your functional requirements and select the best option for each asset.
* **Experiment with optimal quality settings for raster formats:** do not be afraid to dial down the `quality` settings; the results are often very good and the byte savings are significant.
* **Remove unnecessary image metadata:** many raster images carry unnecessary metadata about the asset: geo information, camera information, and so on. Use appropriate tools to strip this data.
* **Serve scaled images:** resize images on the server and make sure the `display` size is as close as possible to the `natural` size of the image. Pay particular attention to large images, since they account for the largest overhead when resized.
* **Automate, automate, automate:** invest in automated tools and infrastructure to make sure all of your image assets are always optimized.
{% include modules/nextarticle.liquid %}
{% endwrap %}
| tzik/WebFundamentals | src/_langs/pt-br/fundamentals/performance/optimizing-content-efficiency/image-optimization.markdown | Markdown | apache-2.0 | 34,252 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* <!-- Package description. -->
* Contains automatic JDBC store example.
*/
package org.apache.ignite.examples.datagrid.store.auto;
| irudyak/ignite | examples/src/main/java/org/apache/ignite/examples/datagrid/store/auto/package-info.java | Java | apache-2.0 | 942 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.schematron.processor;
import javax.xml.transform.Templates;
import javax.xml.transform.TransformerFactory;
import net.sf.saxon.TransformerFactoryImpl;
import org.apache.camel.component.schematron.constant.Constants;
import org.junit.Assert;
import org.junit.Test;
/**
* TemplatesFactory unit test.
*
*/
public class TemplatesFactoryTest {
private String rules = "sch/schematron-1.sch";
@Test
public void testInstantiateAnInstanceOfTemplates() throws Exception {
TemplatesFactory fac = TemplatesFactory.newInstance();
TransformerFactory factory = new TransformerFactoryImpl();
factory.setURIResolver(new ClassPathURIResolver(Constants.SCHEMATRON_TEMPLATES_ROOT_DIR, null));
Templates templates = fac.getTemplates(ClassLoader.getSystemResourceAsStream(rules), factory);
Assert.assertNotNull(templates);
}
}
| anoordover/camel | components/camel-schematron/src/test/java/org/apache/camel/component/schematron/processor/TemplatesFactoryTest.java | Java | apache-2.0 | 1,708 |
// Copyright ©2015 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mat64
import (
"math"
"github.com/gonum/blas"
"github.com/gonum/blas/blas64"
"github.com/gonum/matrix"
)
var (
symDense *SymDense
_ Matrix = symDense
_ Symmetric = symDense
_ RawSymmetricer = symDense
_ MutableSymmetric = symDense
)
const (
badSymTriangle = "mat64: blas64.Symmetric not upper"
badSymCap = "mat64: bad capacity for SymDense"
)
// SymDense is a symmetric matrix that uses dense storage. SymDense
// matrices are stored in the upper triangle.
type SymDense struct {
mat blas64.Symmetric
cap int
}
// Symmetric represents a symmetric matrix (where the element at {i, j} equals
// the element at {j, i}). Symmetric matrices are always square.
type Symmetric interface {
Matrix
// Symmetric returns the number of rows/columns in the matrix.
Symmetric() int
}
// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix.
type RawSymmetricer interface {
RawSymmetric() blas64.Symmetric
}
type MutableSymmetric interface {
Symmetric
SetSym(i, j int, v float64)
}
// NewSymDense creates a new Symmetric matrix with n rows and columns. If data == nil,
// a new slice is allocated for the backing slice. If len(data) == n*n, data is
// used as the backing slice, and changes to the elements of the returned SymDense
// will be reflected in data. If neither of these is true, NewSymDense will panic.
//
// The data must be arranged in row-major order, i.e. the (i*c + j)-th
// element in the data slice is the {i, j}-th element in the matrix.
// Only the values in the upper triangular portion of the matrix are used.
func NewSymDense(n int, data []float64) *SymDense {
if n < 0 {
panic("mat64: negative dimension")
}
if data != nil && n*n != len(data) {
panic(matrix.ErrShape)
}
if data == nil {
data = make([]float64, n*n)
}
return &SymDense{
mat: blas64.Symmetric{
N: n,
Stride: n,
Data: data,
Uplo: blas.Upper,
},
cap: n,
}
}
func (s *SymDense) Dims() (r, c int) {
return s.mat.N, s.mat.N
}
// T implements the Matrix interface. Symmetric matrices, by definition, are
// equal to their transpose, and this is a no-op.
func (s *SymDense) T() Matrix {
return s
}
func (s *SymDense) Symmetric() int {
return s.mat.N
}
// RawSymmetric returns the matrix as a blas64.Symmetric. The returned
// value must be stored in upper triangular format.
func (s *SymDense) RawSymmetric() blas64.Symmetric {
return s.mat
}
// SetRawSymmetric sets the underlying blas64.Symmetric used by the receiver.
// Changes to elements in the receiver following the call will be reflected
// in b. SetRawSymmetric will panic if b is not an upper-encoded symmetric
// matrix.
func (s *SymDense) SetRawSymmetric(b blas64.Symmetric) {
if b.Uplo != blas.Upper {
panic(badSymTriangle)
}
s.mat = b
}
// Reset zeros the dimensions of the matrix so that it can be reused as the
// receiver of a dimensionally restricted operation.
//
// See the Reseter interface for more information.
func (s *SymDense) Reset() {
// N and Stride must be zeroed in unison.
s.mat.N, s.mat.Stride = 0, 0
s.mat.Data = s.mat.Data[:0]
}
func (s *SymDense) isZero() bool {
// It must be the case that m.Dims() returns
// zeros in this case. See comment in Reset().
return s.mat.N == 0
}
// reuseAs resizes an empty matrix to a n×n matrix,
// or checks that a non-empty matrix is n×n.
func (s *SymDense) reuseAs(n int) {
if s.mat.N > s.cap {
panic(badSymCap)
}
if s.isZero() {
s.mat = blas64.Symmetric{
N: n,
Stride: n,
Data: use(s.mat.Data, n*n),
Uplo: blas.Upper,
}
s.cap = n
return
}
if s.mat.Uplo != blas.Upper {
panic(badSymTriangle)
}
if s.mat.N != n {
panic(matrix.ErrShape)
}
}
func (s *SymDense) isolatedWorkspace(a Symmetric) (w *SymDense, restore func()) {
n := a.Symmetric()
w = getWorkspaceSym(n, false)
return w, func() {
s.CopySym(w)
putWorkspaceSym(w)
}
}
func (s *SymDense) AddSym(a, b Symmetric) {
n := a.Symmetric()
if n != b.Symmetric() {
panic(matrix.ErrShape)
}
s.reuseAs(n)
if a, ok := a.(RawSymmetricer); ok {
if b, ok := b.(RawSymmetricer); ok {
amat, bmat := a.RawSymmetric(), b.RawSymmetric()
if s != a {
s.checkOverlap(amat)
}
if s != b {
s.checkOverlap(bmat)
}
for i := 0; i < n; i++ {
btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n]
stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n]
for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] {
stmp[j] = v + btmp[j]
}
}
return
}
}
for i := 0; i < n; i++ {
stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n]
for j := i; j < n; j++ {
stmp[j] = a.At(i, j) + b.At(i, j)
}
}
}
func (s *SymDense) CopySym(a Symmetric) int {
n := a.Symmetric()
n = min(n, s.mat.N)
if n == 0 {
return 0
}
switch a := a.(type) {
case RawSymmetricer:
amat := a.RawSymmetric()
if amat.Uplo != blas.Upper {
panic(badSymTriangle)
}
for i := 0; i < n; i++ {
copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n])
}
default:
for i := 0; i < n; i++ {
stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n]
for j := i; j < n; j++ {
stmp[j] = a.At(i, j)
}
}
}
return n
}
// SymRankOne performs a symmetric rank-one update to the matrix a and stores
// the result in the receiver
// s = a + alpha * x * x'
func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x *Vector) {
n := x.Len()
if a.Symmetric() != n {
panic(matrix.ErrShape)
}
s.reuseAs(n)
if s != a {
if rs, ok := a.(RawSymmetricer); ok {
s.checkOverlap(rs.RawSymmetric())
}
s.CopySym(a)
}
blas64.Syr(alpha, x.mat, s.mat)
}
// SymRankK performs a symmetric rank-k update to the matrix a and stores the
// result into the receiver. If a is zero, see SymOuterK.
// s = a + alpha * x * x'
func (s *SymDense) SymRankK(a Symmetric, alpha float64, x Matrix) {
n := a.Symmetric()
r, _ := x.Dims()
if r != n {
panic(matrix.ErrShape)
}
xMat, aTrans := untranspose(x)
var g blas64.General
if rm, ok := xMat.(RawMatrixer); ok {
g = rm.RawMatrix()
} else {
g = DenseCopyOf(x).mat
aTrans = false
}
if a != s {
if rs, ok := a.(RawSymmetricer); ok {
s.checkOverlap(rs.RawSymmetric())
}
s.reuseAs(n)
s.CopySym(a)
}
t := blas.NoTrans
if aTrans {
t = blas.Trans
}
blas64.Syrk(t, alpha, g, 1, s.mat)
}
// SymOuterK calculates the outer product of x with itself and stores
// the result into the receiver. It is equivalent to the matrix
// multiplication
// s = alpha * x * x'.
// In order to update an existing matrix, see SymRankK.
func (s *SymDense) SymOuterK(alpha float64, x Matrix) {
n, _ := x.Dims()
switch {
case s.isZero():
s.mat = blas64.Symmetric{
N: n,
Stride: n,
Data: useZeroed(s.mat.Data, n*n),
Uplo: blas.Upper,
}
s.cap = n
s.SymRankK(s, alpha, x)
case s.mat.Uplo != blas.Upper:
panic(badSymTriangle)
case s.mat.N == n:
if s == x {
w := getWorkspaceSym(n, true)
w.SymRankK(w, alpha, x)
s.CopySym(w)
putWorkspaceSym(w)
} else {
if rs, ok := x.(RawSymmetricer); ok {
s.checkOverlap(rs.RawSymmetric())
}
// Only zero the upper triangle.
for i := 0; i < n; i++ {
ri := i * s.mat.Stride
zero(s.mat.Data[ri+i : ri+n])
}
s.SymRankK(s, alpha, x)
}
default:
panic(matrix.ErrShape)
}
}
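// exampleOuterVsRankK is a minimal usage sketch contrasting SymOuterK and
// SymRankK. It assumes the package constructor NewDense (defined elsewhere
// in this package) and is illustrative only, not part of the exported API.
func exampleOuterVsRankK() *SymDense {
	// x is 3×2, so x * xᵀ is a 3×3 symmetric matrix.
	x := NewDense(3, 2, []float64{
		1, 0,
		0, 1,
		1, 1,
	})
	var s SymDense
	s.SymOuterK(2, x)    // s = 2 * x * xᵀ, built from scratch
	s.SymRankK(&s, 1, x) // s = s + x * xᵀ, an update of an existing matrix
	return &s
}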
// RankTwo performs a symmetric rank-two update to the matrix a and stores
// the result in the receiver
// s = a + alpha * (x * y' + y * x')
func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y *Vector) {
	n := x.Len()
	if a.Symmetric() != n {
		panic(matrix.ErrShape)
	}
	if y.Len() != n {
		panic(matrix.ErrShape)
	}
var w SymDense
if s == a {
w = *s
}
w.reuseAs(n)
if s != a {
if rs, ok := a.(RawSymmetricer); ok {
s.checkOverlap(rs.RawSymmetric())
}
w.CopySym(a)
}
blas64.Syr2(alpha, x.mat, y.mat, w.mat)
*s = w
return
}
// ScaleSym multiplies the elements of a by f, placing the result in the receiver.
func (s *SymDense) ScaleSym(f float64, a Symmetric) {
n := a.Symmetric()
s.reuseAs(n)
if a, ok := a.(RawSymmetricer); ok {
amat := a.RawSymmetric()
if s != a {
s.checkOverlap(amat)
}
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
s.mat.Data[i*s.mat.Stride+j] = f * amat.Data[i*amat.Stride+j]
}
}
return
}
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
s.mat.Data[i*s.mat.Stride+j] = f * a.At(i, j)
}
}
}
// SubsetSym extracts a subset of the rows and columns of the matrix a and stores
// the result in-place into the receiver. The resulting matrix size is
// len(set)×len(set). Specifically, at the conclusion of SubsetSym,
// s.At(i, j) equals a.At(set[i], set[j]). Note that the supplied set does not
// have to be a strict subset; dimension repeats are allowed.
func (s *SymDense) SubsetSym(a Symmetric, set []int) {
n := len(set)
na := a.Symmetric()
s.reuseAs(n)
var restore func()
if a == s {
s, restore = s.isolatedWorkspace(a)
defer restore()
}
if a, ok := a.(RawSymmetricer); ok {
raw := a.RawSymmetric()
if s != a {
s.checkOverlap(raw)
}
for i := 0; i < n; i++ {
ssub := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n]
r := set[i]
rsub := raw.Data[r*raw.Stride : r*raw.Stride+na]
for j := i; j < n; j++ {
c := set[j]
if r <= c {
ssub[j] = rsub[c]
} else {
ssub[j] = raw.Data[c*raw.Stride+r]
}
}
}
return
}
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
s.mat.Data[i*s.mat.Stride+j] = a.At(set[i], set[j])
}
}
}
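// exampleSubset is a minimal usage sketch of SubsetSym. It assumes the
// package constructor NewSymDense and is illustrative only.
func exampleSubset() *SymDense {
	a := NewSymDense(3, []float64{
		1, 2, 3,
		2, 4, 5,
		3, 5, 6,
	})
	var sub SymDense
	// Keep rows/columns {0, 2}: sub.At(0, 1) == a.At(0, 2) == 3.
	sub.SubsetSym(a, []int{0, 2})
	return &sub
}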
// ViewSquare returns a view of the submatrix starting at {i, i} and extending
// for n rows and columns. ViewSquare panics if the view is outside the bounds
// of the receiver.
//
// ViewSquare is deprecated and should not be used. It will be removed at a later date.
func (s *SymDense) ViewSquare(i, n int) Matrix {
return s.SliceSquare(i, i+n)
}
// SliceSquare returns a new Matrix that shares backing data with the receiver.
// The returned matrix starts at {i,i} of the receiver and extends k-i rows
// and columns. The final row and column in the resulting matrix is k-1.
// SliceSquare panics with ErrIndexOutOfRange if the slice is outside the bounds
// of the receiver.
func (s *SymDense) SliceSquare(i, k int) Matrix {
sz := s.Symmetric()
if i < 0 || sz < i || k < i || sz < k {
panic(matrix.ErrIndexOutOfRange)
}
v := *s
v.mat.Data = s.mat.Data[i*s.mat.Stride+i : (k-1)*s.mat.Stride+k]
v.mat.N = k - i
v.cap = s.cap - i
return &v
}
// GrowSquare returns the receiver expanded by n rows and n columns. If the
// dimensions of the expanded matrix are outside the capacity of the receiver
// a new allocation is made, otherwise not. Note that the receiver itself is
// not modified during the call to GrowSquare.
func (s *SymDense) GrowSquare(n int) Matrix {
if n < 0 {
panic(matrix.ErrIndexOutOfRange)
}
if n == 0 {
return s
}
var v SymDense
n += s.mat.N
if n > s.cap {
v.mat = blas64.Symmetric{
N: n,
Stride: n,
Uplo: blas.Upper,
Data: make([]float64, n*n),
}
v.cap = n
// Copy elements, including those not currently visible. Use a temporary
// structure to avoid modifying the receiver.
var tmp SymDense
tmp.mat = blas64.Symmetric{
N: s.cap,
Stride: s.mat.Stride,
Data: s.mat.Data,
Uplo: s.mat.Uplo,
}
tmp.cap = s.cap
v.CopySym(&tmp)
return &v
}
v.mat = blas64.Symmetric{
N: n,
Stride: s.mat.Stride,
Uplo: blas.Upper,
Data: s.mat.Data[:(n-1)*s.mat.Stride+n],
}
v.cap = s.cap
return &v
}
// PowPSD computes a^pow where a is a symmetric positive definite matrix.
//
// PowPSD returns an error if the matrix is not symmetric positive definite
// or the Eigendecomposition is not successful.
func (s *SymDense) PowPSD(a Symmetric, pow float64) error {
dim := a.Symmetric()
s.reuseAs(dim)
var eigen EigenSym
ok := eigen.Factorize(a, true)
if !ok {
return matrix.ErrFailedEigen
}
values := eigen.Values(nil)
for i, v := range values {
if v <= 0 {
return matrix.ErrNotPSD
}
values[i] = math.Pow(v, pow)
}
var u Dense
u.EigenvectorsSym(&eigen)
s.SymOuterK(values[0], u.ColView(0))
for i := 1; i < dim; i++ {
s.SymRankOne(s, values[i], u.ColView(i))
}
return nil
}
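// exampleSymDenseUsage is a minimal usage sketch tying several of the methods
// above together. It assumes the package constructors NewSymDense and
// NewVector (defined elsewhere in this package) and is illustrative only,
// not part of the exported API.
func exampleSymDenseUsage() (*SymDense, *SymDense, error) {
	// A 2×2 symmetric positive definite matrix, given in full row-major form.
	a := NewSymDense(2, []float64{
		4, 1,
		1, 3,
	})
	x := NewVector(2, []float64{1, 2})
	// s = a + 0.5 * x * xᵀ (rank-one update).
	var s SymDense
	s.SymRankOne(a, 0.5, x)
	// p ≈ a^(1/2), computed from the eigendecomposition by PowPSD.
	var p SymDense
	if err := p.PowPSD(a, 0.5); err != nil {
		return nil, nil, err
	}
	return &s, &p, nil
}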
|
pweil-/origin
|
vendor/github.com/gonum/matrix/mat64/symmetric.go
|
GO
|
apache-2.0
| 12,411
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/spdy/spdy_protocol.h"
namespace net {
SpdyFrameWithNameValueBlockIR::SpdyFrameWithNameValueBlockIR(
SpdyStreamId stream_id) : SpdyFrameWithFinIR(stream_id) {}
SpdyFrameWithNameValueBlockIR::~SpdyFrameWithNameValueBlockIR() {}
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, const base::StringPiece& data)
: SpdyFrameWithFinIR(stream_id) {
SetDataDeep(data);
}
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id)
: SpdyFrameWithFinIR(stream_id) {}
SpdyDataIR::~SpdyDataIR() {}
void SpdyDataIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitData(*this);
}
void SpdySynStreamIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitSynStream(*this);
}
void SpdySynReplyIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitSynReply(*this);
}
void SpdyRstStreamIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitRstStream(*this);
}
SpdySettingsIR::SpdySettingsIR() : clear_settings_(false) {}
SpdySettingsIR::~SpdySettingsIR() {}
void SpdySettingsIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitSettings(*this);
}
void SpdyPingIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPing(*this);
}
void SpdyGoAwayIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitGoAway(*this);
}
void SpdyHeadersIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitHeaders(*this);
}
void SpdyWindowUpdateIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitWindowUpdate(*this);
}
SpdyCredentialIR::SpdyCredentialIR(int16 slot) {
set_slot(slot);
}
SpdyCredentialIR::~SpdyCredentialIR() {}
void SpdyCredentialIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitCredential(*this);
}
void SpdyBlockedIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitBlocked(*this);
}
void SpdyPushPromiseIR::Visit(SpdyFrameVisitor* visitor) const {
return visitor->VisitPushPromise(*this);
}
} // namespace net
|
androidarmv6/android_external_chromium_org
|
net/spdy/spdy_protocol.cc
|
C++
|
bsd-3-clause
| 2,165
|
#ifndef __SUN3_HEAD_H
#define __SUN3_HEAD_H
#define KERNBASE 0xE000000 /* First address the kernel will eventually be */
#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
#define FC_CONTROL 3
#define FC_SUPERD 5
#define FC_CPU 7
#endif /* __SUN3_HEAD_H */
|
AiJiaZone/linux-4.0
|
virt/arch/m68k/include/asm/sun3-head.h
|
C
|
gpl-2.0
| 314
|
/******************************************************************************
*
* Module Name: utcopy - Internal to external object translation utilities
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2010, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utcopy")
/* Local prototypes */
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
u8 * data_space, acpi_size * buffer_space_used);
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context);
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
u8 * buffer, acpi_size * space_used);
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
union acpi_operand_object **return_obj);
static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
union acpi_operand_object **internal_object);
static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc);
static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context);
static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
union acpi_operand_object *dest_obj,
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_isimple_to_esimple
*
* PARAMETERS: internal_object - Source object to be copied
* external_object - Where to return the copied object
* data_space - Where object data is returned (such as
* buffer and string data)
* buffer_space_used - Length of data_space that was used
*
* RETURN: Status
*
* DESCRIPTION: This function is called to copy a simple internal object to
* an external object.
*
* The data_space buffer is assumed to have sufficient space for
* the object.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
u8 * data_space, acpi_size * buffer_space_used)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
*buffer_space_used = 0;
/*
* Check for NULL object case (could be an uninitialized
* package element)
*/
if (!internal_object) {
return_ACPI_STATUS(AE_OK);
}
/* Always clear the external object */
ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
/*
* In general, the external object will be the same type as
* the internal object
*/
external_object->type = internal_object->common.type;
/* However, only a limited number of external types are supported */
switch (internal_object->common.type) {
case ACPI_TYPE_STRING:
external_object->string.pointer = (char *)data_space;
external_object->string.length = internal_object->string.length;
*buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
internal_object->
string.
length + 1);
ACPI_MEMCPY((void *)data_space,
(void *)internal_object->string.pointer,
(acpi_size) internal_object->string.length + 1);
break;
case ACPI_TYPE_BUFFER:
external_object->buffer.pointer = data_space;
external_object->buffer.length = internal_object->buffer.length;
*buffer_space_used =
ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
length);
ACPI_MEMCPY((void *)data_space,
(void *)internal_object->buffer.pointer,
internal_object->buffer.length);
break;
case ACPI_TYPE_INTEGER:
external_object->integer.value = internal_object->integer.value;
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/* This is an object reference. */
switch (internal_object->reference.class) {
case ACPI_REFCLASS_NAME:
/*
* For namepath, return the object handle ("reference")
* We are referring to the namespace node
*/
external_object->reference.handle =
internal_object->reference.node;
external_object->reference.actual_type =
acpi_ns_get_type(internal_object->reference.node);
break;
default:
/* All other reference types are unsupported */
return_ACPI_STATUS(AE_TYPE);
}
break;
case ACPI_TYPE_PROCESSOR:
external_object->processor.proc_id =
internal_object->processor.proc_id;
external_object->processor.pblk_address =
internal_object->processor.address;
external_object->processor.pblk_length =
internal_object->processor.length;
break;
case ACPI_TYPE_POWER:
external_object->power_resource.system_level =
internal_object->power_resource.system_level;
external_object->power_resource.resource_order =
internal_object->power_resource.resource_order;
break;
default:
/*
* There is no corresponding external object type
*/
ACPI_ERROR((AE_INFO,
"Unsupported object type, cannot convert to external object: %s",
acpi_ut_get_type_name(internal_object->common.
type)));
return_ACPI_STATUS(AE_SUPPORT);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ielement_to_eelement
*
* PARAMETERS: acpi_pkg_callback
*
* RETURN: Status
*
* DESCRIPTION: Copy one package element to another package element
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context)
{
acpi_status status = AE_OK;
struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
acpi_size object_space;
u32 this_index;
union acpi_object *target_object;
ACPI_FUNCTION_ENTRY();
this_index = state->pkg.index;
target_object = (union acpi_object *)
&((union acpi_object *)(state->pkg.dest_object))->package.
elements[this_index];
switch (object_type) {
case ACPI_COPY_TYPE_SIMPLE:
/*
* This is a simple or null object
*/
status = acpi_ut_copy_isimple_to_esimple(source_object,
target_object,
info->free_space,
&object_space);
if (ACPI_FAILURE(status)) {
return (status);
}
break;
case ACPI_COPY_TYPE_PACKAGE:
/*
* Build the package object
*/
target_object->type = ACPI_TYPE_PACKAGE;
target_object->package.count = source_object->package.count;
target_object->package.elements =
ACPI_CAST_PTR(union acpi_object, info->free_space);
/*
* Pass the new package object back to the package walk routine
*/
state->pkg.this_target_obj = target_object;
/*
* Save space for the array of objects (Package elements)
* update the buffer length counter
*/
object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
target_object->
package.count *
sizeof(union
acpi_object));
break;
default:
return (AE_BAD_PARAMETER);
}
info->free_space += object_space;
info->length += object_space;
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ipackage_to_epackage
*
* PARAMETERS: internal_object - Pointer to the object we are returning
* Buffer - Where the object is returned
* space_used - Where the object length is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to place a package object in a user
* buffer. A package object by definition contains other objects.
*
* The buffer is assumed to have sufficient space for the object.
* The caller must have verified the buffer length needed using
* the acpi_ut_get_object_size function before calling this function.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
u8 * buffer, acpi_size * space_used)
{
union acpi_object *external_object;
acpi_status status;
struct acpi_pkg_info info;
ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
/*
* First package at head of the buffer
*/
external_object = ACPI_CAST_PTR(union acpi_object, buffer);
/*
* Free space begins right after the first package
*/
info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space =
buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.object_space = 0;
info.num_packages = 1;
external_object->type = internal_object->common.type;
external_object->package.count = internal_object->package.count;
external_object->package.elements = ACPI_CAST_PTR(union acpi_object,
info.free_space);
/*
* Leave room for an array of ACPI_OBJECTS in the buffer
* and move the free space past it
*/
info.length += (acpi_size) external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space += external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
status = acpi_ut_walk_package_tree(internal_object, external_object,
acpi_ut_copy_ielement_to_eelement,
&info);
*space_used = info.length;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_iobject_to_eobject
*
* PARAMETERS: internal_object - The internal object to be converted
* ret_buffer - Where the object is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to build an API object to be returned
* to the caller.
*
******************************************************************************/
acpi_status
acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
if (internal_object->common.type == ACPI_TYPE_PACKAGE) {
/*
* Package object: Copy all subobjects (including
* nested packages)
*/
status = acpi_ut_copy_ipackage_to_epackage(internal_object,
ret_buffer->pointer,
&ret_buffer->length);
} else {
/*
* Build a simple object (no nested objects)
*/
status = acpi_ut_copy_isimple_to_esimple(internal_object,
ACPI_CAST_PTR(union
acpi_object,
ret_buffer->
pointer),
ACPI_ADD_PTR(u8,
ret_buffer->
pointer,
ACPI_ROUND_UP_TO_NATIVE_WORD
(sizeof
(union
acpi_object))),
&ret_buffer->length);
/*
* build simple does not include the object size in the length
* so we add it in here
*/
ret_buffer->length += sizeof(union acpi_object);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_esimple_to_isimple
*
* PARAMETERS: external_object - The external object to be converted
* ret_internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: This function copies an external object to an internal one.
 * NOTE: Pointers can be copied; we don't need to copy data.
* (The pointers have to be valid in our address space no matter
* what we do with them!)
*
******************************************************************************/
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
union acpi_operand_object **ret_internal_object)
{
union acpi_operand_object *internal_object;
ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
/*
* Simple types supported are: String, Buffer, Integer
*/
switch (external_object->type) {
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_LOCAL_REFERENCE:
internal_object = acpi_ut_create_internal_object((u8)
external_object->
type);
if (!internal_object) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
break;
case ACPI_TYPE_ANY: /* This is the case for a NULL object */
*ret_internal_object = NULL;
return_ACPI_STATUS(AE_OK);
default:
/* All other types are not supported */
ACPI_ERROR((AE_INFO,
"Unsupported object type, cannot convert to internal object: %s",
acpi_ut_get_type_name(external_object->type)));
return_ACPI_STATUS(AE_SUPPORT);
}
/* Must COPY string and buffer contents */
switch (external_object->type) {
case ACPI_TYPE_STRING:
internal_object->string.pointer =
ACPI_ALLOCATE_ZEROED((acpi_size)
external_object->string.length + 1);
if (!internal_object->string.pointer) {
goto error_exit;
}
ACPI_MEMCPY(internal_object->string.pointer,
external_object->string.pointer,
external_object->string.length);
internal_object->string.length = external_object->string.length;
break;
case ACPI_TYPE_BUFFER:
internal_object->buffer.pointer =
ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
if (!internal_object->buffer.pointer) {
goto error_exit;
}
ACPI_MEMCPY(internal_object->buffer.pointer,
external_object->buffer.pointer,
external_object->buffer.length);
internal_object->buffer.length = external_object->buffer.length;
/* Mark buffer data valid */
internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
break;
case ACPI_TYPE_INTEGER:
internal_object->integer.value = external_object->integer.value;
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/* TBD: should validate incoming handle */
internal_object->reference.class = ACPI_REFCLASS_NAME;
internal_object->reference.node =
external_object->reference.handle;
break;
default:
/* Other types can't get here */
break;
}
*ret_internal_object = internal_object;
return_ACPI_STATUS(AE_OK);
error_exit:
acpi_ut_remove_reference(internal_object);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_epackage_to_ipackage
*
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Copy an external package object to an internal package.
* Handles nested packages.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
union acpi_operand_object **internal_object)
{
acpi_status status = AE_OK;
union acpi_operand_object *package_object;
union acpi_operand_object **package_elements;
u32 i;
ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
/* Create the package object */
package_object =
acpi_ut_create_package_object(external_object->package.count);
if (!package_object) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
package_elements = package_object->package.elements;
/*
* Recursive implementation. Probably ok, since nested external packages
* as parameters should be very rare.
*/
for (i = 0; i < external_object->package.count; i++) {
status =
acpi_ut_copy_eobject_to_iobject(&external_object->package.
elements[i],
&package_elements[i]);
if (ACPI_FAILURE(status)) {
/* Truncate package and delete it */
package_object->package.count = i;
package_elements[i] = NULL;
acpi_ut_remove_reference(package_object);
return_ACPI_STATUS(status);
}
}
/* Mark package data valid */
package_object->package.flags |= AOPOBJ_DATA_VALID;
*internal_object = package_object;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_eobject_to_iobject
*
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Converts an external object to an internal object.
*
******************************************************************************/
acpi_status
acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
union acpi_operand_object **internal_object)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
if (external_object->type == ACPI_TYPE_PACKAGE) {
status =
acpi_ut_copy_epackage_to_ipackage(external_object,
internal_object);
} else {
/*
* Build a simple object (no nested objects)
*/
status =
acpi_ut_copy_esimple_to_isimple(external_object,
internal_object);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_simple_object
*
* PARAMETERS: source_desc - The internal object to be copied
* dest_desc - New target object
*
* RETURN: Status
*
* DESCRIPTION: Simple copy of one internal object to another. Reference count
* of the destination object is preserved.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc)
{
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
/* Copy the entire source object over the destination object */
ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
sizeof(union acpi_operand_object));
/* Restore the saved fields */
dest_desc->common.reference_count = reference_count;
dest_desc->common.next_object = next_object;
/* New object is not static, regardless of source */
dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
/* Handle the objects with extra data */
switch (dest_desc->common.type) {
case ACPI_TYPE_BUFFER:
/*
* Allocate and copy the actual buffer if and only if:
* 1) There is a valid buffer pointer
* 2) The buffer has a length > 0
*/
if ((source_desc->buffer.pointer) &&
(source_desc->buffer.length)) {
dest_desc->buffer.pointer =
ACPI_ALLOCATE(source_desc->buffer.length);
if (!dest_desc->buffer.pointer) {
return (AE_NO_MEMORY);
}
/* Copy the actual buffer data */
ACPI_MEMCPY(dest_desc->buffer.pointer,
source_desc->buffer.pointer,
source_desc->buffer.length);
}
break;
case ACPI_TYPE_STRING:
/*
* Allocate and copy the actual string if and only if:
* 1) There is a valid string pointer
* (Pointer to a NULL string is allowed)
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
ACPI_ALLOCATE((acpi_size) source_desc->string.
length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
}
/* Copy the actual string data */
ACPI_MEMCPY(dest_desc->string.pointer,
source_desc->string.pointer,
(acpi_size) source_desc->string.length + 1);
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/*
* We copied the reference object, so we now must add a reference
* to the object pointed to by the reference
*
* DDBHandle reference (from Load/load_table) is a special reference,
* it does not have a Reference.Object, so does not need to
* increase the reference count
*/
if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
break;
}
acpi_ut_add_reference(source_desc->reference.object);
break;
case ACPI_TYPE_REGION:
/*
* We copied the Region Handler, so we now must add a reference
*/
if (dest_desc->region.handler) {
acpi_ut_add_reference(dest_desc->region.handler);
}
break;
/*
* For Mutex and Event objects, we cannot simply copy the underlying
* OS object. We must create a new one.
*/
case ACPI_TYPE_MUTEX:
status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
return status;
}
break;
case ACPI_TYPE_EVENT:
status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
&dest_desc->event.
os_semaphore);
if (ACPI_FAILURE(status)) {
return status;
}
break;
default:
/* Nothing to do for other simple objects */
break;
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ielement_to_ielement
*
* PARAMETERS: acpi_pkg_callback
*
* RETURN: Status
*
* DESCRIPTION: Copy one package element to another package element
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context)
{
acpi_status status = AE_OK;
u32 this_index;
union acpi_operand_object **this_target_ptr;
union acpi_operand_object *target_object;
ACPI_FUNCTION_ENTRY();
this_index = state->pkg.index;
this_target_ptr = (union acpi_operand_object **)
&state->pkg.dest_object->package.elements[this_index];
switch (object_type) {
case ACPI_COPY_TYPE_SIMPLE:
/* A null source object indicates a (legal) null package element */
if (source_object) {
/*
* This is a simple object, just copy it
*/
target_object =
acpi_ut_create_internal_object(source_object->
common.type);
if (!target_object) {
return (AE_NO_MEMORY);
}
status =
acpi_ut_copy_simple_object(source_object,
target_object);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
*this_target_ptr = target_object;
} else {
/* Pass through a null element */
*this_target_ptr = NULL;
}
break;
case ACPI_COPY_TYPE_PACKAGE:
/*
* This object is a package - go down another nesting level
* Create and build the package object
*/
target_object =
acpi_ut_create_package_object(source_object->package.count);
if (!target_object) {
return (AE_NO_MEMORY);
}
target_object->common.flags = source_object->common.flags;
/* Pass the new package object back to the package walk routine */
state->pkg.this_target_obj = target_object;
/* Store the object pointer in the parent package object */
*this_target_ptr = target_object;
break;
default:
return (AE_BAD_PARAMETER);
}
return (status);
error_exit:
acpi_ut_remove_reference(target_object);
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ipackage_to_ipackage
*
* PARAMETERS: source_obj - Pointer to the source package object
* dest_obj - Where the internal object is returned
* walk_state - Current Walk state descriptor
*
* RETURN: Status
*
* DESCRIPTION: This function is called to copy an internal package object
* into another internal package object.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
union acpi_operand_object *dest_obj,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
dest_obj->common.type = source_obj->common.type;
dest_obj->common.flags = source_obj->common.flags;
dest_obj->package.count = source_obj->package.count;
/*
* Create the object array and walk the source package tree
*/
dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
source_obj->package.
count +
1) * sizeof(void *));
if (!dest_obj->package.elements) {
ACPI_ERROR((AE_INFO, "Package allocation failure"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
* Copy the package element-by-element by walking the package "tree".
* This handles nested packages of arbitrary depth.
*/
status = acpi_ut_walk_package_tree(source_obj, dest_obj,
acpi_ut_copy_ielement_to_ielement,
walk_state);
if (ACPI_FAILURE(status)) {
/* On failure, delete the destination package object */
acpi_ut_remove_reference(dest_obj);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_iobject_to_iobject
*
* PARAMETERS: source_desc - The internal object to be copied
* dest_desc - Where the copied object is returned
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Copy an internal object to a new internal object
*
******************************************************************************/
acpi_status
acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
union acpi_operand_object **dest_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
/* Create the top level object */
*dest_desc = acpi_ut_create_internal_object(source_desc->common.type);
if (!*dest_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Copy the object and possible subobjects */
if (source_desc->common.type == ACPI_TYPE_PACKAGE) {
status =
acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
walk_state);
} else {
status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
}
return_ACPI_STATUS(status);
}
|
LinuxKernelDevelopment/lkd
|
drivers/acpi/acpica/utcopy.c
|
C
|
gpl-2.0
| 28,291
|
/*
* trace_export.c - export basic ftrace utilities to user space
*
* Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace_output.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ftrace
/* not needed for this file */
#undef __field_struct
#define __field_struct(type, item)
#undef __field
#define __field(type, item) type item;
#undef __field_desc
#define __field_desc(type, container, item) type item;
#undef __array
#define __array(type, item, size) type item[size];
#undef __array_desc
#define __array_desc(type, container, item, size) type item[size];
#undef __dynamic_array
#define __dynamic_array(type, item) type item[];
#undef F_STRUCT
#define F_STRUCT(args...) args
#undef F_printk
#define F_printk(fmt, args...) fmt, args
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
struct ____ftrace_##name { \
tstruct \
}; \
static void __always_unused ____ftrace_check_##name(void) \
{ \
struct ____ftrace_##name *__entry = NULL; \
\
/* force compile-time check on F_printk() */ \
printk(print); \
}
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \
FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"
#undef __field
#define __field(type, item) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
#undef __field_desc
#define __field_desc(type, container, item) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), \
container.item), \
sizeof(field.container.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
#undef __array
#define __array(type, item, len) \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
#undef __array_desc
#define __array_desc(type, container, item, len) \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), \
container.item), \
sizeof(field.container.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
#undef __dynamic_array
#define __dynamic_array(type, item) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
0, is_signed_type(type), FILTER_OTHER);\
if (ret) \
return ret;
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
int \
ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
{ \
struct struct_name field; \
int ret; \
\
tstruct; \
\
return ret; \
}
#include "trace_entries.h"
static int ftrace_raw_init_event(struct ftrace_event_call *call)
{
INIT_LIST_HEAD(&call->fields);
return 0;
}
#undef __entry
#define __entry REC
#undef __field
#define __field(type, item)
#undef __field_desc
#define __field_desc(type, container, item)
#undef __array
#define __array(type, item, len)
#undef __array_desc
#define __array_desc(type, container, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item)
#undef F_printk
#define F_printk(fmt, args...) #fmt ", " __stringify(args)
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
\
struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.id = type, \
.system = __stringify(TRACE_SYSTEM), \
.raw_init = ftrace_raw_init_event, \
.print_fmt = print, \
.define_fields = ftrace_define_fields_##call, \
}; \
#include "trace_entries.h"
|
ggsamsa/sched_casio
|
kernel/trace/trace_export.c
|
C
|
gpl-2.0
| 4,318
|
/* visorchipset_umode.h
*
* Copyright © 2010 - 2013 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*/
/** @file *********************************************************************
*
* This describes structures needed for the interface between the
* visorchipset driver and a user-mode component that opens the device.
*
******************************************************************************
*/
#ifndef __VISORCHIPSET_UMODE_H
#define __VISORCHIPSET_UMODE_H
/** The user-mode program can access the control channel buffer directly
* via this memory map.
*/
#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0x00000000)
#define VISORCHIPSET_MMAP_CONTROLCHANSIZE (0x00400000) /* 4MB */
#endif /* __VISORCHIPSET_UMODE_H */
|
iwinoto/v4l-media_build
|
media/drivers/staging/unisys/visorchipset/visorchipset_umode.h
|
C
|
gpl-2.0
| 1,287
|
"""
Some scripts define objects that we want to import via yaml files
that we pass to the script, so this directory must be a python
module, rather than just a directory full of scripts.
"""
|
fyffyt/pylearn2
|
pylearn2/scripts/__init__.py
|
Python
|
bsd-3-clause
| 191
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var gridRole = {
abstract: false,
accessibleNameRequired: true,
baseConcepts: [{
module: 'HTML',
concept: {
name: 'table',
attributes: [{
name: 'role',
value: 'grid'
}]
}
}],
childrenPresentational: false,
nameFrom: ['author'],
props: {
'aria-level': null,
'aria-multiselectable': null,
'aria-readonly': null
},
relatedConcepts: [],
requireContextRole: [],
requiredOwnedElements: [['rowgroup', 'row'], ['row']],
requiredProps: {},
superClass: [['roletype', 'widget', 'composite'], ['roletype', 'structure', 'section', 'table']]
};
exports.default = gridRole;
|
grshane/monthofmud
|
web/themes/custom/mom/node_modules/aria-query/lib/etc/roles/literal/gridRole.js
|
JavaScript
|
mit
| 723
|
<?php
/*
* This file is part of Zippy.
*
* (c) Alchemy <info@alchemy.fr>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Alchemy\Zippy\Adapter;
use Alchemy\Zippy\Adapter\BSDTar\TarBSDTarAdapter;
use Alchemy\Zippy\Adapter\BSDTar\TarBz2BSDTarAdapter;
use Alchemy\Zippy\Adapter\BSDTar\TarGzBSDTarAdapter;
use Alchemy\Zippy\Adapter\GNUTar\TarBz2GNUTarAdapter;
use Alchemy\Zippy\Adapter\GNUTar\TarGNUTarAdapter;
use Alchemy\Zippy\Adapter\GNUTar\TarGzGNUTarAdapter;
use Alchemy\Zippy\Resource\RequestMapper;
use Alchemy\Zippy\Resource\ResourceManager;
use Alchemy\Zippy\Resource\ResourceTeleporter;
use Alchemy\Zippy\Resource\TargetLocator;
use Alchemy\Zippy\Resource\TeleporterContainer;
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\Process\ExecutableFinder;
class AdapterContainer implements \ArrayAccess
{
private $items = array();
/**
* Builds the adapter container
*
* @return AdapterContainer
*/
public static function load()
{
$container = new static();
$container['zip.inflator'] = null;
$container['zip.deflator'] = null;
$container['resource-manager'] = function($container) {
return new ResourceManager(
$container['request-mapper'],
$container['resource-teleporter'],
$container['filesystem']
);
};
$container['executable-finder'] = function($container) {
return new ExecutableFinder();
};
$container['request-mapper'] = function($container) {
return new RequestMapper($container['target-locator']);
};
$container['target-locator'] = function() {
return new TargetLocator();
};
$container['teleporter-container'] = function($container) {
return TeleporterContainer::load();
};
$container['resource-teleporter'] = function($container) {
return new ResourceTeleporter($container['teleporter-container']);
};
$container['filesystem'] = function() {
return new Filesystem();
};
$container['Alchemy\\Zippy\\Adapter\\ZipAdapter'] = function($container) {
return ZipAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['zip.inflator'],
$container['zip.deflator']
);
};
$container['gnu-tar.inflator'] = null;
$container['gnu-tar.deflator'] = null;
$container['Alchemy\\Zippy\\Adapter\\GNUTar\\TarGNUTarAdapter'] = function($container) {
return TarGNUTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['gnu-tar.inflator'],
$container['gnu-tar.deflator']
);
};
$container['Alchemy\\Zippy\\Adapter\\GNUTar\\TarGzGNUTarAdapter'] = function($container) {
return TarGzGNUTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['gnu-tar.inflator'],
$container['gnu-tar.deflator']
);
};
$container['Alchemy\\Zippy\\Adapter\\GNUTar\\TarBz2GNUTarAdapter'] = function($container) {
return TarBz2GNUTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['gnu-tar.inflator'],
$container['gnu-tar.deflator']
);
};
$container['bsd-tar.inflator'] = null;
$container['bsd-tar.deflator'] = null;
$container['Alchemy\\Zippy\\Adapter\\BSDTar\\TarBSDTarAdapter'] = function($container) {
return TarBSDTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['bsd-tar.inflator'],
$container['bsd-tar.deflator']
);
};
$container['Alchemy\\Zippy\\Adapter\\BSDTar\\TarGzBSDTarAdapter'] = function($container) {
return TarGzBSDTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['bsd-tar.inflator'],
$container['bsd-tar.deflator']
);
};
$container['Alchemy\\Zippy\\Adapter\\BSDTar\\TarBz2BSDTarAdapter'] = function($container) {
return TarBz2BSDTarAdapter::newInstance(
$container['executable-finder'],
$container['resource-manager'],
$container['bsd-tar.inflator'],
$container['bsd-tar.deflator']);
};
$container['Alchemy\\Zippy\\Adapter\\ZipExtensionAdapter'] = function() {
return ZipExtensionAdapter::newInstance();
};
return $container;
}
/**
* (PHP 5 >= 5.0.0)<br/>
 * Whether an offset exists
*
* @link http://php.net/manual/en/arrayaccess.offsetexists.php
*
* @param mixed $offset <p>
* An offset to check for.
* </p>
*
* @return bool true on success or false on failure.
* <p>The return value will be casted to boolean if non-boolean was returned.</p>
*/
public function offsetExists($offset)
{
return isset($this->items[$offset]);
}
/**
* (PHP 5 >= 5.0.0)<br/>
* Offset to retrieve
* @link http://php.net/manual/en/arrayaccess.offsetget.php
* @param mixed $offset <p>
* The offset to retrieve.
* </p>
* @return mixed Can return all value types.
*/
public function offsetGet($offset)
{
if (array_key_exists($offset, $this->items) && is_callable($this->items[$offset])) {
$this->items[$offset] = call_user_func($this->items[$offset], $this);
}
if (array_key_exists($offset, $this->items)) {
return $this->items[$offset];
}
throw new \InvalidArgumentException();
}
/**
* (PHP 5 >= 5.0.0)<br/>
* Offset to set
* @link http://php.net/manual/en/arrayaccess.offsetset.php
* @param mixed $offset <p>
* The offset to assign the value to.
* </p>
* @param mixed $value <p>
* The value to set.
* </p>
* @return void
*/
public function offsetSet($offset, $value)
{
$this->items[$offset] = $value;
}
/**
* (PHP 5 >= 5.0.0)<br/>
* Offset to unset
* @link http://php.net/manual/en/arrayaccess.offsetunset.php
* @param mixed $offset <p>
* The offset to unset.
* </p>
* @return void
*/
public function offsetUnset($offset)
{
unset($this->items[$offset]);
}
}
|
kanei/vantuch.cz
|
wp-content/plugins/jekyll-exporter/vendor/alchemy/zippy/src/Adapter/AdapterContainer.php
|
PHP
|
gpl-2.0
| 7,057
|
<?php
/**
* Zend Framework
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://framework.zend.com/license/new-bsd
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@zend.com so we can send you a copy immediately.
*
* @category Zend
* @package Zend_Controller
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
/**
* Zend_XmlRpc_Value
*/
#require_once 'Zend/XmlRpc/Value.php';
/**
* Zend_XmlRpc_Fault
*/
#require_once 'Zend/XmlRpc/Fault.php';
/**
* XmlRpc Response
*
* Container for accessing an XMLRPC return value and creating the XML response.
*
* @category Zend
* @package Zend_XmlRpc
* @copyright Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
* @version $Id: Response.php 21359 2010-03-07 00:54:02Z lars $
*/
class Zend_XmlRpc_Response
{
/**
* Return value
* @var mixed
*/
protected $_return;
/**
* Return type
* @var string
*/
protected $_type;
/**
* Response character encoding
* @var string
*/
protected $_encoding = 'UTF-8';
/**
* Fault, if response is a fault response
* @var null|Zend_XmlRpc_Fault
*/
protected $_fault = null;
/**
* Constructor
*
* Can optionally pass in the return value and type hinting; otherwise, the
* return value can be set via {@link setReturnValue()}.
*
* @param mixed $return
* @param string $type
* @return void
*/
public function __construct($return = null, $type = null)
{
$this->setReturnValue($return, $type);
}
/**
* Set encoding to use in response
*
* @param string $encoding
* @return Zend_XmlRpc_Response
*/
public function setEncoding($encoding)
{
$this->_encoding = $encoding;
Zend_XmlRpc_Value::setEncoding($encoding);
return $this;
}
/**
* Retrieve current response encoding
*
* @return string
*/
public function getEncoding()
{
return $this->_encoding;
}
/**
* Set the return value
*
* Sets the return value, with optional type hinting if provided.
*
* @param mixed $value
* @param string $type
* @return void
*/
public function setReturnValue($value, $type = null)
{
$this->_return = $value;
$this->_type = (string) $type;
}
/**
* Retrieve the return value
*
* @return mixed
*/
public function getReturnValue()
{
return $this->_return;
}
/**
* Retrieve the XMLRPC value for the return value
*
* @return Zend_XmlRpc_Value
*/
protected function _getXmlRpcReturn()
{
return Zend_XmlRpc_Value::getXmlRpcValue($this->_return);
}
/**
* Is the response a fault response?
*
* @return boolean
*/
public function isFault()
{
return $this->_fault instanceof Zend_XmlRpc_Fault;
}
/**
* Returns the fault, if any.
*
* @return null|Zend_XmlRpc_Fault
*/
public function getFault()
{
return $this->_fault;
}
/**
* Load a response from an XML response
*
* Attempts to load a response from an XMLRPC response, autodetecting if it
* is a fault response.
*
* @param string $response
* @return boolean True if a valid XMLRPC response, false if a fault
* response or invalid input
*/
public function loadXml($response)
{
if (!is_string($response)) {
$this->_fault = new Zend_XmlRpc_Fault(650);
$this->_fault->setEncoding($this->getEncoding());
return false;
}
$loadEntities = libxml_disable_entity_loader(true);
$useInternalXmlErrors = libxml_use_internal_errors(true);
try {
$xml = new SimpleXMLElement($response);
libxml_disable_entity_loader($loadEntities);
libxml_use_internal_errors($useInternalXmlErrors);
} catch (Exception $e) {
libxml_disable_entity_loader($loadEntities);
libxml_use_internal_errors($useInternalXmlErrors);
// Not valid XML
$this->_fault = new Zend_XmlRpc_Fault(651);
$this->_fault->setEncoding($this->getEncoding());
return false;
}
if (!empty($xml->fault)) {
// fault response
$this->_fault = new Zend_XmlRpc_Fault();
$this->_fault->setEncoding($this->getEncoding());
$this->_fault->loadXml($response);
return false;
}
if (empty($xml->params)) {
// Invalid response
$this->_fault = new Zend_XmlRpc_Fault(652);
$this->_fault->setEncoding($this->getEncoding());
return false;
}
try {
if (!isset($xml->params) || !isset($xml->params->param) || !isset($xml->params->param->value)) {
throw new Zend_XmlRpc_Value_Exception('Missing XML-RPC value in XML');
}
$valueXml = $xml->params->param->value->asXML();
$value = Zend_XmlRpc_Value::getXmlRpcValue($valueXml, Zend_XmlRpc_Value::XML_STRING);
} catch (Zend_XmlRpc_Value_Exception $e) {
$this->_fault = new Zend_XmlRpc_Fault(653);
$this->_fault->setEncoding($this->getEncoding());
return false;
}
$this->setReturnValue($value->getValue());
return true;
}
/**
* Return response as XML
*
* @return string
*/
public function saveXml()
{
$value = $this->_getXmlRpcReturn();
$generator = Zend_XmlRpc_Value::getGenerator();
$generator->openElement('methodResponse')
->openElement('params')
->openElement('param');
$value->generateXml();
$generator->closeElement('param')
->closeElement('params')
->closeElement('methodResponse');
return $generator->flush();
}
/**
* Return XML response
*
* @return string
*/
public function __toString()
{
        return $this->saveXml();
}
}
|
T0MM0R/magento
|
web/app/code/core/Zend/XmlRpc/Response.php
|
PHP
|
gpl-2.0
| 6,637
|
<?php
/**
* Short description for file.
*
* PHP 5
*
* CakePHP(tm) Tests <http://book.cakephp.org/2.0/en/development/testing.html>
* Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
*
* Licensed under The MIT License
* Redistributions of files must retain the above copyright notice
*
* @copyright Copyright 2005-2012, Cake Software Foundation, Inc. (http://cakefoundation.org)
* @link http://book.cakephp.org/2.0/en/development/testing.html CakePHP(tm) Tests
* @package Cake.Test.Fixture
* @since CakePHP(tm) v 1.2.0.4667
* @license MIT License (http://www.opensource.org/licenses/mit-license.php)
*/
/**
* Short description for class.
*
* @package Cake.Test.Fixture
*/
class AdvertisementFixture extends CakeTestFixture {
/**
* name property
*
* @var string 'Advertisement'
*/
public $name = 'Advertisement';
/**
* fields property
*
* @var array
*/
public $fields = array(
'id' => array('type' => 'integer', 'key' => 'primary'),
'title' => array('type' => 'string', 'null' => false),
'created' => 'datetime',
'updated' => 'datetime'
);
/**
* records property
*
* @var array
*/
public $records = array(
array('title' => 'First Ad', 'created' => '2007-03-18 10:39:23', 'updated' => '2007-03-18 10:41:31'),
array('title' => 'Second Ad', 'created' => '2007-03-18 10:41:23', 'updated' => '2007-03-18 10:43:31')
);
}
|
anoochit/wcbookstore
|
webstore/lib/Cake/Test/Fixture/AdvertisementFixture.php
|
PHP
|
gpl-3.0
| 1,435
|
package context
import (
"context"
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx context.Context, key interface{}) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx context.Context, key interface{}) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
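// exampleTiming is a minimal usage sketch of Since and GetStringValue. The
// key type and the stored values are illustrative assumptions, not part of
// the original file.
func exampleTiming(ctx context.Context) (time.Duration, string) {
	type key string
	ctx = context.WithValue(ctx, key("started_at"), time.Now())
	ctx = context.WithValue(ctx, key("request.id"), "abc123")
	elapsed := Since(ctx, key("started_at"))     // duration since the stored start time
	id := GetStringValue(ctx, key("request.id")) // "" when missing or not a string
	return elapsed, id
}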
|
HuKeping/notary
|
vendor/github.com/docker/distribution/context/util.go
|
GO
|
apache-2.0
| 716
|
#!/bin/bash
#
# https://bugzilla.redhat.com/show_bug.cgi?id=1415570
#
# In the initial release of OCP 3.4, paths for two objects,
# User and EgressNetworkPolicy, inadvertently changed.
# This script migrates any of these resources created in a
# version of OCP 3.4 without the fix to the proper location
# in etcd. Namely:
#
# identities -> useridentities
# egressnetworkpolicies -> registry/egressnetworkpolicy
USAGE="${0} [-a] [-c os-master-config-dir] [-p os-etcd-prefix] [-b backup-dir] etcd-endpoints"
usage() {
echo "${USAGE}"
exit 1
}
# default values
APPLY=false
OS_MASTER_CONFIG_DIR="/etc/origin/master"
OS_ETCD_PREFIX="/openshift.io"
BACKUP_DIR="$HOME/openshift-3.4-migration-backup"
while getopts ":ac:p:b:" opt; do
case $opt in
a)
APPLY=true
;;
c)
OS_MASTER_CONFIG_DIR="${OPTARG}"
;;
p)
OS_ETCD_PREFIX="${OPTARG}"
;;
b)
BACKUP_DIR="${OPTARG}"
;;
\?)
usage
;;
:)
echo "Option -$OPTARG requires an argument"
usage
;;
esac
done
shift $((OPTIND-1))
export ETCDCTL_ENDPOINT=${1:-""}
export ETCDCTL_CA_FILE=${ETCDCTL_CA_FILE:-"${OS_MASTER_CONFIG_DIR}/master.etcd-ca.crt"}
export ETCDCTL_CERT_FILE=${ETCDCTL_CERT_FILE:-"${OS_MASTER_CONFIG_DIR}/master.etcd-client.crt"}
export ETCDCTL_KEY_FILE=${ETCDCTL_KEY_FILE:-"${OS_MASTER_CONFIG_DIR}/master.etcd-client.key"}
if [[ ! -e "${ETCDCTL_CA_FILE}" ]]; then
ETCDCTL_CA_FILE="${OS_MASTER_CONFIG_DIR}/ca.crt"
if [[ ! -e "${ETCDCTL_CA_FILE}" ]]; then
echo "Default CA files not found. Please specify correct ETCDCTL_CA_FILE."
exit 1
fi
fi
if [[ ! -e "${ETCDCTL_CERT_FILE}" ]]; then
echo "Default client cert file not found. Please specify correct ETCDCTL_CERT_FILE."
exit 1
fi
if [[ ! -e "${ETCDCTL_KEY_FILE}" ]]; then
echo "Default client key file not found. Please specify correct ETCDCTL_KEY_FILE."
exit 1
fi
if [[ -z "${ETCDCTL_ENDPOINT}" ]]; then
echo "etcd-endpoints required"
usage
fi
if [[ "$APPLY" != "true" ]]; then
echo "Running in dry-run mode. Use -a option to apply changes."
else
if ! mkdir -p "${BACKUP_DIR}"; then
echo "Unable to create backup directory ${BACKUP_DIR}"
exit 1
fi
fi
if ! command -v etcdctl &>/dev/null; then
echo "This utility requires etcdctl to be installed"
exit 1
fi
echo_mode() {
if [[ "$APPLY" != "true" ]]; then
echo "dry-run:" "$@"
else
echo "$@"
fi
}
backup_key() {
key="${1}"
value="${2}"
backupfile="${BACKUP_DIR}/${key}"
mkdir -p "$(dirname "${backupfile}")"
echo "$value" > "${backupfile}"
}
copy_key() {
echo_mode "copying ${1} to ${2}"
if ! value="$(etcdctl get "${1}")"; then
echo_mode "failed to get key ${1}"
exit 1
fi
if existing=$(etcdctl get "${2}" 2>/dev/null); then
echo_mode "overwriting existing key ${2}"
fi
if [[ "$APPLY" = "true" ]]; then
backup_key "${1}" "${value}"
if [[ -n "${existing}" ]]; then
backup_key "${2}" "${existing}"
fi
if ! etcdctl set "${2}" "$value" >/dev/null; then
echo "failed to set key ${2}"
exit 1
fi
if ! etcdctl rm "${1}" >/dev/null; then
echo "failed to remove old key ${1}"
exit 1
fi
fi
return 0
}
copy_keys() {
output="$(etcdctl ls "${1}")"
if [[ $? -ne 0 || -z "$output" ]]; then
echo_mode "No keys found to migrate"
return
fi
for key in $output; do
newkey="${2}/$(basename "${key}")"
copy_key "${key}" "${newkey}"
done
}
IFS=$'\n'
echo_mode "Migrating Users"
copy_keys "${OS_ETCD_PREFIX}/identities" "${OS_ETCD_PREFIX}/useridentities"
echo_mode "Migrating Egress Policies"
output="$(etcdctl ls "${OS_ETCD_PREFIX}/egressnetworkpolicies")"
if [[ $? -ne 0 || -z "$output" ]]; then
echo_mode "No keys found to migrate"
else
for project in $output; do
projectname="$(basename "${project}")"
echo_mode "Project $projectname"
copy_keys "${OS_ETCD_PREFIX}/egressnetworkpolicies/${projectname}" "${OS_ETCD_PREFIX}/registry/egressnetworkpolicy/${projectname}"
done
fi
|
tmckayus/oshinko-cli
|
vendor/github.com/openshift/origin/contrib/migration/fix-3.4-paths.sh
|
Shell
|
apache-2.0
| 4,022
|
#!/usr/bin/env bash
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
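# Usage sketch (the command shown is illustrative): wrap any command so it runs
# inside the project's virtualenv, e.g.
#   tools/with_venv.sh python -m pytest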
TOOLS=$(dirname "$0")
VENV="$TOOLS/../.venv"
source "$VENV/bin/activate" && "$@"
|
jumpojoy/neutron
|
tools/with_venv.sh
|
Shell
|
apache-2.0
| 731
|
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef RTL8180_SA2400_H
#define RTL8180_SA2400_H
/*
* Radio tuning for Philips SA2400 on RTL8180
*
* Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
 * Code from the BSD driver and the rtl8181 project has been
 * very useful for understanding certain things.
 *
 * I want to thank the authors of those projects and the Ndiswrapper
 * project authors.
 *
 * A special big thanks also goes to all the people who donated cards to me,
 * making possible the creation of the original rtl8180 driver
 * from which this code is derived!
*/
#define SA2400_ANTENNA 0x91
#define SA2400_DIG_ANAPARAM_PWR1_ON 0x8
#define SA2400_ANA_ANAPARAM_PWR1_ON 0x28
#define SA2400_ANAPARAM_PWR0_ON 0x3
/* RX sensitivity in dBm */
#define SA2400_MAX_SENS 85
#define SA2400_REG4_FIRDAC_SHIFT 7
extern const struct rtl818x_rf_ops sa2400_rf_ops;
#endif /* RTL8180_SA2400_H */
|
CSE3320/kernel-code
|
linux-5.8/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
|
C
|
gpl-2.0
| 903
|
// SPDX-License-Identifier: GPL-2.0
/*
 * Check for the extended topology enumeration cpuid leaf 0xb and, if it
 * exists, use it to populate initial_apicid and to detect the cpu
 * topology.
*/
#include <linux/cpu.h>
#include <asm/apic.h>
#include <asm/pat.h>
#include <asm/processor.h>
#include "cpu.h"
/* leaf 0xb SMT level */
#define SMT_LEVEL 0
/* extended topology sub-leaf types */
#define INVALID_TYPE 0
#define SMT_TYPE 1
#define CORE_TYPE 2
#define DIE_TYPE 5
#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
#ifdef CONFIG_SMP
unsigned int __max_die_per_package __read_mostly = 1;
EXPORT_SYMBOL(__max_die_per_package);
/*
 * Check if the given CPUID extended topology "leaf" is implemented
*/
static int check_extended_topology_leaf(int leaf)
{
unsigned int eax, ebx, ecx, edx;
cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
return -1;
return 0;
}
/*
 * Return the best supported CPUID Extended Topology Leaf
*/
static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
{
if (c->cpuid_level >= 0x1f) {
if (check_extended_topology_leaf(0x1f) == 0)
return 0x1f;
}
if (c->cpuid_level >= 0xb) {
if (check_extended_topology_leaf(0xb) == 0)
return 0xb;
}
return -1;
}
#endif
int detect_extended_topology_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx;
int leaf;
leaf = detect_extended_topology_leaf(c);
if (leaf < 0)
return -1;
set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
/*
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
#endif
return 0;
}
/*
 * Check for an extended topology enumeration cpuid leaf and, if one
 * exists, use it to populate initial_apicid and to detect the cpu
 * topology.
*/
int detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
int leaf;
leaf = detect_extended_topology_leaf(c);
if (leaf < 0)
return -1;
/*
* Populate HT related information from sub-leaf level 0.
*/
cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
c->initial_apicid = edx;
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
sub_index = 1;
do {
cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
/*
* Check for the Core type in the implemented sub leaves.
*/
if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = core_level_siblings;
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}
sub_index++;
} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
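/*
 * Worked example (illustrative numbers): with ht_mask_width = 1 and
 * core_plus_mask_width = 5, core_select_mask = (~(-1 << 5)) >> 1 = 0xf,
 * i.e. the core-ID bits sitting just above the single SMT bit of the
 * x2APIC ID.
 */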
core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
die_select_mask = (~(-1 << die_plus_mask_width)) >>
core_plus_mask_width;
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
ht_mask_width) & core_select_mask;
c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
core_plus_mask_width) & die_select_mask;
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
/*
* Reinit the apicid, now that we have extended initial_apicid.
*/
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
__max_die_per_package = (die_level_siblings / core_level_siblings);
#endif
return 0;
}
|
BPI-SINOVOIP/BPI-Mainline-kernel
|
linux-5.4/arch/x86/kernel/cpu/topology.c
|
C
|
gpl-2.0
| 4,048
|
/* GCC Quad-Precision Math Library
Copyright (C) 2010, 2011 Free Software Foundation, Inc.
Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
This file is part of the libquadmath library.
Libquadmath is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
Libquadmath is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with libquadmath; see the file COPYING.LIB. If
not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
Boston, MA 02110-1301, USA. */
#ifndef QUADMATH_H
#define QUADMATH_H
#include <stdlib.h>
/* Define the complex type corresponding to __float128
("_Complex __float128" is not allowed) */
typedef _Complex float __attribute__((mode(TC))) __complex128;
#ifdef __cplusplus
# define __quadmath_throw throw ()
# define __quadmath_nth(fct) fct throw ()
#else
# define __quadmath_throw __attribute__((__nothrow__))
# define __quadmath_nth(fct) __attribute__((__nothrow__)) fct
#endif
/* Prototypes for real functions */
extern __float128 acosq (__float128) __quadmath_throw;
extern __float128 acoshq (__float128) __quadmath_throw;
extern __float128 asinq (__float128) __quadmath_throw;
extern __float128 asinhq (__float128) __quadmath_throw;
extern __float128 atanq (__float128) __quadmath_throw;
extern __float128 atanhq (__float128) __quadmath_throw;
extern __float128 atan2q (__float128, __float128) __quadmath_throw;
extern __float128 cbrtq (__float128) __quadmath_throw;
extern __float128 ceilq (__float128) __quadmath_throw;
extern __float128 copysignq (__float128, __float128) __quadmath_throw;
extern __float128 coshq (__float128) __quadmath_throw;
extern __float128 cosq (__float128) __quadmath_throw;
extern __float128 erfq (__float128) __quadmath_throw;
extern __float128 erfcq (__float128) __quadmath_throw;
extern __float128 expq (__float128) __quadmath_throw;
extern __float128 expm1q (__float128) __quadmath_throw;
extern __float128 fabsq (__float128) __quadmath_throw;
extern __float128 fdimq (__float128, __float128) __quadmath_throw;
extern int finiteq (__float128) __quadmath_throw;
extern __float128 floorq (__float128) __quadmath_throw;
extern __float128 fmaq (__float128, __float128, __float128) __quadmath_throw;
extern __float128 fmaxq (__float128, __float128) __quadmath_throw;
extern __float128 fminq (__float128, __float128) __quadmath_throw;
extern __float128 fmodq (__float128, __float128) __quadmath_throw;
extern __float128 frexpq (__float128, int *) __quadmath_throw;
extern __float128 hypotq (__float128, __float128) __quadmath_throw;
extern int isinfq (__float128) __quadmath_throw;
extern int ilogbq (__float128) __quadmath_throw;
extern int isnanq (__float128) __quadmath_throw;
extern __float128 j0q (__float128) __quadmath_throw;
extern __float128 j1q (__float128) __quadmath_throw;
extern __float128 jnq (int, __float128) __quadmath_throw;
extern __float128 ldexpq (__float128, int) __quadmath_throw;
extern __float128 lgammaq (__float128) __quadmath_throw;
extern long long int llrintq (__float128) __quadmath_throw;
extern long long int llroundq (__float128) __quadmath_throw;
extern __float128 logq (__float128) __quadmath_throw;
extern __float128 log10q (__float128) __quadmath_throw;
extern __float128 log2q (__float128) __quadmath_throw;
extern __float128 log1pq (__float128) __quadmath_throw;
extern long int lrintq (__float128) __quadmath_throw;
extern long int lroundq (__float128) __quadmath_throw;
extern __float128 modfq (__float128, __float128 *) __quadmath_throw;
extern __float128 nanq (const char *) __quadmath_throw;
extern __float128 nearbyintq (__float128) __quadmath_throw;
extern __float128 nextafterq (__float128, __float128) __quadmath_throw;
extern __float128 powq (__float128, __float128) __quadmath_throw;
extern __float128 remainderq (__float128, __float128) __quadmath_throw;
extern __float128 remquoq (__float128, __float128, int *) __quadmath_throw;
extern __float128 rintq (__float128) __quadmath_throw;
extern __float128 roundq (__float128) __quadmath_throw;
extern __float128 scalblnq (__float128, long int) __quadmath_throw;
extern __float128 scalbnq (__float128, int) __quadmath_throw;
extern int signbitq (__float128) __quadmath_throw;
extern void sincosq (__float128, __float128 *, __float128 *) __quadmath_throw;
extern __float128 sinhq (__float128) __quadmath_throw;
extern __float128 sinq (__float128) __quadmath_throw;
extern __float128 sqrtq (__float128) __quadmath_throw;
extern __float128 tanq (__float128) __quadmath_throw;
extern __float128 tanhq (__float128) __quadmath_throw;
extern __float128 tgammaq (__float128) __quadmath_throw;
extern __float128 truncq (__float128) __quadmath_throw;
extern __float128 y0q (__float128) __quadmath_throw;
extern __float128 y1q (__float128) __quadmath_throw;
extern __float128 ynq (int, __float128) __quadmath_throw;
/* Prototypes for complex functions */
extern __float128 cabsq (__complex128) __quadmath_throw;
extern __float128 cargq (__complex128) __quadmath_throw;
extern __float128 cimagq (__complex128) __quadmath_throw;
extern __float128 crealq (__complex128) __quadmath_throw;
extern __complex128 cacosq (__complex128) __quadmath_throw;
extern __complex128 cacoshq (__complex128) __quadmath_throw;
extern __complex128 casinq (__complex128) __quadmath_throw;
extern __complex128 casinhq (__complex128) __quadmath_throw;
extern __complex128 catanq (__complex128) __quadmath_throw;
extern __complex128 catanhq (__complex128) __quadmath_throw;
extern __complex128 ccosq (__complex128) __quadmath_throw;
extern __complex128 ccoshq (__complex128) __quadmath_throw;
extern __complex128 cexpq (__complex128) __quadmath_throw;
extern __complex128 cexpiq (__float128) __quadmath_throw;
extern __complex128 clogq (__complex128) __quadmath_throw;
extern __complex128 clog10q (__complex128) __quadmath_throw;
extern __complex128 conjq (__complex128) __quadmath_throw;
extern __complex128 cpowq (__complex128, __complex128) __quadmath_throw;
extern __complex128 cprojq (__complex128) __quadmath_throw;
extern __complex128 csinq (__complex128) __quadmath_throw;
extern __complex128 csinhq (__complex128) __quadmath_throw;
extern __complex128 csqrtq (__complex128) __quadmath_throw;
extern __complex128 ctanq (__complex128) __quadmath_throw;
extern __complex128 ctanhq (__complex128) __quadmath_throw;
/* Prototypes for string <-> __float128 conversion functions */
extern __float128 strtoflt128 (const char *, char **) __quadmath_throw;
extern int quadmath_snprintf (char *str, size_t size,
const char *format, ...) __quadmath_throw;
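/* Usage sketch (illustrative, not part of this header): parse a quad-precision
   value and print it back; quadmath_snprintf() takes printf-style formats with
   the "Q" length modifier.
       __float128 x = strtoflt128("1.5", NULL);
       char buf[64];
       quadmath_snprintf(buf, sizeof buf, "%.33Qg", x);
*/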
/* Macros */
#define FLT128_MAX 1.18973149535723176508575932662800702e4932Q
#define FLT128_MIN 3.36210314311209350626267781732175260e-4932Q
#define FLT128_EPSILON 1.92592994438723585305597794258492732e-34Q
#define FLT128_DENORM_MIN 6.475175119438025110924438958227646552e-4966Q
#define FLT128_MANT_DIG 113
#define FLT128_MIN_EXP (-16381)
#define FLT128_MAX_EXP 16384
#define FLT128_DIG 33
#define FLT128_MIN_10_EXP (-4931)
#define FLT128_MAX_10_EXP 4932
#define HUGE_VALQ __builtin_huge_valq()
/* The following alternative is valid, but brings the warning:
(floating constant exceeds range of ‘__float128’) */
/* #define HUGE_VALQ (__extension__ 0x1.0p32767Q) */
#define M_Eq 2.7182818284590452353602874713526625Q /* e */
#define M_LOG2Eq 1.4426950408889634073599246810018921Q /* log_2 e */
#define M_LOG10Eq 0.4342944819032518276511289189166051Q /* log_10 e */
#define M_LN2q 0.6931471805599453094172321214581766Q /* log_e 2 */
#define M_LN10q 2.3025850929940456840179914546843642Q /* log_e 10 */
#define M_PIq 3.1415926535897932384626433832795029Q /* pi */
#define M_PI_2q 1.5707963267948966192313216916397514Q /* pi/2 */
#define M_PI_4q 0.7853981633974483096156608458198757Q /* pi/4 */
#define M_1_PIq 0.3183098861837906715377675267450287Q /* 1/pi */
#define M_2_PIq 0.6366197723675813430755350534900574Q /* 2/pi */
#define M_2_SQRTPIq 1.1283791670955125738961589031215452Q /* 2/sqrt(pi) */
#define M_SQRT2q 1.4142135623730950488016887242096981Q /* sqrt(2) */
#define M_SQRT1_2q 0.7071067811865475244008443621048490Q /* 1/sqrt(2) */
#define __quadmath_extern_inline \
extern inline __attribute__ ((__gnu_inline__))
__quadmath_extern_inline __float128
__quadmath_nth (cimagq (__complex128 __z))
{
return __imag__ __z;
}
__quadmath_extern_inline __float128
__quadmath_nth (crealq (__complex128 __z))
{
return __real__ __z;
}
__quadmath_extern_inline __complex128
__quadmath_nth (conjq (__complex128 __z))
{
return __extension__ ~__z;
}
#endif
|
SanDisk-Open-Source/SSD_Dashboard
|
uefi/gcc/gcc-4.6.3/libquadmath/quadmath.h
|
C
|
gpl-2.0
| 8,961
|
<?php
require_once realpath(dirname(__FILE__)) . '/../TestHelper.php';
class Braintree_MultipleValueOrTextNodeTest extends PHPUnit_Framework_TestCase
{
function testIn()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->in(array('firstValue', 'secondValue'));
$this->assertEquals(array('firstValue', 'secondValue'), $node->toParam());
}
function testIs()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->is('value');
$this->assertEquals(array('is' => 'value'), $node->toParam());
}
function testIsNot()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->isNot('value');
$this->assertEquals(array('is_not' => 'value'), $node->toParam());
}
function testStartsWith()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->startsWith('beginning');
$this->assertEquals(array('starts_with' => 'beginning'), $node->toParam());
}
function testEndsWith()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->endsWith('end');
$this->assertEquals(array('ends_with' => 'end'), $node->toParam());
}
function testContains()
{
$node = new Braintree_MultipleValueOrTextNode('field');
$node->contains('middle');
$this->assertEquals(array('contains' => 'middle'), $node->toParam());
}
}
|
vocxod/cintez
|
www/system/storage/vendor/braintree/braintree_php/tests/unit/MultipleValueOrTextNodeTest.php
|
PHP
|
gpl-3.0
| 1,459
|
/***
Copyright (c) 2008-2012 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
From _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.android.relative;
import android.app.Activity;
import android.os.Bundle;
public class RelativeLayoutDemo extends Activity {
@Override
public void onCreate(Bundle icicle) {
super.onCreate(icicle);
setContentView(R.layout.main);
}
}
|
lexiaoyao20/cw-omnibus
|
Containers/Relative/src/com/commonsware/android/relative/RelativeLayoutDemo.java
|
Java
|
apache-2.0
| 949
|
/***
Copyright (c) 2008-2012 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
From _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.android.constants;
import android.os.Bundle;
import com.actionbarsherlock.app.SherlockFragmentActivity;
public class ConstantsBrowser extends SherlockFragmentActivity {
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (getSupportFragmentManager().findFragmentById(android.R.id.content)==null) {
getSupportFragmentManager().beginTransaction()
.add(android.R.id.content,
new ConstantsFragment()).commit();
}
}
}
|
suclike/cw-omnibus
|
Database/Constants/src/com/commonsware/android/constants/ConstantsBrowser.java
|
Java
|
apache-2.0
| 1,261
|