repo_name stringlengths 6 101 | path stringlengths 4 300 | text stringlengths 7 1.31M |
|---|---|---|
nasa/giant | giant/__init__.py | <reponame>nasa/giant
# Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
"""
Welcome to GIANT
This is a test
"""
from importlib.machinery import PathFinder
import warnings
import sys
warnings.filterwarnings("default", category=DeprecationWarning)
class __MyPathFinder(PathFinder):
    """Meta path finder that transparently redirects imports of deprecated
    GIANT module names to their renamed replacements, emitting a
    ``DeprecationWarning`` whenever a redirect happens.
    """

    # parallel lists: old name fragment -> replacement name fragment
    __deprecated = ['attitude']
    __fixed = ['rotations']

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """Resolve *fullname*, first rewriting any deprecated fragments."""
        resolved = fullname
        for old, new in zip(cls.__deprecated, cls.__fixed):
            if old not in resolved:
                continue
            resolved = resolved.replace(old, new)
            warnings.warn('{} is deprecated. Please import {} instead'.format(old, new),
                          DeprecationWarning)
        return super().find_spec(resolved, path=path, target=target)


# Install the finder so deprecated imports are redirected process-wide.
sys.meta_path.append(__MyPathFinder())
class __DepWrapper:
    """Module proxy that forwards attribute access to *wrapped*, translating
    deprecated attribute names to their replacements (with a
    ``DeprecationWarning``) using the *deprecated* old-name -> new-name map.
    """

    def __init__(self, wrapped, deprecated):
        self.wrapped = wrapped          # the real module object
        self.deprecated = deprecated    # {deprecated_name: replacement_name}
        # mirror the module's identity metadata onto the proxy
        self.__doc__ = wrapped.__doc__
        self.__name__ = wrapped.__name__
        self.__file__ = wrapped.__file__

    def __getattr__(self, item):
        # Only invoked for names not found on the proxy itself.
        if item in self.deprecated:
            replacement = self.deprecated[item]
            warnings.warn('"{}" is deprecated. Please use "{}" instead'.format(item, replacement),
                          DeprecationWarning)
            item = replacement
        return getattr(self.wrapped, item)
|
jdbrice/PicoToFemtoDst_SL16cMTD | modules/PicoDstSL16cMtd/StPicoBTofHit.h | <filename>modules/PicoDstSL16cMtd/StPicoBTofHit.h
#ifndef StPicoBTofHit_hh
#define StPicoBTofHit_hh
#include "TObject.h"
#include "stdio.h"
/// Minimal barrel TOF hit carrier. The tray/module/cell position is packed
/// into a single Short_t as (tray-1)*192 + (module-1)*6 + (cell-1).
class StPicoBTofHit : public TObject {
public:
  StPicoBTofHit() {}
  ~StPicoBTofHit() {}
  StPicoBTofHit(int) {}

  void Clear(const Option_t *opt="") { mId = 0; }
  virtual void Print(const Char_t *option = "") const {} ///< Print trigger info

  /// Packed identifier: (tray-1)*192 + (module-1)*6 + (cell-1).
  Int_t id() const { return (Int_t)mId; }
  /// 1-based tray number (192 cells per tray).
  Int_t tray() const { return (Int_t)mId/192 + 1; }
  /// 1-based module number within the tray (6 cells per module).
  Int_t module() const { return ((Int_t)mId%192)/6 + 1; }
  /// 1-based cell number within the module.
  /// BUGFIX: was mId/6 + 1, which is not the inverse of the packing above;
  /// both strides (192 and 6) are multiples of 6, so cell-1 == mId % 6.
  Int_t cell() const { return (Int_t)mId%6 + 1; }

protected:
  Short_t mId; // (tray-1)*192+(module-1)*6+(cell-1):

  ClassDef(StPicoBTofHit, 1)
};
#endif
|
jhkuang11/UniTrade | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/utils/driver/abstract.py | <gh_stars>0
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implement the Base class for Driver and Connection"""
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from .registry import DriverRegistry
@six.add_metaclass(DriverRegistry)
class BaseDriver(object):
    """
    class BaseDriver(object):

    This is a base class for different server types.
    Inherit this class to implement different type of database driver
    implementation.

    (For PostgreSQL/EDB Postgres Advanced Server, we will be using psycopg2)

    Abstract Properties:
    -------- ----------
    * Version (string):
        Current version string for the database server

    Abstract Methods:
    -------- -------
    * get_connection(*args, **kwargs)
      - It should return a Connection class object, which may/may not be
        connected to the database server.

    * release_connection(*args, **kwargs)
      - Implement the connection release logic

    * gc()
      - Implement this function to release the connections assigned in the
        session, which has not been pinged from more than the idle timeout
        configuration.
    """

    # Version string of the backing database server.
    @abstractproperty
    def Version(cls):
        pass

    # Return a Connection object (possibly not yet connected).
    @abstractmethod
    def get_connection(self, *args, **kwargs):
        pass

    # Release a connection previously handed out by get_connection().
    @abstractmethod
    def release_connection(self, *args, **kwargs):
        pass

    # Garbage-collect connections idle past the session timeout.
    @abstractmethod
    def gc(self):
        pass
@six.add_metaclass(ABCMeta)
class BaseConnection(object):
    """
    class BaseConnection(object)

    It is a base class for database connection. A different connection
    drive must implement this to expose abstract methods for this server.

    General idea is to create a wrapper around the actual driver
    implementation. It will be instantiated by the driver factory
    basically. And, they should not be instantiated directly.

    Abstract Methods:
    -------- -------
    * connect(**kwargs)
      - Define this method to connect the server using that particular driver
        implementation.

    * execute_scalar(query, params, formatted_exception_msg)
      - Implement this method to execute the given query and returns single
        datum result.

    * execute_async(query, params, formatted_exception_msg)
      - Implement this method to execute the given query asynchronously and
        returns result.

    * execute_void(query, params, formatted_exception_msg)
      - Implement this method to execute the given query with no result.

    * execute_2darray(query, params, formatted_exception_msg)
      - Implement this method to execute the given query and returns the result
        as a 2 dimensional array.

    * execute_dict(query, params, formatted_exception_msg)
      - Implement this method to execute the given query and returns the result
        as an array of dict (column name -> value) format.

    * def async_fetchmany_2darray(records=-1, formatted_exception_msg=False):
      - Implement this method to retrieve result of asynchronous connection and
        polling with no_result flag set to True.
        This returns the result as a 2 dimensional array.
        If records is -1 then fetchmany will behave as fetchall.

    * connected()
      - Implement this method to get the status of the connection. It should
        return True for connected, otherwise False

    * reset()
      - Implement this method to reconnect the database server (if possible)

    * transaction_status()
      - Implement this method to get the transaction status for this
        connection. Range of return values different for each driver type.

    * ping()
      - Implement this method to ping the server. There are times, a connection
        has been lost, but - the connection driver does not know about it. This
        can be helpful to figure out the actual reason for query failure.

    * _release()
      - Implement this method to release the connection object. This should not
        be directly called using the connection object itself.

      NOTE: Please use BaseDriver.release_connection(...) for releasing the
            connection object for better memory management, and connection pool
            management.

    * _wait(conn)
      - Implement this method to wait for asynchronous connection to finish the
        execution, hence - it must be a blocking call.

    * _wait_timeout(conn, time)
      - Implement this method to wait for asynchronous connection with timeout.
        This must be a non blocking call.

    * poll(formatted_exception_msg, no_result)
      - Implement this method to poll the data of query running on asynchronous
        connection.

    * cancel_transaction(conn_id, did=None)
      - Implement this method to cancel the running transaction.

    * status_message()
      - Implement this method to return the list of the messages/notices from
        the database server.

    * rows_affected()
      - Implement this method to get the rows affected by the last command
        executed on the server.
    """

    # Async operation status codes. Presumably returned by poll()/_wait_timeout()
    # implementations -- TODO confirm against a concrete driver subclass.
    ASYNC_OK = 1
    ASYNC_READ_TIMEOUT = 2
    ASYNC_WRITE_TIMEOUT = 3
    ASYNC_NOT_CONNECTED = 4
    ASYNC_EXECUTION_ABORTED = 5

    @abstractmethod
    def connect(self, **kwargs):
        pass

    @abstractmethod
    def execute_scalar(self, query, params=None, formatted_exception_msg=False):
        pass

    @abstractmethod
    def execute_async(self, query, params=None, formatted_exception_msg=True):
        pass

    @abstractmethod
    def execute_void(self, query, params=None, formatted_exception_msg=False):
        pass

    @abstractmethod
    def execute_2darray(self, query, params=None, formatted_exception_msg=False):
        pass

    @abstractmethod
    def execute_dict(self, query, params=None, formatted_exception_msg=False):
        pass

    @abstractmethod
    def async_fetchmany_2darray(self, records=-1, formatted_exception_msg=False):
        pass

    @abstractmethod
    def connected(self):
        pass

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def transaction_status(self):
        pass

    @abstractmethod
    def ping(self):
        pass

    @abstractmethod
    def _release(self):
        pass

    @abstractmethod
    def _wait(self, conn):
        pass

    @abstractmethod
    def _wait_timeout(self, conn, time):
        pass

    @abstractmethod
    def poll(self, formatted_exception_msg=True, no_result=False):
        pass

    @abstractmethod
    def status_message(self):
        pass

    @abstractmethod
    def rows_affected(self):
        pass

    @abstractmethod
    def cancel_transaction(self, conn_id, did=None):
        pass
|
RajeshKumarC6/Learn-Scala-Programming | Chapter02/src/main/scala/ch02/GeneralisedPhantomTypes.scala | <filename>Chapter02/src/main/scala/ch02/GeneralisedPhantomTypes.scala
package ch02
object GeneralisedPhantomTypes {
  // Phantom states for a Lock: they exist only at the type level and are
  // never instantiated at runtime.
  sealed trait LockState
  sealed trait Open extends LockState
  sealed trait Closed extends LockState
  sealed trait Broken extends LockState

  // `State` is a phantom type parameter: no value of it is ever stored.
  case class Lock[State <: LockState]() {
    // break is allowed from any state
    def break: Lock[Broken] = Lock()
    // `=:=` evidence makes each transition legal only from the right state
    def open(implicit ev: State =:= Closed): Lock[Open] = Lock()
    def close(implicit ev: State =:= Open): Lock[Closed] = Lock()
  }

  val openLock = Lock[Open]
  val closedLock = openLock.close
  val lock = closedLock.open
  val broken = closedLock.break
  // Illegal transitions fail at compile time because the implicit
  // evidence cannot be summoned:
  // closedLock.close // compile error
  // openLock.open // compile error
  // broken.open // compile error
}
|
gamekit-developers/gamekit | Dependencies/Source/FreeImage/LibOpenJPEG/j2k.c | <reponame>gamekit-developers/gamekit
/*
* Copyright (c) 2002-2007, Communications and Remote Sensing Laboratory, Universite catholique de Louvain (UCL), Belgium
* Copyright (c) 2002-2007, Professor <NAME>
* Copyright (c) 2001-2003, <NAME>
* Copyright (c) 2002-2003, <NAME>
* Copyright (c) 2003-2007, <NAME> and <NAME>
* Copyright (c) 2005, <NAME>, FreeImage Team
* Copyright (c) 2006-2007, <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opj_includes.h"
/** @defgroup J2K J2K - JPEG-2000 codestream reader/writer */
/*@{*/
/** @name Local static functions */
/*@{*/
/**
Write the SOC marker (Start Of Codestream)
@param j2k J2K handle
*/
static void j2k_write_soc(opj_j2k_t *j2k);
/**
Read the SOC marker (Start of Codestream)
@param j2k J2K handle
*/
static void j2k_read_soc(opj_j2k_t *j2k);
/**
Write the SIZ marker (image and tile size)
@param j2k J2K handle
*/
static void j2k_write_siz(opj_j2k_t *j2k);
/**
Read the SIZ marker (image and tile size)
@param j2k J2K handle
*/
static void j2k_read_siz(opj_j2k_t *j2k);
/**
Write the COM marker (comment)
@param j2k J2K handle
*/
static void j2k_write_com(opj_j2k_t *j2k);
/**
Read the COM marker (comment)
@param j2k J2K handle
*/
static void j2k_read_com(opj_j2k_t *j2k);
/**
Write the value concerning the specified component in the marker COD and COC
@param j2k J2K handle
@param compno Number of the component concerned by the information written
*/
static void j2k_write_cox(opj_j2k_t *j2k, int compno);
/**
Read the value concerning the specified component in the marker COD and COC
@param j2k J2K handle
@param compno Number of the component concerned by the information read
*/
static void j2k_read_cox(opj_j2k_t *j2k, int compno);
/**
Write the COD marker (coding style default)
@param j2k J2K handle
*/
static void j2k_write_cod(opj_j2k_t *j2k);
/**
Read the COD marker (coding style default)
@param j2k J2K handle
*/
static void j2k_read_cod(opj_j2k_t *j2k);
/**
Write the COC marker (coding style component)
@param j2k J2K handle
@param compno Number of the component concerned by the information written
*/
static void j2k_write_coc(opj_j2k_t *j2k, int compno);
/**
Read the COC marker (coding style component)
@param j2k J2K handle
*/
static void j2k_read_coc(opj_j2k_t *j2k);
/**
Write the value concerning the specified component in the marker QCD and QCC
@param j2k J2K handle
@param compno Number of the component concerned by the information written
*/
static void j2k_write_qcx(opj_j2k_t *j2k, int compno);
/**
Read the value concerning the specified component in the marker QCD and QCC
@param j2k J2K handle
@param compno Number of the component concern by the information read
@param len Length of the information in the QCX part of the marker QCD/QCC
*/
static void j2k_read_qcx(opj_j2k_t *j2k, int compno, int len);
/**
Write the QCD marker (quantization default)
@param j2k J2K handle
*/
static void j2k_write_qcd(opj_j2k_t *j2k);
/**
Read the QCD marker (quantization default)
@param j2k J2K handle
*/
static void j2k_read_qcd(opj_j2k_t *j2k);
/**
Write the QCC marker (quantization component)
@param j2k J2K handle
@param compno Number of the component concerned by the information written
*/
static void j2k_write_qcc(opj_j2k_t *j2k, int compno);
/**
Read the QCC marker (quantization component)
@param j2k J2K handle
*/
static void j2k_read_qcc(opj_j2k_t *j2k);
/**
Write the POC marker (progression order change)
@param j2k J2K handle
*/
static void j2k_write_poc(opj_j2k_t *j2k);
/**
Read the POC marker (progression order change)
@param j2k J2K handle
*/
static void j2k_read_poc(opj_j2k_t *j2k);
/**
Read the CRG marker (component registration)
@param j2k J2K handle
*/
static void j2k_read_crg(opj_j2k_t *j2k);
/**
Read the TLM marker (tile-part lengths)
@param j2k J2K handle
*/
static void j2k_read_tlm(opj_j2k_t *j2k);
/**
Read the PLM marker (packet length, main header)
@param j2k J2K handle
*/
static void j2k_read_plm(opj_j2k_t *j2k);
/**
Read the PLT marker (packet length, tile-part header)
@param j2k J2K handle
*/
static void j2k_read_plt(opj_j2k_t *j2k);
/**
Read the PPM marker (packet packet headers, main header)
@param j2k J2K handle
*/
static void j2k_read_ppm(opj_j2k_t *j2k);
/**
Read the PPT marker (packet packet headers, tile-part header)
@param j2k J2K handle
*/
static void j2k_read_ppt(opj_j2k_t *j2k);
/**
Write the TLM marker (Mainheader)
@param j2k J2K handle
*/
static void j2k_write_tlm(opj_j2k_t *j2k);
/**
Write the SOT marker (start of tile-part)
@param j2k J2K handle
*/
static void j2k_write_sot(opj_j2k_t *j2k);
/**
Read the SOT marker (start of tile-part)
@param j2k J2K handle
*/
static void j2k_read_sot(opj_j2k_t *j2k);
/**
Write the SOD marker (start of data)
@param j2k J2K handle
@param tile_coder Pointer to a TCD handle
*/
static void j2k_write_sod(opj_j2k_t *j2k, void *tile_coder);
/**
Read the SOD marker (start of data)
@param j2k J2K handle
*/
static void j2k_read_sod(opj_j2k_t *j2k);
/**
Write the RGN marker (region-of-interest)
@param j2k J2K handle
@param compno Number of the component concerned by the information written
@param tileno Number of the tile concerned by the information written
*/
static void j2k_write_rgn(opj_j2k_t *j2k, int compno, int tileno);
/**
Read the RGN marker (region-of-interest)
@param j2k J2K handle
*/
static void j2k_read_rgn(opj_j2k_t *j2k);
/**
Write the EOC marker (end of codestream)
@param j2k J2K handle
*/
static void j2k_write_eoc(opj_j2k_t *j2k);
/**
Read the EOC marker (end of codestream)
@param j2k J2K handle
*/
static void j2k_read_eoc(opj_j2k_t *j2k);
/**
Read an unknown marker
@param j2k J2K handle
*/
static void j2k_read_unk(opj_j2k_t *j2k);
/*@}*/
/*@}*/
/* ----------------------------------------------------------------------- */
/**
Associates a progression order enum value with its 4-character codestream
identifier.
*/
typedef struct j2k_prog_order{
	OPJ_PROG_ORDER enum_prog;
	/* BUGFIX: was char str_prog[4]; the 4-character names ("CPRL", ...) need
	   5 bytes including the NUL terminator, so the pointers returned by
	   j2k_convert_progression_order were not NUL-terminated strings. */
	char str_prog[5];
}j2k_prog_order_t;

/* Lookup table, terminated by a sentinel entry with enum_prog == -1. */
j2k_prog_order_t j2k_prog_order_list[] = {
	{CPRL, "CPRL"},
	{LRCP, "LRCP"},
	{PCRL, "PCRL"},
	{RLCP, "RLCP"},
	{RPCL, "RPCL"},
	{-1, ""}
};
/* Return the 4-letter name of the given progression order, or the empty
   sentinel string when the value is not in j2k_prog_order_list. */
char *j2k_convert_progression_order(OPJ_PROG_ORDER prg_order){
	j2k_prog_order_t *entry = j2k_prog_order_list;
	while (entry->enum_prog != -1 && entry->enum_prog != prg_order) {
		entry++;
	}
	return entry->str_prog;
}
/* Dump the image geometry and per-component parameters of *img to *fd in a
   human-readable, brace-delimited text format. */
void j2k_dump_image(FILE *fd, opj_image_t * img) {
	int i;

	fprintf(fd, "image {\n");
	fprintf(fd, " x0=%d, y0=%d, x1=%d, y1=%d\n", img->x0, img->y0, img->x1, img->y1);
	fprintf(fd, " numcomps=%d\n", img->numcomps);
	for (i = 0; i < img->numcomps; i++) {
		opj_image_comp_t *c = &img->comps[i];
		fprintf(fd, " comp %d {\n", i);
		fprintf(fd, " dx=%d, dy=%d\n", c->dx, c->dy);
		fprintf(fd, " prec=%d\n", c->prec);
		fprintf(fd, " sgnd=%d\n", c->sgnd);
		fprintf(fd, " }\n");
	}
	fprintf(fd, "}\n");
}
/* Dump the coding parameters of *cp (tile grid, then per-tile and
   per-component coding style and quantization info) to *fd in a
   human-readable, brace-delimited text format. */
void j2k_dump_cp(FILE *fd, opj_image_t * img, opj_cp_t * cp) {
	int tileno, compno, layno, bandno, resno, numbands;

	fprintf(fd, "coding parameters {\n");
	fprintf(fd, " tx0=%d, ty0=%d\n", cp->tx0, cp->ty0);
	fprintf(fd, " tdx=%d, tdy=%d\n", cp->tdx, cp->tdy);
	fprintf(fd, " tw=%d, th=%d\n", cp->tw, cp->th);
	for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
		opj_tcp_t *tcp = &cp->tcps[tileno];
		fprintf(fd, " tile %d {\n", tileno);
		fprintf(fd, " csty=%x\n", tcp->csty);
		fprintf(fd, " prg=%d\n", tcp->prg);
		fprintf(fd, " numlayers=%d\n", tcp->numlayers);
		fprintf(fd, " mct=%d\n", tcp->mct);
		fprintf(fd, " rates=");
		for (layno = 0; layno < tcp->numlayers; layno++) {
			fprintf(fd, "%.1f ", tcp->rates[layno]);
		}
		fprintf(fd, "\n");
		for (compno = 0; compno < img->numcomps; compno++) {
			opj_tccp_t *tccp = &tcp->tccps[compno];
			fprintf(fd, " comp %d {\n", compno);
			fprintf(fd, " csty=%x\n", tccp->csty);
			fprintf(fd, " numresolutions=%d\n", tccp->numresolutions);
			fprintf(fd, " cblkw=%d\n", tccp->cblkw);
			fprintf(fd, " cblkh=%d\n", tccp->cblkh);
			fprintf(fd, " cblksty=%x\n", tccp->cblksty);
			fprintf(fd, " qmfbid=%d\n", tccp->qmfbid);
			fprintf(fd, " qntsty=%d\n", tccp->qntsty);
			fprintf(fd, " numgbits=%d\n", tccp->numgbits);
			fprintf(fd, " roishift=%d\n", tccp->roishift);
			fprintf(fd, " stepsizes=");
			/* one stepsize when scalar-implicit quantization is used,
			   otherwise one per subband (3 per resolution above the lowest) */
			numbands = tccp->qntsty == J2K_CCP_QNTSTY_SIQNT ? 1 : tccp->numresolutions * 3 - 2;
			for (bandno = 0; bandno < numbands; bandno++) {
				fprintf(fd, "(%d,%d) ", tccp->stepsizes[bandno].mant,
					tccp->stepsizes[bandno].expn);
			}
			fprintf(fd, "\n");
			/* precinct sizes are only meaningful when the precinct
			   coding-style bit is set */
			if (tccp->csty & J2K_CCP_CSTY_PRT) {
				fprintf(fd, " prcw=");
				for (resno = 0; resno < tccp->numresolutions; resno++) {
					fprintf(fd, "%d ", tccp->prcw[resno]);
				}
				fprintf(fd, "\n");
				fprintf(fd, " prch=");
				for (resno = 0; resno < tccp->numresolutions; resno++) {
					fprintf(fd, "%d ", tccp->prch[resno]);
				}
				fprintf(fd, "\n");
			}
			fprintf(fd, " }\n");
		}
		fprintf(fd, " }\n");
	}
	fprintf(fd, "}\n");
}
/* ----------------------------------------------------------------------- */
/*
 * Compute the number of tile-parts for progression-order change `pino` of
 * tile `tileno`. When tile-parts are enabled (cp->tp_on == 1) the 4-letter
 * progression string is scanned and the component/resolution/precinct/layer
 * counts are multiplied together up to and INCLUDING the character matching
 * cp->tp_flag; as a side effect cp->tp_pos records that character's index.
 * With tile-parts disabled the answer is always 1.
 */
static int j2k_get_num_tp(opj_cp_t *cp,int pino,int tileno){
	char *prog;
	int i;
	int tpnum=1,tpend=0;
	opj_tcp_t *tcp = &cp->tcps[tileno];
	prog = j2k_convert_progression_order(tcp->prg);

	if(cp->tp_on == 1){
		for(i=0;i<4;i++){
			if(tpend!=1){
				if( cp->tp_flag == prog[i] ){
					/* flagged dimension found: remember its position and stop
					   multiplying AFTER this iteration (the switch below still
					   runs for the flagged character itself) */
					tpend=1;cp->tp_pos=i;
				}
				switch(prog[i]){
				case 'C':
					tpnum= tpnum * tcp->pocs[pino].compE;	/* components */
					break;
				case 'R':
					tpnum= tpnum * tcp->pocs[pino].resE;	/* resolutions */
					break;
				case 'P':
					tpnum= tpnum * tcp->pocs[pino].prcE;	/* precincts */
					break;
				case 'L':
					tpnum= tpnum * tcp->pocs[pino].layE;	/* layers */
					break;
				}
			}
		}
	}else{
		tpnum=1;
	}
	return tpnum;
}
/** mem allocation for TLM marker*/
/*
 * Compute the total number of tile-parts over the whole image (returned) and
 * store the per-tile totals in the freshly allocated j2k->cur_totnum_tp
 * array. Returns -1 when a packet iterator cannot be created. When a
 * codestream index is being built, its per-tile tile-part arrays are also
 * allocated here. Note: the img_numcomp parameter is not used.
 */
int j2k_calculate_tp(opj_cp_t *cp,int img_numcomp,opj_image_t *image,opj_j2k_t *j2k ){
	int pino,tileno,totnum_tp=0;

	j2k->cur_totnum_tp = (int *) opj_malloc(cp->tw * cp->th * sizeof(int));
	for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
		int cur_totnum_tp = 0;
		opj_tcp_t *tcp = &cp->tcps[tileno];
		/* one pass per progression-order change (numpocs bound is inclusive) */
		for(pino = 0; pino <= tcp->numpocs; pino++) {
			int tp_num=0;
			opj_pi_iterator_t *pi = pi_initialise_encode(image, cp, tileno,FINAL_PASS);
			if(!pi) { return -1;}
			tp_num = j2k_get_num_tp(cp,pino,tileno);
			totnum_tp = totnum_tp + tp_num;
			cur_totnum_tp = cur_totnum_tp + tp_num;
			pi_destroy(pi, cp, tileno);
		}
		j2k->cur_totnum_tp[tileno] = cur_totnum_tp;
		/* INDEX >> */
		if (j2k->cstr_info) {
			j2k->cstr_info->tile[tileno].num_tps = cur_totnum_tp;
			j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(cur_totnum_tp * sizeof(opj_tp_info_t));
		}
		/* << INDEX */
	}
	return totnum_tp;
}
/* Write the SOC marker (Start Of Codestream). */
static void j2k_write_soc(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	cio_write(cio, J2K_MS_SOC, 2);

/* UniPG>> */
#ifdef USE_JPWL
	/* update markers struct */
	j2k_add_marker(j2k->cstr_info, J2K_MS_SOC, cio_tell(cio) - 2, 2);
#endif /* USE_JPWL */
/* <<UniPG */
}
/* Read the SOC marker (Start of Codestream): switch the decoder to the
   "expect SIZ" state and, when indexing, record the codestream offsets. */
static void j2k_read_soc(opj_j2k_t *j2k) {
	j2k->state = J2K_STATE_MHSIZ;

	/* Index */
	if (j2k->cstr_info) {
		/* the 2-byte SOC marker itself has already been consumed */
		j2k->cstr_info->main_head_start = cio_tell(j2k->cio) - 2;
		j2k->cstr_info->codestream_size = cio_numbytesleft(j2k->cio) + 2 - j2k->cstr_info->main_head_start;
	}
}
/*
 * Write the SIZ marker (image and tile size): capabilities, image and tile
 * grids, then one (precision, subsampling) triplet per component. The
 * segment length (Lsiz) is back-patched once the end position is known.
 */
static void j2k_write_siz(opj_j2k_t *j2k) {
	int i;
	int lenp, len;

	opj_cio_t *cio = j2k->cio;
	opj_image_t *image = j2k->image;
	opj_cp_t *cp = j2k->cp;

	cio_write(cio, J2K_MS_SIZ, 2);	/* SIZ */
	lenp = cio_tell(cio);
	cio_skip(cio, 2);			/* reserve room for Lsiz */
	cio_write(cio, cp->rsiz, 2);			/* Rsiz (capabilities) */
	cio_write(cio, image->x1, 4);	/* Xsiz */
	cio_write(cio, image->y1, 4);	/* Ysiz */
	cio_write(cio, image->x0, 4);	/* X0siz */
	cio_write(cio, image->y0, 4);	/* Y0siz */
	cio_write(cio, cp->tdx, 4);		/* XTsiz */
	cio_write(cio, cp->tdy, 4);		/* YTsiz */
	cio_write(cio, cp->tx0, 4);		/* XT0siz */
	cio_write(cio, cp->ty0, 4);		/* YT0siz */
	cio_write(cio, image->numcomps, 2);	/* Csiz */
	for (i = 0; i < image->numcomps; i++) {
		/* precision-1 in the low 7 bits, signedness flag in the top bit */
		cio_write(cio, image->comps[i].prec - 1 + (image->comps[i].sgnd << 7), 1);	/* Ssiz_i */
		cio_write(cio, image->comps[i].dx, 1);	/* XRsiz_i */
		cio_write(cio, image->comps[i].dy, 1);	/* YRsiz_i */
	}
	/* back-patch the marker segment length */
	len = cio_tell(cio) - lenp;
	cio_seek(cio, lenp);
	cio_write(cio, len, 2);			/* Lsiz */
	cio_seek(cio, lenp + len);
}
/*
 * Read the SIZ marker (image and tile size) and allocate the per-tile and
 * per-component coding structures sized from it. When JPWL correction is
 * enabled, obviously-corrupted SIZ fields are sanity-checked and, where
 * possible, adjusted instead of aborting.
 */
static void j2k_read_siz(opj_j2k_t *j2k) {
	int len, i;

	opj_cio_t *cio = j2k->cio;
	opj_image_t *image = j2k->image;
	opj_cp_t *cp = j2k->cp;

	len = cio_read(cio, 2);			/* Lsiz */
	cio_read(cio, 2);			/* Rsiz (capabilities) */
	image->x1 = cio_read(cio, 4);		/* Xsiz */
	image->y1 = cio_read(cio, 4);		/* Ysiz */
	image->x0 = cio_read(cio, 4);		/* X0siz */
	image->y0 = cio_read(cio, 4);		/* Y0siz */
	cp->tdx = cio_read(cio, 4);		/* XTsiz */
	cp->tdy = cio_read(cio, 4);		/* YTsiz */
	cp->tx0 = cio_read(cio, 4);		/* XT0siz */
	cp->ty0 = cio_read(cio, 4);		/* YT0siz */

	if ((image->x0<0)||(image->x1<0)||(image->y0<0)||(image->y1<0)) {
		/* BUGFIX: the format string used to begin with "%s: " while only the
		   four int arguments were passed, so %s consumed an int (undefined
		   behavior) and every following %d printed the wrong value. */
		opj_event_msg(j2k->cinfo, EVT_ERROR,
					"invalid image size (x0:%d, x1:%d, y0:%d, y1:%d)\n",
					image->x0,image->x1,image->y0,image->y1);
		return;
	}

	image->numcomps = cio_read(cio, 2);	/* Csiz */

#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* if JPWL is on, we check whether TX errors have damaged
		  too much the SIZ parameters */
		if (!(image->x1 * image->y1)) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"JPWL: bad image size (%d x %d)\n",
				image->x1, image->y1);
			/* NOTE: always true — a zero-sized image is never recoverable,
			   regardless of JPWL_ASSUME */
			if (!JPWL_ASSUME || JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
		}
		if (image->numcomps != ((len - 38) / 3)) {
			opj_event_msg(j2k->cinfo, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
				"JPWL: Csiz is %d => space in SIZ only for %d comps.!!!\n",
				image->numcomps, ((len - 38) / 3));
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust this\n");
			if (image->numcomps < ((len - 38) / 3)) {
				len = 38 + 3 * image->numcomps;
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting Lsiz to %d => HYPOTHESIS!!!\n",
					len);
			} else {
				image->numcomps = ((len - 38) / 3);
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting Csiz to %d => HYPOTHESIS!!!\n",
					image->numcomps);
			}
		}

		/* update components number in the jpwl_exp_comps filed */
		cp->exp_comps = image->numcomps;
	}
#endif /* USE_JPWL */

	image->comps = (opj_image_comp_t*) opj_calloc(image->numcomps, sizeof(opj_image_comp_t));
	for (i = 0; i < image->numcomps; i++) {
		int tmp, w, h;
		tmp = cio_read(cio, 1);		/* Ssiz_i */
		image->comps[i].prec = (tmp & 0x7f) + 1;
		image->comps[i].sgnd = tmp >> 7;
		image->comps[i].dx = cio_read(cio, 1);	/* XRsiz_i */
		image->comps[i].dy = cio_read(cio, 1);	/* YRsiz_i */

#ifdef USE_JPWL
		if (j2k->cp->correct) {
		/* if JPWL is on, we check whether TX errors have damaged
			too much the SIZ parameters, again */
			if (!(image->comps[i].dx * image->comps[i].dy)) {
				opj_event_msg(j2k->cinfo, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
					"JPWL: bad XRsiz_%d/YRsiz_%d (%d x %d)\n",
					i, i, image->comps[i].dx, image->comps[i].dy);
				if (!JPWL_ASSUME) {
					opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
					return;
				}
				/* we try to correct */
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust them\n");
				if (!image->comps[i].dx) {
					image->comps[i].dx = 1;
					opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting XRsiz_%d to %d => HYPOTHESIS!!!\n",
						i, image->comps[i].dx);
				}
				if (!image->comps[i].dy) {
					image->comps[i].dy = 1;
					opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting YRsiz_%d to %d => HYPOTHESIS!!!\n",
						i, image->comps[i].dy);
				}
			}
		}
#endif /* USE_JPWL */

		/* TODO: unused ? */
		w = int_ceildiv(image->x1 - image->x0, image->comps[i].dx);
		h = int_ceildiv(image->y1 - image->y0, image->comps[i].dy);

		image->comps[i].resno_decoded = 0;	/* number of resolution decoded */
		image->comps[i].factor = cp->reduce; /* reducing factor per component */
	}

	/* number of tiles in each direction */
	cp->tw = int_ceildiv(image->x1 - cp->tx0, cp->tdx);
	cp->th = int_ceildiv(image->y1 - cp->ty0, cp->tdy);

#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* if JPWL is on, we check whether TX errors have damaged
		  too much the SIZ parameters */
		if ((cp->tw < 1) || (cp->th < 1) || (cp->tw > cp->max_tiles) || (cp->th > cp->max_tiles)) {
			opj_event_msg(j2k->cinfo, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
				"JPWL: bad number of tiles (%d x %d)\n",
				cp->tw, cp->th);
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust them\n");
			if (cp->tw < 1) {
				cp->tw= 1;
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting %d tiles in x => HYPOTHESIS!!!\n",
					cp->tw);
			}
			if (cp->tw > cp->max_tiles) {
				cp->tw= 1;
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- too large x, increase expectance of %d\n"
					"- setting %d tiles in x => HYPOTHESIS!!!\n",
					cp->max_tiles, cp->tw);
			}
			if (cp->th < 1) {
				cp->th= 1;
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- setting %d tiles in y => HYPOTHESIS!!!\n",
					cp->th);
			}
			if (cp->th > cp->max_tiles) {
				cp->th= 1;
				/* BUGFIX: a stray comma between the two string literals made
				   the second literal a vararg consumed by %d instead of part
				   of the format string (compare the x-direction branch). */
				opj_event_msg(j2k->cinfo, EVT_WARNING, "- too large y, increase expectance of %d to continue\n"
					"- setting %d tiles in y => HYPOTHESIS!!!\n",
					cp->max_tiles, cp->th);
			}
		}
	}
#endif /* USE_JPWL */

	cp->tcps = (opj_tcp_t*) opj_calloc(cp->tw * cp->th, sizeof(opj_tcp_t));
	cp->tileno = (int*) opj_malloc(cp->tw * cp->th * sizeof(int));
	cp->tileno_size = 0;

#ifdef USE_JPWL
	if (j2k->cp->correct) {
		if (!cp->tcps) {
			opj_event_msg(j2k->cinfo, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
				"JPWL: could not alloc tcps field of cp\n");
			/* NOTE: always true — allocation failure is fatal either way */
			if (!JPWL_ASSUME || JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
		}
	}
#endif /* USE_JPWL */

	for (i = 0; i < cp->tw * cp->th; i++) {
		cp->tcps[i].POC = 0;
		cp->tcps[i].numpocs = 0;
		cp->tcps[i].first = 1;
	}

	/* Initialization for PPM marker */
	cp->ppm = 0;
	cp->ppm_data = NULL;
	cp->ppm_data_first = NULL;
	cp->ppm_previous = 0;
	cp->ppm_store = 0;

	j2k->default_tcp->tccps = (opj_tccp_t*) opj_calloc(image->numcomps, sizeof(opj_tccp_t));
	for (i = 0; i < cp->tw * cp->th; i++) {
		cp->tcps[i].tccps = (opj_tccp_t*) opj_malloc(image->numcomps * sizeof(opj_tccp_t));
	}
	j2k->tile_data = (unsigned char**) opj_calloc(cp->tw * cp->th, sizeof(unsigned char*));
	j2k->tile_len = (int*) opj_calloc(cp->tw * cp->th, sizeof(int));
	j2k->state = J2K_STATE_MH;

	/* Index */
	if (j2k->cstr_info) {
		opj_codestream_info_t *cstr_info = j2k->cstr_info;
		cstr_info->image_w = image->x1 - image->x0;
		cstr_info->image_h = image->y1 - image->y0;
		cstr_info->numcomps = image->numcomps;
		cstr_info->tw = cp->tw;
		cstr_info->th = cp->th;
		cstr_info->tile_x = cp->tdx;
		cstr_info->tile_y = cp->tdy;
		cstr_info->tile_Ox = cp->tx0;
		cstr_info->tile_Oy = cp->ty0;
		cstr_info->tile = (opj_tile_info_t*) opj_calloc(cp->tw * cp->th, sizeof(opj_tile_info_t));
	}
}
/*
 * Write the COM marker (comment) carrying the Latin-1 comment string from
 * the coding parameters; a no-op when no comment is set. The segment length
 * (Lcom) is back-patched once the end position is known.
 */
static void j2k_write_com(opj_j2k_t *j2k) {
	unsigned int i;
	int lenp, len;

	if(j2k->cp->comment) {
		opj_cio_t *cio = j2k->cio;
		char *comment = j2k->cp->comment;
		/* hoisted: strlen() used to be re-evaluated on every loop iteration */
		size_t comment_len = strlen(comment);

		cio_write(cio, J2K_MS_COM, 2);
		lenp = cio_tell(cio);
		cio_skip(cio, 2);		/* reserve room for Lcom */
		cio_write(cio, 1, 2);		/* General use (IS 8859-15:1999 (Latin) values) */
		for (i = 0; i < comment_len; i++) {
			cio_write(cio, comment[i], 1);
		}
		/* back-patch the marker segment length */
		len = cio_tell(cio) - lenp;
		cio_seek(cio, lenp);
		cio_write(cio, len, 2);
		cio_seek(cio, lenp + len);
	}
}
/* Read the COM marker (comment): the payload is not stored, simply skipped. */
static void j2k_read_com(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	int len = cio_read(cio, 2);	/* Lcom */
	cio_skip(cio, len - 2);		/* ignore the comment body */
}
/*
 * Write the component coding-style part shared by the COD and COC markers
 * (SPcox) for component `compno` of the current tile.
 */
static void j2k_write_cox(opj_j2k_t *j2k, int compno) {
	int i;

	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = &cp->tcps[j2k->curtileno];
	opj_tccp_t *tccp = &tcp->tccps[compno];
	opj_cio_t *cio = j2k->cio;

	cio_write(cio, tccp->numresolutions - 1, 1);	/* SPcox (D) */
	cio_write(cio, tccp->cblkw - 2, 1);		/* SPcox (E) */
	cio_write(cio, tccp->cblkh - 2, 1);		/* SPcox (F) */
	cio_write(cio, tccp->cblksty, 1);		/* SPcox (G) */
	cio_write(cio, tccp->qmfbid, 1);		/* SPcox (H) */

	if (tccp->csty & J2K_CCP_CSTY_PRT) {
		/* one byte per resolution: precinct width exponent in the low
		   nibble, height exponent in the high nibble */
		for (i = 0; i < tccp->numresolutions; i++) {
			cio_write(cio, tccp->prcw[i] + (tccp->prch[i] << 4), 1);	/* SPcox (I_i) */
		}
	}
}
/*
 * Read the component coding-style part shared by the COD and COC markers
 * (SPcox) for component `compno`, filling tcp->tccps[compno]. Flags
 * J2K_STATE_ERR when cp_reduce asks for more resolutions than exist.
 */
static void j2k_read_cox(opj_j2k_t *j2k, int compno) {
	int i;

	opj_cp_t *cp = j2k->cp;
	/* tile-part header fills the current tile, main header the defaults */
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_tccp_t *tccp = &tcp->tccps[compno];
	opj_cio_t *cio = j2k->cio;

	tccp->numresolutions = cio_read(cio, 1) + 1;	/* SPcox (D) */

	// If user wants to remove more resolutions than the codestream contains, return error
	if (cp->reduce >= tccp->numresolutions) {
		opj_event_msg(j2k->cinfo, EVT_ERROR, "Error decoding component %d.\nThe number of resolutions to remove is higher than the number "
					"of resolutions of this component\nModify the cp_reduce parameter.\n\n", compno);
		j2k->state |= J2K_STATE_ERR;
	}

	tccp->cblkw = cio_read(cio, 1) + 2;	/* SPcox (E) */
	tccp->cblkh = cio_read(cio, 1) + 2;	/* SPcox (F) */
	tccp->cblksty = cio_read(cio, 1);	/* SPcox (G) */
	tccp->qmfbid = cio_read(cio, 1);	/* SPcox (H) */
	if (tccp->csty & J2K_CP_CSTY_PRT) {
		for (i = 0; i < tccp->numresolutions; i++) {
			int tmp = cio_read(cio, 1);	/* SPcox (I_i) */
			tccp->prcw[i] = tmp & 0xf;	/* precinct width exponent */
			tccp->prch[i] = tmp >> 4;	/* precinct height exponent */
		}
	}

	/* INDEX >> */
	if(j2k->cstr_info && compno == 0) {
		for (i = 0; i < tccp->numresolutions; i++) {
			if (tccp->csty & J2K_CP_CSTY_PRT) {
				j2k->cstr_info->tile[j2k->curtileno].pdx[i] = tccp->prcw[i];
				j2k->cstr_info->tile[j2k->curtileno].pdy[i] = tccp->prch[i];
			}
			else {
				j2k->cstr_info->tile[j2k->curtileno].pdx[i] = 15;
				/* BUGFIX: this branch assigned pdx[i] twice and never set
				   pdy[i]; mirror the precinct branch above. */
				j2k->cstr_info->tile[j2k->curtileno].pdy[i] = 15;
			}
		}
	}
	/* << INDEX */
}
/*
 * Write the COD marker (coding style default): tile-wide coding style,
 * progression order, layer count and MCT flag, then the shared SPcox part
 * for component 0. Lcod is back-patched at the end.
 */
static void j2k_write_cod(opj_j2k_t *j2k) {
	opj_cp_t *cp = NULL;
	opj_tcp_t *tcp = NULL;
	int lenp, len;

	opj_cio_t *cio = j2k->cio;

	cio_write(cio, J2K_MS_COD, 2);	/* COD */

	lenp = cio_tell(cio);
	cio_skip(cio, 2);		/* reserve room for Lcod */

	cp = j2k->cp;
	tcp = &cp->tcps[j2k->curtileno];

	cio_write(cio, tcp->csty, 1);		/* Scod */
	cio_write(cio, tcp->prg, 1);		/* SGcod (A) */
	cio_write(cio, tcp->numlayers, 2);	/* SGcod (B) */
	cio_write(cio, tcp->mct, 1);		/* SGcod (C) */

	j2k_write_cox(j2k, 0);	/* component 0 carries the defaults */
	/* back-patch the marker segment length */
	len = cio_tell(cio) - lenp;
	cio_seek(cio, lenp);
	cio_write(cio, len, 2);		/* Lcod */
	cio_seek(cio, lenp + len);
}
/*
 * Read the COD marker (coding style default). The shared SPcox part is
 * re-read once per component — seeking back to `pos` each time — so every
 * component is initialized from the same default bytes.
 */
static void j2k_read_cod(opj_j2k_t *j2k) {
	int len, i, pos;

	opj_cio_t *cio = j2k->cio;
	opj_cp_t *cp = j2k->cp;
	/* tile-part header fills the current tile, main header the defaults */
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_image_t *image = j2k->image;

	len = cio_read(cio, 2);				/* Lcod */
	tcp->csty = cio_read(cio, 1);			/* Scod */
	tcp->prg = (OPJ_PROG_ORDER)cio_read(cio, 1);	/* SGcod (A) */
	tcp->numlayers = cio_read(cio, 2);		/* SGcod (B) */
	tcp->mct = cio_read(cio, 1);			/* SGcod (C) */

	pos = cio_tell(cio);
	for (i = 0; i < image->numcomps; i++) {
		tcp->tccps[i].csty = tcp->csty & J2K_CP_CSTY_PRT;
		cio_seek(cio, pos);	/* rewind: same SPcox bytes for every component */
		j2k_read_cox(j2k, i);
	}

	/* Index */
	if (j2k->cstr_info) {
		opj_codestream_info_t *cstr_info = j2k->cstr_info;
		cstr_info->prog = tcp->prg;
		cstr_info->numlayers = tcp->numlayers;
		cstr_info->numdecompos = (int*) opj_malloc(image->numcomps * sizeof(int));
		for (i = 0; i < image->numcomps; i++) {
			cstr_info->numdecompos[i] = tcp->tccps[i].numresolutions - 1;
		}
	}
}
/**
 * Write a COC (coding style component) marker for one component of the
 * current tile. Ccoc occupies 1 byte for images with at most 256
 * components, 2 bytes otherwise; Lcoc is back-patched after the
 * component parameters are written by j2k_write_cox.
 */
static void j2k_write_coc(opj_j2k_t *j2k, int compno) {
	int lenp, len;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = &cp->tcps[j2k->curtileno];
	opj_image_t *image = j2k->image;
	opj_cio_t *cio = j2k->cio;
	cio_write(cio, J2K_MS_COC, 2);	/* COC */
	lenp = cio_tell(cio);		/* position of Lcoc, patched below */
	cio_skip(cio, 2);
	cio_write(cio, compno, image->numcomps <= 256 ? 1 : 2);	/* Ccoc */
	cio_write(cio, tcp->tccps[compno].csty, 1);		/* Scoc */
	j2k_write_cox(j2k, compno);
	len = cio_tell(cio) - lenp;
	cio_seek(cio, lenp);
	cio_write(cio, len, 2);			/* Lcoc */
	cio_seek(cio, lenp + len);
}
/**
 * Read a COC marker (coding style for a single component), overriding
 * that component's COD-derived parameters in the active tile (or the
 * main-header defaults, depending on decoder state).
 */
static void j2k_read_coc(opj_j2k_t *j2k) {
	int len, compno;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_image_t *image = j2k->image;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lcoc */
	compno = cio_read(cio, image->numcomps <= 256 ? 1 : 2);	/* Ccoc */
	/* NOTE(review): compno is not range-checked here before indexing tccps —
	   a corrupted Ccoc could index out of bounds; verify against callers. */
	tcp->tccps[compno].csty = cio_read(cio, 1);	/* Scoc */
	j2k_read_cox(j2k, compno);
}
/**
 * Write the quantization parameters (Sqcx + SPqcx values) for one
 * component of the current tile. With scalar-derived quantization
 * (SIQNT) a single step size is written; otherwise one per subband
 * (3 per resolution level minus 2). Without quantization (NOQNT) only
 * the exponent is stored in one byte; otherwise exponent and mantissa
 * are packed into two bytes.
 */
static void j2k_write_qcx(opj_j2k_t *j2k, int compno) {
	int bandno, numbands;
	int expn, mant;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = &cp->tcps[j2k->curtileno];
	opj_tccp_t *tccp = &tcp->tccps[compno];
	opj_cio_t *cio = j2k->cio;
	cio_write(cio, tccp->qntsty + (tccp->numgbits << 5), 1);	/* Sqcx: style in low 5 bits, guard bits in high 3 */
	numbands = tccp->qntsty == J2K_CCP_QNTSTY_SIQNT ? 1 : tccp->numresolutions * 3 - 2;
	for (bandno = 0; bandno < numbands; bandno++) {
		expn = tccp->stepsizes[bandno].expn;
		mant = tccp->stepsizes[bandno].mant;
		if (tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
			cio_write(cio, expn << 3, 1);		/* SPqcx_i */
		} else {
			cio_write(cio, (expn << 11) + mant, 2);	/* SPqcx_i */
		}
	}
}
/**
 * Read the quantization parameters (Sqcx + SPqcx values) for one
 * component from a QCD/QCC marker payload of `len` bytes.
 * Fix: `numbands` is derived from the unvalidated marker length, so a
 * corrupted codestream could previously drive the store loop past the
 * end of tccp->stepsizes (J2K_MAXBANDS entries) whenever JPWL
 * correction was disabled — an out-of-bounds write. The payload is
 * still fully consumed to keep the stream position in sync, but only
 * in-range entries are stored.
 *
 * @param j2k    decompressor handle
 * @param compno index of the component being configured
 * @param len    remaining payload length of the enclosing marker, in bytes
 */
static void j2k_read_qcx(opj_j2k_t *j2k, int compno, int len) {
	int tmp;
	int bandno, numbands;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_tccp_t *tccp = &tcp->tccps[compno];
	opj_cio_t *cio = j2k->cio;
	tmp = cio_read(cio, 1);		/* Sqcx: style in low 5 bits, guard bits in high 3 */
	tccp->qntsty = tmp & 0x1f;
	tccp->numgbits = tmp >> 5;
	/* SIQNT stores one step size; NOQNT stores 1 byte per band, others 2 bytes */
	numbands = (tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) ?
		1 : ((tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) ? len - 1 : (len - 1) / 2);
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* if JPWL is on, we check whether there are too many subbands */
		if ((numbands < 0) || (numbands >= J2K_MAXBANDS)) {
			opj_event_msg(j2k->cinfo, JPWL_ASSUME ? EVT_WARNING : EVT_ERROR,
				"JPWL: bad number of subbands in Sqcx (%d)\n",
				numbands);
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			numbands = 1;
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust them\n"
				"- setting number of bands to %d => HYPOTHESIS!!!\n",
				numbands);
		};
	};
#endif /* USE_JPWL */
	for (bandno = 0; bandno < numbands; bandno++) {
		int expn, mant;
		if (tccp->qntsty == J2K_CCP_QNTSTY_NOQNT) {
			expn = cio_read(cio, 1) >> 3;	/* SPqcx_i */
			mant = 0;
		} else {
			tmp = cio_read(cio, 2);		/* SPqcx_i */
			expn = tmp >> 11;
			mant = tmp & 0x7ff;
		}
		/* Bounds guard (see function comment): store only in-range bands,
		   but keep consuming bytes so the marker parse stays aligned. */
		if (bandno < J2K_MAXBANDS) {
			tccp->stepsizes[bandno].expn = expn;
			tccp->stepsizes[bandno].mant = mant;
		}
	}
	/* scalar_derived: derive the remaining step sizes from band 0 */
	if (tccp->qntsty == J2K_CCP_QNTSTY_SIQNT) {
		for (bandno = 1; bandno < J2K_MAXBANDS; bandno++) {
			tccp->stepsizes[bandno].expn =
				((tccp->stepsizes[0].expn) - ((bandno - 1) / 3) > 0) ?
					(tccp->stepsizes[0].expn) - ((bandno - 1) / 3) : 0;
			tccp->stepsizes[bandno].mant = tccp->stepsizes[0].mant;
		}
	}
}
/**
 * Write the QCD (quantization default) marker segment.
 * Emits the marker, reserves two bytes for Lqcd, writes the default
 * quantization parameters (those of component 0), then back-patches
 * Lqcd with the measured segment length.
 */
static void j2k_write_qcd(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	int len_pos;
	int seg_len;
	cio_write(cio, J2K_MS_QCD, 2);		/* QCD */
	len_pos = cio_tell(cio);
	cio_skip(cio, 2);			/* Lqcd, patched below */
	j2k_write_qcx(j2k, 0);
	seg_len = cio_tell(cio) - len_pos;
	cio_seek(cio, len_pos);
	cio_write(cio, seg_len, 2);		/* Lqcd */
	cio_seek(cio, len_pos + seg_len);
}
/**
 * Read the QCD marker (quantization default). The single payload is
 * re-parsed once per component by rewinding to `pos`, mirroring how
 * j2k_read_cod reuses the shared SPcod bytes.
 */
static void j2k_read_qcd(opj_j2k_t *j2k) {
	int len, i, pos;
	opj_cio_t *cio = j2k->cio;
	opj_image_t *image = j2k->image;
	len = cio_read(cio, 2);		/* Lqcd */
	pos = cio_tell(cio);
	for (i = 0; i < image->numcomps; i++) {
		cio_seek(cio, pos);	/* every component parses the same payload */
		j2k_read_qcx(j2k, i, len - 2);
	}
}
/**
 * Write a QCC (quantization component) marker for one component.
 * Cqcc occupies one byte for images with at most 256 components, two
 * bytes otherwise; Lqcc is back-patched once the component's
 * quantization parameters have been written.
 */
static void j2k_write_qcc(opj_j2k_t *j2k, int compno) {
	opj_cio_t *cio = j2k->cio;
	int len_pos;
	int seg_len;
	cio_write(cio, J2K_MS_QCC, 2);		/* QCC */
	len_pos = cio_tell(cio);
	cio_skip(cio, 2);			/* Lqcc, patched below */
	cio_write(cio, compno, j2k->image->numcomps <= 256 ? 1 : 2);	/* Cqcc */
	j2k_write_qcx(j2k, compno);
	seg_len = cio_tell(cio) - len_pos;
	cio_seek(cio, len_pos);
	cio_write(cio, seg_len, 2);		/* Lqcc */
	cio_seek(cio, len_pos + seg_len);
}
/**
 * Read a QCC marker (quantization for a single component), overriding
 * that component's QCD-derived step sizes. When JPWL correction is
 * enabled, an out-of-range Cqcc may be replaced by a guessed value
 * derived from a running marker count.
 */
static void j2k_read_qcc(opj_j2k_t *j2k) {
	int len, compno;
	int numcomp = j2k->image->numcomps;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lqcc */
	compno = cio_read(cio, numcomp <= 256 ? 1 : 2);	/* Cqcc */
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* persists across calls: counts QCC markers seen so far */
		static int backup_compno = 0;
		/* compno is negative or larger than the number of components!!! */
		if ((compno < 0) || (compno >= numcomp)) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"JPWL: bad component number in QCC (%d out of a maximum of %d)\n",
				compno, numcomp);
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			compno = backup_compno % numcomp;
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust this\n"
				"- setting component number to %d\n",
				compno);
		}
		/* keep your private count of tiles */
		backup_compno++;
	};
#endif /* USE_JPWL */
	/* remaining payload = Lqcc minus its own 2 bytes and the Cqcc field */
	j2k_read_qcx(j2k, compno, len - 2 - (numcomp <= 256 ? 1 : 2));
}
/**
 * Write the POC (progression order change) marker for the current tile,
 * one entry per progression change (tcp->numpocs + 1 entries).
 */
static void j2k_write_poc(opj_j2k_t *j2k) {
	int len, numpchgs, i;
	int numcomps = j2k->image->numcomps;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = &cp->tcps[j2k->curtileno];
	opj_tccp_t *tccp = &tcp->tccps[0];
	opj_cio_t *cio = j2k->cio;
	numpchgs = 1 + tcp->numpocs;
	cio_write(cio, J2K_MS_POC, 2);	/* POC */
	/* fixed-size entries: RSpoc(1) + REpoc(1) + LYEpoc(2) + Ppoc(1) + two
	   component fields of 1 or 2 bytes each */
	len = 2 + (5 + 2 * (numcomps <= 256 ? 1 : 2)) * numpchgs;
	cio_write(cio, len, 2);		/* Lpoc */
	for (i = 0; i < numpchgs; i++) {
		opj_poc_t *poc = &tcp->pocs[i];
		cio_write(cio, poc->resno0, 1);	/* RSpoc_i */
		cio_write(cio, poc->compno0, (numcomps <= 256 ? 1 : 2));	/* CSpoc_i */
		/* NOTE(review): each upper bound is written BEFORE it is clamped, so
		   the unclamped value goes to the stream and the clamped value is only
		   used in memory afterwards — confirm this ordering is intentional. */
		cio_write(cio, poc->layno1, 2);	/* LYEpoc_i */
		poc->layno1 = int_min(poc->layno1, tcp->numlayers);
		cio_write(cio, poc->resno1, 1);	/* REpoc_i */
		poc->resno1 = int_min(poc->resno1, tccp->numresolutions);
		cio_write(cio, poc->compno1, (numcomps <= 256 ? 1 : 2));	/* CEpoc_i */
		poc->compno1 = int_min(poc->compno1, numcomps);
		cio_write(cio, poc->prg, 1);	/* Ppoc_i */
	}
}
/**
 * Read a POC marker. Multiple POC markers accumulate: new progression
 * changes are appended after any previously read ones (old_poc), and
 * tcp->numpocs tracks the total minus one.
 */
static void j2k_read_poc(opj_j2k_t *j2k) {
	int len, numpchgs, i, old_poc;
	int numcomps = j2k->image->numcomps;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_cio_t *cio = j2k->cio;
	/* continue appending after entries read from an earlier POC marker */
	old_poc = tcp->POC ? tcp->numpocs + 1 : 0;
	tcp->POC = 1;
	len = cio_read(cio, 2);		/* Lpoc */
	numpchgs = (len - 2) / (5 + 2 * (numcomps <= 256 ? 1 : 2));
	for (i = old_poc; i < numpchgs + old_poc; i++) {
		opj_poc_t *poc;
		poc = &tcp->pocs[i];
		poc->resno0 = cio_read(cio, 1);	/* RSpoc_i */
		poc->compno0 = cio_read(cio, numcomps <= 256 ? 1 : 2);	/* CSpoc_i */
		poc->layno1 = cio_read(cio, 2);	/* LYEpoc_i */
		poc->resno1 = cio_read(cio, 1);	/* REpoc_i */
		poc->compno1 = int_min(
			cio_read(cio, numcomps <= 256 ? 1 : 2), (unsigned int) numcomps);	/* CEpoc_i */
		poc->prg = (OPJ_PROG_ORDER)cio_read(cio, 1);	/* Ppoc_i */
	}
	tcp->numpocs = numpchgs + old_poc - 1;
}
/**
 * Read and skip the CRG (component registration) marker segment.
 * The per-component registration offsets are not used by this decoder;
 * the payload is consumed only to keep the stream position in sync.
 */
static void j2k_read_crg(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	int numcomps = j2k->image->numcomps;
	int len, compno;
	len = cio_read(cio, 2);		/* Lcrg (unused) */
	for (compno = 0; compno < numcomps; compno++) {
		cio_read(cio, 2);	/* Xcrg_i, discarded */
		cio_read(cio, 2);	/* Ycrg_i, discarded */
	}
}
/**
 * Read and skip a TLM (tile-part lengths, main header) marker.
 * The tile indices and lengths are parsed but not retained by this
 * decoder; only the stream position is advanced.
 */
static void j2k_read_tlm(opj_j2k_t *j2k) {
	int len, Ztlm, Stlm, ST, SP, tile_tlm, i;
	long int Ttlm_i, Ptlm_i;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Ltlm */
	Ztlm = cio_read(cio, 1);	/* Ztlm */
	Stlm = cio_read(cio, 1);	/* Stlm */
	/* ST = bits 4-5 of Stlm (size of Ttlm field: 0, 1 or 2 bytes) */
	ST = ((Stlm >> 4) & 0x01) + ((Stlm >> 4) & 0x02);
	/* SP = bit 6 of Stlm (Ptlm field is 4 bytes if set, 2 otherwise) */
	SP = (Stlm >> 6) & 0x01;
	tile_tlm = (len - 4) / ((SP + 1) * 2 + ST);
	for (i = 0; i < tile_tlm; i++) {
		Ttlm_i = cio_read(cio, ST);		/* Ttlm_i */
		Ptlm_i = cio_read(cio, SP ? 4 : 2);	/* Ptlm_i */
	}
}
/**
 * Read and skip a PLM (packet lengths, main header) marker.
 * Packet lengths are encoded 7 bits per byte with the high bit set on
 * continuation bytes; the decoded lengths are discarded.
 */
static void j2k_read_plm(opj_j2k_t *j2k) {
	int len, i, Zplm, Nplm, add, packet_len = 0;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lplm */
	Zplm = cio_read(cio, 1);	/* Zplm */
	len -= 3;
	while (len > 0) {
		Nplm = cio_read(cio, 4);	/* Nplm */
		len -= 4;
		for (i = Nplm; i > 0; i--) {
			add = cio_read(cio, 1);
			len--;
			packet_len = (packet_len << 7) + add;	/* Iplm_ij: 7 bits per byte */
			if ((add & 0x80) == 0) {
				/* high bit clear: length complete; reset for the next packet */
				packet_len = 0;
			}
			if (len <= 0)
				break;
		}
	}
}
/**
 * Read and skip a PLT (packet lengths, tile-part header) marker.
 * Packet lengths are encoded 7 bits per byte with the high bit set on
 * continuation bytes; the decoded values are discarded — only the
 * stream position is advanced.
 */
static void j2k_read_plt(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	int seg_len, byte_idx, zplt;
	int accum = 0;
	seg_len = cio_read(cio, 2);	/* Lplt */
	zplt = cio_read(cio, 1);	/* Zplt */
	for (byte_idx = 0; byte_idx < seg_len - 3; byte_idx++) {
		int b = cio_read(cio, 1);
		accum = (accum << 7) + b;	/* Iplt_i: 7 bits per byte */
		if ((b & 0x80) == 0) {
			/* high bit clear: length complete; reset for the next packet */
			accum = 0;
		}
	}
}
/**
 * Read a PPM (packed packet headers, main header) marker.
 * Successive PPM markers accumulate their header bytes into the single
 * cp->ppm_data buffer; a packet-header run (N_ppm bytes) may span
 * marker boundaries, in which case cp->ppm_previous carries the number
 * of bytes still owed to the current run into the next PPM marker.
 */
static void j2k_read_ppm(opj_j2k_t *j2k) {
	int len, Z_ppm, i, j;
	int N_ppm;
	opj_cp_t *cp = j2k->cp;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lppm */
	cp->ppm = 1;
	Z_ppm = cio_read(cio, 1);	/* Z_ppm */
	len -= 3;
	while (len > 0) {
		if (cp->ppm_previous == 0) {
			N_ppm = cio_read(cio, 4);	/* N_ppm */
			len -= 4;
		} else {
			/* finish the run left unfinished by the previous PPM marker */
			N_ppm = cp->ppm_previous;
		}
		j = cp->ppm_store;
		if (Z_ppm == 0) {	/* First PPM marker */
			cp->ppm_data = (unsigned char *) opj_malloc(N_ppm * sizeof(unsigned char));
			cp->ppm_data_first = cp->ppm_data;
			cp->ppm_len = N_ppm;
		} else {		/* NON-first PPM marker: grow the accumulated buffer */
			/* NOTE(review): outside JPWL builds the opj_realloc result is not
			   checked for NULL before use — confirm upstream handling. */
			cp->ppm_data = (unsigned char *) opj_realloc(cp->ppm_data, (N_ppm + cp->ppm_store) * sizeof(unsigned char));
#ifdef USE_JPWL
			/* this memory allocation check could be done even in non-JPWL cases */
			if (cp->correct) {
				if (!cp->ppm_data) {
					opj_event_msg(j2k->cinfo, EVT_ERROR,
						"JPWL: failed memory allocation during PPM marker parsing (pos. %x)\n",
						cio_tell(cio));
					if (!JPWL_ASSUME || JPWL_ASSUME) {
						opj_free(cp->ppm_data);
						opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
						return;
					}
				}
			}
#endif
			cp->ppm_data_first = cp->ppm_data;
			cp->ppm_len = N_ppm + cp->ppm_store;
		}
		for (i = N_ppm; i > 0; i--) {	/* Read packet header */
			cp->ppm_data[j] = cio_read(cio, 1);
			j++;
			len--;
			if (len == 0)
				break;	/* Case of non-finished packet header in present marker but finished in next one */
		}
		/* bytes of this run still to be read from the next PPM marker (0 if done) */
		cp->ppm_previous = i - 1;
		cp->ppm_store = j;
	}
}
/**
 * Read a PPT (packed packet headers, tile-part header) marker for the
 * current tile. Successive PPT markers append their payload to the
 * tile's tcp->ppt_data buffer; tcp->ppt_store tracks the write offset.
 */
static void j2k_read_ppt(opj_j2k_t *j2k) {
	int len, Z_ppt, i, j = 0;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = cp->tcps + j2k->curtileno;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lppt */
	Z_ppt = cio_read(cio, 1);	/* Z_ppt */
	tcp->ppt = 1;
	if (Z_ppt == 0) {	/* First PPT marker */
		tcp->ppt_data = (unsigned char *) opj_malloc((len - 3) * sizeof(unsigned char));
		tcp->ppt_data_first = tcp->ppt_data;
		tcp->ppt_store = 0;
		tcp->ppt_len = len - 3;
	} else {		/* NON-first PPT marker: grow the accumulated buffer */
		tcp->ppt_data = (unsigned char *) opj_realloc(tcp->ppt_data, (len - 3 + tcp->ppt_store) * sizeof(unsigned char));
		tcp->ppt_data_first = tcp->ppt_data;
		tcp->ppt_len = len - 3 + tcp->ppt_store;
	}
	j = tcp->ppt_store;
	for (i = len - 3; i > 0; i--) {
		tcp->ppt_data[j] = cio_read(cio, 1);
		j++;
	}
	tcp->ppt_store = j;
}
/**
 * Write a TLM (tile-part lengths) marker skeleton, reserving space for
 * one 5-byte entry (1-byte Ttlm + 4-byte Ptlm) per tile-part. The
 * entries themselves are filled in later by j2k_write_sod, using the
 * j2k->tlm_start offset recorded here.
 */
static void j2k_write_tlm(opj_j2k_t *j2k){
	int lenp;
	opj_cio_t *cio = j2k->cio;
	j2k->tlm_start = cio_tell(cio);
	cio_write(cio, J2K_MS_TLM, 2);/* TLM */
	lenp = 4 + (5*j2k->totnum_tp);
	cio_write(cio,lenp,2);		/* Ltlm */
	cio_write(cio, 0,1);		/* Ztlm=0*/
	/* 80 == 0x50: ST=1 (8-bit Ttlm, max 255 tiles), SP=1 (32-bit Ptlm) */
	cio_write(cio,80,1);		/* Stlm ST=1(8bits-255 tiles max),SP=1(Ptlm=32bits) */
	cio_skip(cio,5*j2k->totnum_tp);	/* placeholder for the Ttlm/Ptlm entries */
}
/**
 * Write the SOT (start of tile-part) marker for the current tile-part.
 * Psot (total tile-part length) is left as a 4-byte placeholder and is
 * back-patched by j2k_write_sod once the tile data size is known, via
 * the j2k->sot_start offset recorded here.
 */
static void j2k_write_sot(opj_j2k_t *j2k) {
	int lenp, len;
	opj_cio_t *cio = j2k->cio;
	j2k->sot_start = cio_tell(cio);
	cio_write(cio, J2K_MS_SOT, 2);		/* SOT */
	lenp = cio_tell(cio);
	cio_skip(cio, 2);			/* Lsot (further) */
	cio_write(cio, j2k->curtileno, 2);	/* Isot */
	cio_skip(cio, 4);			/* Psot (further in j2k_write_sod) */
	cio_write(cio, j2k->cur_tp_num , 1);	/* TPsot */
	cio_write(cio, j2k->cur_totnum_tp[j2k->curtileno], 1);	/* TNsot */
	len = cio_tell(cio) - lenp;
	cio_seek(cio, lenp);
	cio_write(cio, len, 2);			/* Lsot */
	cio_seek(cio, lenp + len);
/* UniPG>> */
#ifdef USE_JPWL
	/* update markers struct */
	j2k_add_marker(j2k->cstr_info, J2K_MS_SOT, j2k->sot_start, len + 2);
#endif /* USE_JPWL */
/* <<UniPG */
}
/**
 * Read an SOT marker (start of tile-part).
 * Records the tile number, tile-part index and end-of-tile position
 * (j2k->eot, derived from Psot), switches the decoder into the
 * tile-part-header state, updates the codestream index, and — on the
 * first tile-part of a tile — initializes the tile's parameters from
 * the main-header defaults.
 */
static void j2k_read_sot(opj_j2k_t *j2k) {
	int len, tileno, totlen, partno, numparts, i;
	opj_tcp_t *tcp = NULL;
	char status = 0;
	opj_cp_t *cp = j2k->cp;
	opj_cio_t *cio = j2k->cio;
	len = cio_read(cio, 2);		/* Lsot */
	tileno = cio_read(cio, 2);	/* Isot */
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* persists across calls: counts SOT markers seen so far */
		static int backup_tileno = 0;
		/* tileno is negative or larger than the number of tiles!!! */
		if ((tileno < 0) || (tileno > (cp->tw * cp->th))) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"JPWL: bad tile number (%d out of a maximum of %d)\n",
				tileno, (cp->tw * cp->th));
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			tileno = backup_tileno;
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust this\n"
				"- setting tile number to %d\n",
				tileno);
		}
		/* keep your private count of tiles */
		backup_tileno++;
	};
#endif /* USE_JPWL */
	/* register this tile number if not already seen */
	/* NOTE(review): cp->tileno[cp->tileno_size] is written without checking
	   the array capacity — a hostile stream with many distinct Isot values
	   could overrun it; confirm the allocation size at the SIZ handler. */
	if (cp->tileno_size == 0) {
		cp->tileno[cp->tileno_size] = tileno;
		cp->tileno_size++;
	} else {
		i = 0;
		while (i < cp->tileno_size && status == 0) {
			status = cp->tileno[i] == tileno ? 1 : 0;
			i++;
		}
		if (status == 0) {
			cp->tileno[cp->tileno_size] = tileno;
			cp->tileno_size++;
		}
	}
	totlen = cio_read(cio, 4);	/* Psot */
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* totlen is negative or larger than the bytes left!!! */
		if ((totlen < 0) || (totlen > (cio_numbytesleft(cio) + 8))) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"JPWL: bad tile byte size (%d bytes against %d bytes left)\n",
				totlen, cio_numbytesleft(cio) + 8);
			if (!JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
			/* we try to correct */
			totlen = 0;
			opj_event_msg(j2k->cinfo, EVT_WARNING, "- trying to adjust this\n"
				"- setting Psot to %d => assuming it is the last tile\n",
				totlen);
		}
	};
#endif /* USE_JPWL */
	/* Psot == 0 means the tile-part extends to the end of the codestream */
	if (!totlen)
		totlen = cio_numbytesleft(cio) + 8;
	partno = cio_read(cio, 1);	/* TPsot */
	numparts = cio_read(cio, 1);	/* TNsot */
	j2k->curtileno = tileno;
	j2k->cur_tp_num = partno;
	/* eot: absolute end of this tile-part (the 12 SOT bytes already read) */
	j2k->eot = cio_getbp(cio) - 12 + totlen;
	j2k->state = J2K_STATE_TPH;
	tcp = &cp->tcps[j2k->curtileno];
	/* Index */
	if (j2k->cstr_info) {
		if (tcp->first) {
			if (tileno == 0)
				j2k->cstr_info->main_head_end = cio_tell(cio) - 13;
			j2k->cstr_info->tile[tileno].tileno = tileno;
			j2k->cstr_info->tile[tileno].start_pos = cio_tell(cio) - 12;
			j2k->cstr_info->tile[tileno].end_pos = j2k->cstr_info->tile[tileno].start_pos + totlen - 1;
			j2k->cstr_info->tile[tileno].num_tps = numparts;
			if (numparts)
				j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(numparts * sizeof(opj_tp_info_t));
			else
				j2k->cstr_info->tile[tileno].tp = (opj_tp_info_t *) opj_malloc(10 * sizeof(opj_tp_info_t)); /* Fixme (10): arbitrary capacity when TNsot is unknown */
		}
		else {
			j2k->cstr_info->tile[tileno].end_pos += totlen;
		}
		j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos = cio_tell(cio) - 12;
		j2k->cstr_info->tile[tileno].tp[partno].tp_end_pos =
			j2k->cstr_info->tile[tileno].tp[partno].tp_start_pos + totlen - 1;
	}
	if (tcp->first == 1) {
		/* First tile-part of this tile: copy main-header defaults into the
		   tile's tcp, preserving the tile's own tccps array (the struct copy
		   would otherwise clobber its pointer with default_tcp's). */
		opj_tccp_t *tmp = tcp->tccps;
		memcpy(tcp, j2k->default_tcp, sizeof(opj_tcp_t));
		tcp->ppt = 0;
		tcp->ppt_data = NULL;
		tcp->ppt_data_first = NULL;
		tcp->tccps = tmp;
		for (i = 0; i < j2k->image->numcomps; i++) {
			tcp->tccps[i] = j2k->default_tcp->tccps[i];
		}
		cp->tcps[j2k->curtileno].first = 0;
	}
}
/**
 * Write the SOD marker and encode the current tile-part's data.
 * After encoding, back-patches Psot in the SOT marker written by
 * j2k_write_sot and, in cinema mode, fills this tile-part's Ttlm/Ptlm
 * entry in the TLM marker reserved by j2k_write_tlm.
 *
 * @param j2k        J2K compressor handle
 * @param tile_coder opaque opj_tcd_t* (cast here to avoid header clashes)
 */
static void j2k_write_sod(opj_j2k_t *j2k, void *tile_coder) {
	int l, layno;
	int totlen;
	opj_tcp_t *tcp = NULL;
	opj_codestream_info_t *cstr_info = NULL;
	opj_tcd_t *tcd = (opj_tcd_t*)tile_coder;	/* cast is needed because of conflicts in header inclusions */
	opj_cp_t *cp = j2k->cp;
	opj_cio_t *cio = j2k->cio;
	tcd->tp_num = j2k->tp_num ;
	tcd->cur_tp_num = j2k->cur_tp_num;
	cio_write(cio, J2K_MS_SOD, 2);	/* SOD */
	if (j2k->curtileno == 0) {
		j2k->sod_start = cio_tell(cio) + j2k->pos_correction;
	}
	/* INDEX >> */
	cstr_info = j2k->cstr_info;
	if (cstr_info) {
		if (!j2k->cur_tp_num ) {
			cstr_info->tile[j2k->curtileno].end_header = cio_tell(cio) + j2k->pos_correction - 1;
			j2k->cstr_info->tile[j2k->curtileno].tileno = j2k->curtileno;
		}
		else{
			if(cstr_info->tile[j2k->curtileno].packet[cstr_info->packno - 1].end_pos < cio_tell(cio))
				cstr_info->tile[j2k->curtileno].packet[cstr_info->packno].start_pos = cio_tell(cio);
		}
/* UniPG>> */
#ifdef USE_JPWL
		/* update markers struct */
		j2k_add_marker(j2k->cstr_info, J2K_MS_SOD, j2k->sod_start, 2);
#endif /* USE_JPWL */
/* <<UniPG */
	}
	/* << INDEX */
	tcp = &cp->tcps[j2k->curtileno];
	/* deduct the header overhead share from each layer's rate budget */
	for (layno = 0; layno < tcp->numlayers; layno++) {
		if (tcp->rates[layno]>(j2k->sod_start / (cp->th * cp->tw))) {
			tcp->rates[layno]-=(j2k->sod_start / (cp->th * cp->tw));
		} else if (tcp->rates[layno]) {
			tcp->rates[layno]=1;
		}
	}
	if(j2k->cur_tp_num == 0){
		tcd->tcd_image->tiles->packno = 0;
		if(cstr_info)
			cstr_info->packno = 0;
	}
	/* encode the tile data directly into the output buffer (reserve 2 bytes for EOC) */
	l = tcd_encode_tile(tcd, j2k->curtileno, cio_getbp(cio), cio_numbytesleft(cio) - 2, cstr_info);
	/* Writing Psot in SOT marker */
	totlen = cio_tell(cio) + l - j2k->sot_start;
	cio_seek(cio, j2k->sot_start + 6);
	cio_write(cio, totlen, 4);
	cio_seek(cio, j2k->sot_start + totlen);
	/* Writing Ttlm and Ptlm in TLM marker */
	if(cp->cinema){
		cio_seek(cio, j2k->tlm_start + 6 + (5*j2k->cur_tp_num));
		cio_write(cio, j2k->curtileno, 1);
		cio_write(cio, totlen, 4);
	}
	cio_seek(cio, j2k->sot_start + totlen);
}
/**
 * Read the SOD marker: copy this tile-part's compressed data into the
 * per-tile accumulation buffer (decoded later by j2k_read_eoc).
 * If fewer bytes remain than Psot promised, the codestream is treated
 * as truncated and the decoder jumps to the no-EOC state.
 */
static void j2k_read_sod(opj_j2k_t *j2k) {
	int len, truncate = 0, i;
	unsigned char *data = NULL, *data_ptr = NULL;
	opj_cio_t *cio = j2k->cio;
	int curtileno = j2k->curtileno;
	/* Index */
	if (j2k->cstr_info) {
		j2k->cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_end_header =
			cio_tell(cio) + j2k->pos_correction - 1;
		if (j2k->cur_tp_num == 0)
			j2k->cstr_info->tile[j2k->curtileno].end_header = cio_tell(cio) + j2k->pos_correction - 1;
		j2k->cstr_info->packno = 0;
	}
	/* bytes up to the tile-part end (j2k->eot), capped by what remains */
	len = int_min(j2k->eot - cio_getbp(cio), cio_numbytesleft(cio) + 1);
	if (len == cio_numbytesleft(cio) + 1) {
		truncate = 1;		/* Case of a truncate codestream */
	}
	/* append this tile-part's bytes to the tile's buffer */
	data = j2k->tile_data[curtileno];
	data = (unsigned char*) opj_realloc(data, (j2k->tile_len[curtileno] + len) * sizeof(unsigned char));
	data_ptr = data + j2k->tile_len[curtileno];
	for (i = 0; i < len; i++) {
		data_ptr[i] = cio_read(cio, 1);
	}
	j2k->tile_len[curtileno] += len;
	j2k->tile_data[curtileno] = data;
	if (!truncate) {
		j2k->state = J2K_STATE_TPHSOT;
	} else {
		j2k->state = J2K_STATE_NEOC;	/* expect no EOC marker */
	}
	j2k->cur_tp_num++;
}
/**
 * Write an RGN (region of interest) marker for one component of one
 * tile. Srgn is always 0 (implicit ROI style); SPrgn carries the
 * component's ROI shift value.
 */
static void j2k_write_rgn(opj_j2k_t *j2k, int compno, int tileno) {
	opj_cio_t *cio = j2k->cio;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = &cp->tcps[tileno];
	int numcomps = j2k->image->numcomps;
	int crgn_size = (numcomps <= 256) ? 1 : 2;	/* Crgn field width in bytes */
	cio_write(cio, J2K_MS_RGN, 2);			/* RGN */
	cio_write(cio, 4 + crgn_size, 2);		/* Lrgn: 5 or 6 */
	cio_write(cio, compno, crgn_size);		/* Crgn */
	cio_write(cio, 0, 1);				/* Srgn */
	cio_write(cio, tcp->tccps[compno].roishift, 1);	/* SPrgn */
}
/**
 * Read an RGN (region of interest) marker and store the ROI shift for
 * the addressed component.
 * Fix: when JPWL correction is disabled, an out-of-range Crgn value was
 * previously used to index tcp->tccps unchecked — an out-of-bounds
 * write driven by the codestream. Follow the file's established error
 * pattern (see the cp_reduce check in j2k_read_cox) by reporting the
 * error and setting J2K_STATE_ERR, which aborts decoding in j2k_decode.
 */
static void j2k_read_rgn(opj_j2k_t *j2k) {
	int len, compno, roisty;
	opj_cp_t *cp = j2k->cp;
	opj_tcp_t *tcp = j2k->state == J2K_STATE_TPH ? &cp->tcps[j2k->curtileno] : j2k->default_tcp;
	opj_cio_t *cio = j2k->cio;
	int numcomps = j2k->image->numcomps;
	len = cio_read(cio, 2);		/* Lrgn */
	compno = cio_read(cio, numcomps <= 256 ? 1 : 2);	/* Crgn */
	roisty = cio_read(cio, 1);	/* Srgn */
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		/* totlen is negative or larger than the bytes left!!! */
		if (compno >= numcomps) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"JPWL: bad component number in RGN (%d when there are only %d)\n",
				compno, numcomps);
			if (!JPWL_ASSUME || JPWL_ASSUME) {
				opj_event_msg(j2k->cinfo, EVT_ERROR, "JPWL: giving up\n");
				return;
			}
		}
	};
#endif /* USE_JPWL */
	/* Bounds guard for non-JPWL builds (see function comment) */
	if (compno >= numcomps) {
		opj_event_msg(j2k->cinfo, EVT_ERROR,
			"bad component number in RGN (%d when there are only %d)\n",
			compno, numcomps);
		j2k->state |= J2K_STATE_ERR;
		return;
	}
	tcp->tccps[compno].roishift = cio_read(cio, 1);	/* SPrgn */
}
/**
 * Write the EOC (end of codestream) marker and, in JPWL builds, record
 * it in the codestream index.
 */
static void j2k_write_eoc(opj_j2k_t *j2k) {
	opj_cio_t *cio = j2k->cio;
	cio_write(cio, J2K_MS_EOC, 2);	/* EOC */
/* UniPG>> */
#ifdef USE_JPWL
	/* update markers struct */
	j2k_add_marker(j2k->cstr_info, J2K_MS_EOC, cio_tell(cio) - 2, 2);
#endif /* USE_JPWL */
/* <<UniPG */
}
/**
 * Handle the EOC marker: decode every accumulated tile (unless packet
 * decoding is disabled), free the per-tile data buffers, and move the
 * decoder to the terminal state (with the error flag preserved if any
 * tile failed).
 */
static void j2k_read_eoc(opj_j2k_t *j2k) {
	int i, tileno;
	bool success;
	/* if packets should be decoded */
	if (j2k->cp->limit_decoding != DECODE_ALL_BUT_PACKETS) {
		opj_tcd_t *tcd = tcd_create(j2k->cinfo);
		tcd_malloc_decode(tcd, j2k->image, j2k->cp);
		for (i = 0; i < j2k->cp->tileno_size; i++) {
			tcd_malloc_decode_tile(tcd, j2k->image, j2k->cp, i, j2k->cstr_info);
			tileno = j2k->cp->tileno[i];
			success = tcd_decode_tile(tcd, j2k->tile_data[tileno], j2k->tile_len[tileno], tileno, j2k->cstr_info);
			/* the tile's compressed bytes are no longer needed once decoded */
			opj_free(j2k->tile_data[tileno]);
			j2k->tile_data[tileno] = NULL;
			tcd_free_decode_tile(tcd, i);
			if (success == false) {
				j2k->state |= J2K_STATE_ERR;
				break;	/* abandon remaining tiles on first failure */
			}
		}
		tcd_free_decode(tcd);
		tcd_destroy(tcd);
	}
	/* if packets should not be decoded */
	else {
		for (i = 0; i < j2k->cp->tileno_size; i++) {
			tileno = j2k->cp->tileno[i];
			opj_free(j2k->tile_data[tileno]);
			j2k->tile_data[tileno] = NULL;
		}
	}
	if (j2k->state & J2K_STATE_ERR)
		j2k->state = J2K_STATE_MT + J2K_STATE_ERR;
	else
		j2k->state = J2K_STATE_MT;
}
/**
 * Decoder marker-handler table entry: associates a marker value with
 * the decoder states in which it may legally appear and the routine
 * that parses it.
 */
typedef struct opj_dec_mstabent {
	/** marker value */
	int id;
	/** value of the state when the marker can appear */
	int states;
	/** action linked to the marker */
	void (*handler) (opj_j2k_t *j2k);
} opj_dec_mstabent_t;
/**
 * Decoder dispatch table: one entry per known marker. The terminating
 * entry (id == 0) is the wildcard returned by j2k_dec_mstab_lookup for
 * unknown markers; it dispatches to j2k_read_unk. A NULL handler (SOP)
 * means the marker is accepted but ignored.
 */
opj_dec_mstabent_t j2k_dec_mstab[] = {
	{J2K_MS_SOC, J2K_STATE_MHSOC, j2k_read_soc},
	{J2K_MS_SOT, J2K_STATE_MH | J2K_STATE_TPHSOT, j2k_read_sot},
	{J2K_MS_SOD, J2K_STATE_TPH, j2k_read_sod},
	{J2K_MS_EOC, J2K_STATE_TPHSOT, j2k_read_eoc},
	{J2K_MS_SIZ, J2K_STATE_MHSIZ, j2k_read_siz},
	{J2K_MS_COD, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_cod},
	{J2K_MS_COC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_coc},
	{J2K_MS_RGN, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_rgn},
	{J2K_MS_QCD, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_qcd},
	{J2K_MS_QCC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_qcc},
	{J2K_MS_POC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_poc},
	{J2K_MS_TLM, J2K_STATE_MH, j2k_read_tlm},
	{J2K_MS_PLM, J2K_STATE_MH, j2k_read_plm},
	{J2K_MS_PLT, J2K_STATE_TPH, j2k_read_plt},
	{J2K_MS_PPM, J2K_STATE_MH, j2k_read_ppm},
	{J2K_MS_PPT, J2K_STATE_TPH, j2k_read_ppt},
	{J2K_MS_SOP, 0, 0},
	{J2K_MS_CRG, J2K_STATE_MH, j2k_read_crg},
	{J2K_MS_COM, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_com},
#ifdef USE_JPWL
	{J2K_MS_EPC, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epc},
	{J2K_MS_EPB, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_epb},
	{J2K_MS_ESD, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_esd},
	{J2K_MS_RED, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_red},
#endif /* USE_JPWL */
#ifdef USE_JPSEC
	{J2K_MS_SEC, J2K_STATE_MH, j2k_read_sec},
	{J2K_MS_INSEC, 0, j2k_read_insec},
#endif /* USE_JPSEC */
	{0, J2K_STATE_MH | J2K_STATE_TPH, j2k_read_unk}
};
/**
 * Handle an unknown marker. In plain builds this only logs a warning.
 * In JPWL-correcting builds it additionally looks for the known marker
 * at the smallest Hamming distance from the bogus value and, if close
 * enough, overwrites the marker in place so decoding can resynchronize.
 */
static void j2k_read_unk(opj_j2k_t *j2k) {
	opj_event_msg(j2k->cinfo, EVT_WARNING, "Unknown marker\n");
#ifdef USE_JPWL
	if (j2k->cp->correct) {
		int m = 0, id, i;
		int min_id = 0, min_dist = 17, cur_dist = 0, tmp_id;
		/* step back and re-read the offending 2-byte marker */
		cio_seek(j2k->cio, cio_tell(j2k->cio) - 2);
		id = cio_read(j2k->cio, 2);
		opj_event_msg(j2k->cinfo, EVT_ERROR,
			"JPWL: really don't know this marker %x\n",
			id);
		if (!JPWL_ASSUME) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"- possible synch loss due to uncorrectable codestream errors => giving up\n");
			return;
		}
		/* OK, activate this at your own risk!!! */
		/* we look for the marker at the minimum hamming distance from this */
		while (j2k_dec_mstab[m].id) {
			/* 1's where they differ */
			tmp_id = j2k_dec_mstab[m].id ^ id;
			/* compute the hamming distance between our id and the current */
			cur_dist = 0;
			for (i = 0; i < 16; i++) {
				if ((tmp_id >> i) & 0x0001) {
					cur_dist++;
				}
			}
			/* if current distance is smaller, set the minimum */
			if (cur_dist < min_dist) {
				min_dist = cur_dist;
				min_id = j2k_dec_mstab[m].id;
			}
			/* jump to the next marker */
			m++;
		}
		/* do we substitute the marker? */
		if (min_dist < JPWL_MAXIMUM_HAMMING) {
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"- marker %x is at distance %d from the read %x\n",
				min_id, min_dist, id);
			opj_event_msg(j2k->cinfo, EVT_ERROR,
				"- trying to substitute in place and crossing fingers!\n");
			cio_seek(j2k->cio, cio_tell(j2k->cio) - 2);
			cio_write(j2k->cio, min_id, 2);
			/* rewind so the substituted marker is parsed next */
			cio_seek(j2k->cio, cio_tell(j2k->cio) - 2);
		}
	};
#endif /* USE_JPWL */
}
/**
 * Look up the decoder table entry for a marker value.
 * Falls through to the terminating wildcard entry (id == 0, which
 * dispatches to j2k_read_unk) when the marker is not known.
 * @param id marker value
 * @return pointer to the matching (or wildcard) table entry
 */
static opj_dec_mstabent_t *j2k_dec_mstab_lookup(int id) {
	opj_dec_mstabent_t *entry = j2k_dec_mstab;
	while (entry->id != 0 && entry->id != id) {
		entry++;
	}
	return entry;
}
/* ----------------------------------------------------------------------- */
/* J2K / JPT decoder interface */
/* ----------------------------------------------------------------------- */
/**
 * Allocate and zero-initialize a J2K decompressor handle, including
 * the main-header default tile parameters.
 * @param cinfo shared codec event/context pointer
 * @return new handle, or NULL on allocation failure
 */
opj_j2k_t* j2k_create_decompress(opj_common_ptr cinfo) {
	opj_j2k_t *j2k = (opj_j2k_t*) opj_calloc(1, sizeof(opj_j2k_t));
	if (j2k == NULL)
		return NULL;
	j2k->cinfo = cinfo;
	j2k->tile_data = NULL;
	j2k->default_tcp = (opj_tcp_t*) opj_calloc(1, sizeof(opj_tcp_t));
	if (j2k->default_tcp == NULL) {
		opj_free(j2k);
		return NULL;
	}
	return j2k;
}
/**
 * Free a decompressor handle and everything it owns: the tile length
 * and data arrays (per-tile buffers themselves are released during
 * decoding, e.g. in j2k_read_eoc), the main-header default tile
 * parameters, and the coding-parameter structure with its per-tile
 * state, PPM data, tile list and comment.
 */
void j2k_destroy_decompress(opj_j2k_t *j2k) {
	int i = 0;
	if(j2k->tile_len != NULL) {
		opj_free(j2k->tile_len);
	}
	if(j2k->tile_data != NULL) {
		opj_free(j2k->tile_data);
	}
	if(j2k->default_tcp != NULL) {
		opj_tcp_t *default_tcp = j2k->default_tcp;
		if(default_tcp->ppt_data_first != NULL) {
			opj_free(default_tcp->ppt_data_first);
		}
		if(j2k->default_tcp->tccps != NULL) {
			opj_free(j2k->default_tcp->tccps);
		}
		opj_free(j2k->default_tcp);
	}
	if(j2k->cp != NULL) {
		opj_cp_t *cp = j2k->cp;
		if(cp->tcps != NULL) {
			for(i = 0; i < cp->tw * cp->th; i++) {
				if(cp->tcps[i].ppt_data_first != NULL) {
					opj_free(cp->tcps[i].ppt_data_first);
				}
				if(cp->tcps[i].tccps != NULL) {
					opj_free(cp->tcps[i].tccps);
				}
			}
			opj_free(cp->tcps);
		}
		if(cp->ppm_data_first != NULL) {
			opj_free(cp->ppm_data_first);
		}
		if(cp->tileno != NULL) {
			opj_free(cp->tileno);
		}
		if(cp->comment != NULL) {
			opj_free(cp->comment);
		}
		opj_free(cp);
	}
	opj_free(j2k);
}
/**
 * Configure a decoder from user parameters.
 * Allocates the coding-parameter structure, copies the decoding options
 * into it, and hands ownership to the j2k handle (it is released later
 * by j2k_destroy_decompress). No-op if either argument is NULL.
 */
void j2k_setup_decoder(opj_j2k_t *j2k, opj_dparameters_t *parameters) {
	opj_cp_t *cp;
	if (j2k == NULL || parameters == NULL)
		return;
	/* create and initialize the coding parameters structure */
	cp = (opj_cp_t*) opj_calloc(1, sizeof(opj_cp_t));
	cp->reduce = parameters->cp_reduce;
	cp->layer = parameters->cp_layer;
	cp->limit_decoding = parameters->cp_limit_decoding;
#ifdef USE_JPWL
	cp->correct = parameters->jpwl_correct;
	cp->exp_comps = parameters->jpwl_exp_comps;
	cp->max_tiles = parameters->jpwl_max_tiles;
#endif /* USE_JPWL */
	/* keep a link to cp so that we can destroy it later in j2k_destroy_decompress */
	j2k->cp = cp;
}
/**
 * Decode a J2K codestream into a new image.
 * Main marker loop: read a 2-byte marker, look up its handler in
 * j2k_dec_mstab, verify it is legal in the current decoder state, and
 * dispatch. Terminates at the MT (end) or NEOC (truncated, no EOC)
 * state; in the latter case the buffered tiles are decoded explicitly.
 *
 * @param j2k       decompressor handle
 * @param cio       input stream
 * @param cstr_info optional codestream index to fill (may be NULL)
 * @return decoded image, or NULL/0 on error
 */
opj_image_t* j2k_decode(opj_j2k_t *j2k, opj_cio_t *cio, opj_codestream_info_t *cstr_info) {
	opj_image_t *image = NULL;
	opj_common_ptr cinfo = j2k->cinfo;
	j2k->cio = cio;
	j2k->cstr_info = cstr_info;
	if (cstr_info)
		memset(cstr_info, 0, sizeof(opj_codestream_info_t));
	/* create an empty image */
	image = opj_image_create0();
	j2k->image = image;
	j2k->state = J2K_STATE_MHSOC;
	for (;;) {
		opj_dec_mstabent_t *e;
		int id = cio_read(cio, 2);
#ifdef USE_JPWL
		/* we try to honor JPWL correction power */
		if (j2k->cp->correct) {
			int orig_pos = cio_tell(cio);
			bool status;
			/* call the corrector */
			status = jpwl_correct(j2k);
			/* go back to where you were */
			cio_seek(cio, orig_pos - 2);
			/* re-read the marker */
			id = cio_read(cio, 2);
			/* check whether it begins with ff */
			if (id >> 8 != 0xff) {
				opj_event_msg(cinfo, EVT_ERROR,
					"JPWL: possible bad marker %x at %d\n",
					id, cio_tell(cio) - 2);
				if (!JPWL_ASSUME) {
					opj_image_destroy(image);
					opj_event_msg(cinfo, EVT_ERROR, "JPWL: giving up\n");
					return 0;
				}
				/* we try to correct */
				id = id | 0xff00;
				cio_seek(cio, cio_tell(cio) - 2);
				cio_write(cio, id, 2);
				opj_event_msg(cinfo, EVT_WARNING, "- trying to adjust this\n"
					"- setting marker to %x\n",
					id);
			}
		}
#endif /* USE_JPWL */
		/* every marker starts with 0xFF */
		if (id >> 8 != 0xff) {
			opj_image_destroy(image);
			opj_event_msg(cinfo, EVT_ERROR, "%.8x: expected a marker instead of %x\n", cio_tell(cio) - 2, id);
			return 0;
		}
		e = j2k_dec_mstab_lookup(id);
		/* Check if the marker is legal in the current decoder state */
		if (!(j2k->state & e->states)) {
			opj_image_destroy(image);
			opj_event_msg(cinfo, EVT_ERROR, "%.8x: unexpected marker %x\n", cio_tell(cio) - 2, id);
			return 0;
		}
		/* Check if the decoding is limited to the main header */
		if (e->id == J2K_MS_SOT && j2k->cp->limit_decoding == LIMIT_TO_MAIN_HEADER) {
			opj_event_msg(cinfo, EVT_INFO, "Main Header decoded.\n");
			return image;
		}
		if (e->handler) {
			(*e->handler)(j2k);
		}
		/* a handler may flag an unrecoverable error (e.g. j2k_read_eoc) */
		if (j2k->state & J2K_STATE_ERR)
			return NULL;
		if (j2k->state == J2K_STATE_MT) {
			break;	/* normal termination after EOC */
		}
		if (j2k->state == J2K_STATE_NEOC) {
			break;	/* truncated stream: no EOC expected */
		}
	}
	if (j2k->state == J2K_STATE_NEOC) {
		/* decode the buffered tiles despite the missing EOC */
		j2k_read_eoc(j2k);
	}
	if (j2k->state != J2K_STATE_MT) {
		opj_event_msg(cinfo, EVT_WARNING, "Incomplete bitstream\n");
	}
	return image;
}
/*
 * Read a JPT-stream (JPIP tile-part stream) and decode it into an image.
 */
/**
 * Decode a JPT-stream (JPIP tile-part stream) into a new image.
 * Like j2k_decode, but the codestream arrives as a sequence of
 * message-header-prefixed data-bins: the first must be a main-header
 * bin (Class_Id 6), subsequent ones tile data-bins (Class_Id 4). A new
 * message header is read each time the current bin is exhausted.
 *
 * @param j2k       decompressor handle
 * @param cio       input stream
 * @param cstr_info codestream index (unused here; kept for symmetry)
 * @return decoded image, or NULL/0 on error
 */
opj_image_t* j2k_decode_jpt_stream(opj_j2k_t *j2k, opj_cio_t *cio, opj_codestream_info_t *cstr_info) {
	opj_image_t *image = NULL;
	opj_jpt_msg_header_t header;
	int position;
	opj_common_ptr cinfo = j2k->cinfo;
	j2k->cio = cio;
	/* create an empty image */
	image = opj_image_create0();
	j2k->image = image;
	j2k->state = J2K_STATE_MHSOC;
	/* Initialize the header */
	jpt_init_msg_header(&header);
	/* Read the first header of the message */
	jpt_read_msg_header(cinfo, cio, &header);
	position = cio_tell(cio);
	if (header.Class_Id != 6) {	/* 6 : Main header data-bin message */
		opj_image_destroy(image);
		opj_event_msg(cinfo, EVT_ERROR, "[JPT-stream] : Expecting Main header first [class_Id %d] !\n", header.Class_Id);
		return 0;
	}
	for (;;) {
		opj_dec_mstabent_t *e = NULL;
		int id;
		/* exhausted input: decode what we have */
		if (!cio_numbytesleft(cio)) {
			j2k_read_eoc(j2k);
			return image;
		}
		/* data-bin read -> need to read a new header */
		if ((unsigned int) (cio_tell(cio) - position) == header.Msg_length) {
			jpt_read_msg_header(cinfo, cio, &header);
			position = cio_tell(cio);
			if (header.Class_Id != 4) {	/* 4 : Tile data-bin message */
				opj_image_destroy(image);
				opj_event_msg(cinfo, EVT_ERROR, "[JPT-stream] : Expecting Tile info !\n");
				return 0;
			}
		}
		id = cio_read(cio, 2);
		/* every marker starts with 0xFF */
		if (id >> 8 != 0xff) {
			opj_image_destroy(image);
			opj_event_msg(cinfo, EVT_ERROR, "%.8x: expected a marker instead of %x\n", cio_tell(cio) - 2, id);
			return 0;
		}
		e = j2k_dec_mstab_lookup(id);
		/* marker must be legal in the current decoder state */
		if (!(j2k->state & e->states)) {
			opj_image_destroy(image);
			opj_event_msg(cinfo, EVT_ERROR, "%.8x: unexpected marker %x\n", cio_tell(cio) - 2, id);
			return 0;
		}
		if (e->handler) {
			(*e->handler)(j2k);
		}
		if (j2k->state == J2K_STATE_MT) {
			break;	/* normal termination after EOC */
		}
		if (j2k->state == J2K_STATE_NEOC) {
			break;	/* truncated stream: no EOC expected */
		}
	}
	if (j2k->state == J2K_STATE_NEOC) {
		j2k_read_eoc(j2k);
	}
	if (j2k->state != J2K_STATE_MT) {
		opj_event_msg(cinfo, EVT_WARNING, "Incomplete bitstream\n");
	}
	return image;
}
/* ----------------------------------------------------------------------- */
/* J2K encoder interface */
/* ----------------------------------------------------------------------- */
/**
 * Allocate and zero-initialize a J2K compressor handle.
 * @param cinfo shared codec event/context pointer
 * @return new handle, or NULL on allocation failure
 */
opj_j2k_t* j2k_create_compress(opj_common_ptr cinfo) {
	opj_j2k_t *j2k = (opj_j2k_t*) opj_calloc(1, sizeof(opj_j2k_t));
	if (j2k == NULL)
		return NULL;
	j2k->cinfo = cinfo;
	return j2k;
}
/**
 * Free a compressor handle and the coding parameters it owns: the
 * comment string, the rate matrix, and every tile's component
 * parameters. Safe to call with NULL.
 */
void j2k_destroy_compress(opj_j2k_t *j2k) {
	int tileno;
	opj_cp_t *cp;
	if (j2k == NULL)
		return;
	cp = j2k->cp;
	if (cp != NULL) {
		if (cp->comment != NULL)
			opj_free(cp->comment);
		if (cp->matrice != NULL)
			opj_free(cp->matrice);
		for (tileno = 0; tileno < cp->tw * cp->th; tileno++)
			opj_free(cp->tcps[tileno].tccps);
		opj_free(cp->tcps);
		opj_free(cp);
	}
	opj_free(j2k);
}
/* Set up the encoder: build and initialize the coding parameters
 * structure (j2k->cp) from the user supplied parameters and the image
 * to be encoded.
 * NOTE(review): allocation results (opj_calloc/opj_malloc) below are
 * used without NULL checks; an allocation failure would dereference
 * NULL -- confirm against the project's error handling policy. */
void j2k_setup_encoder(opj_j2k_t *j2k, opj_cparameters_t *parameters, opj_image_t *image) {
  int i, j, tileno, numpocs_tile;
  opj_cp_t *cp = NULL;
  if(!j2k || !parameters || ! image) {
    return;
  }
  /* create and initialize the coding parameters structure */
  cp = (opj_cp_t*) opj_calloc(1, sizeof(opj_cp_t));
  /* keep a link to cp so that we can destroy it later in j2k_destroy_compress */
  j2k->cp = cp;
  /* set default values for cp: a single tile */
  cp->tw = 1;
  cp->th = 1;
  /*
  copy user encoding parameters
  */
  cp->cinema = parameters->cp_cinema;
  cp->max_comp_size = parameters->max_comp_size;
  cp->rsiz = parameters->cp_rsiz;
  cp->disto_alloc = parameters->cp_disto_alloc;
  cp->fixed_alloc = parameters->cp_fixed_alloc;
  cp->fixed_quality = parameters->cp_fixed_quality;
  /* mod fixed_quality: copy the user supplied layer allocation matrix */
  if(parameters->cp_matrice) {
    size_t array_size = parameters->tcp_numlayers * parameters->numresolution * 3 * sizeof(int);
    cp->matrice = (int *) opj_malloc(array_size);
    memcpy(cp->matrice, parameters->cp_matrice, array_size);
  }
  /* tiles */
  cp->tdx = parameters->cp_tdx;
  cp->tdy = parameters->cp_tdy;
  /* tile offset */
  cp->tx0 = parameters->cp_tx0;
  cp->ty0 = parameters->cp_ty0;
  /* comment string */
  if(parameters->cp_comment) {
    cp->comment = (char*)opj_malloc(strlen(parameters->cp_comment) + 1);
    if(cp->comment) {
      strcpy(cp->comment, parameters->cp_comment);
    }
  }
  /*
  calculate other encoding parameters
  */
  if (parameters->tile_size_on) {
    /* number of tiles in each direction */
    cp->tw = int_ceildiv(image->x1 - cp->tx0, cp->tdx);
    cp->th = int_ceildiv(image->y1 - cp->ty0, cp->tdy);
  } else {
    /* single tile covering the whole image area */
    cp->tdx = image->x1 - cp->tx0;
    cp->tdy = image->y1 - cp->ty0;
  }
  if(parameters->tp_on){
    cp->tp_flag = parameters->tp_flag;
    cp->tp_on = 1;
  }
  /* uncompressed image size in bits, used later for rate allocation */
  cp->img_size = 0;
  for(i=0;i<image->numcomps ;i++){
    cp->img_size += (image->comps[i].w *image->comps[i].h * image->comps[i].prec);
  }
#ifdef USE_JPWL
  /*
  calculate JPWL encoding parameters
  */
  if (parameters->jpwl_epc_on) {
    int i;
    /* set JPWL on */
    cp->epc_on = true;
    cp->info_on = false; /* no informative technique */
    /* set EPB on */
    if ((parameters->jpwl_hprot_MH > 0) || (parameters->jpwl_hprot_TPH[0] > 0)) {
      cp->epb_on = true;
      cp->hprot_MH = parameters->jpwl_hprot_MH;
      for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
        cp->hprot_TPH_tileno[i] = parameters->jpwl_hprot_TPH_tileno[i];
        cp->hprot_TPH[i] = parameters->jpwl_hprot_TPH[i];
      }
      /* if tile specs are not specified, copy MH specs */
      if (cp->hprot_TPH[0] == -1) {
        cp->hprot_TPH_tileno[0] = 0;
        cp->hprot_TPH[0] = parameters->jpwl_hprot_MH;
      }
      for (i = 0; i < JPWL_MAX_NO_PACKSPECS; i++) {
        cp->pprot_tileno[i] = parameters->jpwl_pprot_tileno[i];
        cp->pprot_packno[i] = parameters->jpwl_pprot_packno[i];
        cp->pprot[i] = parameters->jpwl_pprot[i];
      }
    }
    /* set ESD writing */
    if ((parameters->jpwl_sens_size == 1) || (parameters->jpwl_sens_size == 2)) {
      cp->esd_on = true;
      cp->sens_size = parameters->jpwl_sens_size;
      cp->sens_addr = parameters->jpwl_sens_addr;
      cp->sens_range = parameters->jpwl_sens_range;
      cp->sens_MH = parameters->jpwl_sens_MH;
      for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) {
        cp->sens_TPH_tileno[i] = parameters->jpwl_sens_TPH_tileno[i];
        cp->sens_TPH[i] = parameters->jpwl_sens_TPH[i];
      }
    }
    /* always set RED writing to false: we are at the encoder */
    cp->red_on = false;
  } else {
    cp->epc_on = false;
  }
#endif /* USE_JPWL */
  /* initialize the mutiple tiles */
  /* ---------------------------- */
  cp->tcps = (opj_tcp_t*) opj_calloc(cp->tw * cp->th, sizeof(opj_tcp_t));
  for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
    opj_tcp_t *tcp = &cp->tcps[tileno];
    tcp->numlayers = parameters->tcp_numlayers;
    for (j = 0; j < tcp->numlayers; j++) {
      if(cp->cinema){
        /* digital cinema: rate driven, optionally with quality targets */
        if (cp->fixed_quality) {
          tcp->distoratio[j] = parameters->tcp_distoratio[j];
        }
        tcp->rates[j] = parameters->tcp_rates[j];
      }else{
        if (cp->fixed_quality) { /* add fixed_quality */
          tcp->distoratio[j] = parameters->tcp_distoratio[j];
        } else {
          tcp->rates[j] = parameters->tcp_rates[j];
        }
      }
    }
    tcp->csty = parameters->csty;
    tcp->prg = parameters->prog_order;
    tcp->mct = parameters->tcp_mct;
    numpocs_tile = 0;
    tcp->POC = 0;
    if (parameters->numpocs) {
      /* initialisation of POC */
      tcp->POC = 1;
      /* NOTE(review): the POC source entries are indexed with
       * numpocs_tile rather than the loop index i; this is only
       * equivalent when the matching entries are contiguous at the
       * front of parameters->POC -- confirm against upstream. */
      for (i = 0; i < parameters->numpocs; i++) {
        if((tileno == parameters->POC[i].tile - 1) || (parameters->POC[i].tile == -1)) {
          opj_poc_t *tcp_poc = &tcp->pocs[numpocs_tile];
          tcp_poc->resno0 = parameters->POC[numpocs_tile].resno0;
          tcp_poc->compno0 = parameters->POC[numpocs_tile].compno0;
          tcp_poc->layno1 = parameters->POC[numpocs_tile].layno1;
          tcp_poc->resno1 = parameters->POC[numpocs_tile].resno1;
          tcp_poc->compno1 = parameters->POC[numpocs_tile].compno1;
          tcp_poc->prg1 = parameters->POC[numpocs_tile].prg1;
          tcp_poc->tile = parameters->POC[numpocs_tile].tile;
          numpocs_tile++;
        }
      }
      tcp->numpocs = numpocs_tile -1 ;
    }else{
      tcp->numpocs = 0;
    }
    tcp->tccps = (opj_tccp_t*) opj_calloc(image->numcomps, sizeof(opj_tccp_t));
    for (i = 0; i < image->numcomps; i++) {
      opj_tccp_t *tccp = &tcp->tccps[i];
      tccp->csty = parameters->csty & 0x01; /* 0 => one precinct || 1 => custom precinct */
      tccp->numresolutions = parameters->numresolution;
      tccp->cblkw = int_floorlog2(parameters->cblockw_init);
      tccp->cblkh = int_floorlog2(parameters->cblockh_init);
      tccp->cblksty = parameters->mode;
      tccp->qmfbid = parameters->irreversible ? 0 : 1;
      tccp->qntsty = parameters->irreversible ? J2K_CCP_QNTSTY_SEQNT : J2K_CCP_QNTSTY_NOQNT;
      tccp->numgbits = 2;
      if (i == parameters->roi_compno) {
        tccp->roishift = parameters->roi_shift;
      } else {
        tccp->roishift = 0;
      }
      if(parameters->cp_cinema)
      {
        //Precinct size for lowest frequency subband=128
        tccp->prcw[0] = 7;
        tccp->prch[0] = 7;
        //Precinct size at all other resolutions = 256
        for (j = 1; j < tccp->numresolutions; j++) {
          tccp->prcw[j] = 8;
          tccp->prch[j] = 8;
        }
      }else{
        if (parameters->csty & J2K_CCP_CSTY_PRT) {
          int p = 0;
          /* precinct sizes are stored as log2 exponents, from the
           * highest resolution level down */
          for (j = tccp->numresolutions - 1; j >= 0; j--) {
            if (p < parameters->res_spec) {
              /* explicit precinct size given for this level */
              if (parameters->prcw_init[p] < 1) {
                tccp->prcw[j] = 1;
              } else {
                tccp->prcw[j] = int_floorlog2(parameters->prcw_init[p]);
              }
              if (parameters->prch_init[p] < 1) {
                tccp->prch[j] = 1;
              }else {
                tccp->prch[j] = int_floorlog2(parameters->prch_init[p]);
              }
            } else {
              /* past the last spec: halve the last specified size once
               * per remaining resolution level */
              int res_spec = parameters->res_spec;
              int size_prcw = parameters->prcw_init[res_spec - 1] >> (p - (res_spec - 1));
              int size_prch = parameters->prch_init[res_spec - 1] >> (p - (res_spec - 1));
              if (size_prcw < 1) {
                tccp->prcw[j] = 1;
              } else {
                tccp->prcw[j] = int_floorlog2(size_prcw);
              }
              if (size_prch < 1) {
                tccp->prch[j] = 1;
              } else {
                tccp->prch[j] = int_floorlog2(size_prch);
              }
            }
            p++;
            /*printf("\nsize precinct for level %d : %d,%d\n", j,tccp->prcw[j], tccp->prch[j]); */
          } //end for
        } else {
          /* no custom precincts: maximal precinct size (2^15) */
          for (j = 0; j < tccp->numresolutions; j++) {
            tccp->prcw[j] = 15;
            tccp->prch[j] = 15;
          }
        }
      }
      dwt_calc_explicit_stepsizes(tccp, image->comps[i].prec);
    }
  }
}
/* Encode an image into a J2K codestream written to cio: write the main
 * header (SOC/SIZ/COD/QCD and friends), then encode each tile, possibly
 * split into tile parts, and terminate with EOC. When cstr_info is non
 * NULL, codestream index information is recorded along the way.
 * Always returns true (the JPWL path aside, there is no failure path).
 * NOTE(review): the opj_malloc results for the cstr_info arrays are not
 * NULL checked -- confirm against upstream error handling. */
bool j2k_encode(opj_j2k_t *j2k, opj_cio_t *cio, opj_image_t *image, opj_codestream_info_t *cstr_info) {
  int tileno, compno;
  opj_cp_t *cp = NULL;
  opj_tcd_t *tcd = NULL; /* TCD component */
  j2k->cio = cio;
  j2k->image = image;
  cp = j2k->cp;
  /* j2k_dump_cp(stdout, image, cp); */
  /* INDEX >> record general codestream layout in the index */
  j2k->cstr_info = cstr_info;
  if (cstr_info) {
    int compno;
    cstr_info->tile = (opj_tile_info_t *) opj_malloc(cp->tw * cp->th * sizeof(opj_tile_info_t));
    cstr_info->image_w = image->x1 - image->x0;
    cstr_info->image_h = image->y1 - image->y0;
    cstr_info->prog = (&cp->tcps[0])->prg;
    cstr_info->tw = cp->tw;
    cstr_info->th = cp->th;
    cstr_info->tile_x = cp->tdx; /* new version parser */
    cstr_info->tile_y = cp->tdy; /* new version parser */
    cstr_info->tile_Ox = cp->tx0; /* new version parser */
    cstr_info->tile_Oy = cp->ty0; /* new version parser */
    cstr_info->numcomps = image->numcomps;
    cstr_info->numlayers = (&cp->tcps[0])->numlayers;
    cstr_info->numdecompos = (int*) opj_malloc(image->numcomps * sizeof(int));
    for (compno=0; compno < image->numcomps; compno++) {
      /* number of decompositions = resolutions - 1, taken from the
       * first component of the first tile */
      cstr_info->numdecompos[compno] = (&cp->tcps[0])->tccps->numresolutions - 1;
    }
    cstr_info->D_max = 0.0; /* ADD Marcela */
    cstr_info->main_head_start = cio_tell(cio); /* position of SOC */
    cstr_info->maxmarknum = 100;
    cstr_info->marker = (opj_marker_info_t *) opj_malloc(cstr_info->maxmarknum * sizeof(opj_marker_info_t));
    cstr_info->marknum = 0;
  }
  /* << INDEX */
  /* write the main header markers */
  j2k_write_soc(j2k);
  j2k_write_siz(j2k);
  j2k_write_cod(j2k);
  j2k_write_qcd(j2k);
  if(cp->cinema){
    /* digital cinema profiles write per-component COC/QCC up front */
    for (compno = 1; compno < image->numcomps; compno++) {
      j2k_write_coc(j2k, compno);
      j2k_write_qcc(j2k, compno);
    }
  }
  /* region-of-interest markers for components with a ROI shift */
  for (compno = 0; compno < image->numcomps; compno++) {
    opj_tcp_t *tcp = &cp->tcps[0];
    if (tcp->tccps[compno].roishift)
      j2k_write_rgn(j2k, compno, 0);
  }
  if (cp->comment != NULL) {
    j2k_write_com(j2k);
  }
  j2k->totnum_tp = j2k_calculate_tp(cp,image->numcomps,image,j2k);
  /* TLM Marker*/
  if(cp->cinema){
    j2k_write_tlm(j2k);
    if (cp->cinema == CINEMA4K_24) {
      j2k_write_poc(j2k);
    }
  }
  /* uncomment only for testing JPSEC marker writing */
  /* j2k_write_sec(j2k); */
  /* INDEX >> */
  if(cstr_info) {
    cstr_info->main_head_end = cio_tell(cio) - 1;
  }
  /* << INDEX */
  /**** Main Header ENDS here ***/
  /* create the tile encoder */
  tcd = tcd_create(j2k->cinfo);
  /* encode each tile */
  for (tileno = 0; tileno < cp->tw * cp->th; tileno++) {
    int pino;
    int tilepartno=0;
    /* UniPG>> */
    int acc_pack_num = 0;
    /* <<UniPG */
    opj_tcp_t *tcp = &cp->tcps[tileno];
    opj_event_msg(j2k->cinfo, EVT_INFO, "tile number %d / %d\n", tileno + 1, cp->tw * cp->th);
    j2k->curtileno = tileno;
    j2k->cur_tp_num = 0;
    tcd->cur_totnum_tp = j2k->cur_totnum_tp[j2k->curtileno];
    /* initialisation before tile encoding: the first tile allocates,
     * subsequent tiles only re-initialize */
    if (tileno == 0) {
      tcd_malloc_encode(tcd, image, cp, j2k->curtileno);
    } else {
      tcd_init_encode(tcd, image, cp, j2k->curtileno);
    }
    /* INDEX >> */
    if(cstr_info) {
      cstr_info->tile[j2k->curtileno].start_pos = cio_tell(cio) + j2k->pos_correction;
    }
    /* << INDEX */
    /* one pass per progression order change, each possibly split in
     * several tile parts */
    for(pino = 0; pino <= tcp->numpocs; pino++) {
      int tot_num_tp;
      tcd->cur_pino=pino;
      /*Get number of tile parts*/
      tot_num_tp = j2k_get_num_tp(cp,pino,tileno);
      tcd->tp_pos = cp->tp_pos;
      for(tilepartno = 0; tilepartno < tot_num_tp ; tilepartno++){
        j2k->tp_num = tilepartno;
        /* INDEX >> */
        if(cstr_info)
          cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_start_pos =
            cio_tell(cio) + j2k->pos_correction;
        /* << INDEX */
        j2k_write_sot(j2k);
        /* COC/QCC/POC are written in the first tile part of
         * non-cinema codestreams */
        if(j2k->cur_tp_num == 0 && cp->cinema == 0){
          for (compno = 1; compno < image->numcomps; compno++) {
            j2k_write_coc(j2k, compno);
            j2k_write_qcc(j2k, compno);
          }
          if (cp->tcps[tileno].numpocs) {
            j2k_write_poc(j2k);
          }
        }
        /* INDEX >> */
        if(cstr_info)
          cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_end_header =
            cio_tell(cio) + j2k->pos_correction + 1;
        /* << INDEX */
        /* write the tile part data */
        j2k_write_sod(j2k, tcd);
        /* INDEX >> */
        if(cstr_info) {
          cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_end_pos =
            cio_tell(cio) + j2k->pos_correction - 1;
          cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_start_pack =
            acc_pack_num;
          cstr_info->tile[j2k->curtileno].tp[j2k->cur_tp_num].tp_numpacks =
            cstr_info->packno - acc_pack_num;
          acc_pack_num = cstr_info->packno;
        }
        /* << INDEX */
        j2k->cur_tp_num++;
      }
    }
    if(cstr_info) {
      cstr_info->tile[j2k->curtileno].end_pos = cio_tell(cio) + j2k->pos_correction - 1;
    }
    /*
    if (tile->PPT) { // BAD PPT !!!
    FILE *PPT_file;
    int i;
    PPT_file=fopen("PPT","rb");
    fprintf(stderr,"%c%c%c%c",255,97,tile->len_ppt/256,tile->len_ppt%256);
    for (i=0;i<tile->len_ppt;i++) {
    unsigned char elmt;
    fread(&elmt, 1, 1, PPT_file);
    fwrite(&elmt,1,1,f);
    }
    fclose(PPT_file);
    unlink("PPT");
    }
    */
  }
  /* destroy the tile encoder */
  tcd_free_encode(tcd);
  tcd_destroy(tcd);
  opj_free(j2k->cur_totnum_tp);
  j2k_write_eoc(j2k);
  if(cstr_info) {
    cstr_info->codestream_size = cio_tell(cio) + j2k->pos_correction;
    /* UniPG>> */
    /* The following adjustment is done to adjust the codestream size */
    /* if SOD is not at 0 in the buffer. Useful in case of JP2, where */
    /* the first bunch of bytes is not in the codestream */
    cstr_info->codestream_size -= cstr_info->main_head_start;
    /* <<UniPG */
  }
#ifdef USE_JPWL
  /*
  preparation of JPWL marker segments
  */
  if(cp->epc_on) {
    /* encode according to JPWL */
    jpwl_encode(j2k, cio, image);
  }
#endif /* USE_JPWL */
  return true;
}
|
jalberti/cf-abacus | lib/aggregation/aggregator/src/index.js | 'use strict';
const { each, extend, filter, find, map, rest, object } = require('underscore');
const cluster = require('abacus-cluster');
const dataflow = require('abacus-dataflow');
const dbclient = require('abacus-dbclient');
const router = require('abacus-router');
const seqid = require('abacus-seqid');
const lrucache = require('abacus-lrucache');
const moment = require('abacus-moment');
const mconfigcb = require('abacus-metering-config');
const oauth = require('abacus-oauth');
const rconfigcb = require('abacus-rating-config');
const timewindow = require('abacus-timewindow');
const urienv = require('abacus-urienv');
const webapp = require('abacus-webapp');
const yieldable = require('abacus-yieldable');
const mconfig = yieldable(mconfigcb);
const rconfig = yieldable(rconfigcb);
/* eslint camelcase: 1 */
// Setup debug log
const debug = require('abacus-debug')('abacus-usage-aggregator');
const edebug = require('abacus-debug')('e-abacus-usage-aggregator');
// Lazily initialized OAuth system token, set in runCLI when running secured
let systemToken;

// Whether the service routes must be protected with OAuth
const secured = () => {
  return process.env.SECURED === 'true';
};
// Resolve service URIs from the environment (9882 is the default auth
// server port; the sink URI has no default)
const uris = urienv({
  auth_server: 9882,
  sink: undefined
});

// Return OAuth system scopes needed to write input docs
const iwscope = (udoc) =>
  secured()
    ? { system: ['abacus.usage.write'] }
    : undefined;

// Return OAuth system scopes needed to read input and output docs
const rscope = (udoc) =>
  secured()
    ? { system: ['abacus.usage.read'] }
    : undefined;
// Return the keys and times of our docs
// Input docs are keyed by organization
const ikey = (udoc) => udoc.organization_id;

// Input docs are timestamped with a unique sequential id
const itime = (udoc) => seqid();

// Reduction groups for an input doc: the org, the consumer within its
// space, the space itself, and the individual resource instance
const igroups = (udoc) => [
  udoc.organization_id,
  [udoc.organization_id, udoc.space_id, udoc.consumer_id || 'UNKNOWN'].join('/'),
  [udoc.organization_id, udoc.space_id].join('/'),
  [
    udoc.organization_id,
    udoc.resource_instance_id,
    udoc.consumer_id || 'UNKNOWN',
    udoc.plan_id,
    udoc.metering_plan_id,
    udoc.rating_plan_id,
    udoc.pricing_plan_id
  ].join('/')
];
// Output doc keys, mirroring igroups: org, consumer (org/space/consumer),
// space (org/space) and the individual resource instance key
const okeys = (udoc, ikey) => {
  const consumer = udoc.consumer_id || 'UNKNOWN';
  return [
    udoc.organization_id,
    [udoc.organization_id, udoc.space_id, consumer].join('/'),
    [udoc.organization_id, udoc.space_id].join('/'),
    [
      udoc.organization_id,
      udoc.resource_instance_id,
      consumer,
      udoc.plan_id,
      udoc.metering_plan_id,
      udoc.rating_plan_id,
      udoc.pricing_plan_id
    ].join('/')
  ];
};

// Sink keys: the org and consumer docs are posted under the account id,
// the space doc is not posted
const skeys = (udoc) => [udoc.account_id, udoc.account_id, undefined];
// Configure reduction result doc sampling, to store reduction results
// in a single doc per min, hour or day for example instead of creating
// a new doc for each new result
const sampling = process.env.SAMPLING;

// Output doc times: the org/consumer/space docs are sampled, the resource
// instance doc is keyed by its zero-padded end/start times
const otimes = (udoc, itime) => [
  seqid.sample(itime, sampling),
  seqid.sample(itime, sampling),
  seqid.sample(itime, sampling),
  map([udoc.end, udoc.start], seqid.pad16).join('/')
];

// Sink times for the posted docs
// NOTE(review): stimes has two entries while skeys has three -- confirm
// the dataflow sink tolerates the shorter list.
const stimes = (udoc, itime) => [seqid.sample(itime, sampling), undefined];
// Parse the slack window configuration from the SLACK environment
// variable (e.g. '10m' or '3D': a width followed by a scale letter),
// falling back to a 10 minute default.
// NOTE(review): when SLACK is set the returned width is the matched
// *string* (e.g. '3'), while the default width is the number 10 --
// moment.add accepts both, but the types differ.
const slack = () => {
  const conf = process.env.SLACK;
  if (conf && /^[0-9]+[MDhms]$/.test(conf))
    return {
      scale: conf.charAt(conf.length - 1),
      width: conf.match(/[0-9]+/)[0]
    };
  return {
    scale: 'm',
    width: 10
  };
};
// Time dimension keys corresponding to their respective window positions
// [Second, Minute, Hour, Day, Month], keyed by the scale letters used in
// the SLACK configuration; the values are moment.js duration unit names
const dimensionsMap = {
  s: 'seconds',
  m: 'minutes',
  h: 'hours',
  D: 'days',
  M: 'months'
};
// Checks if a consumer usage should be pruned from the aggregated usage
// based upon whether the current time exceeds 2 months + slack.
// time is the (string) time component of a doc id; returns true while
// the usage is still within the retention window.
const shouldNotPrune = (time) => {
  const t = parseInt(time);
  const d = moment.utc(t);
  // Retention window: two months plus the configured slack
  d.add(2, 'months');
  d.add(slack().width, dimensionsMap[slack().scale]);
  return d.valueOf() > moment.now();
};
// Find an element with the specified id in a list, and lazily construct and
// add a new one if no element is found.
// l - the list to search and extend, prop - the property name to match,
// id - the value to match, cons - constructor invoked as new cons(id) on
// a miss.
const lazyCons = (l, prop, id, cons) => {
  // Early-exit linear search: the original filtered the whole list and
  // allocated an intermediate array just to take the first match
  for (const e of l)
    if (e[prop] === id) return e;
  const e = new cons(id);
  l.push(e);
  return e;
};
// Define the objects used to represent a hierarchy of aggregated usage
// inside an organization

// Represent an org, aggregated resource usage and the spaces it contains
const Org = function(id) {
  extend(this, {
    organization_id: id,
    resources: [],
    spaces: []
  });
};
const newOrg = function(id) {
  return new Org(id);
};

// Find or lazily create the aggregated usage of a resource within the org
Org.prototype.resource = function(id) {
  return lazyCons(this.resources, 'resource_id', id, Org.Resource);
};

// Record a space reference on the org, stamped with the time component
// of its latest space aggregated usage doc
Org.prototype.space = function(id, doctime) {
  const space = lazyCons(this.spaces, 'space_id', id, Org.Space);
  space.t = doctime;
};

// A space reference held by an org (space id plus doc time stamp t)
Org.Space = function(id) {
  extend(this, {
    space_id: id
  });
};

// Represent a space, its consumers and its aggregated resource usage
const Space = function(id) {
  extend(this, {
    space_id: id,
    consumers: [],
    resources: []
  });
};
const newSpace = function(id) {
  return new Space(id);
};

// Record a consumer reference on the space and prune references whose
// doc time falls outside the retention window (2 months + slack)
Space.prototype.consumer = function(id, doctime) {
  // Construct or retrieve the consumer object
  const consumer = lazyCons(this.consumers, 'id', id, Space.Consumer);
  consumer.t = doctime;
  this.consumers = filter(this.consumers, (c) => shouldNotPrune(c.t.match(/(\d+)/)[0]));
};

// Represent a consumer reference
// id represents consumer_id
// t represents time component of the consumer aggregated doc id.
Space.Consumer = function(id) {
  extend(this, {
    id: id
  });
};

// Represent a resource aggregated within a space
Space.Resource = function(id) {
  extend(this, {
    resource_id: id,
    plans: []
  });
};
Space.prototype.resource = function(id) {
  return lazyCons(this.resources, 'resource_id', id, Space.Resource);
};

// Represent a plan within a space resource; the compound id
// plan/metering/rating/pricing is split into its component plan ids
Space.Plan = function(id) {
  extend(
    this,
    {
      plan_id: id,
      aggregated_usage: []
    },
    object(['metering_plan_id', 'rating_plan_id', 'pricing_plan_id'], rest(id.split('/')))
  );
};
Space.Resource.prototype.plan = function(id) {
  return lazyCons(this.plans, 'plan_id', id, Space.Plan);
};
Space.Plan.prototype.metric = function(metric) {
  return lazyCons(this.aggregated_usage, 'metric', metric, Org.Metric);
};

// Represent a resource instance reference
Space.ResourceInstance = function(rid) {
  extend(this, {
    id: rid
  });
};
// Represent a consumer and aggregated resource usage
const Consumer = function(id) {
  extend(this, {
    consumer_id: id,
    resources: []
  });
};
const newConsumer = function(id) {
  return new Consumer(id);
};

// Represent a resource and its aggregated metric usage
Org.Resource = function(id) {
  extend(this, {
    resource_id: id,
    plans: []
  });
};
Org.Resource.prototype.plan = function(id) {
  return lazyCons(this.plans, 'plan_id', id, Org.Plan);
};

// Represent a plan and its aggregated metric usage; the compound id
// plan/metering/rating/pricing is split into its component plan ids
Org.Plan = function(id) {
  extend(
    this,
    {
      plan_id: id,
      aggregated_usage: []
    },
    object(['metering_plan_id', 'rating_plan_id', 'pricing_plan_id'], rest(id.split('/')))
  );
};
Org.Plan.prototype.metric = function(metric) {
  return lazyCons(this.aggregated_usage, 'metric', metric, Org.Metric);
};

// Represent a metric based aggregation windows
// windows holds one list of cells per time dimension
// [seconds, minutes, hours, days, months]
Org.Metric = function(metric) {
  extend(this, {
    metric: metric,
    windows: [[null], [null], [null], [null], [null]]
  });
};

// Represent a resource aggregated under a consumer
Consumer.Resource = function(id) {
  extend(this, {
    resource_id: id,
    plans: []
  });
};
Consumer.prototype.resource = function(id) {
  return lazyCons(this.resources, 'resource_id', id, Consumer.Resource);
};

// Represent a plan under a consumer resource, tracking both the metric
// aggregation windows and the contributing resource instances
Consumer.Plan = function(id) {
  extend(
    this,
    {
      plan_id: id,
      aggregated_usage: [],
      resource_instances: []
    },
    object(['metering_plan_id', 'rating_plan_id', 'pricing_plan_id'], rest(id.split('/')))
  );
};
Consumer.Resource.prototype.plan = function(id) {
  return lazyCons(this.plans, 'plan_id', id, Consumer.Plan);
};
Consumer.Plan.prototype.metric = function(metric) {
  return lazyCons(this.aggregated_usage, 'metric', metric, Org.Metric);
};

// Represent a resource instance reference
Consumer.ResourceInstance = function(rid) {
  extend(this, {
    id: rid
  });
};

// Record a resource instance reference (with its doc time t and processed
// time p) and prune references older than the retention window
Consumer.Plan.prototype.resource_instance = function(rid, time, processed) {
  const instance = lazyCons(this.resource_instances, 'id', rid, Consumer.ResourceInstance);
  instance.t = time;
  instance.p = processed;
  this.resource_instances = filter(this.resource_instances, (ri) => shouldNotPrune(ri.p));
  return instance;
};
// Revive an org object: re-attach the prototype methods that are lost
// when an org doc round-trips through JSON serialization
const reviveOrg = (org) => {
  org.resource = Org.prototype.resource;
  org.space = Org.prototype.space;
  each(org.resources, (resource) => {
    resource.plan = Org.Resource.prototype.plan;
    // Distinct name for the nested callback parameter; the original
    // shadowed the outer parameter with a second 's'
    each(resource.plans, (plan) => {
      plan.metric = Org.Plan.prototype.metric;
    });
  });
  each(org.spaces, (space) => {
    space.consumer = Org.Space.prototype.consumer;
  });
  return org;
};
// Revive a consumer object: re-attach the prototype methods lost through
// JSON serialization
const reviveCon = (con) => {
  con.resource = Consumer.prototype.resource;
  each(con.resources, (r) => {
    r.plan = Consumer.Resource.prototype.plan;
    each(r.plans, (p) => {
      p.metric = Consumer.Plan.prototype.metric;
      p.resource_instance = Consumer.Plan.prototype.resource_instance;
    });
  });
  return con;
};

// Revive a space object: re-attach the prototype methods lost through
// JSON serialization
// NOTE(review): Space.Plan.prototype.resource_instance is not defined in
// this module (only Consumer.Plan has one), so p.resource_instance is
// assigned undefined here -- confirm whether that is intentional.
const reviveSpace = (space) => {
  space.resource = Space.prototype.resource;
  space.consumer = Space.prototype.consumer;
  each(space.resources, (r) => {
    r.plan = Space.Resource.prototype.plan;
    each(r.plans, (p) => {
      p.metric = Space.Plan.prototype.metric;
      p.resource_instance = Space.Plan.prototype.resource_instance;
    });
  });
  return space;
};
// find info with error and reason to redirect
// usage to error db and stop processing it to the next pipeline.
// Returns the first lookup result carrying an error, or undefined when
// all lookups succeeded.
const findError = (info) => find(info, (i) => i.error);
// Purge previous quantities: strip the previous_quantity bookkeeping
// field from every time window cell of an aggregated usage doc.
// The original duplicated the same triple-nested walk for the resource
// and the plan levels; the common walk is factored into purgeWindows.
const purgeOldQuantities = (doc) => {
  // Delete previous_quantity from each non-null cell of each window of
  // each aggregated metric
  const purgeWindows = (aggregatedUsage) => {
    (aggregatedUsage || []).forEach((au) =>
      (au.windows || []).forEach((tw) =>
        (tw || []).forEach((q) => {
          if (q) delete q.previous_quantity;
        })
      )
    );
  };

  // Purge both the resource level and the per-plan aggregated usage
  const purgeInResource = (r) => {
    if (r.aggregated_usage) purgeWindows(r.aggregated_usage);
    (r.plans || []).forEach((p) => purgeWindows(p.aggregated_usage));
  };

  (doc.resources || []).forEach(purgeInResource);
  // if (doc.spaces) each(doc.spaces, (s) => each(s.resources, (r) => purgeInResource(r)));
};
// Return the configured price for the named metric attached to the
// usage document.
// Uses Array.find: the original filtered the whole list and allocated an
// intermediate array just to read the first match.
const price = (pricings, metric) => pricings.find((m) => m.name === metric).price;

// Return the rate function for a given metric of the rating plan
const ratefn = (metrics, metric) => metrics.find((m) => m.name === metric).ratefn;

// Return the aggregate function for a given metric of the metering plan
const aggrfn = (metrics, metric) => metrics.find((m) => m.name === metric).aggregatefn;
// Max age of memoized function results, configurable through
// RESULTS_CACHE_MAX_AGE (default 2 minutes)
const maxAge = process.env.RESULTS_CACHE_MAX_AGE ? parseInt(process.env.RESULTS_CACHE_MAX_AGE) : 120000;

const lruOpts = {
  max: 100,
  maxAge: maxAge
};

// Cache of the memoized aggregate/rate functions, keyed by plan + metric
const functionCache = lrucache(lruOpts);

// Hash key used to memoize aggregate function calls
// NOTE(review): aggCell and accCell are not part of the hash, so cached
// results are reused even when the cell accessors differ -- confirm the
// configured aggregate functions do not depend on them across calls.
const aggregateHashFunction = (aggregator, previous, current, aggCell, accCell) =>
  `${JSON.stringify(aggregator)}${JSON.stringify(previous)}${JSON.stringify(current)}`;

// Hash key used to memoize rate function calls
const rateHashFunction = (metricPrice, quantity) => `${metricPrice}${JSON.stringify(quantity)}`;
// Return the memoized aggregate function for a metering plan metric,
// caching the memoized wrapper itself per plan + metric so repeated
// usage docs reuse the same memoization state
const aggregationFunction = (meteringPlanId, metrics, metricName) => {
  const aggregationFnKey = `${meteringPlanId}${metricName}aggrFn`;
  let aggregationFn = functionCache.get(aggregationFnKey);
  if (!aggregationFn) {
    aggregationFn = lrucache.memoize(aggrfn(metrics, metricName), aggregateHashFunction, lruOpts);
    functionCache.set(aggregationFnKey, aggregationFn);
  }
  return aggregationFn;
};

// Return the memoized rate function for a rating plan metric
const rateFunction = (ratingPlanId, metrics, metricName) => {
  const rateFnKey = `${ratingPlanId}${metricName}rateFn`;
  let rateFn = functionCache.get(rateFnKey);
  if (!rateFn) {
    rateFn = lrucache.memoize(ratefn(metrics, metricName), rateHashFunction, lruOpts);
    functionCache.set(rateFnKey, rateFn);
  }
  return rateFn;
};
// Retrieve the space referenced by the usage doc from the org doc, or
// create a new one. When found, the space is removed from the org doc
// and returned as a revived space usage object stamped with the usage
// start/end and organization.
const getSpace = (orgDoc, usageDoc) => {
  const spaceId = usageDoc.space_id;
  const f = filter(orgDoc.spaces, (s) => s.space_id === spaceId);
  if (f.length) {
    // remove the space from the org document
    orgDoc.spaces.splice(orgDoc.spaces.indexOf(f[0]), 1);
    // Deep-clone the stored space, then extend the clone. The original
    // code passed the extension object as the second argument of
    // JSON.parse (its reviver parameter, ignored when not a function),
    // so start/end/organization_id were silently dropped.
    return reviveSpace(extend(JSON.parse(JSON.stringify(f[0])), {
      start: usageDoc.start,
      end: usageDoc.end,
      organization_id: usageDoc.organization_id
    }));
  }
  return newSpace(spaceId);
};
// Aggregate usage and return new aggregated usage
// aggrs - the current [org, consumer, space] aggregated docs (entries are
// undefined on first use), u - the incoming accumulated usage doc.
// Yields the metering/rating plan lookups, then returns the new
// [org, consumer, space, dedupe] docs, or a single doc extended with the
// business error when a plan lookup fails.
const aggregate = function*(aggrs, u) {
  debug('Aggregating usage %o from %d and new usage %o from %d', aggrs[0], aggrs[0] ? aggrs[0].end : 0, u, u.end);

  // Aggregate usage into two docs, the first one contains usage at the
  // org level, the second one contains usage at the consumer level
  const a = aggrs[0];
  const c = aggrs[1];
  const s = aggrs[2];

  const meteringPlanId = u.metering_plan_id;
  const ratingPlanId = u.rating_plan_id;

  // Retrieve the metering plan and rating plan
  const [mplan, rplan] = yield [
    mconfig(meteringPlanId, systemToken && systemToken()),
    rconfig(ratingPlanId, systemToken && systemToken())
  ];

  // find errors
  const e = findError([mplan, rplan]);

  if (e) {
    // Redirect the usage to the error db and stop processing
    debug('The usage submitted has business errors %o', e);
    return [extend({}, u, e)];
  }

  // Compute the aggregated usage time and new usage time
  const newend = u.processed;
  const docend = u.end;

  // Deep clone and revive the org aggregated usage object behavior
  const newa = a
    ? extend(reviveOrg(JSON.parse(JSON.stringify(a))), {
      account_id: u.account_id,
      start: u.start,
      end: u.end,
      resource_instance_id: u.resource_instance_id,
      consumer_id: u.consumer_id,
      resource_id: u.resource_id,
      plan_id: u.plan_id,
      pricing_country: u.pricing_country,
      prices: u.prices
    })
    : extend(newOrg(u.organization_id), {
      account_id: u.account_id,
      start: u.start,
      end: u.end,
      resource_instance_id: u.resource_instance_id,
      consumer_id: u.consumer_id,
      resource_id: u.resource_id,
      plan_id: u.plan_id,
      pricing_country: u.pricing_country,
      prices: u.prices
    });

  // Deep clone and revive the consumer aggregated usage object behavior
  const newc = c ? reviveCon(JSON.parse(JSON.stringify(c))) : newConsumer(u.consumer_id || 'UNKNOWN');
  extend(newc, {
    start: u.start,
    end: u.end,
    organization_id: u.organization_id,
    resource_instance_id: u.resource_instance_id,
    resource_id: u.resource_id,
    plan_id: u.plan_id,
    pricing_country: u.pricing_country,
    prices: u.prices
  });

  // Deep clone and revive the space aggregated usage object behavior,
  // falling back to the space reference stored in the org doc
  const news = s ? reviveSpace(JSON.parse(JSON.stringify(s))) : getSpace(newa, u);
  extend(news, {
    start: u.start,
    end: u.end,
    organization_id: u.organization_id
  });

  // An empty doc only used to detect duplicate usage
  const iddoc = {};

  // Shift the time windows of the previous aggregations up to the
  // processed time of the new usage
  timewindow.shift(newa, a, u.processed);
  timewindow.shift(newc, c, u.processed);
  timewindow.shift(news, s, u.processed);

  // Drop the previous_quantity bookkeeping left by earlier aggregations
  purgeOldQuantities(newa);
  purgeOldQuantities(newc);
  purgeOldQuantities(news);

  // Record the space reference on the org doc
  newa.space(u.space_id, seqid.sample(u.processed_id, sampling));

  // Go through the incoming accumulated usage metrics
  each(u.accumulated_usage, (accumulatedUsage) => {
    const metricName = accumulatedUsage.metric;
    const aggregationFn = aggregationFunction(meteringPlanId, mplan.metering_plan.metrics, metricName);
    const metricPrice = price(u.prices.metrics, metricName);
    const rateFn = rateFunction(ratingPlanId, rplan.rating_plan.metrics, metricName);

    // getCell on incoming usage's time windows
    const accGetCell = timewindow.cellfn(accumulatedUsage.windows, newend, docend);

    // Aggregate the incoming metric windows into a given aggregated
    // metric am, then re-rate the cost of each resulting cell
    const aggr = (am) => {
      // getCell on previous aggregated usage's time windows
      const aggGetCell = timewindow.cellfn(am.windows, newend, docend);

      // We're mutating the input windows property here
      // but it's really the simplest way to apply the aggregation formula
      am.windows = map(am.windows, (w, i) => {
        if (!timewindow.isDimensionSupported(timewindow.dimensions[i]))
          return [null];

        // If the number of slack windows in the aggregated usage is less than
        // the number in the incoming accumulated usage, push until they equal
        if (w.length < accumulatedUsage.windows[i].length)
          each(Array(accumulatedUsage.windows[i].length - w.length), () => w.push(null));

        const twi = timewindow.timeWindowIndex(w, newend, docend, timewindow.dimensions[i]);

        /* eslint complexity: [1, 6] */
        const quantities = map(w, (q, j) => {
          // Instead of returning undefined or null, returning previously aggregated quantity
          // TODO: Calculation has to use slack window to determine what to do here
          if (!accumulatedUsage.windows[i][j] || twi !== j) return q;

          const newQuantity = aggregationFn(
            (q && q.quantity) || 0,
            accumulatedUsage.windows[i][j].quantity.previous || 0,
            accumulatedUsage.windows[i][j].quantity.current,
            aggGetCell,
            accGetCell
          );

          // Throw error on: NaN, undefined and null results with previous aggregation
          if (q && q.quantity && !newQuantity)
            throw new Error('Aggregation resulted in invalid value: ' + newQuantity);

          return {
            quantity: newQuantity,
            previous_quantity: q && q.quantity ? q.quantity : null
          };
        });

        // Re-rate the cost of each aggregated (non-null) cell
        return map(quantities, (q) =>
          q
            ? extend(q, { cost: q.quantity ? rateFn(metricPrice, q.quantity) : 0 })
            : null
        );
      });
    };

    // Apply the aggregate function to the aggregated usage tree
    const pid = [u.plan_id, u.metering_plan_id, u.rating_plan_id, u.pricing_plan_id].join('/');

    aggr(
      newa
        .resource(u.resource_id)
        .plan(pid)
        .metric(metricName)
    );
    aggr(
      news
        .resource(u.resource_id)
        .plan(pid)
        .metric(metricName)
    );

    // Apply the aggregate function to the consumer usage tree
    news.consumer(u.consumer_id || 'UNKNOWN', seqid.sample(u.processed_id, sampling));
    aggr(
      newc
        .resource(u.resource_id)
        .plan(pid)
        .metric(metricName)
    );
    newc
      .resource(u.resource_id)
      .plan(pid)
      .resource_instance(u.resource_instance_id, dbclient.t(u.accumulated_usage_id), u.processed);
  });

  // Shift the windows once more, to the processed id time
  timewindow.shift(newa, u, parseInt(u.processed_id));
  timewindow.shift(newc, u, parseInt(u.processed_id));
  timewindow.shift(news, u, parseInt(u.processed_id));

  // Remove aggregated usage object behavior and return
  const jsa = JSON.parse(JSON.stringify([newa, newc, news, iddoc]));
  debug('New aggregated usage %o', jsa);
  return jsa;
};
// Create an aggregator service app
// token - the OAuth system token used to authenticate posts to the sink
const aggregator = (token) => {
  // Configure Node cluster to use a single process as we want to serialize
  // accumulation requests per db partition and app instance
  cluster.singleton();

  // Create the Webapp
  const app = webapp();

  // Secure metering and batch routes using an OAuth bearer access token
  if (secured()) app.use(/^\/v1\/metering|^\/batch$/, oauth.validator(process.env.JWTKEY, process.env.JWTALGO));

  // Sink authentication callback, a no-op when not running secured
  const authFn = () => secured() ? token : () => {};

  // The dataflow reducer driving the aggregation: accumulated usage input
  // docs are reduced into aggregated usage output docs and posted to the
  // configured sink
  const reducer = dataflow.reducer(aggregate, {
    input: {
      type: 'accumulated_usage',
      post: '/v1/metering/accumulated/usage',
      get: '/v1/metering/accumulated/usage/t/:tseq/k/:korganization_id',
      dbname: 'abacus-aggregator-accumulated-usage',
      wscope: iwscope,
      rscope: rscope,
      key: ikey,
      time: itime,
      groups: igroups
    },
    output: {
      type: 'aggregated_usage',
      get: '/v1/metering/aggregated/usage/k/:korganization_id/t/:tseq',
      dbname: 'abacus-aggregator-aggregated-usage',
      rscope: rscope,
      keys: okeys,
      times: otimes
    },
    sink: {
      host: process.env.SINK ? uris.sink : undefined,
      apps: process.env.AGGREGATOR_SINK_APPS,
      posts: ['/v1/metering/aggregated/usage', '/v1/metering/aggregated/usage', undefined],
      keys: skeys,
      times: stimes,
      authentication: authFn()
    }
  });

  app.use(reducer);
  app.use(router.batch(app));

  app.reducer = reducer;
  return app;
};
// Create and start the aggregator app, then replay any unprocessed docs
const startApp = (token) => {
  const app = aggregator(token);
  app.listen();

  // Replay only from the master process (or when clustering is disabled)
  // so the same docs are not replayed by multiple workers
  if (!cluster.isWorker() || cluster.isDisabled()) {
    debug('Starting replay ...');
    dataflow.replay(app.reducer, 0, (err) => {
      if (err) edebug('Replay failed with error %o', err);
      else debug('Replay completed successfully');
    });
  }
};

// Command line entry point: when secured, first acquire a system OAuth
// token, then start the app with it
const runCLI = () => {
  if (secured()) {
    systemToken = oauth.cache(
      uris.auth_server,
      process.env.CLIENT_ID,
      process.env.CLIENT_SECRET,
      'abacus.usage.write abacus.usage.read'
    );

    systemToken.start((err) => {
      if (err) edebug('Unable to obtain oAuth token due to %o', err);
      else startApp(systemToken);
    });
  } else startApp();
};

// Export our public functions
module.exports = aggregator;
module.exports.aggregate = aggregate;
module.exports.newOrg = newOrg;
module.exports.reviveOrg = reviveOrg;
module.exports.runCLI = runCLI;
|
oliveirajonathas/python_estudos | pacote-download/pythonProject/exercicios_python_guanabara/ex047.py | <gh_stars>0
"""
Faça um programa que mostre na tela todos os números pares entre 1 e 50
"""
for i in range(1, 51):
if i % 2 == 0:
print(i, end=' ')
print('FIM')
|
Archive-42/a-whole-bunch-o-gatsby-templates | gatsby-theme-simple-docs/src/tokens/colors.js | export default {
black: '#211111',
gray100: '#faeaea',
gray200: '#dacaca',
gray700: '#645454',
primary: '#452323',
white: '#fffefe'
};
|
Peefy/CLRS_dugu_code-master | src/chapter32/__init__.py |
# python src/chapter32/chapter32note.py
# python3 src/chapter32/chapter32note.py
|
import moment from "moment/min/moment-with-locales";

// Fixture keyed by dates of the *current* month/year, formatted "DD.MM.YY"
// (only the day-of-month is pinned via .set("date", n)).
// NOTE(review): because moment() is "now", these keys shift every month —
// presumably intentional for demo data; confirm with consumers.
export default {
  [moment()
    .set("date", 6)
    .format("DD.MM.YY")]: {},
  [moment()
    .set("date", 13)
    .format("DD.MM.YY")]: {
    open: []
  }
};
|
MichaelWolloch/aiida-vasp | aiida_vasp/parsers/file_parsers/vasprun.py | <filename>aiida_vasp/parsers/file_parsers/vasprun.py
"""
vasprun parser.
---------------
The file parser that handles the parsing of vasprun.xml files.
"""
# pylint: disable=too-many-public-methods, protected-access
import sys
import numpy as np
from parsevasp.vasprun import Xml
from parsevasp.kpoints import Kpoint
from parsevasp import constants as parsevaspct
from aiida_vasp.parsers.file_parsers.parser import BaseFileParser, SingleFile
from aiida_vasp.utils.compare_bands import get_band_properties
# Default parser options used when no ParserSettings override is supplied:
# - quantities_to_parse: quantity names _parse_file() will try to extract,
# - energy_type: which total-energy entries to fetch from the xml,
# - electronic_step_energies: whether per-electronic-step energies are kept.
DEFAULT_OPTIONS = {
    'quantities_to_parse': [
        'structure',
        'eigenvalues',
        'dos',
        'bands',
        'kpoints',
        'occupancies',
        'trajectory',
        'energies',
        'projectors',
        'dielectrics',
        'born_charges',
        'hessian',
        'dynmat',
        'forces',
        'stress',
        'total_energies',
        'maximum_force',
        'maximum_stress',
        'band_properties',
        #'run_status',
        'version',
    ],
    'energy_type': ['energy_extrapolated'],
    'electronic_step_energies': False
}
class VasprunParser(BaseFileParser):
    """
    Interface to parsevasp's xml parser.

    Wraps a single vasprun.xml file; each supported quantity is exposed as a
    property that reads from the cached parsevasp ``Xml`` object. A quantity
    that cannot be extracted returns ``None`` and records an exit code.
    """

    PARSABLE_ITEMS = {
        'structure': {
            'inputs': [],
            'name': 'structure',
            'prerequisites': [],
            'alternatives': ['poscar-structure']
        },
        'eigenvalues': {
            'inputs': [],
            'name': 'eigenvalues',
            'prerequisites': [],
            'alternatives': ['eigenval-eigenvalues']
        },
        'dos': {
            'inputs': [],
            'name': 'dos',
            'prerequisites': [],
            'alternatives': ['doscar-dos']
        },
        'kpoints': {
            'inputs': [],
            'name': 'kpoints',
            'prerequisites': [],
            'alternatives': ['kpoints-kpoints']
        },
        'occupancies': {
            'inputs': [],
            'name': 'occupancies',
            'prerequisites': [],
        },
        'trajectory': {
            'inputs': [],
            'name': 'trajectory',
            'prerequisites': [],
        },
        'energies': {
            'inputs': [],
            'name': 'energies',
            'prerequisites': [],
        },
        'total_energies': {
            'inputs': [],
            'name': 'total_energies',
            'prerequisites': [],
        },
        'projectors': {
            'inputs': [],
            'name': 'projectors',
            'prerequisites': [],
        },
        'dielectrics': {
            'inputs': [],
            'name': 'dielectrics',
            'prerequisites': [],
        },
        'stress': {
            'inputs': [],
            'name': 'stress',
            'prerequisites': [],
        },
        'forces': {
            'inputs': [],
            'name': 'forces',
            'prerequisites': [],
        },
        'born_charges': {
            'inputs': [],
            'name': 'born_charges',
            'prerequisites': [],
        },
        'hessian': {
            'inputs': [],
            'name': 'hessian',
            'prerequisites': [],
        },
        'dynmat': {
            'inputs': [],
            'name': 'dynmat',
            'prerequisites': [],
        },
        'fermi_level': {
            'inputs': [],
            'name': 'fermi_level',
            'prerequisites': [],
        },
        'maximum_force': {
            'inputs': [],
            'name': 'maximum_force',
            'prerequisites': []
        },
        'maximum_stress': {
            'inputs': [],
            'name': 'maximum_stress',
            'prerequisites': []
        },
        'band_properties': {
            'inputs': [],
            'name': 'band_properties',
            'prerequisites': [],
        },
        # 'run_status': {
        #     'inputs': [],
        #     'name': 'run_status',
        #     'prerequisites': [],
        # },
        'version': {
            'inputs': [],
            'name': 'version',
            'prerequisites': [],
        }
    }

    def __init__(self, *args, **kwargs):
        """
        Initialize vasprun.xml parser

        file_path : str
            File path.
        data : SingleFileData
            AiiDA Data class install to store a single file.
        settings : ParserSettings
        exit_codes : CalcJobNode.process_class.exit_codes
        """
        super(VasprunParser, self).__init__(*args, **kwargs)
        self._xml = None
        self._xml_truncated = False
        self._settings = kwargs.get('settings', None)
        self._exit_codes = kwargs.get('exit_codes', None)
        if 'file_path' in kwargs:
            self._init_xml(kwargs['file_path'])
        if 'data' in kwargs:
            self._init_xml(kwargs['data'].get_file_abs_path())

    def _init_xml(self, path):
        """Create parsevasp Xml instance"""
        self._data_obj = SingleFile(path=path)
        # Since vasprun.xml can be fairly large, we will parse it only
        # once and store the parsevasp Xml object.
        try:
            self._xml = Xml(file_path=path, k_before_band=True, logger=self._logger)
            # Let us also check if the xml was truncated as the parser uses lxml and its
            # recovery mode in case we can use some of the results.
            self._xml_truncated = self._xml.truncated
        except SystemExit:
            self._logger.warning('Parsevasp exited abruptly. Returning None.')
            self._xml = None

    def _parse_file(self, inputs):
        """Parse the quantities related to this file parser."""
        # Since all quantities will be returned by properties, we can't pass
        # inputs as a parameter, so we store them in self._parsed_data
        for key, value in inputs.items():
            self._parsed_data[key] = value

        quantities_to_parse = DEFAULT_OPTIONS.get('quantities_to_parse')
        if self._settings is not None and self._settings.quantity_names_to_parse:
            quantities_to_parse = self._settings.quantity_names_to_parse

        result = {}

        if self._xml is None:
            # parsevasp threw an exception, which means vasprun.xml could not be parsed.
            for quantity in quantities_to_parse:
                if quantity in self._parsable_items:
                    result[quantity] = None
            return result

        for quantity in quantities_to_parse:
            if quantity in self._parsable_items:
                result[quantity] = getattr(self, quantity)

        # Now we make sure that if some of the requested quantities sets an error during parsing and
        # the xml file is in recover mode, the calculation is simply garbage. Also, exit_code is not always set, or
        # its status can be zero.
        if self._exit_code is None:
            self._exit_code = self._exit_codes.NO_ERROR
        if self._exit_code.status:
            if (self._xml_truncated and self._exit_code.status == self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.status):
                self._exit_code = self._exit_codes.ERROR_RECOVERY_PARSING_OF_XML_FAILED.format(quantities=list(result.keys()))

        return result

    @property
    def version(self):
        """Fetch the VASP version from parsevasp and return it as a string object."""
        # fetch version
        version = self._xml.get_version()
        if version is None:
            # version not present
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        return version

    @property
    def eigenvalues(self):
        """Fetch eigenvalues from parsevasp."""
        # fetch eigenvalues
        eigenvalues = self._xml.get_eigenvalues()
        if eigenvalues is None:
            # eigenvalues not present
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        eigen = []
        eigen.append(eigenvalues.get('total'))
        if eigen[0] is None:
            # spin decomposed?
            eigen[0] = eigenvalues.get('up')
            eigen.append(eigenvalues.get('down'))
        if eigen[0] is None:
            # safety, should not really happen?
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        return eigen

    @property
    def occupancies(self):
        """Fetch occupancies from parsevasp."""
        # fetch occupancies
        occupancies = self._xml.get_occupancies()
        if occupancies is None:
            # occupancies not present, should not really happen?
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        occ = []
        occ.append(occupancies.get('total'))
        if occ[0] is None:
            # spin decomposed
            occ[0] = occupancies.get('up')
            occ.append(occupancies.get('down'))
        if occ[0] is None:
            # should not really happen
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        return occ

    @property
    def kpoints(self):
        """Fetch the kpoints from parsevasp an store in KpointsData."""
        kpts = self._xml.get_kpoints()
        kptsw = self._xml.get_kpointsw()
        kpoints_data = None
        if (kpts is not None) and (kptsw is not None):
            # create a KpointsData object and store k-points
            kpoints_data = {}
            kpoints_data['mode'] = 'explicit'
            kpoints_data['points'] = []
            for kpt, kptw in zip(kpts, kptsw):
                kpoints_data['points'].append(Kpoint(kpt, weight=kptw))
        return kpoints_data

    @property
    def structure(self):
        """
        Fetch a given structure.

        Which structure to fetch is controlled by inputs.

        eFL: Need to clean this so that we can set different
        structures to pull from the outside. Could be usefull not
        pulling the whole trajectory.

        Currently defaults to the last structure.
        """
        return self.last_structure

    @property
    def last_structure(self):
        """
        Fetch the structure.

        After or at the last recorded ionic step from parsevasp.
        """
        last_lattice = self._xml.get_lattice('last')
        if last_lattice is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        return _build_structure(last_lattice)

    @property
    def final_structure(self):
        """
        Fetch the structure.

        After or at the last recorded ionic step from parsevasp. Should in
        principle be the same as the method above.
        """
        return self.last_structure

    @property
    def last_forces(self):
        """
        Fetch forces.

        After or at the last recorded ionic step from parsevasp.
        """
        force = self._xml.get_forces('last')
        return force

    @property
    def final_forces(self):
        """
        Fetch forces.

        After or at the last recorded ionic step from parsevasp.
        """
        return self.last_forces

    @property
    def forces(self):
        """
        Fetch forces.

        This container should contain all relevant forces.
        Currently, it only contains the final forces, which can be obtain
        by the id `final_forces`.
        """
        final_forces = self.final_forces
        forces = {'final': final_forces}
        return forces

    @property
    def maximum_force(self):
        """Fetch the maximum force of at the last ionic run."""
        forces = self.final_forces
        if forces is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        norm = np.linalg.norm(forces, axis=1)
        return np.amax(np.abs(norm))

    @property
    def last_stress(self):
        """
        Fetch stess.

        After or at the last recorded ionic step from parsevasp.
        """
        stress = self._xml.get_stress('last')
        return stress

    @property
    def final_stress(self):
        """
        Fetch stress.

        After or at the last recorded ionic step from parsevasp.
        """
        return self.last_stress

    @property
    def stress(self):
        """
        Fetch stress.

        This container should contain all relevant stress.
        Currently, it only contains the final stress, which can be obtain
        by the id `final_stress`.
        """
        final_stress = self.final_stress
        stress = {'final': final_stress}
        return stress

    @property
    def maximum_stress(self):
        """Fetch the maximum stress of at the last ionic run."""
        stress = self.final_stress
        if stress is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        norm = np.linalg.norm(stress, axis=1)
        return np.amax(np.abs(norm))

    @property
    def trajectory(self):
        """
        Fetch unitcells, positions, species, forces and stress.

        For all calculation steps from parsevasp.
        """
        unitcell = self._xml.get_unitcell('all')
        positions = self._xml.get_positions('all')
        species = self._xml.get_species()
        forces = self._xml.get_forces('all')
        stress = self._xml.get_stress('all')
        # BUGFIX: check for missing quantities *before* processing; the
        # original code called .items() on the dicts first, which raised
        # AttributeError on None instead of returning cleanly.
        if (unitcell is None) or (positions is None) or \
           (species is None) or (forces is None) or \
           (stress is None):
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        # make sure all are sorted, first to last calculation
        # (species is constant)
        unitcell = sorted(unitcell.items())
        positions = sorted(positions.items())
        forces = sorted(forces.items())
        stress = sorted(stress.items())
        # convert to numpy
        unitcell = np.asarray([item[1] for item in unitcell])
        positions = np.asarray([item[1] for item in positions])
        forces = np.asarray([item[1] for item in forces])
        stress = np.asarray([item[1] for item in stress])
        # Aiida wants the species as symbols, so invert
        elements = _invert_dict(parsevaspct.elements)
        symbols = np.asarray([elements[item].title() for item in species.tolist()])
        trajectory_data = {}
        keys = ('cells', 'positions', 'symbols', 'forces', 'stress', 'steps')
        stepids = np.arange(unitcell.shape[0])
        for key, data in zip(keys, (unitcell, positions, symbols, forces, stress, stepids)):
            trajectory_data[key] = data
        return trajectory_data

    @property
    def total_energies(self):
        """Fetch the total energies after the last ionic run."""
        energies = self.energies
        if energies is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        energies_dict = {}
        for etype in self._settings.get('energy_type', DEFAULT_OPTIONS['energy_type']):
            energies_dict[etype] = energies[etype][-1]

        return energies_dict

    @property
    def energies(self):
        """Fetch the total energies."""
        # Check if we want total energy entries for each electronic step.
        electronic_step_energies = self._settings.get('electronic_step_energies', DEFAULT_OPTIONS['electronic_step_energies'])

        return self._energies(nosc=not electronic_step_energies)

    def _energies(self, nosc):
        """
        Fetch the total energies for all energy types, calculations (ionic steps) and electronic steps.

        The returned dict from the parser contains the total energy types as a key (plus the _final, which is
        the final total energy ejected by VASP after the closure of the electronic steps). The energies can then
        be found in the flattened ndarray where the key `electronic_steps` indicate how many electronic steps
        there is per ionic step. Using the combination, one can rebuild the electronic step energy per ionic step etc.
        """
        etype = self._settings.get('energy_type', DEFAULT_OPTIONS['energy_type'])
        energies = self._xml.get_energies(status='all', etype=etype, nosc=nosc)
        if energies is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=str(sys._getframe().f_code.co_name))
            return None
        return energies

    @property
    def projectors(self):
        """Fetch the projectors."""
        proj = self._xml.get_projectors()
        if proj is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        projectors = {}
        prj = []
        try:
            prj.append(proj['total'])  # pylint: disable=unsubscriptable-object
        except KeyError:
            try:
                prj.append(proj['up'])  # pylint: disable=unsubscriptable-object
                prj.append(proj['down'])  # pylint: disable=unsubscriptable-object
            except KeyError:
                self._logger.error('Did not detect any projectors. Returning.')
        if len(prj) == 1:
            projectors['projectors'] = prj[0]
        else:
            projectors['projectors'] = np.asarray(prj)
        return projectors

    @property
    def dielectrics(self):
        """Fetch the dielectric function."""
        diel = self._xml.get_dielectrics()
        if diel is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        dielectrics = {}
        energy = diel.get('energy')
        idiel = diel.get('imag')
        rdiel = diel.get('real')
        epsilon = diel.get('epsilon')
        epsilon_ion = diel.get('epsilon_ion')
        if energy is not None:
            dielectrics['ediel'] = energy
        # BUGFIX: the original guards were swapped (it tested ``idiel`` before
        # storing ``rdiel`` and vice versa), so a file with only one part
        # present could raise or silently drop data.
        if rdiel is not None:
            dielectrics['rdiel'] = rdiel
        if idiel is not None:
            dielectrics['idiel'] = idiel
        if epsilon is not None:
            dielectrics['epsilon'] = epsilon
        if epsilon_ion is not None:
            dielectrics['epsilon_ion'] = epsilon_ion
        return dielectrics

    @property
    def born_charges(self):
        """Fetch the Born effective charges."""
        brn = self._xml.get_born()
        if brn is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        born = {'born_charges': brn}
        return born

    @property
    def hessian(self):
        """Fetch the Hessian matrix."""
        hessian = self._xml.get_hessian()
        if hessian is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        hess = {'hessian': hessian}
        return hess

    @property
    def dynmat(self):
        """Fetch the dynamical eigenvectors and eigenvalues."""
        dynmat = self._xml.get_dynmat()
        if dynmat is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        dyn = {}
        dyn['dynvec'] = dynmat['eigenvectors']  # pylint: disable=unsubscriptable-object
        dyn['dyneig'] = dynmat['eigenvalues']  # pylint: disable=unsubscriptable-object
        return dyn

    @property
    def dos(self):
        """Fetch the total density of states."""
        dos = self._xml.get_dos()
        if dos is None:
            self._exit_code = self._exit_codes.ERROR_NOT_ABLE_TO_PARSE_QUANTITY.format(quantity=sys._getframe().f_code.co_name)
            return None
        densta = {}
        # energy is always there, regardless of
        # total, spin or partial
        energy = dos['total']['energy']  # pylint: disable=unsubscriptable-object
        densta['energy'] = energy
        tdos = None
        pdos = None
        upspin = dos.get('up')
        downspin = dos.get('down')
        total = dos.get('total')
        if (upspin is not None) and (downspin is not None):
            tdos = np.stack((upspin['total'], downspin['total']))
            if (upspin['partial'] is not None) and \
               (downspin['partial'] is not None):
                pdos = np.stack((upspin['partial'], downspin['partial']))
        else:
            tdos = total['total']
            pdos = total['partial']
        densta['tdos'] = tdos
        if pdos is not None:
            densta['pdos'] = pdos
        return densta

    @property
    def fermi_level(self):
        """Fetch Fermi level."""
        return self._xml.get_fermi_level()

    @property
    def band_properties(self):
        """Fetch miscellaneous electronic structure data"""
        eigenvalues = self.eigenvalues
        occupations = self.occupancies
        # BUGFIX: also bail out on missing occupancies; the original only
        # checked eigenvalues, and np.stack(None) below would raise.
        if eigenvalues is None or occupations is None:
            return None
        # Convert to np.ndarray
        eigenvalues = np.stack(eigenvalues, axis=0)
        occupations = np.stack(occupations, axis=0)
        return get_band_properties(eigenvalues, occupations)

    @property
    def run_status(self):
        """Fetch run_status information"""
        info = {}
        # First check electronic convergence by comparing executed steps to the
        # maximum allowed number of steps (NELM).
        energies = self._xml.get_energies('last', nosc=False)
        parameters = self._xml.get_parameters()
        info['finished'] = not self._xml_truncated
        # Only set to true for untruncated run to avoid false positives
        if energies is None:
            info['electronic_converged'] = False
        elif energies.get('electronic_steps')[0] < parameters['nelm'] and not self._xml_truncated:
            info['electronic_converged'] = True
        else:
            info['electronic_converged'] = False
        # Then check the ionic convergence by comparing executed steps to the
        # maximum allowed number of steps (NSW).
        energies = self._xml.get_energies('all', nosc=True)
        if energies is None:
            info['ionic_converged'] = False
        else:
            if len(energies.get('electronic_steps')) < parameters['nsw'] and not self._xml_truncated:
                info['ionic_converged'] = True
            else:
                info['ionic_converged'] = False
        # Override if nsw is 0 - no ionic steps are performed
        if parameters['nsw'] < 1:
            info['ionic_converged'] = None
        return info
def _build_structure(lattice):
    """Convert a parsevasp lattice dict into an AiiDA-style structure dict."""
    # AiiDA wants the species as symbols, so invert the element lookup table.
    number_to_symbol = _invert_dict(parsevaspct.elements)
    cell = lattice['unitcell']
    sites = []
    for position, specie in zip(lattice['positions'], lattice['species']):
        symbol = number_to_symbol[specie].title()
        sites.append({
            'position': np.dot(position, cell),
            'symbol': symbol,
            'kind_name': symbol,
        })
    return {'unitcell': cell, 'sites': sites}
def _invert_dict(dct):
return dct.__class__(map(reversed, dct.items()))
|
ODCleanStore/ODCleanStore | odcleanstore/engine/src/main/java/cz/cuni/mff/odcleanstore/engine/pipeline/PipelineGraphTransformerExecutorException.java | <reponame>ODCleanStore/ODCleanStore
package cz.cuni.mff.odcleanstore.engine.pipeline;
import cz.cuni.mff.odcleanstore.core.ODCleanStoreException;
import cz.cuni.mff.odcleanstore.engine.db.model.PipelineCommand;
/**
 * An exception arising from the PipelineGraphTransformerExecutor class.
 *
 * @author <NAME>
 */
public class PipelineGraphTransformerExecutorException extends ODCleanStoreException {
    private static final long serialVersionUID = -7628445944871159626L;

    /** The pipeline command being executed when the error occurred; may be null. */
    private PipelineCommand command;

    /**
     * @return Command which caused exception, may be null
     */
    PipelineCommand getCommand() {
        return command;
    }

    /**
     * Constructs a new exception with the given message.
     * @param message the detail message
     * @param command the pipeline command being executed; may be null
     */
    PipelineGraphTransformerExecutorException(String message, PipelineCommand command) {
        super(message);
        this.command = command;
    }

    /**
     * Constructs a new exception with the given message and cause.
     * @param message the detail message
     * @param command the pipeline command being executed; may be null
     * @param cause the cause
     */
    PipelineGraphTransformerExecutorException(String message, PipelineCommand command, Throwable cause) {
        super(message, cause);
        this.command = command;
    }
}
|
liuzyw/study-hello | study-lucene/src/main/java/com/study/luence/day1/CreateIndex.java | package com.study.luence.day1;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
/**
 * Created on 2019-02-20
 *
 * Builds a small Lucene index of three hard-coded News documents using the
 * IK analyzer, then prints how long indexing took.
 *
 * @author liuzhaoyuan
 */
public class CreateIndex implements Serializable {

    // BUGFIX: serialVersionUID must be a primitive long; the original boxed
    // Long is ignored by Java serialization.
    private static final long serialVersionUID = 1L;

    public static void main(String[] args) throws IOException {
        // Create 3 News objects (sample data).
        News news1 = new News();
        news1.setId(1);
        news1.setTitle("习近平会见美国总统奥巴马,学习国外经验");
        news1.setContent("国家主席习近平 9月 3日在杭州西湖国宾馆会见前米出席二 十国集团领导人杭州峰会的美国总统奥巴马.");
        news1.setReply(672);

        News news2 = new News();
        news2.setId(2);
        news2.setTitle("北大迎 4380名新生农村学生 700多人近年最多");
        news2.setContent("昨天,北京大学迎来 4380名来自全国各地及数卡个同家的本科新生。其中,农村学生共 700 余名,为近年最多...");
        news2.setReply(995);

        News news3 = new News();
        news3.setId(3);
        news3.setTitle("特朗普宣誓(<NAME>)就任美国第 45任总统");
        news3.setContent("当地时间 1月 20日,唐纳德·特朗普在美国国会宣誓就就职,正式成为美国第45任总统");
        news3.setReply(1872);

        Analyzer analyzer = new IKAnalyzer6x();
        IndexWriterConfig icw = new IndexWriterConfig(analyzer);
        icw.setOpenMode(OpenMode.CREATE);
        Path indexPath = Paths.get("/Users/liuzhaoyuan/gitwork/study-hello/study-lucene/indexdir");

        Date start = new Date();
        Directory directory = FSDirectory.open(indexPath);
        IndexWriter writer = new IndexWriter(directory, icw);

        // id: indexed as DOCS only, stored.
        FieldType idType = new FieldType();
        idType.setIndexOptions(IndexOptions.DOCS);
        idType.setStored(true);

        // title: fully indexed (positions + offsets), stored, tokenized.
        FieldType titleType = new FieldType();
        titleType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
        titleType.setStored(true);
        titleType.setTokenized(true);

        // content: as title, plus full term vectors (positions/offsets/payloads)
        // for highlighting or similarity use.
        FieldType contentType = new FieldType();
        contentType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
        contentType.setStored(true);
        contentType.setTokenized(true);
        contentType.setStoreTermVectors(true);
        contentType.setStoreTermVectorPositions(true);
        contentType.setStoreTermVectorOffsets(true);
        contentType.setStoreTermVectorPayloads(true);

        // The per-document field setup was triplicated in the original;
        // it is factored into a single helper below.
        addNewsDocument(writer, news1, idType, titleType, contentType);
        addNewsDocument(writer, news2, idType, titleType, contentType);
        addNewsDocument(writer, news3, idType, titleType, contentType);

        writer.commit();
        writer.close();
        directory.close();

        Date end = new Date();
        System.out.println("索引文档用时:" + (end.getTime() - start.getTime()));
    }

    /** Convert one News bean into a Lucene Document and add it to the writer. */
    private static void addNewsDocument(IndexWriter writer, News news,
            FieldType idType, FieldType titleType, FieldType contentType) throws IOException {
        Document doc = new Document();
        doc.add(new Field("id", String.valueOf(news.getId()), idType));
        doc.add(new Field("title", news.getTitle(), titleType));
        doc.add(new Field("content", news.getContent(), contentType));
        doc.add(new IntPoint("reply", news.getReply()));
        doc.add(new StoredField("reply display", news.getReply()));
        writer.addDocument(doc);
    }
}
|
flon-chou/file-system-service | qc-service/src/main/java/com/cj/qc/mqreceiver/RabbitReceiver.java | <reponame>flon-chou/file-system-service<filename>qc-service/src/main/java/com/cj/qc/mqreceiver/RabbitReceiver.java
package com.cj.qc.mqreceiver;
import cn.hutool.core.thread.ThreadUtil;
import com.cj.qc.fastmybatismapper.TaskQueueAutoMapper;
import com.cj.qc.service.TaskEngineService;
import com.rabbitmq.client.Channel;
import lombok.extern.slf4j.Slf4j;
import org.springframework.amqp.core.Message;
import org.springframework.amqp.rabbit.annotation.Exchange;
import org.springframework.amqp.rabbit.annotation.Queue;
import org.springframework.amqp.rabbit.annotation.QueueBinding;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.List;
/**
 * @author chenzhaowen
 * @description RabbitMQ listener for the quality-check (QC) task queues
 * @date 2019/4/18 17:37
 */
@Slf4j
@Component
public class RabbitReceiver {

    @Resource
    private TaskEngineService taskEngineService;

    /**
     * Fanout mode.
     * Consumes the automatic quality-check task queue.
     * The message body carries the payload passed to the daily task executor.
     * @param message the message
     * @param channel the channel
     * @throws IOException the io exception (must be handled by the caller/container)
     */
    @RabbitListener(queues = {"fanout.queue.task.auto"})
    public void consumeTaskQueueAuto(Message message, Channel channel) throws IOException {
        // Ack first, then execute; a failure during execution will NOT
        // requeue the message (the ack already happened).
        channel.basicAck(message.getMessageProperties().getDeliveryTag(), false);
//        System.out.println("-----------------------------[监听消息]自动质检任务队列消费:----------"+new String (message.getBody()));
        log.info("自动质检任务队列消费:{}",new String (message.getBody()));
        // Execute the daily quality-check task.
        taskEngineService.executeAutoTaskForDay(new String (message.getBody()));
        // TODO: may later need to publish an execution-success message back.
    }

    /**
     * Fanout mode.
     * Consumes the manual quality-check task queue.
     * The message body is the numeric id of a single manual task.
     * @param message the message
     * @param channel the channel
     * @throws IOException the io exception (must be handled by the caller/container)
     */
    @RabbitListener(queues = {"fanout.queue.task.hand"})
    public void consumeTaskQueueHand(Message message, Channel channel) throws IOException {
        channel.basicAck(message.getMessageProperties().getDeliveryTag(), false);
//        System.out.println("-----------------------------[监听消息]手动质检任务队列消费:----------"+new String (message.getBody()));
        log.info("手动质检任务队列消费:任务id={}",new String (message.getBody()));
        // Execute a single manual quality-check task by id.
        taskEngineService.executeSingleHandTask(Long.valueOf(new String (message.getBody())));
        // TODO: may later need to publish an execution-success message back.
    }
}
|
hovmikayelyan/Data_Structures_and_Algorithms | Data Structures/week4_hash_tables/2_hash_chains/submited.cpp | <reponame>hovmikayelyan/Data_Structures_and_Algorithms
/**
* @file submited.cpp
* @author <NAME> (<EMAIL>)
* @brief
* @version 0.1
* @date 2021-12-17
*
* @copyright Copyright (c) 2021
*
*/
#include <bits/stdc++.h>
using namespace std;
// One parsed input query: "add"/"del"/"find" carry a string in `name`;
// "check" carries a bucket index in `number`.
struct Query
{
    string type, name;
    size_t number;
};
// Hash table with chaining: BUCKET chains of strings, indexed by a
// polynomial string hash.
class QueryProcessor
{
    int BUCKET;                              // number of hash buckets
    unordered_map<int, list<string>> table;  // bucket index -> chain (front = newest)

    // Polynomial hash with multiplier 263 modulo 1e9+7, reduced to a bucket
    // index. NOTE(review): char values are assumed non-negative (ASCII input).
    size_t hash(const string &s) const
    {
        static const size_t multiplier = 263;
        static const size_t prime = 1000000007;
        unsigned long long h = 0;
        for (int i = static_cast<int>(s.size()) - 1; i >= 0; --i)
            h = (h * multiplier + s[i]) % prime;
        return h % BUCKET;
    }

public:
    QueryProcessor(int V) : BUCKET(V) {}

    // Add s to the front of its chain unless it is already present.
    void insertItem(const string &s)
    {
        if (findItem(s))
            return;
        table[hash(s)].push_front(s);
    }

    // Remove s from its chain if present; no-op otherwise.
    void deleteItem(const string &s)
    {
        auto &row = table[hash(s)];
        auto it = std::find(row.begin(), row.end(), s);
        if (it != row.end())
            row.erase(it);
    }

    // Return true when s is stored in the table.
    bool findItem(const string &s)
    {
        auto &row = table[hash(s)];
        // Simplified from the original redundant `? true : false` ternary.
        return std::find(row.begin(), row.end(), s) != row.end();
    }

    // Return the contents of bucket `key`, most recently inserted first,
    // each element followed by a single space.
    string checkItem(int key)
    {
        ostringstream elems;
        auto &row = table[key];
        copy(row.begin(), row.end(), ostream_iterator<string>(elems, " "));
        return elems.str();
    }
};
// Read S queries from stdin. Each line is "<type> <arg>": the argument is a
// string name for add/del/find and a bucket number for check.
vector<Query> read_queries(int S)
{
    vector<Query> queries(S);
    for (int i = 0; i < S; ++i)
    {
        cin >> queries[i].type;
        if (queries[i].type != "check")
            cin >> queries[i].name;
        else
            cin >> queries[i].number;
    }
    return queries;
}
void process_queries(const vector<Query> &queries, int &buckets_size, int &queries_size)
{
QueryProcessor hashes(buckets_size);
vector<string> result;
for (size_t i = 0; i < queries_size; ++i)
{
string cmd = queries[i].type;
if (cmd == "add")
{
hashes.insertItem(queries[i].name);
}
if (cmd == "del")
{
hashes.deleteItem(queries[i].name);
}
if (cmd == "find")
{
bool found = hashes.findItem(queries[i].name);
found ? result.emplace_back("yes") : result.emplace_back("no");
}
if (cmd == "check")
{
string found = hashes.checkItem(queries[i].number);
result.emplace_back(found);
}
}
copy(result.begin(), result.end(), ostream_iterator<string>(cout, "\n"));
}
int main()
{
    // Decouple C++ streams from C stdio for faster cin/cout.
    ios_base::sync_with_stdio(false);
    // First line of input: bucket count and number of queries.
    int buckets_size, queries_size;
    cin >> buckets_size >> queries_size;
    process_queries(read_queries(queries_size), buckets_size, queries_size);
    return 0;
}
OLR-xray/OLR-3.0 | src/xray/xr_3da/xrGame/console_registrator_script.cpp | #include "pch_script.h"
#include "console_registrator.h"
#include "../xr_ioconsole.h"
#include "../xr_ioc_cmd.h"
using namespace luabind;
// Accessor for the global engine console singleton, exported to Lua below.
CConsole* console()
{
    return Console;
}
// Fetch the current value of an integer console variable `cmd`;
// the min/max bounds filled in by GetInteger are discarded.
int get_console_integer(CConsole* c, LPCSTR cmd)
{
    int val=0,min=0,max=0;
    c->GetInteger ( cmd, val, min, max);
    return val;
}
// Fetch the current value of a float console variable `cmd`;
// the min/max bounds filled in by GetFloat are discarded.
float get_console_float(CConsole* c, LPCSTR cmd)
{
    float val=0,min=0,max=0;
    c->GetFloat ( cmd, val, min, max);
    return val;
}
// Fetch the current value of a boolean console variable `cmd`.
bool get_console_bool(CConsole* c, LPCSTR cmd)
{
    // BUGFIX: `val` was passed to GetBool uninitialized; if GetBool reads
    // its argument before assigning (semantics not visible here — confirm
    // against xr_ioconsole), that was undefined behavior. Zero-init is safe
    // either way.
    BOOL val = 0;
    val = c->GetBool (cmd, val);
    return !!val;
}
// Look up a registered console command by name; returns NULL when absent.
IConsole_Command* find_cmd(CConsole *c, LPCSTR cmd)
{
    CConsole::vecCMD_IT I = c->Commands.find(cmd);
    IConsole_Command *icmd = NULL;
    if (I != c->Commands.end())
        icmd = I->second;
    return icmd;
}
// Disable a console command by name; silently ignores unknown commands.
void disable_cmd(CConsole *c, LPCSTR cmd)
{
    IConsole_Command *icmd = find_cmd(c, cmd);
    if (icmd)
        icmd->SetEnabled (false);
}
// Enable a console command by name; silently ignores unknown commands.
void enable_cmd(CConsole *c, LPCSTR cmd)
{
    IConsole_Command *icmd = find_cmd(c, cmd);
    if (icmd)
        icmd->SetEnabled(true);
}
#pragma optimize("s",on)
void console_registrator::script_register(lua_State *L)
{
module(L)
[
def("get_console", &console),
class_<CConsole>("CConsole")
.def("disable_command", &disable_cmd)
.def("enable_command", &enable_cmd)
.def("execute", &CConsole::Execute)
.def("execute_script", &CConsole::ExecuteScript)
.def("show", &CConsole::Show)
.def("hide", &CConsole::Hide)
// .def("save", &CConsole::Save)
.def("get_string", &CConsole::GetString)
.def("get_integer", &get_console_integer)
.def("get_bool", &get_console_bool)
.def("get_float", &get_console_float)
.def("get_token", &CConsole::GetToken)
.def_readonly ("visible", &CConsole::bVisible)
// .def("", &CConsole::)
];
} |
emouse2010/duband | wrist-rom-open/Source/src/bd_factory_test.c | /* Copyright (c) [2014 Baidu]. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File Name :
* Author :
* Version : $Revision:$
* Date : $Date:$
* Description :
*
* HISTORY:
* Date | Modification | Author
* 28/03/2014 | Initial Revision |
*/
#include "bd_factory_test.h"
#include "app_timer.h"
#include "config.h"
#include "bd_led_flash.h"
#include "app_error.h"
#include "bd_battery.h"
#include "nrf.h"
#include "nrf51_bitfields.h"
#include "nrf_delay.h"
#include "hal_acc.h"
#include "bd_spi_master.h"
#include "spi_master_config.h" // This file must be in the application folder
#include "nrf_gpio.h"
#include "nrf_assert.h"
#include "bd_communicate_protocol.h"
#include "app_scheduler.h"
#include "nrf_soc.h"
#include "bd_low_power_mode.h"
#include "nordic_common.h"
#include "ble_flash.h"
/* Accelerometer FIFO buffer, filled by hal_acc_GetFifoData() (hal_acc). */
extern AxesRaw_t accData[32]; // Read acc data from FIFO
/* Shared L2 response scratch buffer (owned by bd_communicate_protocol). */
extern uint8_t global_reponse_buffer[50];
/* Latest measured battery voltage in millivolts (owned by bd_battery). */
extern uint16_t g_battery_voltage_mv;
/* true when the BLE TX path is free; cleared while a packet is in flight. */
uint8_t g_send_flag = true;
/* Set back to true once the scheduled serial-number flash write completed. */
uint8_t g_write_sn_done = true;
/* Set back to true once the scheduled factory-flag flash write completed. */
uint8_t g_write_flag_done = true;
/* Non-zero while the device is in factory test mode. */
uint8_t g_testMode = 0;
/* Serial number is staged here before being written to flash. */
uint8_t SERIAL_NUMBER[SERIAL_NUMBER_LENGTH];
/*
 * Return non-zero when the factory-test completion flag has been programmed
 * into flash. Erased flash reads 0xFF; write_flash_handler() programs the
 * flag byte to 0x00 when the factory test finishes.
 */
uint8_t is_factory_test_done(void)
{
    uint32_t *flag_addr;
    ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &flag_addr );
    /* flag_addr is a word pointer, so the offset is in words, not bytes. */
    return (*((uint8_t*)(flag_addr + FACTORY_TEST_FLAG_OFF))) == (uint8_t)0x00;
}
/*
 * Drain the accelerometer FIFO and serialize the FIRST sample into
 * value[0..5] as big-endian X, Y, Z int16 pairs.
 * value must point at a buffer of at least 6 bytes.
 */
void get_sensor_data(uint8_t *value)
{
    uint8_t transfer_size;
    /* Fills accData[]; number of samples returned in transfer_size. */
    hal_acc_GetFifoData(&transfer_size);
    value[0] = accData[0].AXIS_X >> 8;
    value[1] = accData[0].AXIS_X & 0x00FF;
    value[2] = accData[0].AXIS_Y >> 8;
    value[3] = accData[0].AXIS_Y & 0x00FF;
    value[4] = accData[0].AXIS_Z >> 8;
    value[5] = accData[0].AXIS_Z & 0x00FF;
}
/*
 * Factory self-test for the accelerometer.
 *
 * Reads one FIFO burst and computes the mean squared magnitude
 * (x^2 + y^2 + z^2 averaged over all samples); the sensor passes when that
 * mean lies inside [MIN_SENSOR_VALUE, MAX_SENSOR_VALUE].
 *
 * Returns true on pass, false on failure (including an empty FIFO).
 *
 * Fixes over the previous revision:
 *  - the range test used a chained comparison (a <= b <= c); in C that
 *    compares the 0/1 result of (a <= b) against c and never rejected
 *    out-of-range readings;
 *  - the sample loop iterated one element past transfer_size;
 *  - an empty FIFO caused a division by zero;
 *  - the squared-magnitude accumulator could overflow 32 bits
 *    (32 samples * 3 * 32767^2 > INT32_MAX).
 */
uint8_t sensor_test()
{
    uint8_t transfer_size;
    int32_t accX, accY, accZ;
    int64_t compose = 0;    /* 64-bit accumulator: see overflow note above */
    int64_t mean;
    uint8_t index;

    hal_acc_GetFifoData(&transfer_size);
    if (transfer_size == 0) {
        return false;       /* no samples available: cannot judge the sensor */
    }
    for (index = 0; index < transfer_size; index++) {
        accX = accData[index].AXIS_X;
        accY = accData[index].AXIS_Y;
        accZ = accData[index].AXIS_Z;
        compose += (int64_t)(accX * accX + accY * accY + accZ * accZ);
    }
    mean = compose / transfer_size;
    return (MIN_SENSOR_VALUE <= mean) && (mean <= MAX_SENSOR_VALUE);
}
/*
 * Blink all five status LEDs once: 400 ms on, then 400 ms off.
 * Used as a visual power-on / factory-test indicator.
 * NOTE(review): LEDs appear to be active-low (port *clear* turns them on) -
 * confirm against the board schematic.
 */
void all_led_flash()
{
    nrf_gpio_port_clear((nrf_gpio_port_select_t)NRF_GPIO_PORT_SELECT_PORT0, 0xE0);//BAIDU_LED_0, BAIDU_LED_1, BAIDU_LED_2
    nrf_gpio_port_clear((nrf_gpio_port_select_t)NRF_GPIO_PORT_SELECT_PORT2, 0x0C);//BAIDU_LED_3, BAIDU_LED_4
    nrf_delay_ms(400);
    nrf_gpio_port_set((nrf_gpio_port_select_t)NRF_GPIO_PORT_SELECT_PORT0, 0xE0);//BAIDU_LED_0, BAIDU_LED_1, BAIDU_LED_2
    nrf_gpio_port_set((nrf_gpio_port_select_t)NRF_GPIO_PORT_SELECT_PORT2, 0x0C);//BAIDU_LED_3, BAIDU_LED_4
    nrf_delay_ms(400);
}
/*
 * Power-on self test, run once at boot:
 *   1. blink every LED (always, as a basic LED check);
 *   2. skip the rest if the factory test has already been completed;
 *   3. run the accelerometer self test and flash the result LED on pass;
 *   4. make sure the SN/factory-flag flash page is erased so later
 *      serial-number and flag writes start from 0xFF.
 */
void bootup_check()
{
    //1.Check LEDs first
    all_led_flash();
    //2.check test flag
    if(is_factory_test_done())
        return;
    //3.SENSOR TEST
    if(sensor_test()) {
        nrf_gpio_pin_clear(LED_SENSOR_TEST_RESULT);
        nrf_delay_ms(400);
        nrf_gpio_pin_set(LED_SENSOR_TEST_RESULT);
    }
    //4.init flash sn & factory test flag
    uint32_t *flag_addr = 0;
    ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &flag_addr );
    /* First word != 0xFFFFFFFF means the page holds stale data: erase it. */
    if( *flag_addr != 0xFFFFFFFF){
        ble_flash_page_erase(FLASH_PAGE_SN_FACTORY_FLAG);
    }
}
/*
 * Scheduler callback: power the chip down via the SoftDevice.
 * data/length are unused; they exist only to match the app_sched
 * event-handler signature.
 */
void system_off(void* data, uint16_t length)
{
    UNUSED_VARIABLE(data);
    UNUSED_VARIABLE(length);
    sd_system_off();
}
/*
 * Pulse the vibration motor for 500 ms. Compiles to a no-op on
 * hardware variants built without FEATURE_MOTOR.
 */
void vibrator_test()
{
#ifdef FEATURE_MOTOR
    nrf_gpio_pin_set(BAIDU_MOTOR_0);
    nrf_delay_ms(500);
    nrf_gpio_pin_clear(BAIDU_MOTOR_0);
#endif
}
/*
 * Transmit an L2 payload if the BLE TX path is free.
 * When a previous send is still in flight the packet is dropped silently;
 * g_send_flag is raised again by send_callback() on confirmed delivery.
 */
void send_package(L2_Send_Content *content)
{
    if (!g_send_flag) {
        return;             /* previous packet still pending */
    }
    L1_send(content);
    g_send_flag = false;    /* claim the TX path until the send completes */
}
/*
 * L1 completion callback: release the TX path on confirmed delivery.
 * Any other status leaves g_send_flag unchanged.
 */
void send_callback(SEND_STATUS status)
{
    if (status != SEND_SUCCESS) {
        return;
    }
    g_send_flag = true;
}
/*
 * Build an L2 response frame in global_reponse_buffer and point `content`
 * at it.
 *
 * Frame layout: [0] command id, [1] L2 header version, [2] echoed key,
 * [3..4] big-endian payload length, [5..] payload bytes copied from value.
 *
 * NOTE(review): global_reponse_buffer is 50 bytes; callers must keep
 * length <= sizeof(global_reponse_buffer) - 5 - confirm at call sites.
 */
void generate_l2_package(
    L2_Send_Content *content,
    BLUETOOTH_COMMUNICATE_COMMAND id,
    uint8_t key,
    uint16_t length,
    uint8_t* value)
{
    global_reponse_buffer[0] = id;                    /*command id*/
    global_reponse_buffer[1] = L2_HEADER_VERSION;     /*L2 header version */
    global_reponse_buffer[2] = key;                   /*echo return*/
    global_reponse_buffer[3] = length >> 8;
    global_reponse_buffer[4] = (uint8_t)(length & 0x00FF);
    for(int i = 0; i < length; i++) {
        global_reponse_buffer[5+i] = value[i];
    }
    content->callback = send_callback;
    content->content = global_reponse_buffer;
    content->length = L2_HEADER_SIZE + L2_PAYLOAD_HEADER_SIZE + length; /*length of whole L2*/
}
/*
 * Deferred flash writer, run from the app scheduler (flash operations must
 * not execute in interrupt context). The first byte of `data` selects the
 * operation:
 *   KEY_WRITE_SN   - program the staged SERIAL_NUMBER into the SN page;
 *   KEY_WRITE_FLAG - program the factory-test-done flag byte to 0x00.
 * Completion is signalled through g_write_sn_done / g_write_flag_done.
 */
void write_flash_handler(void * data, uint16_t length)
{
    uint8_t size;
    uint32_t *addr;
    uint32_t *value;
    switch(*((uint8_t*)data)) {
    case KEY_WRITE_SN:
        /* Block write works in 32-bit words. */
        size = SERIAL_NUMBER_LENGTH/sizeof(uint32_t);
        ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &addr );
        value = (uint32_t*)SERIAL_NUMBER;
        ble_flash_block_write(addr, value, size);
        g_write_sn_done = true;
        break;
    case KEY_WRITE_FLAG:
        ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &addr );
        /* Word-pointer arithmetic: offset is in words. */
        addr += FACTORY_TEST_FLAG_OFF;
        ble_flash_word_write(addr, (uint32_t)0);
        g_write_flag_done = true;
        break;
    default:
        return;
    }
}
/*
 * Schedule programming of the factory-test-done flag.
 * The actual flash write happens later in write_flash_handler();
 * g_write_flag_done stays false until it completes.
 * NOTE(review): app_sched_event_put copies the event data, so passing the
 * address of a stack local is safe - confirm scheduler configuration.
 */
void write_test_flag()
{
    uint32_t err_code;
    uint8_t data = KEY_WRITE_FLAG;
    g_write_flag_done = false;
    err_code = app_sched_event_put(&data, 1, (app_sched_event_handler_t)write_flash_handler);
    APP_ERROR_CHECK(err_code);
}
/*
 * Scheduler callback: report the factory-test flag byte over BLE.
 * If the TX path is busy the handler re-queues itself and retries on the
 * next scheduler pass.
 */
void read_flag(void *data, uint16_t length)
{
    if(g_send_flag) {
        L2_Send_Content content;
        uint32_t *addr;
        ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &addr );
        addr += FACTORY_TEST_FLAG_OFF;
        generate_l2_package(&content, FACTORY_TEST_COMMAND_ID, KEY_RETURN_FLAG, 1, (uint8_t*)addr );
        send_package(&content);
    } else {
        /* TX busy: try again later. */
        uint32_t err_code;
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)read_flag);
        APP_ERROR_CHECK(err_code);
    }
}
void write_sn(uint8_t* data, int16_t length)
{
uint32_t err_code;
uint8_t value = KEY_WRITE_SN;
g_write_sn_done = false;
err_code = app_sched_event_put(&value, 1, (app_sched_event_handler_t)write_flash_handler);
APP_ERROR_CHECK(err_code);
for(int i=0;i<length;i++) {
SERIAL_NUMBER[i]=data[i+L2_PAYLOAD_HEADER_SIZE];
}
}
/*
 * Scheduler callback: report the serial number stored in flash over BLE.
 * Waits (by re-queuing itself) until both the TX path is free and any
 * pending serial-number flash write has finished.
 */
void read_sn(void *data, uint16_t length)
{
    if(g_send_flag && g_write_sn_done) {
        L2_Send_Content content;
        uint32_t *addr;
        ble_flash_page_addr(FLASH_PAGE_SN_FACTORY_FLAG, &addr );
        generate_l2_package(&content, FACTORY_TEST_COMMAND_ID, KEY_RETURN_SN, SERIAL_NUMBER_LENGTH, (uint8_t*)addr);
        send_package(&content);
    } else {
        /* TX busy or flash write still pending: try again later. */
        uint32_t err_code;
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)read_sn);
        APP_ERROR_CHECK(err_code);
    }
}
/*
 * Scheduler callback: echo the received payload back to the host.
 * Re-queues itself (preserving data and length) while the TX path is busy.
 */
void request_echo(void *data, uint16_t length)
{
    if(g_send_flag) {
        L2_Send_Content content;
        generate_l2_package(&content, FACTORY_TEST_COMMAND_ID, KEY_RETURN_ECHO, length, data);
        send_package(&content);
    } else {
        uint32_t err_code;
        err_code = app_sched_event_put(data, length , (app_sched_event_handler_t)request_echo);
        APP_ERROR_CHECK(err_code);
    }
}
/*
 * Scheduler callback: send one accelerometer sample (big-endian X/Y/Z
 * int16 pairs, 6 bytes) to the host.
 * NOTE(review): unlike the other request handlers this one does NOT
 * re-queue itself when the TX path is busy - the request is simply
 * dropped. Confirm whether that is intentional (sensor data is transient).
 */
void request_sensor_data(void *data, uint16_t length)
{
    if(g_send_flag) {
        uint8_t value[6];
        get_sensor_data(value);
        L2_Send_Content content;
        generate_l2_package(&content, FACTORY_TEST_COMMAND_ID, KEY_RETURN_SENSOR, 6, value);
        send_package(&content);
    }
}
/*
 * Enter factory test mode. Idempotent: the mode flag simply ends up set,
 * so the old "only if not already set" guard was redundant.
 */
void enter_test_mode()
{
    g_testMode = 1;
}
/*
 * Leave factory test mode and schedule a full power-down of the chip
 * (system_off runs from the scheduler, outside interrupt context).
 */
void exit_test_mode()
{
    g_testMode = 0;
    uint32_t err_code;
    err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)system_off);
    APP_ERROR_CHECK(err_code);
}
/*
 * Scheduler callback: report the battery voltage (big-endian millivolts,
 * 2 bytes) to the host. Re-queues itself while the TX path is busy.
 */
void request_charge(void * data, uint16_t length)
{
    if(g_send_flag) {
        uint8_t value[2];
        value[0] = g_battery_voltage_mv >> 8;
        value[1] = g_battery_voltage_mv & 0x00FF;
        L2_Send_Content content;
        generate_l2_package(&content, FACTORY_TEST_COMMAND_ID, KEY_RETURN_CHARGE, 2, value);
        send_package(&content);
    } else {
        uint32_t err_code;
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)request_charge);
        APP_ERROR_CHECK(err_code);
    }
}
/*
 * Dispatch a factory-test command received over BLE.
 *
 * `data` points at the L2 payload; its first byte is the command key,
 * `length` is the payload length delivered by the protocol layer.
 * Every key except KEY_ENTER_TEST_MODE is ignored unless test mode is
 * active. Work that must not run in the caller's context is deferred to
 * the app scheduler; flash-mutating keys are additionally refused once the
 * factory test has been marked done.
 */
void do_test(uint8_t *data, uint16_t length)
{
    /* Only the enter-test-mode command is honoured outside test mode. */
    if((KEY_ENTER_TEST_MODE != *data) && !g_testMode)
        return;
    uint32_t err_code;
    switch((FACTORY_TEST_KEY)(*data)) {
    case KEY_REQUEST_ECHO:
        /* NOTE(review): the payload header is skipped but `length` is not
           reduced by L2_PAYLOAD_HEADER_SIZE - confirm the echoed length. */
        err_code = app_sched_event_put(data + L2_PAYLOAD_HEADER_SIZE, length , (app_sched_event_handler_t)request_echo);
        APP_ERROR_CHECK(err_code);
        break;
    case KEY_REQUEST_SENSOR:
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)request_sensor_data);
        APP_ERROR_CHECK(err_code);
        break;
    case KEY_LED_TEST:
        all_led_flash();
        break;
    case KEY_VIBRATOR_TEST:
        vibrator_test();
        break;
    case KEY_WRITE_FLAG:
        /* Flag can only be programmed once per erase cycle. */
        if(!is_factory_test_done())
            write_test_flag();
        break;
    case KEY_WRITE_SN:
        if(!is_factory_test_done())
            write_sn(data, length);
        break;
    case KEY_READ_FLAG:
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)read_flag);
        APP_ERROR_CHECK(err_code);
        break;
    case KEY_READ_SN:
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)read_sn);
        APP_ERROR_CHECK(err_code);
        break;
    case KEY_ENTER_TEST_MODE:
        enter_test_mode();
        break;
    case KEY_EXIT_TEST_MODE:
        exit_test_mode();
        break;
    case KEY_REQUEST_CHARGE:
        err_code = app_sched_event_put(NULL, 0, (app_sched_event_handler_t)request_charge);
        APP_ERROR_CHECK(err_code);
        break;
    default:
        break;
    }
}
|
longwind09/sampling | src/main/java/org/felix/ml/sampling/IConfig.java | package org.felix.ml.sampling;
import org.felix.ml.sampling.exception.ConfigException;
import java.io.File;
import java.io.InputStream;
import static org.felix.ml.sampling.util.Constant.LABEL_VAR;
import static org.felix.ml.sampling.util.Constant.SAMPLE_VAR;
import static org.apache.commons.lang.StringUtils.trim;
/**
*
* */
/**
 * Configuration contract for the sampling pipeline: loading a config from
 * various sources and exposing the parsed tag, filters, output mode and
 * row converter.
 *
 * <p>Fix over the previous revision: the {@code load(File)} parameter was
 * named {@code File}, shadowing the type name. Renaming a parameter is
 * both source- and binary-compatible for callers.</p>
 */
public interface IConfig {
	/** Lookup key under which a config instance may be registered. */
	public static final String CONFIG_KEY = "config_key";

	/** Loads configuration from the resource identified by the given string. */
	public void load(String string) throws ConfigException;

	/** Loads configuration from the given file. */
	public void load(File file) throws ConfigException;

	/** Loads configuration from the given stream. */
	public void load(InputStream in) throws ConfigException;

	public ITag getTag();

	public IFilter getRootFilter();

	public IFilter getPreFilter();

	public IFilter getInnerFilter();

	/** Returns the configured output mode (label or sample). */
	public Out getOut();

	public String getProperty(String key);

	public IRowConvert getRowConvert();

	/** Output mode: emit labels or emit samples. */
	public enum Out {
		LABEL(LABEL_VAR), SAMPLE(SAMPLE_VAR);

		private String name;

		private Out(String str) {
			this.name = str;
		}

		/** Parses a (possibly padded) string into an Out, or null if unknown. */
		public static Out str2Enum(String str) {
			if (LABEL_VAR.equals(trim(str)))
				return Out.LABEL;
			else if (SAMPLE_VAR.equals(trim(str)))
				return Out.SAMPLE;
			return null;
		}

		/**
		 * Compares by configured name.
		 * NOTE: this OVERLOADS (not overrides) {@link Object#equals(Object)};
		 * kept for source compatibility with existing callers.
		 */
		public boolean equals(Out out) {
			return this.name.equals(out.name);
		}
	}
}
|
battlecatsultimate/PackPack | src/main/java/mandarin/packpack/supporter/bc/CustomMaskUnit.java | <reponame>battlecatsultimate/PackPack<gh_stars>1-10
package mandarin.packpack.supporter.bc;
import common.CommonStatic;
import common.battle.data.DataUnit;
import common.util.anim.MaAnim;
import common.util.unit.UnitLevel;
import mandarin.packpack.supporter.StaticStore;
/**
 * A unit mask built from raw CSV rows rather than game archives, used for
 * custom/preview units. Wraps the stat row, level curve, animation and the
 * rarity/evolution columns of the unitbuy row.
 */
public class CustomMaskUnit extends DataUnit {
    /** Raw stat row this unit was constructed from. */
    public final String[] data;
    /** Level scaling curve parsed from the curve row. */
    public final UnitLevel curve;
    /** Animation, used for e.g. attack-length queries. */
    public final MaAnim anim;
    /** Rarity and level caps; indices refer to unitbuy.csv columns. */
    public final int rarity, max, maxp;
    /** True-form evolution material requirements, or null when not applicable. */
    public final int[][] evo;

    /**
     * @param data  raw stat row
     * @param curve level-curve row (non-numeric cells become 0)
     * @param anim  unit animation
     * @param rare  unitbuy row; columns 13/50/51 are rarity/max/max-plus,
     *              23 the evolution id, 27+ the evolution materials
     *              (NOTE(review): column meanings assumed from BC data
     *              layout - confirm against the game version in use)
     */
    public CustomMaskUnit(String[] data, String[] curve, MaAnim anim, String[] rare) {
        super(null, null, data);

        this.data = data;
        this.curve = new UnitLevel(toCurve(curve));
        this.anim = anim;
        this.rarity = Integer.parseInt(rare[13]);
        this.max = Integer.parseInt(rare[50]);
        this.maxp = Integer.parseInt(rare[51]);

        int et = Integer.parseInt(rare[23]);

        // Evolution ids in [15000, 17000) carry material data.
        if (et >= 15000 && et < 17000) {
            evo = new int[6][2];

            evo[0][0] = Integer.parseInt(rare[27]);

            for (int i = 0; i < 5; i++) {
                evo[i + 1][0] = Integer.parseInt(rare[28 + i * 2]);
                evo[i + 1][1] = Integer.parseInt(rare[29 + i * 2]);
            }
        } else {
            evo = null;
        }
    }

    /** Parses the curve row; non-numeric cells are left as 0. */
    private int[] toCurve(String[] curve) {
        int[] result = new int[curve.length];

        for(int i = 0; i < curve.length; i++) {
            if(StaticStore.isNumeric(curve[i])) {
                result[i] = StaticStore.safeParseInt(curve[i]);
            }
        }

        return result;
    }

    @Override
    public int getAnimLen() {
        return anim.len;
    }
}
|
lanpinguo/apple-sauce | ofagent/indigo/submodules/bigcode/modules/PPE/module/src/ppe_utm.c | /****************************************************************
*
* Copyright 2013, Big Switch Networks, Inc.
*
* Licensed under the Eclipse Public License, Version 1.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.eclipse.org/legal/epl-v10.html
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*
***************************************************************/
#include <PPE/ppe_config.h>
#if PPE_CONFIG_INCLUDE_UTM == 1
#include <PPE/uCli/ppe_utm.h>
#include <uCli/ucli_argparse.h>
#include "ppe_util.h"
/**
* Cast and assign our control pointer in every command handler
*/
#undef UCLI_COMMAND_INIT
#define UCLI_COMMAND_INIT \
AIM_VAR_PCAST_SAFE(ppe_utm_ctrl_t*, ppec, uc, uc->cookie); \
AIM_REFERENCE(ppec)
/* "config" command: print the PPE compile-time configuration. */
static ucli_status_t
ppe_ucli_utm__config__(ucli_context_t* uc)
{
    UCLI_COMMAND_INFO(uc,
                      "config", 0,
                      "Show the PPE build configuration.");
    ppe_config_show(&uc->pvs);
    return 0;
}
/*
 * Verify that field f is present in the current packet.
 * Returns 0 when present, otherwise a negative ucli error status
 * (a missing field is a user error, anything else is internal).
 */
static inline int
field_required__(ucli_context_t* uc, ppe_field_t f)
{
    AIM_VAR_PCAST_SAFE(ppe_utm_ctrl_t*, ppec, uc, uc->cookie);

    switch(ppe_field_exists(&ppec->ppep, f)) {
    case 1:
        return 0;
    case 0:
        return ucli_error(uc, "field %{ppe_field} does not exist in the packet.",
                          f);
    default:
        return ucli_e_internal(uc, "ppe_field_exists() failed internally.");
    }
}
/**
* Report an error and return if the given field does not exists
*/
#define PPE_FIELD_EXISTS_OR_RETURN(_uc, _f) \
do { \
int rv = field_required__(_uc, _f); \
if(rv < 0) { \
return rv; \
} \
} while(0)
/**
* Report an error if field get fails.
*/
#define PPE_FIELD_GET_OR_RETURN(_uc, _ppep, _f, _rv) \
do { \
int rv = ppe_field_get(_ppep, _f, _rv); \
if(rv < 0) { \
return ucli_e_internal(_uc, "ppe_field_get(%{ppe_field})", _f); \
} \
} while(0)
/**
* Report an error if field set fails.
*/
#define PPE_FIELD_SET_OR_RETURN(_uc, _ppep, _f, _sv) \
do { \
int rv = ppe_field_set(_ppep, _f, _sv); \
if(rv < 0) { \
return ucli_e_internal(_uc, "ppe_field_set(%{ppe_field}", _f); \
} \
} while(0)
/**
* Report an error if a wide_field get fails
*/
#define PPE_WIDE_FIELD_GET_OR_RETURN(_uc, _ppep, _f, _rv) \
do { \
int rv = ppe_wide_field_get(_ppep, _f, _rv); \
if(rv < 0) { \
return ucli_e_internal(_uc, "ppe_wide_field_get(%{ppe_field}", _f); \
} \
} while(0)
/**
* Report an error if a wide_field set fails
*/
#define PPE_WIDE_FIELD_SET_OR_RETURN(_uc, _ppep, _f, _sv) \
do { \
int rv = ppe_wide_field_set(_ppep, _f, _sv); \
if(rv < 0) { \
return ucli_e_internal(_uc, "ppe_wide_field_set(%{ppe_field})", _f); \
} \
} while(0)
/**
* Report an error if a field is greater than 32 bits
*/
#define PPE_FIELD32_OR_RETURN(_uc, _size_bits) \
do { \
if(_size_bits > 32) { \
return ucli_error(_uc, "only fields that are 32 bits or less can be used with this command."); \
} \
} while(0)
/* "update" command: recompute checksums and other derived packet fields. */
static ucli_status_t
ppe_ucli_utm__update__(ucli_context_t* uc)
{
    UCLI_COMMAND_INFO(uc,
                      "update", -1,
                      "Update all dynamic/checksum fields in the packet.");
    ppe_packet_update(&ppec->ppep);
    return 0;
}
/*
 * "chm" command: check one bit of the packet's header mask.
 * Usage: chm <header> <bool> - errors when the header-present bit does not
 * match the expected boolean.
 */
static ucli_status_t
ppe_ucli_utm__chm__(ucli_context_t* uc)
{
    ppe_header_t header;
    int expected; /* renamed from `bool`: that identifier is the C99
                     <stdbool.h> macro and breaks compilation if the header
                     is ever (transitively) included */

    UCLI_COMMAND_INFO(uc,
                      "chm", 2,
                      "Check the header mask.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_header}{bool}", &header, &expected);
    if(expected && !(ppec->ppep.header_mask & (1 << header))) {
        return ucli_error(uc, "header bit %{ppe_header} is 0.", header);
    }
    if(!expected && (ppec->ppep.header_mask & (1 << header))) {
        return ucli_error(uc, "header bit %{ppe_header} is 1.", header);
    }
    return UCLI_STATUS_OK;
}
/*
 * "setheader" command: attach raw data as the given header.
 * Any previously attached buffer for that header is freed first, so the
 * packet owns exactly one allocation per header slot.
 */
static ucli_status_t
ppe_ucli_utm__setheader__(ucli_context_t* uc)
{
    uint8_t* data;
    int size;
    ppe_header_t header;

    UCLI_COMMAND_INFO(uc,
                      "setheader", 2,
                      "Set header data.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_header}{data}", &header, &data, &size);

    if(ppe_header_get(&ppec->ppep, header)) {
        aim_free(ppe_header_get(&ppec->ppep, header));
    }
    if(ppe_header_set(&ppec->ppep, header, data) < 0) {
        return ucli_e_internal(uc, "ppe_set_header()");
    }
    return UCLI_STATUS_OK;
}
/*
 * "format" command: convert the packet between Ethernet II and 802.1Q.
 * Only those two targets are valid. When the conversion reallocates the
 * packet (rv == 1), the old buffer is freed here.
 */
static ucli_status_t
ppe_ucli_utm__format__(ucli_context_t* uc)
{
    ppe_header_t header;

    UCLI_COMMAND_INFO(uc,
                      "format", 1,
                      "Change the format of the current packet.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_header}", &header);

    if(header == PPE_HEADER_8021Q || header == PPE_HEADER_ETHERII) {
        int rv;
        rv = ppe_packet_format_set(&ppec->ppep, header);
        if(rv < 0) {
            return ucli_error(uc, "packet format conversion failed.");
        }
        if(rv == 1) {
            /* Conversion replaced the buffer: release the original. */
            aim_free(ppec->ppep._data);
        }
    }
    else {
        return ucli_error(uc, "%{ppe_header} is not a valid conversion.",
                          header);
    }
    return UCLI_STATUS_OK;
}
/*
 * "fdump" command: print every field present in the packet.
 * Fields of 32 bits or less print as hex + decimal; wider fields print as
 * a raw byte dump.
 */
static ucli_status_t
ppe_ucli_utm__fdump__(ucli_context_t* uc)
{
    ppe_field_t field;
    ppe_header_t format;

    UCLI_COMMAND_INFO(uc,
                      "fdump", 0,
                      "Dump all packet fields.");

    ppe_packet_format_get(&ppec->ppep, &format);
    ucli_printf(uc, "format=%{ppe_header}, size=%d\n",
                format, ppec->ppep.size);

    for(field = 0; field < PPE_FIELD_COUNT; field++) {
        const ppe_field_info_t* fi = ppe_field_info_get(field);
        if(fi->size_bits == 0) {
            /* Placeholder entry, not a real field. */
            continue;
        }
        if(ppe_field_exists(&ppec->ppep, field) == 0) {
            continue;
        }
        if(fi->size_bits <= 32) {
            uint32_t data;
            ppe_field_get(&ppec->ppep, field, &data);
            ucli_printf(uc, "%{ppe_field} = 0x%x (%d)\n",
                        field, data, data);
        }
        else {
            uint8_t* p = ppe_fieldp_get(&ppec->ppep, field);
            ucli_printf(uc, "%{ppe_field} = %{data}\n", field,
                        p, fi->size_bits/8);
        }
    }
    return UCLI_STATUS_OK;
}
/*
 * "set" command: write a value into a packet field.
 * Limited to fields of 32 bits or less; the field must exist in the packet.
 */
static ucli_status_t
ppe_ucli_utm__set__(ucli_context_t* uc)
{
    int data;
    ppe_field_info_t* fi;

    UCLI_COMMAND_INFO(uc,
                      "set", 2,
                      "Set a packet field.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_field_info}i", &fi, &data);
    PPE_FIELD32_OR_RETURN(uc, fi->size_bits);
    PPE_FIELD_EXISTS_OR_RETURN(uc, fi->field);
    PPE_FIELD_SET_OR_RETURN(uc, &ppec->ppep, fi->field, data);
    return UCLI_STATUS_OK;
}
/*
 * "get" command: read a packet field and print its value.
 * Fields of 32 bits or less print as hex + decimal; wider fields print as
 * a raw byte dump.
 *
 * Fixes over the previous revision (compare the wide-field branch with
 * ppe_ucli_utm__fdump__): the ucli_printf call was missing the field
 * argument, so the field name was formatted from the data pointer and the
 * byte dump consumed the wrong varargs; additionally the success path
 * returned the byte count from ppe_wide_field_get() instead of
 * UCLI_STATUS_OK, and the internal-error status was computed but discarded.
 */
static ucli_status_t
ppe_ucli_utm__get__(ucli_context_t* uc)
{
    ppe_field_info_t* fi;

    UCLI_COMMAND_INFO(uc,
                      "get", 1,
                      "Get a packet field.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_field_info}", &fi);
    PPE_FIELD_EXISTS_OR_RETURN(uc, fi->field);

    if(fi->size_bits <= 32) {
        uint32_t value;
        PPE_FIELD_GET_OR_RETURN(uc, &ppec->ppep, fi->field, &value);
        ucli_printf(uc, "%{ppe_field} = 0x%x (%d)\n",
                    fi->field, value, value);
        return UCLI_STATUS_OK;
    }
    else {
        int rv;
        int size = fi->size_bits/8;
        uint8_t* data = aim_zmalloc(size);
        if(ppe_wide_field_get(&ppec->ppep, fi->field, data) < 0) {
            rv = ucli_e_internal(uc, "ppe_wide_field_get(%{ppe_field})", fi->field);
        }
        else {
            ucli_printf(uc, "%{ppe_field} = %{data}\n", fi->field, data, size);
            rv = UCLI_STATUS_OK;
        }
        aim_free(data);
        return rv;
    }
}
/* "dump" command: hex-dump the raw packet bytes. */
static ucli_status_t
ppe_ucli_utm__dump__(ucli_context_t* uc)
{
    UCLI_COMMAND_INFO(uc,
                      "dump", 0,
                      "Dump raw packet contents.");
    ucli_printf(uc, "%{data}", ppec->ppep.data, ppec->ppep.size);
    return UCLI_STATUS_OK;
}
/*
 * "data" command: replace the packet contents with the given bytes and
 * re-parse. The previous packet buffer (if any) is freed; the new buffer
 * is owned by the packet from here on.
 */
static ucli_status_t
ppe_ucli_utm__data__(ucli_context_t* uc)
{
    uint8_t* data;
    int size;

    UCLI_COMMAND_INFO(uc,
                      "data", 1,
                      "Assign packet data.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{data}", &data, &size);
    if(ppec->ppep.data) {
        aim_free(ppec->ppep.data);
    }
    if(ppe_packet_init(&ppec->ppep, data, size) < 0) {
        return ucli_e_internal(uc, "ppe_packet_init()");
    }
    if(ppe_parse(&ppec->ppep) < 0) {
        return ucli_e_internal(uc, "ppe_parse()");
    }
    return UCLI_STATUS_OK;
}
/*
 * "missing" command: succeed only when the given field is ABSENT from the
 * packet. Presence is a user-visible error; any other result from
 * ppe_field_exists() is internal.
 */
static ucli_status_t
ppe_ucli_utm__missing__(ucli_context_t* uc)
{
    ppe_field_t f;

    UCLI_COMMAND_INFO(uc,
                      "missing", 1,
                      "Check that a field is missing in the packet.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_field}", &f);

    switch(ppe_field_exists(&ppec->ppep, f)) {
    case 0:
        return UCLI_STATUS_OK;
    case 1:
        return ucli_error(uc, "field %{ppe_field} exists in the packet.",
                          f);
    default:
        return ucli_e_internal(uc, "ppe_field_exists()");
    }
}
/* "checkf" command: verify the packet's current format matches the
 * expected header type. */
static ucli_status_t
ppe_ucli_utm__checkf__(ucli_context_t* uc)
{
    ppe_header_t cheader;   /* expected format (from the command line) */
    ppe_header_t pheader;   /* actual packet format */

    UCLI_COMMAND_INFO(uc,
                      "checkf", 1,
                      "Check the packet format.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_header}", &cheader);
    ppe_packet_format_get(&ppec->ppep, &pheader);
    if(pheader != cheader) {
        return ucli_error(uc, "packet format is currently %{ppe_header}.",
                          pheader);
    }
    return UCLI_STATUS_OK;
}
/*
 * "check" command: compare a (<= 32-bit) packet field against a value.
 * Usage: check <field> ==|!= <value>. The field must exist in the packet.
 */
static ucli_status_t
ppe_ucli_utm__check__(ucli_context_t* uc)
{
    ppe_field_info_t* fi;
    uint32_t cvalue;    /* expected value (from the command line) */
    uint32_t pvalue;    /* actual value read from the packet */
    aim_datatype_map_t* operation;
    aim_datatype_map_t operation_map[] = {
        { "==", 'e' }, { "!=", 'n' }, { NULL }
    };

    UCLI_COMMAND_INFO(uc,
                      "check", 3,
                      "Check packet field values and status.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_field_info}{map}i", &fi,
                            &operation, operation_map, "operation",
                            &cvalue);

    PPE_FIELD_EXISTS_OR_RETURN(uc, fi->field);
    PPE_FIELD32_OR_RETURN(uc, fi->size_bits);
    PPE_FIELD_GET_OR_RETURN(uc, &ppec->ppep, fi->field, &pvalue);

    switch(operation->i)
        {
        case 'e':
            {
                if(pvalue != cvalue) {
                    return ucli_error(uc,
                                      "field %{ppe_field} is 0x%x (%d) (should be 0x%x (%d)",
                                      fi->field, pvalue, pvalue, cvalue, cvalue);
                }
                return UCLI_STATUS_OK;
                break;
            }
        case 'n':
            {
                if(pvalue == cvalue) {
                    return ucli_error(uc,
                                      "field %{ppe_field} is 0x%x (%d)",
                                      fi->field, pvalue, pvalue);
                }
                return UCLI_STATUS_OK;
                break;
            }
        default:
            return ucli_e_internal(uc, "unknown operation.");
        }
}
/*
 * "checkw" command: compare a wide (> 32-bit) packet field against a byte
 * string. Usage: checkw <field> ==|!= <data>. The provided data must match
 * the field's exact width.
 */
static ucli_status_t
ppe_ucli_utm__checkw__(ucli_context_t* uc)
{
    ppe_field_info_t* fi;
    uint8_t* cvalue;        /* expected bytes (from the command line) */
    uint8_t pvalue[128];    /* actual field bytes (max wide-field size) */
    unsigned int csize;
    aim_datatype_map_t* operation;
    aim_datatype_map_t operation_map[] = {
        { "==", 'e' }, { "!=", 'n' }, { NULL }
    };

    UCLI_COMMAND_INFO(uc,
                      "checkw", 3,
                      "Check wide packet field values and status.");

    UCLI_ARGPARSE_OR_RETURN(uc, "{ppe_field_info}{map}{data}", &fi,
                            &operation, operation_map, "operation",
                            &cvalue, &csize);

    PPE_FIELD_EXISTS_OR_RETURN(uc, fi->field);
    if(fi->size_bits/8 != csize) {
        return ucli_error(uc,
                          "field %{ppe_field} is %d bytes wide.", fi->field,
                          fi->size_bits/8);
    }
    PPE_WIDE_FIELD_GET_OR_RETURN(uc, &ppec->ppep, fi->field, pvalue);

    switch(operation->i)
        {
        case 'e':
            {
                if(PPE_MEMCMP(pvalue, cvalue, csize)) {
                    return ucli_error(uc,
                                      "field %{ppe_field} is %{data} (should be %{data}",
                                      fi->field, pvalue, csize, cvalue, csize);
                }
                return UCLI_STATUS_OK;
                break;
            }
        case 'n':
            {
                if(!PPE_MEMCMP(pvalue, cvalue, csize)) {
                    return ucli_error(uc,
                                      "field %{ppe_field} is %{data}",
                                      fi->field, pvalue, csize);
                }
                return UCLI_STATUS_OK;
                break;
            }
        default:
            return ucli_e_internal(uc, "unknown operation.");
        }
}
/* "listf" command: list every known field with its size, byte offset and
 * bit shift (placeholder entries with size 0 are skipped). */
static ucli_status_t
ppe_ucli_utm__listf__(ucli_context_t* uc)
{
    ppe_field_info_t* fi;

    UCLI_COMMAND_INFO(uc,
                      "listf", 0,
                      "List known packet fields.");

    for(fi = ppe_field_info_table; fi->field != -1; fi++) {
        if(fi->size_bits != 0) {
            ucli_printf(uc, "%{ppe_field} sizebits=%d offset=%d shiftbits=%d\n",
                        fi->field, fi->size_bits, fi->offset_bytes, fi->shift_bits);
        }
    }
    return UCLI_STATUS_OK;
}
/*
 * "dfk" command: build a dynamic field key from four fields and verify it.
 *
 * Checks, in order: the generated key size matches the expected data;
 * the key's presence mask agrees with which fields actually exist in the
 * packet; the key bytes match the expected data byte-for-byte; and values
 * read back through the dfk accessors equal the values read directly from
 * the packet. On any mismatch the key is dumped and an error returned.
 * All exit paths release the verify buffer and the key.
 */
static ucli_status_t
ppe_ucli_utm__dfk__(ucli_context_t* uc)
{
    ppe_dfk_t dfk;
    ppe_field_t fields[4];
    uint8_t* verify_data;
    unsigned int verify_data_size;
    int rv = UCLI_STATUS_OK;
    unsigned int i;

    UCLI_COMMAND_INFO(uc,
                      "dfk", AIM_ARRAYSIZE(fields)+1,
                      "Generate and verify a dynamic field key.");

    UCLI_ARGPARSE_OR_RETURN(uc,
                            "{ppe_field}{ppe_field}{ppe_field}{ppe_field}{data}",
                            fields+0, fields+1, fields+2, fields+3,
                            &verify_data, &verify_data_size);

    ppe_dfk_init(&dfk, fields, AIM_ARRAYSIZE(fields));
    i = ppe_packet_dfk(&ppec->ppep, &dfk);
    /* 1. Key size must match the expected data. */
    if(i != verify_data_size) {
        rv = ucli_error(uc, "dfk size is %d, verify data size is %d",
                        i, verify_data_size);
        goto dfk_error;
    }
    /* 2. Mask bits must agree with field presence in the packet. */
    for(i = 0; i < AIM_ARRAYSIZE(fields); i++) {
        const ppe_field_info_t* fi = ppe_field_info_get(fields[i]);
        int exists = ppe_field_exists(&ppec->ppep, fi->field);
        if(exists && ( (dfk.mask & (1<<i)) == 0)) {
            /* Should be in the field key but isn't.*/
            rv = ucli_error(uc, "%{ppe_field} exists in packet but not in field key.",
                            fi->field);
            goto dfk_error;
        }
        if(!(exists) && (dfk.mask & (1<<i))) {
            /* Should not be in the field key but is. */
            rv = ucli_error(uc, "%{ppe_field} is in the key but not the packet.",
                            fi->field);
            goto dfk_error;
        }
    }
    /* 3. Key bytes must match the expected data exactly. */
    for(i = 0; i < verify_data_size; i++) {
        if(verify_data[i] != dfk.data[i]) {
            rv = ucli_error(uc, "key data mismatch at byte %d.\nkey=%{data}, verify=%{data}",
                            i, dfk.data, verify_data_size, verify_data, verify_data_size);
            goto dfk_error;
        }
    }
    /* 4. dfk accessors must return the same values as the packet itself. */
    for(i = 0; i < AIM_ARRAYSIZE(fields); i++) {
        if(dfk.mask & (1<<i)) {
            const ppe_field_info_t* fi = ppe_field_info_get(fields[i]);
            if(fi->size_bits <= 32) {
                uint32_t pdata;
                uint32_t kdata;
                ppe_field_get(&ppec->ppep, fi->field, &pdata);
                ppe_dfk_field_get(&dfk, fi->field, &kdata);
                if(pdata != kdata) {
                    rv = ucli_error(uc, "field_get mismatch: p=0x%x, k=0x%x");
                    goto dfk_error;
                }
            }
            else {
                unsigned int i; /* NOTE: intentionally shadows the outer i */
                uint8_t pdata[128];
                uint8_t kdata[128];
                ppe_wide_field_get(&ppec->ppep, fi->field, pdata);
                ppe_dfk_wide_field_get(&dfk, fi->field, kdata);
                for(i = 0; i < fi->size_bits/8; i++) {
                    if(pdata[i] != kdata[i]) {
                        rv = ucli_error(uc, "wide_field_get mismatch @ %d: p=0x%x k=0x%x",
                                        i, pdata[i], kdata[i]);
                        goto dfk_error;
                    }
                }
            }
        }
    }

    aim_free(verify_data);
    ppe_dfk_destroy(&dfk);
    return UCLI_STATUS_OK;

 dfk_error:
    ucli_printf(uc, "key: ");
    ppe_dfk_show(&dfk, &uc->pvs);
    ppe_dfk_destroy(&dfk);
    aim_free(verify_data);
    ucli_printf(uc, "\n");
    return rv;
}
/*
 * "rwall" command: exhaustive read/write test of every field accessor.
 *
 * Builds a synthetic packet whose every header buffer is filled with 0xFF,
 * then for each field: (1) reads it and checks all bits are 1 at the
 * field's declared width, (2) writes 0 and re-reads to check it clears.
 * Continuing through all fields also verifies that clearing one field does
 * not disturb fields not yet visited. All header buffers are freed at the
 * end; errors are accumulated (the last one wins) rather than aborting.
 */
static ucli_status_t
ppe_ucli_utm__rwall__(ucli_context_t* uc)
{
    ppe_packet_t ppep;
    ppe_header_t header;
    ppe_field_t f;
    int rv = UCLI_STATUS_OK;

    UCLI_COMMAND_INFO(uc,
                      "rwall", 0,
                      "Read and write all packet fields in all headers.");

    ppe_packet_init(&ppep, NULL, 0);

    /**
     * Allocate and assign a header pointer for every header type.
     * All bits will be initialized to 1.
     */
    for(header = 0; header < PPE_HEADER_COUNT; header++) {
        uint8_t* hp = aim_zmalloc(1000);
        PPE_MEMSET(hp, 0xFF, 1000);
        ppe_header_set(&ppep, header, hp);
    }

    /**
     * Check that every field reads back as all 1's, with the correct width
     */
    for(f = 0; f < PPE_FIELD_COUNT; f++) {
        const ppe_field_info_t* fi = ppe_field_info_get(f);
        if(fi->size_bits == 0) {
            continue;
        }
        if(fi->size_bits <= 32) {
            uint32_t v;
            ppe_field_get(&ppep, f, &v);
            if(fi->size_bits == 32) {
                /* (1U << 32) is undefined: the full-width case is special. */
                if(v != 0xFFFFFFFF) {
                    rv = ucli_error(uc, "first read: field %{ppe_field} is 0x%x, should be 0x%x",
                                    f, v, -1);
                }
            }
            else {
                if(v != ( (1U << fi->size_bits) - 1)) {
                    rv = ucli_error(uc, "first read: field %{ppe_field} is 0x%x, should be 0x%x (%d bits)",
                                    f, v, (1<<fi->size_bits) - 1, fi->size_bits);
                }
            }
            /** clear field and re-read */
            ppe_field_set(&ppep, f, 0);
            ppe_field_get(&ppep, f, &v);
            if(v != 0) {
                rv = ucli_error(uc, "second read: field %{ppe_field} is 0x%x when it should be 0.",
                                f, v);
            }
        }
        else {
            uint8_t vb[1024];
            int bytes = ppe_wide_field_get(&ppep, f, vb);
            int i;
            for(i = 0; i < bytes; i++) {
                if(vb[i] != 0xFF) {
                    rv = ucli_error(uc, "first read: field %{ppe_field}[%d] is 0x%.2x, should be 0x%.2x",
                                    f, i, vb[i], 0xFF);
                }
            }
            PPE_MEMSET(vb, 0, sizeof(vb));
            /** clear field and re-read */
            ppe_wide_field_set(&ppep, f, NULL);
            PPE_MEMSET(vb, 0xFF, sizeof(vb));
            ppe_wide_field_get(&ppep, f, vb);
            for(i = 0; i < bytes; i++) {
                if(vb[i] != 0) {
                    rv = ucli_error(uc, "second read: field %{ppe_field}[%d] is 0x%.2x, should be 0.",
                                    f, i, vb[i]);
                }
            }
        }
        /** continue reading other fields, making sure the field we just cleared
         * does not change the value of fields we have not yet visited. */
    }

    for(header = 0; header < PPE_HEADER_COUNT; header++) {
        aim_free(ppe_header_get(&ppep, header));
    }
    return rv;
}
/* <auto.ucli.handlers.start> */
/******************************************************************************
*
* These handler table(s) were autogenerated from the symbols in this
* source file.
*
*****************************************************************************/
static ucli_command_handler_f ppe_ucli_utm_handlers__[] =
{
ppe_ucli_utm__config__,
ppe_ucli_utm__update__,
ppe_ucli_utm__chm__,
ppe_ucli_utm__setheader__,
ppe_ucli_utm__format__,
ppe_ucli_utm__fdump__,
ppe_ucli_utm__set__,
ppe_ucli_utm__get__,
ppe_ucli_utm__dump__,
ppe_ucli_utm__data__,
ppe_ucli_utm__missing__,
ppe_ucli_utm__checkf__,
ppe_ucli_utm__check__,
ppe_ucli_utm__checkw__,
ppe_ucli_utm__listf__,
ppe_ucli_utm__dfk__,
ppe_ucli_utm__rwall__,
NULL
};
/******************************************************************************/
/* <auto.ucli.handlers.end> */
/* Module teardown: free the packet buffer (if any) and the control block
 * allocated in ppe_utm_create(). */
static void
ppe_utm_module_destroy__(ucli_module_t* module)
{
    ppe_utm_ctrl_t* ppec = (ppe_utm_ctrl_t*)(module->cookie);
    if(ppec->ppep.data) {
        aim_free(ppec->ppep.data);
    }
    aim_free(ppec);
}
/* Module descriptor for the PPE unit-test ucli module. */
static ucli_module_t
ppe_utm_module__ =
    {
        "ppe_utm",                   /* name */
        NULL,                        /* cookie - set in ppe_utm_create() */
        ppe_ucli_utm_handlers__,     /* command handler table */
        NULL,                        /* no init hook */
        ppe_utm_module_destroy__     /* destroy hook */
    };
/*
 * Create the PPE unit-test ucli module. Allocates the control block,
 * attaches it as the module cookie (released in the destroy hook) and
 * returns the singleton module descriptor.
 */
ucli_module_t*
ppe_utm_create(void)
{
    ppe_utm_ctrl_t* ppec = aim_zmalloc(sizeof(*ppec));
    ppe_utm_module__.cookie = ppec;
    ucli_module_init(&ppe_utm_module__);
    return &ppe_utm_module__;
}
#endif /* PPE_CONFIG_INCLUDE_UTM */
|
naga-project/webfx | webfx-kit/webfx-kit-javafxgraphics-emul/src/main/java/com/sun/javafx/cursor/CursorFrame.java | /*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.javafx.cursor;
/**
* Represents a frame of an animated cursor (ImageCursor created from an
* animated image), non animated cursors and standard cursors have only a single
* CursorFrame.
*/
public abstract class CursorFrame {
    /** Returns the cursor type this frame belongs to. */
    public abstract CursorType getCursorType();

/*
    private Class<?> firstPlatformCursorClass;
    private Object firstPlatformCursor;
    private Map<Class<?>, Object> otherPlatformCursors;

    public <T> T getPlatformCursor(final Class<T> platformCursorClass) {
        if (firstPlatformCursorClass == platformCursorClass) {
            return (T) firstPlatformCursor;
        }

        if (otherPlatformCursors != null) {
            return (T) otherPlatformCursors.get(platformCursorClass);
        }

        return null;
    }

    public <T> void setPlatforCursor(final Class<T> platformCursorClass,
                                     final T platformCursor) {
        if ((firstPlatformCursorClass == null)
                || (firstPlatformCursorClass == platformCursorClass)) {
            // most common case
            firstPlatformCursorClass = platformCursorClass;
            firstPlatformCursor = platformCursor;
            return;
        }

        if (otherPlatformCursors == null) {
            otherPlatformCursors = new HashMap<Class<?>, Object>();
        }

        otherPlatformCursors.put(platformCursorClass, platformCursor);
    }
*/
    // NOTE(review): the platform-cursor cache above is disabled in this
    // emulation layer (webfx) - retained verbatim from upstream JavaFX.
}
|
VirtueDev/synced_repo | src/java/com/threerings/msoy/mail/gwt/PresentPayload.java | //
// $Id$
package com.threerings.msoy.mail.gwt;
import com.threerings.orth.data.MediaDesc;
import com.threerings.msoy.data.all.HashMediaDesc;
import com.threerings.msoy.item.data.all.Item;
import com.threerings.msoy.item.data.all.ItemIdent;
/**
* Contains information on an item gifted from one player to another.
*/
public class PresentPayload extends MailPayload
{
    /** The identifier for the item being gifted. */
    public ItemIdent ident;

    /** The name of this item. */
    public String name;

    /** This item's preview thumbnail (may be null). */
    public HashMediaDesc thumbMedia;

    /**
     * An empty constructor for deserialization.
     */
    public PresentPayload ()
    {
    }

    /**
     * Create a new {@link PresentPayload} with the supplied configuration.
     */
    public PresentPayload (ItemIdent ident, String name, MediaDesc thumb)
    {
        this.ident = ident;
        this.name = name;

        // Because our JSON marshaller won't encode the actual type of a value, it relies on
        // data objects such as this to declare concrete types. Thus without enormous amounts
        // of trickery, our thumbMedia pretty much has to be a HashMediaType. Luckily, it will
        // always be a HashMediaDesc except in the one circumstance of an item that doesn't
        // have its own thumbnail, in which case it'll be a StaticMediaDesc. We handle this
        // case by persisting an explicit null instead, and substituting in a default
        // thumbnail in getThumb() below.
        this.thumbMedia = (thumb instanceof HashMediaDesc) ? (HashMediaDesc) thumb : null;
    }

    /** Returns the stored thumbnail, or the item type's default when none was persisted. */
    public MediaDesc getThumb ()
    {
        return (thumbMedia != null) ? thumbMedia : Item.getDefaultThumbnailMediaFor(ident.type);
    }

    @Override
    public int getType ()
    {
        return MailPayload.TYPE_PRESENT;
    }
}
|
hetjagani/Indeed-Clone | chat/model.js | <filename>chat/model.js
const { DataTypes } = require('sequelize');

// Sequelize models for the chat service.
// NOTE(review): assumes `global.DB` is an initialized Sequelize instance
// before this module is required — confirm against the service bootstrap.

// A conversation thread between an employer and a user.
const Chat = global.DB.define('chat', {
  // Primary key; a string id (presumably generated by the caller — verify).
  _id: {
    type: DataTypes.STRING,
    primaryKey: true,
    unique: true,
  },
  employerId: {
    type: DataTypes.STRING,
    allowNull: false,
  },
  userId: {
    type: DataTypes.STRING,
    allowNull: false,
  },
  subject: {
    type: DataTypes.TEXT,
    allowNull: false,
  },
});

// A single message inside a chat thread.
const Message = global.DB.define('message', {
  _id: {
    type: DataTypes.STRING,
    primaryKey: true,
    unique: true,
  },
  content: {
    type: DataTypes.TEXT,
    allowNull: false,
  },
  // Sender/recipient ids; direction of each message within the thread.
  to: {
    type: DataTypes.STRING,
    allowNull: false,
  },
  from: {
    type: DataTypes.STRING,
    allowNull: false,
  },
});

// One-to-many: each Message row carries a `chatId` foreign key to Chat._id.
Chat.hasMany(Message, {
  foreignKey: 'chatId',
  sourceKey: '_id',
});
Message.belongsTo(Chat, {
  foreignKey: 'chatId',
  targetKey: '_id',
});

// Creates (or, with `force`, drops and recreates) both tables.
// Resolves with the shared Sequelize instance; rejects if DB is not set up.
const runMigration = async (force) => {
  if (!global.DB) {
    return Promise.reject(new Error('please initialize DB'));
  }
  await Chat.sync({ force });
  await Message.sync({ force });
  return Promise.resolve(global.DB);
};

module.exports = { Chat, Message, runMigration };
|
huangchengmin97/fili | fili-core/src/main/java/com/yahoo/bard/webservice/async/jobs/stores/NoOpApiJobStore.java | // Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache license. Please see LICENSE.md file distributed with this work for terms.
package com.yahoo.bard.webservice.async.jobs.stores;
import com.yahoo.bard.webservice.async.jobs.jobrows.JobRow;
import rx.Observable;
import java.util.Set;
import javax.inject.Singleton;
/**
 * An ApiJobStore that doesn't actually do anything. Used as a default binding implementation for the
 * ApiJobStore, allowing users who have no interest in asynchronous requests to set up a Bard instance without
 * having to setup any asynchronous infrastructure.
 */
@Singleton
public class NoOpApiJobStore implements ApiJobStore {

    /** Ignores the id and returns an empty Observable (no metadata is ever stored). */
    @Override
    public Observable<JobRow> get(String id) {
        return Observable.empty();
    }

    /** Discards the row and returns an empty Observable (nothing is persisted). */
    @Override
    public Observable<JobRow> save(JobRow metadata) {
        return Observable.empty();
    }

    /** Returns an empty Observable (there are never any rows to return). */
    @Override
    public Observable<JobRow> getAllRows() {
        return Observable.empty();
    }

    /**
     * This method ignores the filters and returns an empty Observable.
     *
     * @param jobRowFilter A Set of JobRowFilters where each JobRowFilter contains the JobField to be
     * filtered on, the filter operation and the values to be compared to.
     *
     * @return An empty Observable.
     */
    @Override
    public Observable<JobRow> getFilteredRows(Set<JobRowFilter> jobRowFilter) {
        return Observable.empty();
    }
}
|
aguestuser/drivetrain | app/enums/account_status.rb | <reponame>aguestuser/drivetrain
# ClassyEnum describing the lifecycle/risk state of an account.
# Each subclass below is one enum member; ClassyEnum derives the member
# name from the class name (e.g. AccountStatus::NewAccount => :new_account).
class AccountStatus < ClassyEnum::Base
end

# Freshly created account, no history yet.
class AccountStatus::NewAccount < AccountStatus
end

# Account in good standing.
class AccountStatus::Stable < AccountStatus
end

# Escalating risk tiers.
class AccountStatus::AtRisk < AccountStatus
end

class AccountStatus::AtHighRisk < AccountStatus
end

# Account restricted to emergency use only.
class AccountStatus::EmergencyOnly < AccountStatus
end

# Account whose needs vary over time.
class AccountStatus::VariableNeeds < AccountStatus
end

# Account no longer in use.
class AccountStatus::Inactive < AccountStatus
end
|
tusharchoudhary0003/Custom-Football-Game | sources/com/mopub/mobileads/C11418k.java | <reponame>tusharchoudhary0003/Custom-Football-Game<gh_stars>1-10
package com.mopub.mobileads;
import com.mopub.common.logging.MoPubLog;
/* renamed from: com.mopub.mobileads.k */
/* compiled from: AppLovinBanner */
// Decompiled anonymous Runnable: reports a banner load failure back to the
// MoPub banner listener on whatever thread it is posted to.
class C11418k implements Runnable {

    /* renamed from: a */
    // Error code received from the ad SDK load attempt (passed in by the outer class).
    final /* synthetic */ int f35306a;

    /* renamed from: b */
    // Synthetic reference to the enclosing (decompiled) outer-class instance,
    // which holds the banner listener in f35310b.
    final /* synthetic */ C11421l f35307b;

    C11418k(C11421l this$1, int i) {
        this.f35307b = this$1;
        this.f35306a = i;
    }

    // Logs the failure, translates the raw code via AppLovinBanner.m37273b,
    // and notifies the listener; any listener exception is caught and logged
    // so the failure callback cannot crash the host.
    public void run() {
        StringBuilder sb = new StringBuilder();
        sb.append("Failed to load banner ad with code: ");
        sb.append(this.f35306a);
        MoPubLog.m37080d(sb.toString());
        try {
            if (this.f35307b.f35310b != null) {
                this.f35307b.f35310b.onBannerFailed(AppLovinBanner.m37273b(this.f35306a));
            }
        } catch (Throwable th) {
            MoPubLog.m37083e("Unable to notify listener of failure to receive ad.", th);
        }
    }
}
|
vasukas/rodent | utils/time_utils.hpp | #ifndef TIME_UTILS_HPP
#define TIME_UTILS_HPP
#include "vaslib/vas_cpp_utils.hpp"
#include "vaslib/vas_time.hpp"
/// Sine oscillation with the given period, mapped to [min, max].
/// By default evaluated at the time elapsed since application start.
float time_sine(TimeSpan full_period, float min = 0, float max = 1, TimeSpan time = TimeSpan::since_start());

struct SmoothSwitch
{
	/*
		Smooths switching of some option (usually in UI).
		Value gradually increases to 1 while option is enabled,
		and decreases to 0 when disabled.
		If option is disabled before min_sus, value is sustained at 1 for that time.
	*/
	TimeSpan tmo_in, tmo_out; ///< Ramp-up / ramp-down durations
	TimeSpan min_sus = {}; ///< Minimal time value is sustained at 1
	bool blink_mode = false; ///< If true, continues to increase value even if enabled is false

	SmoothSwitch(TimeSpan tmo = {}, std::optional<TimeSpan> tmo_out = {});
	void reset(TimeSpan tmo, std::optional<TimeSpan> tmo_out = {});

	/// Advances internal state; call once per frame with the elapsed time
	void step(TimeSpan passed, bool enabled);

	float value() const; ///< [0-1]
	bool is_zero() const {return get_state() == OUT_ZERO;}

	enum OutputState {OUT_ZERO, OUT_RISING, OUT_ONE, OUT_FADING};
	OutputState get_state() const;

private:
	// Internal stage machine: zero -> up -> enabled -> sustain -> down
	enum Stage {S_ZERO, S_UP, S_ENAB, S_SUST, S_DOWN};
	Stage stage = S_ZERO;
	TimeSpan tcou; // time counter within current stage
	void set_v(float v);
};
struct SmoothBlink
{
	/*
		Produce interpolation value for smooth blinking effect.
		Note: disabled if AppSettings::plr_status_blink is false
	*/
	TimeSpan full_period; ///< Duration of one full blink cycle

	SmoothBlink(TimeSpan full_period = TimeSpan::seconds(0.9))
	    : full_period(full_period) {}

	/// Returns [t_min, t_max], changing by sine
	float get_sine(bool enabled);

	/// Returns [0, 1], changing linearly
	float get_blink(bool enabled = false);

	/// Starts a blink cycle explicitly
	void trigger();
	void force_reset();

private:
	// Shared implementation for both getters: 'proc' maps phase to output,
	// 'def' is the value returned while blinking is inactive.
	float t_base(bool enabled, float def, callable_ref<float(float)> proc);
	TimeSpan time; // current position within the cycle
};
#endif // TIME_UTILS_HPP
|
jshier/Pixen | Pixel Art Core/Canvas/PXCanvas_Layers.h | //
// PXCanvas_Layers.h
// Pixen
//
// Created by <NAME> on 2005.07.31.
// Copyright 2005 Open Sword Group. All rights reserved.
//
#import <Cocoa/Cocoa.h>
#import "PXCanvas.h"
// Category gathering all layer-management operations on PXCanvas:
// access/activation, (re)ordering, insertion and removal, duplication,
// geometric transforms, and merging.
@interface PXCanvas(Layers)

// Access and activation
- (PXLayer *) activeLayer;
- (void)activateLayer:(PXLayer *) aLayer;
- (NSArray *) layers;
- (NSUInteger)indexOfLayer:(PXLayer *) aLayer;

// Wholesale replacement of the layer list; variants taking the previous
// list (and an undo action description) support undo registration.
- (void)setLayers:(NSArray *) newLayers;
- (void)setLayers:(NSArray*)layers fromLayers:(NSArray *)oldLayers;
- (void)setLayers:(NSArray *) newLayers fromLayers:(NSArray *)oldLayers withDescription:(NSString *)desc;
- (void)setLayersNoResize:(NSArray *) newLayers fromLayers:(NSArray *)oldLayers;

// Insertion/removal; 'suppressingNotification' variants skip change notifications
- (void)addLayer:(PXLayer *) aLayer suppressingNotification:(BOOL)suppress;
- (void)addLayer:(PXLayer *)aLayer;
- (void)insertLayer:(PXLayer *) aLayer atIndex:(NSUInteger)index suppressingNotification:(BOOL)suppress;
- (void)insertLayer:(PXLayer *) aLayer atIndex:(NSUInteger)index;
- (void)removeLayer: (PXLayer*) aLayer;
- (void)removeLayer: (PXLayer*) aLayer suppressingNotification:(BOOL)suppress;
- (void)removeLayerAtIndex:(NSUInteger)index;
- (void)removeLayerAtIndex:(NSUInteger)index suppressingNotification:(BOOL)suppress;

// Reordering and change notification
- (void)moveLayer:(PXLayer *)aLayer toIndex:(NSUInteger)anIndex;
- (void)layersChanged;

// Per-layer transforms and compositing
- (void)rotateLayer:(PXLayer *)layer byDegrees:(NSUInteger)degrees;
- (void)duplicateLayerAtIndex:(NSUInteger)index;
- (void)flipLayerHorizontally:aLayer;
- (void)flipLayerVertically:aLayer;
- (void)mergeDownLayer:aLayer;
- (void)moveLayer:(PXLayer *)aLayer byX:(NSUInteger)x y:(NSUInteger)y;
- (void)replaceLayer:(PXLayer *)oldLayer withLayer:(PXLayer *)newLayer actionName:(NSString *)act;
@end
|
MAAP-Project/mmt | spec/features/collections/delete_collection_spec.rb | describe 'Delete Collection', js: true do
before :all do
collection_with_granule_title = SecureRandom.uuid
@collection_with_granule_ingest_response, _collection_concept_response = publish_collection_draft(entry_title: collection_with_granule_title)
_granule_ingest_response, _granule_concept_response = ingest_granules(collection_entry_title: collection_with_granule_title, count: 1)
collection_with_granule_and_variable_title = SecureRandom.uuid
@collection_with_granule_and_variable_ingest_response, _collection_concept_response = publish_collection_draft(entry_title: collection_with_granule_and_variable_title)
_granule_ingest_response, _granule_concept_response = ingest_granules(collection_entry_title: collection_with_granule_and_variable_title, count: 2)
_variable_ingest_response, _variable_concept_response = publish_variable_draft(collection_concept_id: @collection_with_granule_and_variable_ingest_response['concept-id'])
@collection_with_variable_ingest_response, _collection_concept_response = publish_collection_draft
_variable_ingest_response, _variable_concept_response = publish_variable_draft(collection_concept_id: @collection_with_variable_ingest_response['concept-id'])
_variable_ingest_response, _variable_concept_response = publish_variable_draft(collection_concept_id: @collection_with_variable_ingest_response['concept-id'])
@ingested_collection_for_delete_messages, _concept_response = publish_collection_draft
wait_for_cmr # improves reliabliity of finding granules for collection
end
before do
login
end
context 'when viewing a published collection' do
context 'when the collection is in the current provider context' do
before do
ingest_response, _concept_response = publish_collection_draft
visit collection_path(ingest_response['concept-id'])
end
context 'when the collection has no granules' do
it 'displays a delete link' do
expect(page).to have_content('Delete Collection Record')
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
within '#delete-record-modal' do
click_on 'Yes'
end
end
it 'redirects to the revisions page and displays a confirmation message' do
expect(page).to have_content('Collection Deleted Successfully!')
expect(page).to have_content('Revision History')
expect(page).to have_selector('tbody > tr', count: 2)
within first('tbody > tr') do
expect(page).to have_content('Deleted')
end
expect(page).to have_content('Reinstate', count: 1)
end
end
end
end
context 'when switching provider context while deleting' do
before do
ingest_response_2, _concept_response = publish_collection_draft
login(provider: 'LARC', providers: %w[MMT_2 LARC])
visit collection_path(ingest_response_2['concept-id'])
end
context 'when the collection has no granules or variables' do
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
it 'displays the provider context switch modal' do
expect(page).to have_content('Deleting this collection requires you change your provider context to MMT_2. Would you like to change your provider context and perform this action?')
end
context 'when changing provider context' do
before do
find('.not-current-provider-link').click
end
it 'redirects to the revisions page and displays a confirmation message' do
expect(page).to have_content('Collection Deleted Successfully!')
expect(page).to have_content('Revision History')
expect(page).to have_selector('tbody > tr', count: 2)
within first('tbody > tr') do
expect(page).to have_content('Deleted')
end
expect(page).to have_content('Reinstate', count: 1)
end
end
end
end
end
end
context 'when viewing a published collection with records that will be cascade deleted' do
context 'when the collection is in the current provider context' do
context 'when viewing a published collection with granules' do
before do
visit collection_path(@collection_with_granule_ingest_response['concept-id'])
end
it 'displays the number of granules' do
expect(page).to have_content('Granules (1)')
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
it 'displays the correct warning text and has a confirmation text field' do
expect(page).to have_content('This collection has 1 associated granule.')
expect(page).to have_content('Deleting this collection will delete all associated granules.')
expect(page).to have_content('Please confirm that you wish to continue by entering "I want to delete this collection and the associated records" below.')
expect(page).to have_field('confirmation-text')
within '#cascade-delete-modal' do
expect(page).to have_link('Cancel', href: 'javascript:void(0);')
expect(page).to have_link('Close', href: 'javascript:void(0);')
end
end
context 'when the user provides the incorrect confirmation text' do
before do
fill_in 'confirmation-text', with: 'Incorrect confirmation text'
click_on 'Delete Collection'
end
it 'does not delete the record' do
expect(page).to have_content('Collection was not deleted because incorrect confirmation text was provided.')
end
end
end
end
context 'when viewing a published collection with granules and variables' do
before do
visit collection_path(@collection_with_granule_and_variable_ingest_response['concept-id'])
end
it 'displays the number of granules' do
expect(page).to have_content('Granules (2)')
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
it 'displays the correct warning text and has a confirmation text field' do
expect(page).to have_content('This collection has 2 associated granules and 1 associated variable.')
expect(page).to have_content('Deleting this collection will delete all associated granules and variables.')
expect(page).to have_content('Please confirm that you wish to continue by entering "I want to delete this collection and the associated records" below.')
expect(page).to have_field('confirmation-text')
within '#cascade-delete-modal' do
expect(page).to have_link('Cancel', href: 'javascript:void(0);')
expect(page).to have_link('Close', href: 'javascript:void(0);')
end
end
context 'when the user provides the incorrect confirmation text' do
before do
fill_in 'confirmation-text', with: 'Incorrect confirmation text'
click_on 'Delete Collection'
end
it 'does not delete the record' do
expect(page).to have_content('Collection was not deleted because incorrect confirmation text was provided.')
end
end
end
end
context 'when viewing a published collection with variables' do
before do
visit collection_path(@collection_with_variable_ingest_response['concept-id'])
end
it 'displays the number of granules' do
expect(page).to have_content('Granules (0)')
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
it 'displays the correct warning text and has a confirmation text field' do
expect(page).to have_content('This collection has 2 associated variables.')
expect(page).to have_content('Deleting this collection will delete all associated variables.')
expect(page).to have_content('Please confirm that you wish to continue by entering "I want to delete this collection and the associated records" below.')
expect(page).to have_field('confirmation-text')
within '#cascade-delete-modal' do
expect(page).to have_link('Cancel', href: 'javascript:void(0);')
expect(page).to have_link('Close', href: 'javascript:void(0);')
end
end
context 'when the user provides the incorrect confirmation text' do
before do
fill_in 'confirmation-text', with: 'Incorrect confirmation text'
click_on 'Delete Collection'
end
it 'does not delete the record' do
expect(page).to have_content('Collection was not deleted because incorrect confirmation text was provided.')
end
end
end
end
context 'when deleting the collection by providing the correct confirmation text' do
context 'when the collection has granules' do
before do
collection_title = SecureRandom.uuid
collection_ingest_response, _collection_concept_response = publish_collection_draft(entry_title: collection_title)
_granule_ingest_response, _granule_concept_response = ingest_granules(collection_entry_title: collection_title, count: 1)
wait_for_cmr # improves reliabliity of finding granules for collection
visit collection_path(collection_ingest_response['concept-id'])
end
context 'when the user provides the correct confirmation text' do
before do
click_on 'Delete Collection Record'
fill_in 'confirmation-text', with: 'I want to delete this collection and the associated records'
click_on 'Delete Collection'
end
it 'deletes the record' do
expect(page).to have_content('Collection Deleted Successfully!')
end
end
end
context 'when the collection has variables' do
before do
collection_ingest_response, _collection_concept_response = publish_collection_draft
_variable_ingest_response, _variable_concept_response = publish_variable_draft(collection_concept_id: collection_ingest_response['concept-id'])
visit collection_path(collection_ingest_response['concept-id'])
end
context 'when the user provides the correct confirmation text' do
before do
click_on 'Delete Collection Record'
fill_in 'confirmation-text', with: 'I want to delete this collection and the associated records'
click_on 'Delete Collection'
end
it 'deletes the record' do
expect(page).to have_content('Collection Deleted Successfully!')
end
end
end
end
end
context 'when switching provider context while deleting' do
before do
login(provider: 'LARC', providers: %w[MMT_2 LARC])
end
context 'when viewing a published collection with granules and variables' do
before do
visit collection_path(@collection_with_granule_and_variable_ingest_response['concept-id'])
end
it 'displays the number of granules' do
expect(page).to have_content('Granules (2)')
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
it 'displays the provider context switch modal' do
expect(page).to have_content('Deleting this collection requires you change your provider context to MMT_2. Would you like to change your provider context and perform this action?')
end
context 'when changing provider context' do
before do
find('.not-current-provider-link').click
end
it 'displays the correct warning text and has a confirmation text field' do
expect(page).to have_content('This collection has 2 associated granules and 1 associated variable.')
expect(page).to have_content('Deleting this collection will delete all associated granules and variables.')
expect(page).to have_content('Please confirm that you wish to continue by entering "I want to delete this collection and the associated records" below.')
expect(page).to have_field('confirmation-text')
within '#cascade-delete-modal' do
expect(page).to have_link('Cancel', href: collection_path(@collection_with_granule_and_variable_ingest_response['concept-id']))
expect(page).to have_link('Close', href: collection_path(@collection_with_granule_and_variable_ingest_response['concept-id']))
end
end
context 'when the user provides the incorrect confirmation text' do
before do
fill_in 'confirmation-text', with: 'Incorrect confirmation text'
click_on 'Delete Collection'
end
it 'does not delete the record' do
expect(page).to have_content('Collection was not deleted because incorrect confirmation text was provided.')
end
end
context 'when the user cancels from the delete confirmation modal' do
before do
click_on 'Cancel'
end
it 'refreshes the page' do
within '.eui-badge--sm.daac' do
expect(page).to have_content('MMT_2')
end
end
end
context 'when the user closes the delete confirmation modal' do
before do
within '#cascade-delete-modal' do
click_on 'Close'
end
end
it 'refreshes the page' do
within '.eui-badge--sm.daac' do
expect(page).to have_content('MMT_2')
end
end
end
end
end
end
context 'when deleting the collection by providing the correct confirmation text' do
before do
collection_title = SecureRandom.uuid
collection_ingest_response, _collection_concept_response = publish_collection_draft(entry_title: collection_title)
_granule_ingest_response, _granule_concept_response = ingest_granules(collection_entry_title: collection_title, count: 1)
wait_for_cmr # improves reliabliity of finding granules for collection
visit collection_path(collection_ingest_response['concept-id'])
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
end
context 'when changing provider context' do
before do
find('.not-current-provider-link').click
end
context 'when the user provides the correct confirmation text' do
before do
fill_in 'confirmation-text', with: 'I want to delete this collection and the associated records'
click_on 'Delete Collection'
end
it 'deletes the record' do
expect(page).to have_content('Collection Deleted Successfully!')
end
end
end
end
end
end
end
context 'when viewing a published collection with a non url encoded native id' do
before do
ingest_response, _concept_response = publish_collection_draft(native_id: 'not & url, encoded / native id')
visit collection_path(ingest_response['concept-id'])
end
context 'when clicking the delete link' do
before do
click_on 'Delete Collection Record'
within '#delete-record-modal' do
click_on 'Yes'
end
end
it 'displays a confirmation message' do
expect(page).to have_content('Collection Deleted Successfully!')
end
end
end
context 'when deleting the collection will fail' do
before do
visit collection_path(@ingested_collection_for_delete_messages['concept-id'])
end
context 'when CMR provides a message' do
before do
error_body = '{"errors": ["You do not have permission to perform that action."]}'
error_response = Cmr::Response.new(Faraday::Response.new(status: 401, body: JSON.parse(error_body), response_headers: {}))
allow_any_instance_of(Cmr::CmrClient).to receive(:delete_collection).and_return(error_response)
click_on 'Delete Collection Record'
within '#delete-record-modal' do
click_on 'Yes'
end
end
it 'displays the CMR error message' do
expect(page).to have_css('.eui-banner--danger', text: 'You do not have permission to perform that action.')
end
end
context 'when CMR does not provide a message' do
before do
error_body = '{"message": "useless message"}'
error_response = Cmr::Response.new(Faraday::Response.new(status: 401, body: JSON.parse(error_body), response_headers: {}))
allow_any_instance_of(Cmr::CmrClient).to receive(:delete_collection).and_return(error_response)
click_on 'Delete Collection Record'
within '#delete-record-modal' do
click_on 'Yes'
end
end
it 'displays the CMR error message' do
expect(page).to have_css('.eui-banner--danger', text: 'Collection was not deleted successfully')
end
end
end
end
|
shashup/https-github.com-apache-httpcomponents-client | httpclient/src/test/java/org/apache/http/impl/cookie/TestBasicClientCookie2.java | /*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.http.impl.cookie;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.junit.Assert;
import org.junit.Test;
/**
 * Unit tests for {@link BasicClientCookie2}.
 */
public class TestBasicClientCookie2 {

    /** Verifies the constructor stores name/value and rejects a null name. */
    @SuppressWarnings("unused")
    @Test
    public void testConstructor() {
        final BasicClientCookie2 cookie = new BasicClientCookie2("name", "value");
        Assert.assertEquals("name", cookie.getName());
        Assert.assertEquals("value", cookie.getValue());
        try {
            new BasicClientCookie2(null, null);
            Assert.fail("IllegalArgumentException should have been thrown");
        } catch (final IllegalArgumentException ex) {
            // expected
        }
    }

    /** Cloning must copy all fields, attributes and the ports array. */
    @Test
    public void testCloning() throws Exception {
        final BasicClientCookie2 orig = new BasicClientCookie2("name", "value");
        orig.setDomain("domain");
        orig.setPath("/");
        orig.setAttribute("attrib", "stuff");
        orig.setPorts(new int[] {80, 8080});
        final BasicClientCookie2 clone = (BasicClientCookie2) orig.clone();
        Assert.assertEquals(orig.getName(), clone.getName());
        Assert.assertEquals(orig.getValue(), clone.getValue());
        Assert.assertEquals(orig.getDomain(), clone.getDomain());
        Assert.assertEquals(orig.getPath(), clone.getPath());
        Assert.assertEquals(orig.getAttribute("attrib"), clone.getAttribute("attrib"));
        // assertArrayEquals replaces the previous element-by-element checks
        Assert.assertArrayEquals(orig.getPorts(), clone.getPorts());
    }

    /** Regression test for HTTPCLIENT-1031: cloning a cookie with no ports set. */
    @Test
    public void testHTTPCLIENT_1031() throws Exception {
        final BasicClientCookie2 orig = new BasicClientCookie2("name", "value");
        orig.setDomain("domain");
        orig.setPath("/");
        orig.setAttribute("attrib", "stuff");
        final BasicClientCookie2 clone = (BasicClientCookie2) orig.clone();
        Assert.assertEquals(orig.getName(), clone.getName());
        Assert.assertEquals(orig.getValue(), clone.getValue());
        Assert.assertEquals(orig.getDomain(), clone.getDomain());
        Assert.assertEquals(orig.getPath(), clone.getPath());
        Assert.assertEquals(orig.getAttribute("attrib"), clone.getAttribute("attrib"));
        Assert.assertNull(clone.getPorts());
    }

    /** A cookie serialized and deserialized via Java serialization must round-trip. */
    @Test
    public void testSerialization() throws Exception {
        final BasicClientCookie2 orig = new BasicClientCookie2("name", "value");
        orig.setDomain("domain");
        orig.setPath("/");
        orig.setAttribute("attrib", "stuff");
        orig.setPorts(new int[] {80, 8080});
        final ByteArrayOutputStream outbuffer = new ByteArrayOutputStream();
        final ObjectOutputStream outStream = new ObjectOutputStream(outbuffer);
        outStream.writeObject(orig);
        outStream.close();
        final byte[] raw = outbuffer.toByteArray();
        final ByteArrayInputStream inBuffer = new ByteArrayInputStream(raw);
        final ObjectInputStream inStream = new ObjectInputStream(inBuffer);
        final BasicClientCookie2 clone = (BasicClientCookie2) inStream.readObject();
        Assert.assertEquals(orig.getName(), clone.getName());
        Assert.assertEquals(orig.getValue(), clone.getValue());
        Assert.assertEquals(orig.getDomain(), clone.getDomain());
        Assert.assertEquals(orig.getPath(), clone.getPath());
        Assert.assertEquals(orig.getAttribute("attrib"), clone.getAttribute("attrib"));
        // Keep the explicit null guards for a clearer failure message, then
        // compare the arrays in one call instead of the original manual loop.
        Assert.assertNotNull(orig.getPorts());
        Assert.assertNotNull(clone.getPorts());
        Assert.assertArrayEquals(orig.getPorts(), clone.getPorts());
    }
}
|
ekhalilbsq/iaso | hat/assets/js/apps/Iaso/domains/projects/messages.js | <filename>hat/assets/js/apps/Iaso/domains/projects/messages.js
import { defineMessages } from 'react-intl';
// react-intl message descriptors for the Projects domain.
// Keys are looked up by components; `id` must match the translation catalogs.
const MESSAGES = defineMessages({
  projects: {
    defaultMessage: 'Projects',
    id: 'iaso.label.projects',
  },
  create: {
    defaultMessage: 'Create project',
    id: 'iaso.projects.create',
  },
  projectName: {
    defaultMessage: 'Project name',
    id: 'iaso.projects.name',
  },
  appId: {
    defaultMessage: 'App ID',
    id: 'iaso.projects.appId',
  },
  needsAuthentication: {
    defaultMessage: 'Requires Authentication',
    id: 'iaso.projects.needsAuthentication',
  },
  // Labels for the boolean authentication dropdown options.
  true: {
    defaultMessage: 'User needs authentication',
    id: 'iaso.projects.true',
  },
  false: {
    defaultMessage: "User doesn't need authentication",
    id: 'iaso.projects.false',
  },
  featureFlags: {
    defaultMessage: 'Feature flags',
    id: 'iaso.label.featureFlags',
  },
  updateProject: {
    defaultMessage: 'Update project',
    id: 'iaso.projects.update',
  },
  actions: {
    defaultMessage: 'Action(s)',
    id: 'iaso.label.actions',
  },
  edit: {
    defaultMessage: 'Edit',
    id: 'iaso.label.edit',
  },
  infos: {
    defaultMessage: 'Infos',
    id: 'iaso.orgUnits.infos',
  },
  cancel: {
    id: 'iaso.label.cancel',
    defaultMessage: 'Cancel',
  },
  save: {
    id: 'iaso.label.save',
    defaultMessage: 'Save',
  },
});

export default MESSAGES;
|
rightscale/right_link | spec/instance/system_configuration_spec.rb | <gh_stars>1-10
#--
# Copyright (c) 2012 RightScale, Inc, All Rights Reserved Worldwide.
#
# THIS PROGRAM IS CONFIDENTIAL AND PROPRIETARY TO RIGHTSCALE
# AND CONSTITUTES A VALUABLE TRADE SECRET. Any unauthorized use,
# reproduction, modification, or disclosure of this program is
# strictly prohibited. Any use of this program by an authorized
# licensee is strictly subject to the terms and conditions,
# including confidentiality obligations, set forth in the applicable
# License Agreement between RightScale, Inc. and
# the licensee.
#++
require File.expand_path(File.join(File.dirname(__FILE__), 'spec_helper'))
describe RightScale::SystemConfiguration do
  # NOTE(review): this example has no explicit expectation — it only passes
  # if .reload runs without raising. Consider asserting on the logged output.
  it 'logs execution of reload action' do
    RightScale::SystemConfiguration.reload
  end
end
hbeatty/incubator-trafficcontrol | traffic_ops/traffic_ops_golang/deliveryservice/consistenthash/consistenthash.go | package consistenthash
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"database/sql"
"encoding/json"
"errors"
"github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
// TRConsistentHashResult models the response object returned by Traffic
// Router's pattern-based consistent hash test endpoint.
type TRConsistentHashResult struct {
	ResultingPathToConsistentHash string `json:"resultingPathToConsistentHash"`
	ConsistentHashRegex           string `json:"consistentHashRegex"`
	RequestPath                   string `json:"requestPath"`
}

// TRConsistentHashRequest is the incoming request body: the regex and request
// path to test, plus the id of the CDN whose Traffic Router should run it.
type TRConsistentHashRequest struct {
	ConsistentHashRegex string `json:"regex"`
	RequestPath         string `json:"requestPath"`
	CdnID               int64  `json:"cdnId"`
}
// Post is the handler for testing Traffic Router's Pattern-Based Consistent
// Hashing feature.
// POST /api/1.5/consistenthash
func Post(w http.ResponseWriter, r *http.Request) {
	inf, userErr, sysErr, errCode := api.NewInfo(r, nil, nil)
	if userErr != nil || sysErr != nil {
		api.HandleErr(w, r, inf.Tx.Tx, errCode, userErr, sysErr)
		return
	}
	defer inf.Close()

	req := TRConsistentHashRequest{}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		api.HandleErr(w, r, inf.Tx.Tx, http.StatusBadRequest, errors.New("malformed JSON: "+err.Error()), nil)
		return
	}

	responseFromTR, err := getPatternBasedConsistentHash(inf.Tx.Tx, req.ConsistentHashRegex, req.RequestPath, req.CdnID)
	if err != nil {
		api.HandleErr(w, r, inf.Tx.Tx, http.StatusInternalServerError, nil, errors.New("getting pattern based consistent hash from Traffic Router: "+err.Error()))
		return
	}

	consistentHashResult := TRConsistentHashResult{}
	// Fix: the unmarshal error was previously ignored, so a malformed Traffic
	// Router response would be silently returned as a zero-valued result.
	if err := json.Unmarshal(responseFromTR, &consistentHashResult); err != nil {
		api.HandleErr(w, r, inf.Tx.Tx, http.StatusInternalServerError, nil, errors.New("unmarshalling Traffic Router response: "+err.Error()))
		return
	}
	api.WriteResp(w, r, consistentHashResult)
}
// RouterRequestTimeout bounds the HTTP round trip to the chosen Traffic Router.
const RouterRequestTimeout = time.Second * 10

// getPatternBasedConsistentHash queries the database for a random ONLINE
// Traffic Router on the CDN specified by cdnId, passes regex and requestPath
// to that Traffic Router via its API, and returns the raw response body.
func getPatternBasedConsistentHash(tx *sql.Tx, regex string, requestPath string, cdnId int64) ([]byte, error) {
	// Pick one random ONLINE router (server type 'CCR') on the CDN, along
	// with its 'api.port' profile parameter.
	q := `
SELECT concat(server.host_name, '.', server.domain_name) AS fqdn,
parameter.value AS apiport
FROM (((server
JOIN profile ON ((profile.id = server.profile)))
JOIN profile_parameter ON ((profile_parameter.profile = profile.id)))
JOIN parameter ON ((parameter.id = profile_parameter.parameter)))
JOIN status ON ((server.status = status.id))
WHERE ((server.type = (select id from type where name = 'CCR')) AND
(parameter.name = 'api.port'::text) AND
(status.name = 'ONLINE') AND
(server.cdn_id = $1))
ORDER BY RANDOM()
LIMIT 1
`
	trafficRouter := ""
	apiPort := ""
	if err := tx.QueryRow(q, cdnId).Scan(&trafficRouter, &apiPort); err != nil {
		return nil, errors.New("querying for eligible Traffic Router to test pattern based consistent hashing: " + err.Error())
	}
	if trafficRouter == "" {
		return nil, errors.New("no eligible Traffic Router found for pattern based consistent hashing with cdn Id: " + strconv.FormatInt(cdnId, 10))
	}
	if apiPort == "" {
		return nil, errors.New("no parameter 'api.port' found for pattern based consistent hashing with cdn Id: " + strconv.FormatInt(cdnId, 10))
	}

	trafficRouterAPI := "http://" + trafficRouter + ":" + apiPort + "/crs/consistenthash/patternbased/regex?regex=" + url.QueryEscape(regex) + "&requestPath=" + url.QueryEscape(requestPath)
	trClient := &http.Client{
		Timeout: RouterRequestTimeout,
	}
	r, err := trClient.Get(trafficRouterAPI)
	if err != nil {
		return nil, errors.New("Error creating request to Traffic Router: " + err.Error())
	}
	defer r.Body.Close()
	// Body is read before the status check so the connection can be reused
	// and any error payload is consumed.
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, errors.New("failed to read body: " + err.Error())
	}
	if r.StatusCode != 200 {
		return nil, errors.New("Traffic Router returned " + strconv.Itoa(r.StatusCode))
	}
	return body, nil
}
|
lonnibesancon/utymap | core/src/builders/misc/BarrierBuilder.hpp | #ifndef BUILDERS_MISC_BARRIER_BUILDER_HPP_DEFINED
#define BUILDERS_MISC_BARRIER_BUILDER_HPP_DEFINED
#include "builders/ElementBuilder.hpp"
namespace utymap { namespace builders {
/// Provides the way to build barrier.
class BarrierBuilder final : public ElementBuilder
{
public:
    explicit BarrierBuilder(const utymap::builders::BuilderContext& context) :
        ElementBuilder(context)
    {
    }

    // Nodes, areas and ways are processed (definitions live in the .cpp);
    // relations are ignored and no post-processing happens on completion.
    void visitNode(const utymap::entities::Node&) override;
    void visitArea(const utymap::entities::Area&) override;
    void visitWay(const utymap::entities::Way& way) override;
    void visitRelation(const utymap::entities::Relation&) override { }

    void complete() override { }

private:
    /// Template helper for building a barrier from a concrete element type.
    template <typename T>
    void buildBarrier(const T& element);
};
}}
#endif // BUILDERS_MISC_BARRIER_BUILDER_HPP_DEFINED
|
mpayne2/FHIR | fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/type/NewLocationParmBehaviorUtil.java | /*
* (C) Copyright IBM Corp. 2021
*
* SPDX-License-Identifier: Apache-2.0
*/
package com.ibm.fhir.persistence.jdbc.util.type;
import static com.ibm.fhir.database.utils.query.expression.ExpressionSupport.bind;
import static com.ibm.fhir.database.utils.query.expression.ExpressionSupport.col;
import static com.ibm.fhir.persistence.jdbc.JDBCConstants.LATITUDE_VALUE;
import static com.ibm.fhir.persistence.jdbc.JDBCConstants.LONGITUDE_VALUE;
import java.util.List;
import java.util.stream.Collectors;
import com.ibm.fhir.database.utils.query.WhereFragment;
import com.ibm.fhir.search.exception.FHIRSearchException;
import com.ibm.fhir.search.location.NearLocationHandler;
import com.ibm.fhir.search.location.bounding.Bounding;
import com.ibm.fhir.search.location.bounding.BoundingBox;
import com.ibm.fhir.search.location.bounding.BoundingMissing;
import com.ibm.fhir.search.location.bounding.BoundingRadius;
import com.ibm.fhir.search.location.bounding.BoundingType;
/**
* Location Behavior Util generates SQL and loads the variables into bind
* variables.
*/
public class NewLocationParmBehaviorUtil {
// the radius of the earth in km (using a spherical approximation)
private static int AVERAGE_RADIUS_OF_EARTH = 6371;
/**
* build location search query based on the bounding areas.
*
* @param whereClauseSegment
* @param bindVariables
* @param boundingAreas
*/
public void buildLocationSearchQuery(WhereFragment whereClauseSegment,
List<Bounding> boundingAreas, String paramTableAlias) {
int instance = 0;
boolean first = true;
int processed = 0;
// Strips out the MISSING bounds.
for (Bounding area : boundingAreas.stream()
.filter(area -> !BoundingType.MISSING.equals(area.getType())).collect(Collectors.toList())) {
if (instance == area.instance()) {
processed++;
if (instance > 0) {
whereClauseSegment.rightParen().and().leftParen();
} else {
whereClauseSegment.leftParen();
}
instance++;
first = true;
}
if (!first) {
// If this is not the first, we want to make this a co-joined set of conditions.
whereClauseSegment.or();
} else {
first = false;
}
// Switch between the various types of bounding and queries.
switch (area.getType()) {
case RADIUS:
buildQueryForBoundingRadius(whereClauseSegment, paramTableAlias, (BoundingRadius)area);
break;
case MISSING:
buildQueryForBoundingMissing(whereClauseSegment, (BoundingMissing) area);
break;
case BOX:
default:
buildQueryForBoundingBox(whereClauseSegment, (BoundingBox) area, paramTableAlias);
break;
}
}
if (processed > 0) {
whereClauseSegment.rightParen();
}
}
/**
* Process missing area. No longer performed here, but in another missing
* clause.
* @param whereClauseSegment
* @param missingArea
*/
@Deprecated
public void buildQueryForBoundingMissing(WhereFragment whereClauseSegment,
BoundingMissing missingArea) {
// No Operation - the main logic is contained in the process Missing parameter
}
/**
* build query for bounding box.
*
* @param whereClauseSegment
* @param boundingBox
* @param paramTableAlias
*/
public void buildQueryForBoundingBox(WhereFragment whereClauseSegment,
BoundingBox boundingBox, String paramTableAlias) {
// Now build the piece that compares the BoundingBox longitude and latitude values
// to the persisted longitude and latitude parameters.
whereClauseSegment.leftParen()
// LAT <= ? --- LAT >= MIN_LAT
.col(paramTableAlias, LATITUDE_VALUE).gte().bind(boundingBox.getMinLatitude())
// LAT <= ? --- LAT <= MAX_LAT
.and()
.col(paramTableAlias, LATITUDE_VALUE).lte().bind(boundingBox.getMaxLatitude())
// LON <= ? --- LON >= MIN_LON
.and()
.col(paramTableAlias, LONGITUDE_VALUE).gte().bind(boundingBox.getMinLongitude())
// LON <= ? --- LON <= MAX_LON
.and()
.col(paramTableAlias, LONGITUDE_VALUE).lte().bind(boundingBox.getMaxLongitude())
.rightParen();
}
/**
* build query for bounding radius.
*
* @param whereClauseSegment
* @param paramAlias
* @param boundingBox
*/
public void buildQueryForBoundingRadius(WhereFragment whereClauseSegment, String paramAlias,
BoundingRadius boundingRadius) {
// First, emit our bounding box query to scope the points down to just those in the general vicinity
BoundingBox boundingBox;
try {
boundingBox = new NearLocationHandler()
.createBoundingBox(boundingRadius.getLatitude(), boundingRadius.getLongitude(), boundingRadius.getRadius(), "km");
} catch (FHIRSearchException e) {
// createBoundingBox throws if the units are invalid, but we've already normalized them to km by this point
throw new IllegalStateException("Unexpected exception while computing the bounding box for radius search", e);
}
buildQueryForBoundingBox(whereClauseSegment, boundingBox, paramAlias);
whereClauseSegment.and();
// Then further refine it by having the db calculate the distance bewtween the locations
// This section of code is based on code from http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
// ACOS(SIN(boundingRadiusLatitude) * SIN(LATITUDE_VALUE) + COS(boundingRadiusLatitude) * COS(LATITUDE_VALUE) * COS(boundingRadiusLongitude - LONGITUDE_VALUE) * R
double queryLatitudeInRadians = Math.toRadians(boundingRadius.getLatitude());
double queryLongitudeInRadians = Math.toRadians(boundingRadius.getLongitude());
// First, build the fragments for the longitudinal difference (in radians) and the radian of the latitude in the db
WhereFragment longitudeDiff = new WhereFragment().bind(queryLongitudeInRadians).sub().radians(col(paramAlias, LONGITUDE_VALUE));
WhereFragment radianLatitude = new WhereFragment().radians(col(paramAlias, LATITUDE_VALUE));
// Then, use those to build the expression that gets passed to ACOS
WhereFragment arcRadius = new WhereFragment()
.sin(bind(queryLatitudeInRadians))
.mult()
.sin(radianLatitude.getExpression())
.add()
.cos(bind(queryLatitudeInRadians))
.mult()
.cos(radianLatitude.getExpression())
.mult()
.cos(longitudeDiff.getExpression());
// Finally, put it all together
whereClauseSegment.leftParen()
.acos(arcRadius.getExpression()).mult().literal(AVERAGE_RADIUS_OF_EARTH)
.lte()
.bind(boundingRadius.getRadius())
.rightParen();
}
} |
djdrisco/cfn_nag | spec/cfn_nag_integration/cfn_nag_rds_instance_spec.rb | require 'spec_helper'
require 'cfn-nag/cfn_nag_config'
require 'cfn-nag/cfn_nag'
describe CfnNag do
  before(:all) do
    # Quiet logging and share a single CfnNag instance across all examples.
    CfnNagLogging.configure_logging(debug: false)
    @cfn_nag = CfnNag.new(config: CfnNagConfig.new)
  end

  context 'one RDS instance with public access' do
    it 'flags a violation' do
      template_name = 'json/rds_instance/rds_instance_publicly_accessible.json'
      # Expect exactly one F22 failing violation on the PublicDB resource.
      expected_aggregate_results = [
        {
          filename: test_template_path(template_name),
          file_results: {
            failure_count: 1,
            violations: [
              Violation.new(id: 'F22',
                            type: Violation::FAILING_VIOLATION,
                            message:
                              'RDS instance should not be publicly accessible',
                            logical_resource_ids: %w[PublicDB],
                            line_numbers: [4])
            ]
          }
        }
      ]

      actual_aggregate_results = @cfn_nag.audit_aggregate_across_files input_path: test_template_path(template_name)
      expect(actual_aggregate_results).to eq expected_aggregate_results
    end
  end

  context 'one RDS instance with default credentials and no-echo is true' do
    it 'flags a violation' do
      template_name = 'json/rds_instance/rds_instance_no_echo_with_default_password.json'
      # NoEcho with a Default still leaks the password: expect F23 plus F22.
      expected_aggregate_results = [
        {
          filename: test_template_path(template_name),
          file_results: {
            failure_count: 2,
            violations: [
              Violation.new(
                id: 'F23', type: Violation::FAILING_VIOLATION,
                message: 'RDS instance master user password must not be a plaintext string or a Ref to a NoEcho Parameter with a Default value.',
                logical_resource_ids: %w[BadDb2],
                line_numbers: [11]
              ),
              Violation.new(id: 'F22',
                            type: Violation::FAILING_VIOLATION,
                            message:
                              'RDS instance should not be publicly accessible',
                            logical_resource_ids: %w[BadDb2],
                            line_numbers: [11])
            ]
          }
        }
      ]

      actual_aggregate_results = @cfn_nag.audit_aggregate_across_files input_path: test_template_path(template_name)
      expect(actual_aggregate_results).to eq expected_aggregate_results
    end
  end

  context 'RDS instances with non-encrypted credentials' do
    it 'flags a violation' do
      template_name =
        'json/rds_instance/rds_instances_with_public_credentials.json'
      # Both instances trip F23 (password) and F24 (username): four failures.
      expected_aggregate_results = [
        {
          filename: test_template_path(template_name),
          file_results: {
            failure_count: 4,
            violations: [
              Violation.new(
                id: 'F23', type: Violation::FAILING_VIOLATION,
                message: 'RDS instance master user password must not be a plaintext string or a Ref to a NoEcho Parameter with a Default value.',
                logical_resource_ids: %w[BadDb1 BadDb2],
                line_numbers: [14, 28]
              ),
              Violation.new(
                id: 'F24', type: Violation::FAILING_VIOLATION,
                message: 'RDS instance master username must not be a plaintext string or a Ref to a NoEcho Parameter with a Default value.',
                logical_resource_ids: %w[BadDb1 BadDb2],
                line_numbers: [14, 28]
              )
            ]
          }
        }
      ]

      actual_aggregate_results = @cfn_nag.audit_aggregate_across_files input_path: test_template_path(template_name)
      expect(actual_aggregate_results).to eq expected_aggregate_results
    end
  end
end
|
xdeltax/xdx-rippms | client/src/ui/components/effects/TextfieldMultiSelect.js | <gh_stars>1-10
import React from 'react';
import { withStyles } from '@material-ui/core/styles';
import Input from '@material-ui/core/Input';
import InputLabel from '@material-ui/core/InputLabel';
import ListItemText from '@material-ui/core/ListItemText';
import Select from '@material-ui/core/Select';
import MenuItem from '@material-ui/core/MenuItem';
import FormControl from '@material-ui/core/FormControl';
import Checkbox from '@material-ui/core/Checkbox';
import Chip from '@material-ui/core/Chip';
// JSS style rules passed to withStyles; `items`/`item` style the plain-text
// rendering of selected values, `chips`/`chip` the Chip-based rendering.
const styles = theme => ({
  root: {
    display: 'flex',
    flexWrap: 'wrap',
  },
  formControl: {
    //margin: theme.spacing.unit,
    minWidth: 50,
    //maxWidth: 300,
  },
  items: {
    display: 'flex',
    flexWrap: 'wrap',
  },
  item: {
    //margin: theme.spacing.unit / 4,
    margin: 2, //theme.spacing.unit,
    height: 15,
  },
  chips: {
    display: 'flex',
    flexWrap: 'wrap',
  },
  chip: {
    //margin: theme.spacing.unit / 4,
    margin: 2, //theme.spacing.unit,
    height: 15,
  },
  noLabel: {
    //marginTop: theme.spacing.unit * 3,
  },
});
// Menu sizing constants; used below to cap the dropdown's maxHeight at
// 8.5 rows plus the top padding.
const ITEM_HEIGHT = 48;
const ITEM_PADDING_TOP = 8;
export default withStyles(styles, { withTheme: true }) (class extends React.Component {
state = {
//selectedItems: [],
}
propTypes: {
classes: propTypes.object.isRequired,
}
handleChange = (event, child) => {
//this.setState({ selectedItems: event.target.value });
//global.ddd("change event", event, child)
if (this.props.hasOwnProperty("onUpdate")) this.props.onUpdate(event.target.value /*this.state.selectedItems*/); // callback
};
handleClose = (event, child) => {
//this.setState({ selectedItems: event.target.value });
//global.ddd("change event", event, child)
//if (this.props.hasOwnProperty("onUpdate")) this.props.onUpdate(event.target.value /*this.state.selectedItems*/); // callback
if (this.props.hasOwnProperty("onClose")) this.props.onClose(event.target.value /*this.state.selectedItems*/); // callback
};
handleChangeMultiple = event => {
const { options } = event.target;
const value = [];
for (let i = 0, l = options.length; i < l; i += 1) {
if (options[i].selected) {
value.push(options[i].value);
}
}
this.setState({
name: value,
});
};
renderValue = (items) => {
//return items.join(', '); // todo: sort
// {items.join(', ')}
const list = this.props.multiple ? items : [items];
return (
<div className={this.props.classes.items}>
{list.map(value => (
<div className={this.props.classes.chip}
key={value}
label={value}
color={this.props.color ? this.props.color : "primary"}
variant="outlined"
>{value}{this.props.multiple ? "," : ""}</div>
))}
</div>
)
}
renderChip = (items) => {
const list = this.props.multiple ? items : [items];
return (
<div className={this.props.classes.chips}>
{list.map(value => (
<Chip className={this.props.classes.chip}
key={value}
label={value}
color={this.props.color ? this.props.color : "primary"}
variant="outlined"
/>
))}
</div>
)
}
render() {
const { classes,
value,
defaultValue,
style,
IconComponent,
menuRotate,
menuBackground,
menuWidth,
isErrorCondition, noEmptyMenuEntry, showMenuTitleBar, menuTitleBarValue, onUpdate, onClose, id, chips, selectionList, multiple, label, helperText, autoWidth, ...otherprops} = this.props;
let slist = ((multiple) ? selectionList : [""].concat(selectionList)) || ["none"];
return (
<FormControl {...otherprops} className={classes.formControl} style={style} error={(isErrorCondition) ? isErrorCondition(value) : false}>
<InputLabel shrink htmlFor={id || "select-multiple"}>{label}</InputLabel>
<Select
multiple={multiple ? true : false}
autoWidth
defaultValue={defaultValue}
value={value}
onChange={this.handleChange}
onClose={this.handleClose}
input={<Input id={id || label} />}
inputProps={{ }}
renderValue={chips ? this.renderChip : this.renderValue}
MenuProps={{
PaperProps: {
style: {
maxHeight: ITEM_HEIGHT * 8.5 + ITEM_PADDING_TOP,
width: menuWidth || '70%',
transform: `rotate(${menuRotate})`,
background: menuBackground,
boxShadow: "15px 20px 15px gray",
},
},
}}
IconComponent={IconComponent}
>
{showMenuTitleBar && (<MenuItem value="none" disabled>{(menuTitleBarValue) ? showMenuTitleBar : label}</MenuItem>)}
{slist.map(item => (item === "") ? (noEmptyMenuEntry) ? null
: ( <MenuItem key={"emptyitm"+Math.random()} value=""><em> </em></MenuItem> )
: ( // display emtpy element to select on "not multiple"
<MenuItem key={"menuitm"+item+Math.random()} value={item}>
{(multiple && <Checkbox key={"chkbox"+Math.random()} checked={/*this.state.selectedItems.*/value.indexOf(item) > -1} />)}
<ListItemText key={"listitm"+Math.random()} primary={item} />
</MenuItem>
))}
</Select>
</FormControl>
);
}
});
|
landeddeveloper/idpt | frontend/src/modules/video/importer/videoImporterActions.js | <filename>frontend/src/modules/video/importer/videoImporterActions.js
import importerActions from 'modules/shared/importer/importerActions'
import selectors from 'modules/video/importer/videoImporterSelectors'
import VideoService from 'modules/video/videoService'
import fields from 'modules/video/importer/videoImporterFields'
import { i18n } from 'i18n'
// Importer actions for the Video entity, produced by the shared importer
// action factory: action-type prefix, selectors, the service import call,
// the importer field model, and the localized file name.
export default importerActions(
  'VIDEO_IMPORTER',
  selectors,
  VideoService.import,
  fields,
  i18n('entities.video.importer.fileName')
)
|
xbwcgnt1997/kylin | datasource-sdk/src/main/java/org/apache/kylin/sdk/datasource/framework/SourceConnectorFactory.java | <reponame>xbwcgnt1997/kylin
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.sdk.datasource.framework;
import org.apache.kylin.common.KylinConfig;
import org.apache.kylin.sdk.datasource.adaptor.AdaptorConfig;
import org.apache.kylin.sdk.datasource.adaptor.DefaultAdaptor;
import org.apache.kylin.sdk.datasource.adaptor.MysqlAdaptor;
public class SourceConnectorFactory {

    /**
     * Builds a {@link JdbcConnector} from the JDBC source settings held in
     * the given Kylin configuration. The adaptor class can be configured
     * explicitly; otherwise a default is derived from the configured dialect.
     *
     * @throws RuntimeException if the adaptor cannot be instantiated
     */
    public static JdbcConnector getJdbcConnector(KylinConfig config) {
        final String url = config.getJdbcSourceConnectionUrl();
        final String driver = config.getJdbcSourceDriver();
        final String user = config.getJdbcSourceUser();
        final String password = config.getJdbcSourcePass();
        String adaptorClassName = config.getJdbcSourceAdaptor();

        final AdaptorConfig adaptorConf = new AdaptorConfig(url, driver, user, password);
        adaptorConf.poolMaxIdle = config.getPoolMaxIdle();
        adaptorConf.poolMinIdle = config.getPoolMinIdle();
        adaptorConf.poolMaxTotal = config.getPoolMaxTotal();
        adaptorConf.datasourceId = config.getJdbcSourceDialect();

        if (adaptorClassName == null) {
            adaptorClassName = decideAdaptorClassName(adaptorConf.datasourceId);
        }

        try {
            return new JdbcConnector(AdaptorFactory.createJdbcAdaptor(adaptorClassName, adaptorConf));
        } catch (Exception e) {
            throw new RuntimeException("Failed to get JdbcConnector from env.", e);
        }
    }

    /**
     * Maps a datasource dialect id to the default adaptor class name.
     * Like the original switch-on-string, this dereferences {@code dataSourceId},
     * so a {@code null} dialect still raises a {@link NullPointerException}.
     */
    private static String decideAdaptorClassName(String dataSourceId) {
        return dataSourceId.equals("mysql") ? MysqlAdaptor.class.getName() : DefaultAdaptor.class.getName();
    }
}
|
guohaoqiang/leetcode | Str/539.cc | <gh_stars>0
class Solution {
public:
    // LeetCode 539: smallest difference in minutes between any two "HH:MM"
    // time points, treating the clock as circular (wrap past midnight).
    int findMinDifference(vector<string>& timePoints) {
        // Convert each "HH:MM" to minutes since midnight.
        vector<int> minutes;
        minutes.reserve(timePoints.size() + 1);
        for (const auto& tp : timePoints)
            minutes.push_back(stoi(tp.substr(0, 2)) * 60 + stoi(tp.substr(3)));

        sort(minutes.begin(), minutes.end());
        // Append the earliest time shifted by a full day so the wrap-around
        // gap is covered by the adjacent-difference scan below.
        minutes.push_back(minutes.front() + 24 * 60);

        int best = 24 * 60;
        for (size_t i = 1; i < minutes.size(); ++i)
            best = min(best, minutes[i] - minutes[i - 1]);
        return best;
    }
};
|
trydent-io/quercus | io.artoo.parry/src/main/java/io/artoo/parry/stream/impl/InboundBuffer.java | <filename>io.artoo.parry/src/main/java/io/artoo/parry/stream/impl/InboundBuffer.java
/*
* Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
* which is available at https://www.apache.org/licenses/LICENSE-2.0.
*
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
*/
package io.artoo.parry.stream.impl;
import io.netty.util.concurrent.FastThreadLocalThread;
import io.vertx.core.Context;
import io.vertx.core.Handler;
import io.vertx.core.impl.ContextInternal;
import java.util.ArrayDeque;
/**
* A buffer that transfers elements to an handler with back-pressure.
* <p/>
* The buffer is softly bounded, i.e the producer can {@link #write} any number of elements and shall
* cooperate with the buffer to not overload it.
*
* <h3>Writing to the buffer</h3>
* When the producer writes an element to the buffer, the boolean value returned by the {@link #write} method indicates
* whether it can continue safely adding more elements or stop.
* <p/>
* The producer can set a {@link #drainHandler} to be signaled when it can resume writing again. When a {@code write}
* returns {@code false}, the drain handler will be called when the buffer becomes writable again. Note that subsequent
* call to {@code write} will not prevent the drain handler to be called.
*
* <h3>Reading from the buffer</h3>
* The consumer should set an {@link #handler} to consume elements.
*
* <h3>Buffer mode</h3>
* The buffer is either in <i>flowing</i> or <i>fetch</i> mode.
* <ul>
* <i>Initially the buffer is in <i>flowing</i> mode.</i>
* <li>When the buffer is in <i>flowing</i> mode, elements are delivered to the {@code handler}.</li>
* <li>When the buffer is in <i>fetch</i> mode, only the number of requested elements will be delivered to the {@code handler}.</li>
* </ul>
* The mode can be changed with the {@link #pause()}, {@link #resume()} and {@link #fetch} methods:
* <ul>
* <li>Calling {@link #resume()} sets the <i>flowing</i> mode</li>
* <li>Calling {@link #pause()} sets the <i>fetch</i> mode and resets the demand to {@code 0}</li>
* <li>Calling {@link #fetch(long)} requests a specific amount of elements and adds it to the actual demand</li>
* </ul>
*
* <h3>Concurrency</h3>
*
* To avoid data races, write methods must be called from the context thread associated with the buffer, when that's
* not the case, an {@code IllegalStateException} is thrown.
* <p/>
* Other methods can be called from any thread.
* <p/>
* The handlers will always be called from a context thread.
* <p/>
* <strong>WARNING</strong>: this class is mostly useful for implementing the {@link io.vertx.core.streams.ReadStream}
* and has little or no use within a regular application.
*
* @author <a href="mailto:<EMAIL>"><NAME></a>
*/
public class InboundBuffer<E> {

  /**
   * A reusable sentinel for signaling the end of a stream.
   */
  public static final Object END_SENTINEL = new Object();

  private final ContextInternal context;
  private final ArrayDeque<E> pending;  // elements queued for later delivery
  private final long highWaterMark;     // soft bound on pending size, used to report writability
  private long demand;                  // elements requested by the consumer; Long.MAX_VALUE = flowing mode
  private Handler<E> handler;
  private boolean overflow;             // set once a write/emit observes the buffer over the high-water mark
  private Handler<Void> drainHandler;
  private Handler<Void> emptyHandler;
  private Handler<Throwable> exceptionHandler;
  private boolean emitting;             // true while some thread is actively delivering elements

  public InboundBuffer(Context context) {
    this(context, 16L);
  }

  public InboundBuffer(Context context, long highWaterMark) {
    if (context == null) {
      throw new NullPointerException("context must not be null");
    }
    if (highWaterMark < 0) {
      throw new IllegalArgumentException("highWaterMark " + highWaterMark + " >= 0");
    }
    this.context = (ContextInternal) context;
    this.highWaterMark = highWaterMark;
    this.demand = Long.MAX_VALUE; // start in flowing mode
    this.pending = new ArrayDeque<>();
  }

  // Writes are only legal from a Netty fast-thread-local (Vert.x) thread.
  private void checkThread() {
    Thread thread = Thread.currentThread();
    if (!(thread instanceof FastThreadLocalThread)) {
      throw new IllegalStateException("This operation must be called from a Vert.x thread");
    }
  }

  /**
   * Write an {@code element} to the buffer. The element will be delivered synchronously to the handler when
   * it is possible, otherwise it will be queued for later delivery.
   *
   * @param element the element to add
   * @return {@code false} when the producer should stop writing
   */
  public boolean write(E element) {
    checkThread();
    Handler<E> handler;
    synchronized (this) {
      if (demand == 0L || emitting) {
        // No demand, or another emission in progress: queue for later.
        pending.add(element);
        return checkWritable();
      } else {
        if (demand != Long.MAX_VALUE) {
          --demand;
        }
        emitting = true;
        handler = this.handler;
      }
    }
    // Deliver outside the lock, then flush anything queued meanwhile.
    handleEvent(handler, element);
    return emitPending();
  }

  private boolean checkWritable() {
    if (demand == Long.MAX_VALUE) {
      // Flowing mode: the buffer is always considered writable.
      return true;
    } else {
      long actual = pending.size() - demand;
      boolean writable = actual < highWaterMark;
      overflow |= !writable; // remember, so the drain handler fires later
      return writable;
    }
  }

  /**
   * Write an {@code iterable} of {@code elements}.
   *
   * @see #write(E)
   * @param elements the elements to add
   * @return {@code false} when the producer should stop writing
   */
  public boolean write(Iterable<E> elements) {
    checkThread();
    synchronized (this) {
      for (E element : elements) {
        pending.add(element);
      }
      if (demand == 0L || emitting) {
        return checkWritable();
      } else {
        emitting = true;
      }
    }
    return emitPending();
  }

  // Delivers queued elements until demand or the queue is exhausted;
  // returns the buffer's writability at that point.
  private boolean emitPending() {
    E element;
    Handler<E> h;
    while (true) {
      synchronized (this) {
        int size = pending.size();
        if (demand == 0L) {
          emitting = false;
          boolean writable = size < highWaterMark;
          overflow |= !writable;
          return writable;
        } else if (size == 0) {
          emitting = false;
          return true;
        }
        if (demand != Long.MAX_VALUE) {
          demand--;
        }
        element = pending.poll();
        h = this.handler;
      }
      // Deliver outside the lock.
      handleEvent(h, element);
    }
  }

  /**
   * Drain the buffer.
   * <p/>
   * Calling this assumes {@code (demand > 0L && !pending.isEmpty()) == true}
   */
  private void drain() {
    int emitted = 0;
    Handler<Void> drainHandler;
    Handler<Void> emptyHandler;
    while (true) {
      E element;
      Handler<E> handler;
      synchronized (this) {
        int size = pending.size();
        if (size == 0) {
          emitting = false;
          // Only signal drain when a previous write actually overflowed.
          if (overflow) {
            overflow = false;
            drainHandler = this.drainHandler;
          } else {
            drainHandler = null;
          }
          emptyHandler = emitted > 0 ? this.emptyHandler : null;
          break;
        } else if (demand == 0L) {
          emitting = false;
          return;
        }
        emitted++;
        if (demand != Long.MAX_VALUE) {
          demand--;
        }
        element = pending.poll();
        handler = this.handler;
      }
      handleEvent(handler, element);
    }
    if (drainHandler != null) {
      handleEvent(drainHandler, null);
    }
    if (emptyHandler != null) {
      handleEvent(emptyHandler, null);
    }
  }

  // Invokes a handler, routing anything it throws to the exception handler.
  private <T> void handleEvent(Handler<T> handler, T element) {
    if (handler != null) {
      try {
        handler.handle(element);
      } catch (Throwable t) {
        handleException(t);
      }
    }
  }

  private void handleException(Throwable err) {
    Handler<Throwable> handler;
    synchronized (this) {
      if ((handler = exceptionHandler) == null) {
        return;
      }
    }
    handler.handle(err);
  }

  /**
   * Request a specific {@code amount} of elements to be fetched, the amount is added to the actual demand.
   * <p/>
   * Pending elements in the buffer will be delivered asynchronously on the context to the handler.
   * <p/>
   * This method can be called from any thread.
   *
   * @return {@code true} when the buffer will be drained
   */
  public boolean fetch(long amount) {
    if (amount < 0L) {
      throw new IllegalArgumentException();
    }
    synchronized (this) {
      demand += amount;
      if (demand < 0L) {
        // Addition overflowed: clamp to "unbounded".
        demand = Long.MAX_VALUE;
      }
      if (emitting || (pending.isEmpty() && !overflow)) {
        return false;
      }
      emitting = true;
    }
    context.runOnContext(v -> drain());
    return true;
  }

  /**
   * Read the most recent element synchronously.
   * <p/>
   * No handler will be called.
   *
   * @return the most recent element or {@code null} if no element was in the buffer
   */
  public E read() {
    synchronized (this) {
      return pending.poll();
    }
  }

  /**
   * Clear the buffer synchronously.
   * <p/>
   * No handler will be called.
   *
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> clear() {
    pending.clear();
    return this;
  }

  /**
   * Pause the buffer, it sets the buffer in {@code fetch} mode and clears the actual demand.
   *
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> pause() {
    demand = 0L;
    return this;
  }

  /**
   * Resume the buffer, and sets the buffer in {@code flowing} mode.
   * <p/>
   * Pending elements in the buffer will be delivered asynchronously on the context to the handler.
   * <p/>
   * This method can be called from any thread.
   *
   * @return {@code true} when the buffer will be drained
   */
  public boolean resume() {
    return fetch(Long.MAX_VALUE);
  }

  /**
   * Set an {@code handler} to be called with elements available from this buffer.
   *
   * @param handler the handler
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> handler(Handler<E> handler) {
    this.handler = handler;
    return this;
  }

  /**
   * Set an {@code handler} to be called when the buffer is drained and the producer can resume writing to the buffer.
   *
   * @param handler the handler to be called
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> drainHandler(Handler<Void> handler) {
    drainHandler = handler;
    return this;
  }

  /**
   * Set an {@code handler} to be called when the buffer becomes empty.
   *
   * @param handler the handler to be called
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> emptyHandler(Handler<Void> handler) {
    emptyHandler = handler;
    return this;
  }

  /**
   * Set an {@code handler} to be called when an exception is thrown by an handler.
   *
   * @param handler the handler
   * @return a reference to this, so the API can be used fluently
   */
  public synchronized InboundBuffer<E> exceptionHandler(Handler<Throwable> handler) {
    exceptionHandler = handler;
    return this;
  }

  /**
   * @return whether the buffer is empty
   */
  public synchronized boolean isEmpty() {
    return pending.isEmpty();
  }

  /**
   * @return whether the buffer is writable
   */
  public synchronized boolean isWritable() {
    return pending.size() < highWaterMark;
  }

  /**
   * @return whether the buffer is paused, i.e it is in {@code fetch} mode and the demand is {@code 0}.
   */
  public synchronized boolean isPaused() {
    return demand == 0L;
  }

  /**
   * @return the actual number of elements in the buffer
   */
  public synchronized int size() {
    return pending.size();
  }
}
|
qinfangj/UniWebProject | components/userData/batchSubmission/formModels/sampleModel.js | <gh_stars>0
'use strict';
import inputTypes from '../../../constants/inputTypes';
import optionsStoreKeys from '../../../constants/optionsStoreKeys';
import fields from '../../../constants/fields';
// Ordered field names for batch sample submission: sample attributes first,
// followed by the per-sample user-request attributes.
export const sampleFields = [
  fields.samples.NAME, fields.samples.SHORT_NAME, fields.samples.CONCENTRATION,
  fields.samples.VOLUME, fields.samples.RIN, fields.samples.RATIO_260_280, fields.samples.RATIO_260_230,
  fields.samples.DESCRIPTION, fields.samples.COMMENT_CUSTOMER,
  fields.user_requests.INSERT_SIZE_MIN, fields.user_requests.INSERT_SIZE_MAX,
  fields.user_requests.NB_LANES, fields.user_requests.MILLION_READS, fields.user_requests.MULTIPLEXING_GROUP,
];
// Form model for the "samples" batch importer: one entry per column with its
// input type, display label, and whether it is required.
const sampleModel = {
  fields: [
    {
      name: fields.samples.NAME,
      inputType: inputTypes.TEXT,
      label: "Sample name",
      required: true,
    },{
      name: fields.samples.SHORT_NAME,
      inputType: inputTypes.TEXT,
      label: "Short name",
      required: true,
    },{
      name: fields.samples.CONCENTRATION,
      inputType: inputTypes.NUMBER,
      label: "Conc.[ng/μl]",
      required: true,
    },{
      name: fields.samples.VOLUME,
      inputType: inputTypes.NUMBER,
      label: "Vol.[μl]",
      required: true,
    },{
      name: fields.samples.RIN,
      inputType: inputTypes.NUMBER,
      label: "RIN",
      required: true,
    },{
      name: fields.samples.RATIO_260_280,
      inputType: inputTypes.NUMBER,
      label: "Ratio 260/280",
      required: true,
    },{
      name: fields.samples.RATIO_260_230,
      inputType: inputTypes.NUMBER,
      label: "Ratio 260/230",
      required: true,
    },{
      name: fields.samples.DESCRIPTION,
      inputType: inputTypes.TEXT,
      label: "General description",
      required: true,
    },{
      name: fields.samples.COMMENT_CUSTOMER,
      inputType: inputTypes.TEXT,
      label: "Comment",
    },
    // Request
    {
      name: fields.user_requests.INSERT_SIZE_MIN,
      inputType: inputTypes.NUMBER,
      label: "Insert size min.",
      required: true,
    },{
      name: fields.user_requests.INSERT_SIZE_MAX,
      inputType: inputTypes.NUMBER,
      label: "Insert size max.",
      required: true,
    },{
      name: fields.user_requests.NB_LANES,
      inputType: inputTypes.NUMBER,
      label: "Nb of lanes",
      required: true,
    },{
      // NOTE(review): the label "Multiplex#" on MILLION_READS looks like it
      // may belong to MULTIPLEXING_GROUP below — confirm the intended label.
      name: fields.user_requests.MILLION_READS,
      inputType: inputTypes.NUMBER,
      label: "Multiplex#",
      required: true,
    },{
      name: fields.user_requests.MULTIPLEXING_GROUP,
      inputType: inputTypes.TEXT,
      label: "Multiplexing group",
    },
  ],
  model: "samples",
};
export default sampleModel;
|
tiwer/letv | src/main/java/com/google/protobuf/jpush/b.java | <filename>src/main/java/com/google/protobuf/jpush/b.java<gh_stars>10-100
package com.google.protobuf.jpush;
import java.util.Collection;
public abstract class b<BuilderType extends b> implements l {

    // Obfuscated error message text, decoded once at class-load time.
    private static final String z;

    static {
        // Reconstructed from the decompiler's instruction dump. The original
        // jadx output replaced this initializer with
        // `throw new UnsupportedOperationException(...)`, which would make the
        // class fail to load with ExceptionInInitializerError. The dump shows
        // a simple deobfuscation loop: each character is XOR-ed with a key
        // cycling over [22, 96, 29, 21, 79] (switch on index % 5).
        char[] chars = "D\u0005|q&x\u0007=s=y\r=tot\u0019ipow\u0012ot66\u0014ug*a@|{o_/Xm,s\u0010i| x@5f'y\u0015qqox\u0005kp=6\b|e?s\u000e4;".toCharArray();
        for (int i = 0; i < chars.length; i++) {
            final int key;
            switch (i % 5) {
                case 0: key = 22; break;
                case 1: key = 96; break;
                case 2: key = 29; break;
                case 3: key = 21; break;
                default: key = 79; break;
            }
            chars[i] = (char) (chars[i] ^ key);
        }
        z = new String(chars).intern();
    }

    /**
     * Null-checks every element of {@code iterable}, then adds them all to
     * {@code collection} (bulk addAll when the source is itself a Collection).
     *
     * @throws NullPointerException if any element is null
     */
    protected static <T> void a(Iterable<T> iterable, Collection<? super T> collection) {
        for (T t : iterable) {
            if (t == null) {
                throw new NullPointerException();
            }
        }
        if (iterable instanceof Collection) {
            collection.addAll((Collection) iterable);
            return;
        }
        for (T t2 : iterable) {
            collection.add(t2);
        }
    }

    /** Merges from a coded input stream with the given extension registry. */
    public abstract BuilderType a(d dVar, g gVar);

    /**
     * Merges from a byte-array slice.
     *
     * NOTE(review): the decompiled code passes a constant {@code 0} as the
     * offset to {@code d.a(...)}, ignoring parameter {@code i}. In upstream
     * protobuf this call forwards the offset; confirm against the original
     * bytecode before changing — kept as decompiled.
     */
    public final BuilderType a(byte[] bArr, int i, int i2) {
        try {
            d a = d.a(bArr, 0, i2);
            a(a, g.a());
            a.a(0); // checkLastTagWas(0): ensure the whole message was consumed
            return (BuilderType) this; // cast restored; decompiler emitted a raw 'return this'
        } catch (j e) {
            throw e; // parse failure: rethrow as-is
        } catch (Throwable e2) {
            // Reading from a byte array should never fail with I/O errors.
            throw new RuntimeException(z, e2);
        }
    }

    // Bridge for the l interface's merge method.
    public /* synthetic */ l b(d dVar, g gVar) {
        return a(dVar, gVar);
    }

    // Bridge for Object.clone(); delegates to the typed clone d().
    public /* synthetic */ Object clone() {
        return d();
    }

    /** Typed clone of this builder. */
    public abstract BuilderType d();
}
|
peterdemaeyer/JavaHamcrest | hamcrest-pure/src/test/java/org/hamcrest/pure/internal/SelfDescribingValueIteratorTest.java | package org.hamcrest.pure.internal;
import org.hamcrest.pure.Description;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import java.util.ArrayList;
import java.util.Iterator;
import static java.util.Arrays.asList;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
class SelfDescribingValueIteratorTest {

    @Test
    void iteration() {
        // Wrap a live iterator and verify that describing, advancing and
        // removing all delegate to the underlying iterator, in order.
        ArrayList<String> backing = new ArrayList<>(asList("a", "b", "c"));
        Iterator<String> delegate = backing.iterator();
        SelfDescribingValueIterator<String> describing = new SelfDescribingValueIterator<>(delegate);
        Description description = mock(Description.class);
        while (describing.hasNext()) {
            describing.next().describeTo(description);
        }
        // remove() after full traversal drops the last element ("c").
        describing.remove();
        assertEquals(2, backing.size());
        InOrder order = inOrder(description);
        order.verify(description).appendValue("a");
        order.verify(description).appendValue("b");
        order.verify(description).appendValue("c");
        order.verifyNoMoreInteractions();
    }
} |
qudini/qudini-reactive | qudini-reactive-graphql/src/main/java/com/qudini/reactive/graphql/scalar/IntScalar.java | package com.qudini.reactive.graphql.scalar;
import graphql.language.IntValue;
import graphql.schema.CoercingParseLiteralException;
import graphql.schema.CoercingParseValueException;
import graphql.schema.CoercingSerializeException;
/**
* <p>Base class for int-based custom scalars.</p>
* <p>Example:</p>
* <pre><code>
* @Component
* public class YourIntBasedScalar extends IntScalar<YourJavaType> {
*
* public YourIntBasedScalar() {
* super("YourScalarName", "Your scalar description", YourJavaType.class);
* }
*
* @Override
* public Integer serialise(YourJavaType input) {
* return ...;
* }
*
* @Override
* public YourJavaType parse(Number input) {
* return ...;
* }
*
* }
* </code></pre>
*/
public abstract class IntScalar<T> extends Scalar<T, Number, Integer> {

    public IntScalar(String name, String description, Class<T> type) {
        super(name, description, type);
    }

    /** Serialises a value of the wrapped Java type; rejects anything else. */
    @Override
    public final Integer serialize(Object input) throws CoercingSerializeException {
        if (!type.isInstance(input)) {
            throw unexpectedInstanceType(type, input);
        }
        return serialise((T) input);
    }

    /** Parses an externally supplied variable value, which must be a Number. */
    @Override
    public final T parseValue(Object input) throws CoercingParseValueException {
        if (!(input instanceof Number)) {
            throw unexpectedInstanceType(Number.class, input);
        }
        return parse((Number) input);
    }

    /**
     * Parses an inline query literal. Non-IntValue literals return null —
     * presumably treated upstream as a coercion failure; confirm against the
     * graphql-java Coercing contract.
     */
    @Override
    public final T parseLiteral(Object input) throws CoercingParseLiteralException {
        return input instanceof IntValue
                ? parse(((IntValue) input).getValue())
                : null;
    }

}
|
danielsunzhongyuan/java_advanced | algorithms/src/main/java/com/zsun/java/algorithms/arithmetic/FastPower.java | <filename>algorithms/src/main/java/com/zsun/java/algorithms/arithmetic/FastPower.java
package com.zsun.java.algorithms.arithmetic;
/**
* @author : zsun
* @date : 2020/08/21 11:18
*/
public class FastPower {
/**
* @param base 底数
* @param power 幂次
* @return 后两位数
*/
public static long calculate(long base, long power) throws Exception {
if (power < 0) {
throw new Exception("power should not less than zero");
}
if (power == 0) {
if (base == 0) {
throw new Exception("base & power can not be zero at the same time");
}
return 1;
} else if (power == 1) {
return base % 100;
}
if (base > 100) {
base = base % 100;
}
if (power % 2 == 0) {
return calculate(base * base, power / 2);
} else {
return (calculate(base * base, power / 2) * base) % 100;
}
}
public static void main(String[] args) throws Exception {
long start = System.currentTimeMillis();
System.out.println(FastPower.calculate(20200819L, 2020081920200819L));
long end = System.currentTimeMillis();
System.out.println("using " + (end - start) + " ms");
}
}
|
Miguel-David/1KSegundaEvaluacion | src/ExamenAnyoPasadoSegunda/Audible.java | <filename>src/ExamenAnyoPasadoSegunda/Audible.java
package ExamenAnyoPasadoSegunda;
/**
 * Contract for anything capable of producing a sound
 * ("sonido" is Spanish for "sound").
 */
public interface Audible {
	/** @return the sound this object makes, as text */
	String sonido();
}
|
maxime4000/react-laag | docs/src/pages/builder.js | import React from "react";
import Builder from "../builder";
export default Builder;
|
lzp979822781/standard-react-project | src/pages/med-market/data-analysis/models/displayAnalyse.js | <reponame>lzp979822781/standard-react-project
/* eslint-disable import/extensions */
// eslint-disable-next-line no-unused-vars
import cloneDeep from "lodash/cloneDeep";
import omit from "lodash/omit";
import {
// eslint-disable-next-line no-unused-vars
get,
post,
doExport,
queryDetail,
} from "../services/displayAnalyse";
import { genMsg } from "@/utils/utils";
// Invokes `method` with `data` when a callback was actually supplied;
// silently does nothing otherwise. Always returns undefined.
function callBack(method, data) {
    if (!method) {
        return;
    }
    method(data);
}
// Maps a detail-export "type" to its URL key in the services layer.
// Currently only the detail table itself is exportable.
const detailExportUrl = {
    detail: "DETAIL_EXPORT",
};
// dva model for the display-analysis pages: holds the shared query params,
// the analysis/detail summaries and the two paginated tables.
export default {
    namespace: "displayAnalyse",
    state: {
        // Header/summary data for the analysis and detail pages.
        analysisBaseInfo: {},
        detailBaseInfo: {},
        // Base query params shared by every analysis request.
        analyseBaseParm: {
            actId: "",
            factoryVenderId: "",
            actTaskId: "",
            pinType: -1, // -1: all
        },
        // Base query params shared by every detail request.
        detailBaseParm: {
            actId: "",
            factoryVenderId: "",
            actTaskId: "",
        },
        // Paging / grouping params for the area-analysis table.
        analyseAreaParam: {
            provinceOrCity: "province",
            currentPage: 1,
            pageSize: 10,
        },
        analyseAreaRes: {
            totalCount: 0,
            data: [],
        },
        // Paging params for the detail list table.
        detailListParam: {
            currentPage: 1,
            pageSize: 10,
        },
        detailListRes: {
            totalCount: 0,
            data: [],
        },
    },
    effects: {
        // Clears the selected activity id on the param object named by
        // payload.field ("analyseBaseParm" or "detailBaseParm").
        *resetActId({ payload }, { put, select }) {
            const { field } = payload;
            const { [field]: reqParam } = yield select(state => state.displayAnalyse);
            const updateData = Object.assign({}, cloneDeep(reqParam), { actId: "" });
            yield put({
                type: "updateState",
                payload: {
                    [field]: updateData,
                },
            });
        },
        // Fetches the analysis page's summary, persisting the merged params first.
        *queryBaseInfo({ payload }, { call, put, select }) {
            const { analyseBaseParm } = yield select(state => state.displayAnalyse);
            const updateData = Object.assign({}, cloneDeep(analyseBaseParm), payload);
            let res;
            yield put({
                type: "updateState",
                payload: { analyseBaseParm: updateData },
            });
            try {
                res = yield call(post, {
                    url: "GET_BASE_INFO", // key into the services URL map
                    data: updateData, // all request data goes in `data`, for both GET and POST
                });
            } finally {
                const { success, data = {} } = res;
                if (success) {
                    yield put({
                        type: "updateState",
                        payload: { analysisBaseInfo: data },
                    });
                }
            }
        },
        // Fetches the paginated area-analysis table.
        *queryAnalyseArea({ payload, callback }, { call, put, select }) {
            const { analyseBaseParm, analyseAreaParam } = yield select(state => state.displayAnalyse);
            const newArea = Object.assign({}, analyseAreaParam, payload);
            const updateData = Object.assign({}, cloneDeep(analyseBaseParm), newArea);
            let res;
            yield put({
                type: "updateState",
                payload: { analyseAreaParam: newArea },
            });
            try {
                res = yield call(post, {
                    url: "ANALISE_AREA", // key into the services URL map
                    data: updateData,
                });
            } finally {
                // NOTE(review): if `call` throws, `res` is undefined and this
                // destructuring raises a TypeError inside `finally` — same
                // pattern throughout this model; confirm whether `post` can throw.
                const { success, data: { result = [], totalCount = 0 } = {} } = res;
                if (success && Array.isArray(result)) {
                    yield put({
                        type: "updateState",
                        payload: { analyseAreaRes: { totalCount, data: result } },
                    });
                }
                const resObj = genMsg(res, "请求成功", "请求失败");
                callBack(callback, resObj);
            }
        },
        // Fetches the detail page's summary (shares the GET_BASE_INFO endpoint
        // with queryBaseInfo; only the persisted params differ).
        *queryDetailInfo({ payload }, { call, put, select }) {
            const { detailBaseParm } = yield select(state => state.displayAnalyse);
            const updateData = Object.assign({}, cloneDeep(detailBaseParm), payload);
            let res;
            yield put({
                type: "updateState",
                payload: { detailBaseParm: updateData },
            });
            try {
                res = yield call(post, {
                    url: "GET_BASE_INFO",
                    data: updateData,
                });
            } finally {
                const { success, data = {} } = res;
                if (success) {
                    yield put({
                        type: "updateState",
                        payload: { detailBaseInfo: data },
                    });
                }
            }
        },
        // Fetches the paginated detail list.
        *queryDetailList({ payload, callback }, { call, put, select }) {
            const { detailBaseParm, detailListParam } = yield select(state => state.displayAnalyse);
            const newArea = Object.assign({}, detailListParam, payload);
            const updateData = Object.assign({}, cloneDeep(detailBaseParm), newArea);
            let res;
            // BUG FIX: previously this stored the merged paging params under
            // `analyseAreaParam` (copy/paste from queryAnalyseArea), so the
            // detail list's own paging state was never persisted.
            yield put({
                type: "updateState",
                payload: { detailListParam: newArea },
            });
            try {
                res = yield call(post, {
                    url: "DETAIL_LIST",
                    data: updateData,
                });
            } finally {
                const { success, data: { result = [], totalCount = 0 } = {} } = res;
                if (success && Array.isArray(result)) {
                    yield put({
                        type: "updateState",
                        payload: { detailListRes: { totalCount, data: result } },
                    });
                }
                const resObj = genMsg(res, "请求成功", "请求失败");
                callBack(callback, resObj);
            }
        },
        // Fetches the activity list for the search dropdown.
        *queryActs({ payload, callback }, { call }) {
            let res;
            try {
                res = yield call(queryDetail, {
                    url: "GET_ACTS",
                    data: payload,
                });
            } finally {
                const resObj = genMsg(res, "查询活动列表成功", "查询活动列表失败");
                callBack(callback, resObj);
            }
        },
        // Exports the analysis table with the currently persisted params.
        *analyseExport({ payload = {} }, { select, call }) {
            const {
                analyseBaseParm,
                analyseAreaParam: { provinceOrCity },
            } = yield select(state => state.displayAnalyse);
            yield call(doExport, {
                url: "ANALISE_EXPORT",
                data: { ...analyseBaseParm, provinceOrCity, ...payload },
            }) || {};
        },
        // Exports a detail table; `payload.type` selects the export URL.
        *detailExport({ payload = {} }, { select, call }) {
            const { type } = payload;
            const { detailBaseParm } = yield select(state => state.displayAnalyse);
            yield call(doExport, {
                url: detailExportUrl[type],
                data: { ...detailBaseParm, ...payload },
            }) || {};
        },
        // Loads data for the search controls (company list or task list).
        *initSearch({ payload = {}, callback }, { call }) {
            const { type } = payload;
            const url = type === "company" ? "GET_COMPANY" : "GET_TASK_LIST";
            let res;
            try {
                res = yield call(post, {
                    url,
                    data: omit(payload, ["type"]),
                });
            } finally {
                const resObj = genMsg(res, "查询成功", "查询失败");
                callBack(callback, resObj);
            }
        },
    },
    reducers: {
        // Shallow-merges the payload into model state.
        updateState(state, { payload }) {
            return {
                ...state,
                ...payload,
            };
        },
    },
};
|
seksek13/tp | src/main/java/seedu/address/logic/commands/SortRecordCommand.java | package seedu.address.logic.commands;
import static java.util.Objects.requireNonNull;
import java.util.Comparator;
import seedu.address.commons.core.Messages;
import seedu.address.model.Model;
import seedu.address.model.record.Record;
/**
 * Sorts all records in the record book, ascending or descending,
 * by start date or by end date.
 */
public class SortRecordCommand extends Command {

    public static final String COMMAND_WORD = "sort";
    public static final String SORT_START_DATE_ASC = "sa";
    public static final String SORT_START_DATE_DES = "sd";
    public static final String SORT_END_DATE_ASC = "ea";
    public static final String SORT_END_DATE_DES = "ed";
    public static final String MESSAGE_USAGE = COMMAND_WORD
            + ": Sorts all record in record book in ascending or descending order in terms of start/end date.\n"
            + "Example: " + COMMAND_WORD + " -r sa " + "('sa' for sort start date in ascending order). \n"
            + COMMAND_WORD + " -r ed " + "('ed' for sort end date in descending order). \n";

    // One of the four sort codes above (sa / sd / ea / ed).
    private final String sortOption;

    /**
     * @param sortOption the sort code selecting field and direction
     */
    public SortRecordCommand(String sortOption) {
        this.sortOption = sortOption;
    }

    @Override
    public CommandResult execute(Model model) {
        requireNonNull(model);
        // Base orderings; descending variants are derived via reversed().
        final Comparator<Record> byStart = Comparator.comparing(Record::getStartLocalDate);
        final Comparator<Record> byEnd = Comparator.comparing(Record::getEndLocalDate);
        Comparator<Record> ordering;
        String feedback = "";
        switch (sortOption) {
        case SORT_START_DATE_ASC:
            ordering = byStart;
            feedback = String.format(Messages.MESSAGE_RECORD_SORTED_START_TIME_ASCENDING,
                    model.getFilteredRecordList().size());
            break;
        case SORT_START_DATE_DES:
            ordering = byStart.reversed();
            feedback = String.format(Messages.MESSAGE_RECORD_SORTED_START_TIME_DESCENDING,
                    model.getFilteredRecordList().size());
            break;
        case SORT_END_DATE_ASC:
            ordering = byEnd;
            feedback = String.format(Messages.MESSAGE_RECORD_SORTED_END_TIME_ASCENDING,
                    model.getFilteredRecordList().size());
            break;
        case SORT_END_DATE_DES:
            ordering = byEnd.reversed();
            feedback = String.format(Messages.MESSAGE_RECORD_SORTED_END_TIME_DESCENDING,
                    model.getFilteredRecordList().size());
            break;
        default:
            // Unknown option: leave the order untouched (no-op comparator).
            ordering = (r1, r2) -> 0;
        }
        model.sortRecordBook(ordering);
        return new CommandResult(feedback, false, false, false,
                true, false, false, false, false);
    }

    @Override
    public boolean equals(Object other) {
        if (other == this) { // short circuit if same object
            return true;
        }
        return other instanceof SortRecordCommand // instanceof handles nulls
                && sortOption.equals(((SortRecordCommand) other).sortOption); // state check
    }
}
|
EdMcBane/emaze-dysfunctional | src/test/java/net/emaze/dysfunctional/ZipsTest.java | package net.emaze.dysfunctional;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import net.emaze.dysfunctional.order.ComparableComparator;
import net.emaze.dysfunctional.order.NextIntegerSequencingPolicy;
import net.emaze.dysfunctional.ranges.DenseRange;
import net.emaze.dysfunctional.ranges.Range.Endpoint;
import net.emaze.dysfunctional.order.JustBeforeNothingComparator;
import net.emaze.dysfunctional.ranges.Range;
import net.emaze.dysfunctional.tuples.Pair;
import org.junit.Assert;
import org.junit.Test;
/**
 * If you are going to add a test here, consider that Zips should be a thin
 * facade, and tests on ZipsTest should be "smoke tests".
 *
 * @author rferranti
 */
public class ZipsTest {

    // Shorthand factories for int pairs and Optional pairs used throughout.
    public static Pair<Integer, Integer> p(int f, int l) {
        return Pair.of(f, l);
    }
    public static Pair<Optional<Integer>, Optional<Integer>> p(Optional<Integer> f, Optional<Integer> l) {
        return Pair.of(f, l);
    }

    // --- shortest: pairs up elements, stopping at the shorter source ---
    @Test
    public void canZipShortestWithIterators() {
        final List<Integer> former = Arrays.asList(1, 3);
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        final Iterator<Pair<Integer, Integer>> got = Zips.shortest(former.iterator(), latter.iterator());
        Assert.assertEquals(Arrays.asList(p(1, 2), p(3, 4)), Consumers.all(got));
    }
    @Test
    public void canZipShortestWithArrays() {
        final Integer[] former = new Integer[]{1, 3};
        final Integer[] latter = new Integer[]{2, 4, 5};
        final Iterator<Pair<Integer, Integer>> got = Zips.shortest(former, latter);
        Assert.assertEquals(Arrays.asList(p(1, 2), p(3, 4)), Consumers.all(got));
    }
    @Test
    public void canZipShortestWithIterables() {
        final List<Integer> former = Arrays.asList(1, 3);
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        final Iterator<Pair<Integer, Integer>> got = Zips.shortest(former, latter);
        Assert.assertEquals(Arrays.asList(p(1, 2), p(3, 4)), Consumers.all(got));
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotZipShortestWithNullFormerIterable() {
        final List<Integer> former = null;
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        Zips.shortest(former, latter);
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotZipShortestWithNullLatterIterable() {
        final List<Integer> former = Arrays.asList(2, 4, 5);
        final List<Integer> latter = null;
        Zips.shortest(former, latter);
    }

    // --- longest: pads the shorter source with Optional.empty() ---
    @Test
    public void canZipLongestWithIterators() {
        final List<Integer> former = Arrays.asList(1, 3);
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        final Iterator<Pair<Optional<Integer>, Optional<Integer>>> got = Zips.longest(former.iterator(), latter.iterator());
        final List<Pair<Optional<Integer>, Optional<Integer>>> expected = new ArrayList<Pair<Optional<Integer>, Optional<Integer>>>();
        expected.add(p(Optional.of(1), Optional.of(2)));
        expected.add(p(Optional.of(3), Optional.of(4)));
        expected.add(p(Optional.<Integer>empty(), Optional.of(5)));
        Assert.assertEquals(expected, Consumers.all(got));
    }
    @Test
    public void canZipLongestWithArrays() {
        final Integer[] former = new Integer[]{1, 3};
        final Integer[] latter = new Integer[]{2, 4, 5};
        final Iterator<Pair<Optional<Integer>, Optional<Integer>>> got = Zips.longest(former, latter);
        final List<Pair<Optional<Integer>, Optional<Integer>>> expected = new ArrayList<Pair<Optional<Integer>, Optional<Integer>>>();
        expected.add(p(Optional.of(1), Optional.of(2)));
        expected.add(p(Optional.of(3), Optional.of(4)));
        expected.add(p(Optional.<Integer>empty(), Optional.of(5)));
        Assert.assertEquals(expected, Consumers.all(got));
    }
    @Test
    public void canZipLongestWithIterables() {
        final List<Integer> former = Arrays.asList(1, 3);
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        Iterator<Pair<Optional<Integer>, Optional<Integer>>> got = Zips.longest(former, latter);
        final List<Pair<Optional<Integer>, Optional<Integer>>> expected = new ArrayList<Pair<Optional<Integer>, Optional<Integer>>>();
        expected.add(p(Optional.of(1), Optional.of(2)));
        expected.add(p(Optional.of(3), Optional.of(4)));
        expected.add(p(Optional.<Integer>empty(), Optional.of(5)));
        Assert.assertEquals(expected, Consumers.all(got));
    }
    // NOTE(review): the two tests below expect IllegalArgumentException but are
    // named "can..." — consider renaming to "cannot..." for consistency.
    @Test(expected = IllegalArgumentException.class)
    public void canZipLongestWithNullFormerIterable() {
        final List<Integer> former = null;
        final List<Integer> latter = Arrays.asList(2, 4, 5);
        Zips.longest(former, latter);
    }
    @Test(expected = IllegalArgumentException.class)
    public void canZipLongestWithNullLatterIterable() {
        final List<Integer> former = Arrays.asList(2, 4, 5);
        final List<Integer> latter = null;
        Zips.longest(former, latter);
    }

    // --- counted: pairs each element with an index (0-based, or from a range) ---
    @Test
    public void canMakeAnIterableCounted() {
        final Iterable<String> bucket = Iterations.iterable("a", "b");
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(0, "a"), Pair.of(1, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test
    public void canMakeAnIterableCountedWithRange() {
        final Range<Integer> range = new DenseRange<Integer>(new NextIntegerSequencingPolicy(), new JustBeforeNothingComparator<Integer>(new ComparableComparator<Integer>()), Endpoint.Include, 1, Optional.of(10), Endpoint.Include);
        final Iterable<String> bucket = Iterations.iterable("a", "b");
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket, range);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(1, "a"), Pair.of(2, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test
    public void canMakeAnIteratorCounted() {
        final Iterator<String> bucket = Iterations.iterator("a", "b");
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(0, "a"), Pair.of(1, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test
    public void canMakeAnIteratorCountedWithRange() {
        final Range<Integer> range = new DenseRange<Integer>(new NextIntegerSequencingPolicy(), new JustBeforeNothingComparator<Integer>(new ComparableComparator<Integer>()), Endpoint.Include, 1, Optional.of(10), Endpoint.Include);
        final Iterator<String> bucket = Iterations.iterator("a", "b");
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket, range);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(1, "a"), Pair.of(2, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test
    public void canMakeAnArrayCounted() {
        final String[] bucket = {"a", "b"};
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(0, "a"), Pair.of(1, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test
    public void canMakeAnArrayCountedWithRange() {
        final Range<Integer> range = new DenseRange<Integer>(new NextIntegerSequencingPolicy(), new JustBeforeNothingComparator<Integer>(new ComparableComparator<Integer>()), Endpoint.Include, 1, Optional.of(10), Endpoint.Include);
        final String[] bucket = {"a", "b"};
        final Iterator<Pair<Integer, String>> iterator = Zips.counted(bucket, range);
        final Iterator<Pair<Integer, String>> expected = Iterations.iterator(Pair.of(1, "a"), Pair.of(2, "b"));
        Assert.assertEquals(Consumers.all(expected), Consumers.all(iterator));
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotCallCountedWithANullIterable() {
        final Iterable<Object> iterable = null;
        Zips.counted(iterable);
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotCallCountedWithANullIterableAndARange() {
        final Iterable<Object> iterable = null;
        final StubRange range = new StubRange();
        Zips.counted(iterable, range);
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotCallCountedWithANullArray() {
        final Object[] iterable = null;
        Zips.counted(iterable);
    }
    @Test(expected = IllegalArgumentException.class)
    public void cannotCallCountedWithANullArrayAndARange() {
        final Object[] iterable = null;
        final StubRange range = new StubRange();
        Zips.counted(iterable, range);
    }

    /** Minimal Range stub for the null-argument tests: every method is a no-op. */
    private static class StubRange implements Range<Object> {

        @Override
        public boolean contains(Object element) {
            return false;
        }
        @Override
        public boolean overlaps(Range<Object> rhs) {
            return false;
        }
        @Override
        public Object begin() {
            return null;
        }
        @Override
        public Optional<Object> end() {
            return null;
        }
        @Override
        public List<DenseRange<Object>> densified() {
            return null;
        }
        @Override
        public Iterator<Object> iterator() {
            return null;
        }
        @Override
        public int compareTo(Range<Object> o) {
            return 0;
        }
    }

    // The facade must stay subclassable (it is deliberately non-final).
    @Test
    public void facadeIsNotFinal() {
        new Zips() {
        };
    }
}
|
sebobr/dontpaytheferryman | opentsdb/test/search/TestTimeSeriesLookup.java | // This file is part of OpenTSDB.
// Copyright (C) 2014 The OpenTSDB Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 2.1 of the License, or (at your
// option) any later version. This program is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License for more details. You should have received a copy
// of the GNU Lesser General Public License along with this program. If not,
// see <http://www.gnu.org/licenses/>.
package net.opentsdb.search;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;
import static org.powermock.api.mockito.PowerMockito.mock;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import net.opentsdb.core.Const;
import net.opentsdb.core.TSDB;
import net.opentsdb.meta.TSMeta;
import net.opentsdb.storage.MockBase;
import net.opentsdb.uid.NoSuchUniqueName;
import net.opentsdb.uid.UniqueId;
import net.opentsdb.utils.Config;
import net.opentsdb.utils.Pair;
import org.hbase.async.HBaseClient;
import org.hbase.async.KeyValue;
import org.hbase.async.Scanner;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"javax.management.*", "javax.xml.*",
"ch.qos.*", "org.slf4j.*",
"com.sum.*", "org.xml.*"})
@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class,
KeyValue.class, Scanner.class, TimeSeriesLookup.class})
public class TestTimeSeriesLookup {
  // Test fixtures: a TSDB instance backed entirely by mocks.
  private Config config;
  private TSDB tsdb = null;
  private HBaseClient client = mock(HBaseClient.class);
  private UniqueId metrics = mock(UniqueId.class);
  private UniqueId tag_names = mock(UniqueId.class);
  private UniqueId tag_values = mock(UniqueId.class);
  private MockBase storage = null;
  // Canonical TSUIDs used across all tests: 3-byte metric UID followed by
  // one or more 3-byte tagk/tagv UID pairs.
  private static List<byte[]> test_tsuids = new ArrayList<byte[]>(7);
  static {
    test_tsuids.add(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 });
    test_tsuids.add(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 2 });
    test_tsuids.add(new byte[] { 0, 0, 2, 0, 0, 1, 0, 0, 1 });
    test_tsuids.add(new byte[] { 0, 0, 3, 0, 0, 1, 0, 0, 1, 0, 0, 4, 0, 0, 5});
    test_tsuids.add(new byte[] { 0, 0, 3, 0, 0, 1, 0, 0, 2, 0, 0, 4, 0, 0, 5});
    test_tsuids.add(new byte[] { 0, 0, 3, 0, 0, 6, 0, 0, 7, 0, 0, 8, 0, 0, 1,
        0, 0, 9, 0, 0, 3});
    test_tsuids.add(new byte[] { 0, 0, 3, 0, 0, 6, 0, 0, 7, 0, 0, 8, 0, 0, 10,
        0, 0, 9, 0, 0, 3});
  }
  /**
   * Wires the mocked HBase client into a real TSDB instance and stubs the
   * three UID tables. Order matters: the HBaseClient constructor must be
   * intercepted before {@code new TSDB(config)} runs.
   */
  @Before
  public void before() throws Exception {
    PowerMockito.whenNew(HBaseClient.class)
      .withArguments(anyString(), anyString()).thenReturn(client);
    config = new Config(false);
    tsdb = new TSDB(config);

    // replace the "real" field objects with mocks (reflection: the fields are private)
    Field met = tsdb.getClass().getDeclaredField("metrics");
    met.setAccessible(true);
    met.set(tsdb, metrics);

    Field tagk = tsdb.getClass().getDeclaredField("tag_names");
    tagk.setAccessible(true);
    tagk.set(tsdb, tag_names);

    Field tagv = tsdb.getClass().getDeclaredField("tag_values");
    tagv.setAccessible(true);
    tagv.set(tsdb, tag_values);

    // mock UniqueId: known names resolve to fixed UIDs, unknown ones throw
    when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 });
    when(metrics.getId("sys.cpu.system"))
      .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric"));
    when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 });
    when(metrics.getId("sys.cpu.idle")).thenReturn(new byte[] { 0, 0, 3 });
    when(metrics.getId("no.values")).thenReturn(new byte[] { 0, 0, 11 });
    when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 });
    when(tag_names.getId("dc"))
      .thenThrow(new NoSuchUniqueName("dc", "metric"));
    when(tag_names.getId("owner")).thenReturn(new byte[] { 0, 0, 4 });
    when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 });
    when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 });
    when(tag_values.getId("web03"))
      .thenThrow(new NoSuchUniqueName("web03", "metric"));
    when(metrics.width()).thenReturn((short)3);
    when(tag_names.width()).thenReturn((short)3);
    when(tag_values.width()).thenReturn((short)3);
  }
@Test
public void metricOnlyMeta() throws Exception {
generateMeta();
final SearchQuery query = new SearchQuery("sys.cpu.user");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(1), tsuids.get(1));
}
// returns everything
@Test
public void metricOnlyMetaStar() throws Exception {
generateMeta();
final SearchQuery query = new SearchQuery("*");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(7, tsuids.size());
}
@Test
public void metricOnlyData() throws Exception {
generateData();
final SearchQuery query = new SearchQuery("sys.cpu.user");
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(1), tsuids.get(1));
}
@Test
public void metricOnly2Meta() throws Exception {
generateMeta();
final SearchQuery query = new SearchQuery("sys.cpu.nice");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(2), tsuids.get(0));
}
@Test
public void metricOnly2Data() throws Exception {
generateData();
final SearchQuery query = new SearchQuery("sys.cpu.nice");
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(2), tsuids.get(0));
}
@Test (expected = NoSuchUniqueName.class)
public void noSuchMetricMeta() throws Exception {
final SearchQuery query = new SearchQuery("sys.cpu.system");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
lookup.lookup();
}
@Test
public void metricOnlyNoValuesMeta() throws Exception {
generateMeta();
final SearchQuery query = new SearchQuery("no.values");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(0, tsuids.size());
}
@Test
public void metricOnlyNoValuesData() throws Exception {
generateData();
final SearchQuery query = new SearchQuery("no.values");
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
query.setUseMeta(false);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(0, tsuids.size());
}
@Test
public void tagkOnlyMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", null));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(5, tsuids.size());
for (int i = 0; i < 5; i++) {
assertArrayEquals(test_tsuids.get(i), tsuids.get(i));
}
}
@Test
public void tagkOnlyMetaStar() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "*"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(5, tsuids.size());
for (int i = 0; i < 5; i++) {
assertArrayEquals(test_tsuids.get(i), tsuids.get(i));
}
}
@Test
public void tagkOnlyData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", null));
final SearchQuery query = new SearchQuery(tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(5, tsuids.size());
for (int i = 0; i < 5; i++) {
assertArrayEquals(test_tsuids.get(i), tsuids.get(i));
}
}
@Test
public void tagkOnly2Meta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("owner", null));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(3), tsuids.get(0));
assertArrayEquals(test_tsuids.get(4), tsuids.get(1));
}
@Test
public void tagkOnly2Data() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("owner", null));
final SearchQuery query = new SearchQuery(tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(3), tsuids.get(0));
assertArrayEquals(test_tsuids.get(4), tsuids.get(1));
}
@Test (expected = NoSuchUniqueName.class)
public void noSuchTagkMeta() throws Exception {
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("dc", null));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
lookup.lookup();
}
@Test
public void tagvOnlyMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web01"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(4, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(2), tsuids.get(1));
assertArrayEquals(test_tsuids.get(3), tsuids.get(2));
assertArrayEquals(test_tsuids.get(5), tsuids.get(3));
}
@Test
public void tagvOnlyMetaStar() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("*", "web01"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(4, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(2), tsuids.get(1));
assertArrayEquals(test_tsuids.get(3), tsuids.get(2));
assertArrayEquals(test_tsuids.get(5), tsuids.get(3));
}
@Test
public void tagvOnlyData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web01"));
final SearchQuery query = new SearchQuery(tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(4, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(2), tsuids.get(1));
assertArrayEquals(test_tsuids.get(3), tsuids.get(2));
assertArrayEquals(test_tsuids.get(5), tsuids.get(3));
}
@Test
public void tagvOnly2Meta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web02"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(1), tsuids.get(0));
assertArrayEquals(test_tsuids.get(4), tsuids.get(1));
}
@Test
public void tagvOnly2Data() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web02"));
final SearchQuery query = new SearchQuery(tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(2, tsuids.size());
assertArrayEquals(test_tsuids.get(1), tsuids.get(0));
assertArrayEquals(test_tsuids.get(4), tsuids.get(1));
}
@Test (expected = NoSuchUniqueName.class)
public void noSuchTagvMeta() throws Exception {
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web03"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
lookup.lookup();
}
@Test
public void metricAndTagkMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", null));
final SearchQuery query = new SearchQuery("sys.cpu.nice",
tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(2), tsuids.get(0));
}
@Test
public void metricAndTagkMetaStar() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "*"));
final SearchQuery query = new SearchQuery("sys.cpu.nice",
tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(2), tsuids.get(0));
}
@Test
public void metricAndTagkData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", null));
final SearchQuery query = new SearchQuery("sys.cpu.nice",
tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(2), tsuids.get(0));
}
@Test
public void metricAndTagvMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web02"));
final SearchQuery query = new SearchQuery("sys.cpu.idle",
tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(4), tsuids.get(0));
}
@Test
public void metricAndTagvMetaStar() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("*", "web02"));
final SearchQuery query = new SearchQuery("sys.cpu.idle",
tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(4), tsuids.get(0));
}
@Test
public void metricAndTagvData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>(null, "web02"));
final SearchQuery query = new SearchQuery("sys.cpu.idle",
tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(4), tsuids.get(0));
}
@Test
public void metricAndTagPairMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "web01"));
final SearchQuery query = new SearchQuery("sys.cpu.idle",
tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(3), tsuids.get(0));
}
@Test
public void metricAndTagPairData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "web01"));
final SearchQuery query = new SearchQuery("sys.cpu.idle",
tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
query.setUseMeta(false);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(1, tsuids.size());
assertArrayEquals(test_tsuids.get(3), tsuids.get(0));
}
@Test
public void tagPairOnlyMeta() throws Exception {
generateMeta();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "web01"));
final SearchQuery query = new SearchQuery(tags);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(3, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(2), tsuids.get(1));
assertArrayEquals(test_tsuids.get(3), tsuids.get(2));
}
@Test
public void tagPairOnlyData() throws Exception {
generateData();
final List<Pair<String, String>> tags =
new ArrayList<Pair<String, String>>(1);
tags.add(new Pair<String, String>("host", "web01"));
final SearchQuery query = new SearchQuery(tags);
query.setUseMeta(false);
final TimeSeriesLookup lookup = new TimeSeriesLookup(tsdb, query);
final List<byte[]> tsuids = lookup.lookup();
assertNotNull(tsuids);
assertEquals(3, tsuids.size());
assertArrayEquals(test_tsuids.get(0), tsuids.get(0));
assertArrayEquals(test_tsuids.get(2), tsuids.get(1));
assertArrayEquals(test_tsuids.get(3), tsuids.get(2));
}
// TODO test the dump to stdout
/**
* Stores some data in the mock tsdb-meta table for unit testing
*/
private void generateMeta() {
storage = new MockBase(tsdb, client, true, true, true, true);
storage.setFamily("t".getBytes(MockBase.ASCII()));
final byte[] val = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 };
for (final byte[] tsuid : test_tsuids) {
storage.addColumn(tsuid, TSMeta.COUNTER_QUALIFIER(), val);
}
}
  /**
   * Stores some data in the mock tsdb data table for unit testing.
   * One single data point is written per TSUID.
   */
  private void generateData() {
    storage = new MockBase(tsdb, client, true, true, true, true);
    storage.setFamily("t".getBytes(MockBase.ASCII()));
    // A single minimal data point: qualifier {0,0}, value {1}.
    final byte[] qual = new byte[] { 0, 0 };
    final byte[] val = new byte[] { 1 };
    for (final byte[] tsuid : test_tsuids) {
      // Row key layout: metric UID, then TIMESTAMP_BYTES (left as zeros,
      // i.e. base time 0), then the tag UIDs copied from the TSUID.
      byte[] row_key = new byte[tsuid.length + Const.TIMESTAMP_BYTES];
      System.arraycopy(tsuid, 0, row_key, 0, TSDB.metrics_width());
      System.arraycopy(tsuid, TSDB.metrics_width(), row_key, 
          TSDB.metrics_width() + Const.TIMESTAMP_BYTES, 
          tsuid.length - TSDB.metrics_width());
      storage.addColumn(row_key, qual, val);
    }
  }
}
|
kanwarmuhammad/Laravel-Vuex-Ecomerence | resources/js/store/modules/midnight/getters.js | export default {
  // get the midnight deals array from the store state
allMidNightDeals: state => state.all_midNightDeals
}
|
folio-org/stripes-acq-components | lib/Tags/TagsBadge/index.js | <reponame>folio-org/stripes-acq-components<filename>lib/Tags/TagsBadge/index.js
// Re-export the container's default export as this module's default.
export { default } from './TagsBadgeContainer';
|
ckamtsikis/cmssw | DataFormats/BTauReco/interface/TrackIPTagInfo.h | <filename>DataFormats/BTauReco/interface/TrackIPTagInfo.h
#ifndef BTauReco_TrackIpTagInfo_h
#define BTauReco_TrackIpTagInfo_h
#include "DataFormats/BTauReco/interface/RefMacros.h"
#include "DataFormats/TrackReco/interface/TrackFwd.h"
#include "DataFormats/BTauReco/interface/JTATagInfo.h"
#include "DataFormats/BTauReco/interface/IPTagInfo.h"
namespace reco {
  // Track-based impact-parameter tag info: the generic IPTagInfo template
  // instantiated with reco::Track references on top of jet-track association.
  typedef IPTagInfo<TrackRefVector, JTATagInfo> TrackIPTagInfo;
  // Generate the standard EDM Ref/RefVector/RefProd typedefs for this type.
  DECLARE_EDM_REFS(TrackIPTagInfo)
}  // namespace reco
#endif
|
Inviz/lsd-outdated- | Source/ART/Widget/Trait/Dimensions.js | ART.Widget.Trait.Dimensions = new Class({
size: {},
setSize: function(width, height) {
var size = {width: width, height: height};
$extend(this.options, size);
this.refresh(size);
},
setHeight: function(value, light) {
value = Math.max(this.style.current.minHeight || 0, value);
if (!light && (this.size.height == value)) return;
this.size.height = value;
if (!light) this.setStyle('height', value);
return true;
},
setWidth: function(value, light) {
value = Math.max(this.style.current.minWidth || 0, value);
if (this.size.width == value) return;
this.size.width = value;
if (!light) this.setStyle('width', value);
return true;
},
getClientHeight: function() {
var styles = this.style.current;
var height = styles.height;
if (!height || (height == "auto")) {
height = this.element.offsetHeight;
if (height > 0) height -= ((this.offset.padding.top || 0) + (this.offset.padding.bottom || 0))
}
height += styles.paddingTop || 0;
height += styles.paddingBottom || 0;
return height;
},
getClientWidth: function() {
var width = this.element.offsetWidth;
if (width > 0) {
var styles = this.style.current;
var parent = this.parentNode;
if (styles.width == "auto" && styles.display != "block") width -= ((this.offset.inside.left || 0) + (this.offset.inside.right || 0))
width -= ((this.offset.paint.left || 0) + (this.offset.paint.right || 0))
}
return width;
},
getOffsetHeight: function() {;
var styles = this.style.current;
var height = this.getClientHeight();
height += (styles.strokeWidth || 0) * 2
height += styles.borderBottomWidth || 0;
height += styles.borderTopWidth || 0;
return height;
},
getOffsetWidth: function() {
var styles = this.style.current;
var width = this.getClientWidth();
width += (styles.strokeWidth || 0) * 2
width += styles.borderLeftWidth || 0;
width += styles.borderBottomWidth || 0;
return width;
},
getLayoutHeight: function() {
var height = this.getOffsetHeight();
height += ((this.offset.padding.top || 0) - (this.offset.inside.top || 0));
height += ((this.offset.padding.bottom || 0) - (this.offset.inside.bottom || 0));
return height;
},
getLayoutWidth: function() {
var width = this.getOffsetWidth();
width += ((this.offset.inside.left || 0) + (this.style.current.marginLeft || 0));
width += ((this.offset.inside.right || 0) + (this.style.current.marginRight || 0));
return width;
}
}) |
tourdownunder/opennem | opennem/spiders/nem/__init__.py | <reponame>tourdownunder/opennem<gh_stars>0
import io
import logging
from datetime import datetime
import scrapy
from opennem.pipelines.files import LinkExtract
from opennem.pipelines.nem import (
ExtractCSV,
ReadStringHandle,
TableRecordSplitter,
UnzipSingleFilePipeline,
)
from opennem.pipelines.wem.balancing_summary import WemStoreBalancingSummary
from opennem.spiders.dirlisting import DirlistingSpider
from opennem.utils.handlers import open
def get_date_component(format_str, now=None):
    """Format the current (or a supplied) timestamp with ``format_str``.

    Args:
        format_str: A ``strftime`` format string, e.g. ``"%Y"``.
        now: Optional ``datetime`` to format instead of ``datetime.now()``.
            Added (backward-compatibly) so callers and tests can inject a
            fixed timestamp.

    Returns:
        The formatted date component as a string.
    """
    return (now or datetime.now()).strftime(format_str)
class NemXLSSpider(scrapy.Spider):
    """Spider fetching a single URL templated with the current date."""

    # Template values substituted into ``start_url`` when requests start.
    url_params = {
        "day": get_date_component("%d"),
        "month": get_date_component("%m"),
        "year": get_date_component("%Y"),
    }

    def start_requests(self):
        # ``start_url`` is supplied by concrete subclasses.
        yield scrapy.Request(self.start_url.format(**self.url_params))

    def parse(self, response):
        yield {"content": response.text}
class NemSingleMMSSpider(scrapy.Spider):
    """Spider downloading one MMS zip file and streaming it to the pipelines."""

    # Processing chain: unzip -> decode -> extract CSV tables -> split records.
    pipelines = {
        UnzipSingleFilePipeline,
        ReadStringHandle,
        ExtractCSV,
        TableRecordSplitter,
    }

    def start_requests(self):
        # ``url`` must be passed as a spider argument.
        if not hasattr(self, "url"):
            raise Exception("{} requires url parameter".format(self.__class__))
        yield scrapy.Request(self.url)

    def parse(self, response):
        yield {"body_stream": io.BytesIO(response.body)}
|
bertilxi/Pegaso | GUI/test/listar_participante.h | #ifndef LISTAR_PARTICIPANTE_H
#define LISTAR_PARTICIPANTE_H
#include <QDialog>
#include "Participante.h"
#include "gui.h"
#include <QValidator>
#include <QFileDialog>
#include "ver_competencia.h"
class ver_competencia;
namespace Ui {
class listar_participante;
}
// Dialog listing the participants of a competition; supports navigation back
// to the parent "view competition" dialog.
class listar_participante : public QDialog
{
    Q_OBJECT
public:
    // guiP:  application GUI/backend facade
    // compP: competition whose participants are listed
    // vc:    parent "view competition" dialog
    listar_participante(GUI* guiP,Competencia* compP,ver_competencia* vc, QWidget *parent = 0);
    ~listar_participante();
private slots:
    // Auto-connected handlers (Qt on_<object>_<signal> naming convention).
    void on_pushButton_clicked();
    void on_pushButton_2_clicked();
    void on_pushButton_7_clicked();
    void on_tabWidget_currentChanged(int index);
    void on_pushButton_8_clicked();
    void on_pushButton_6_clicked();
private:
    Ui::listar_participante *ui;           // generated UI wrapper
    QVector<Participante*> participantes;  // participants being displayed
    Competencia* competencia;              // competition being displayed
    GUI* gui;                              // backend facade
    QString imgUrl;                        // selected image path — TODO confirm
    ver_competencia* verCompetencia;       // parent dialog for navigation back
};
QT_BEGIN_NAMESPACE
class QRegExp;
QT_END_NAMESPACE
// QValidator for e-mail input: one pattern for complete addresses and one
// for acceptable partial input while the user is still typing (per the
// member names; semantics implemented in the .cpp).
class EmailValidator : public QValidator
{
    Q_OBJECT
public:
    explicit EmailValidator(QObject *parent = 0);
    State validate(QString &text, int &pos) const;
    void fixup(QString &text) const;
private:
    const QRegExp m_validMailRegExp;        // full-address pattern
    const QRegExp m_intermediateMailRegExp; // partial-input pattern
};
#endif // LISTAR_PARTICIPANTE_H
|
vibhorsingh11/LeetCode | LearnCards/src/test/java/org/phoenix/leetcode/learn/binarytree/Problem01_PreorderTraversalTest.java | <gh_stars>0
package org.phoenix.leetcode.learn.binarytree;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
import org.phoenix.leetcode.learn.binarytree.Problem01_PreorderTraversal.TreeNode;
import java.util.Arrays;
import java.util.List;
class Problem01_PreorderTraversalTest {
private final Problem01_PreorderTraversal test = new Problem01_PreorderTraversal();
@Test
void preorderTraversal() {
TreeNode n = new TreeNode(1);
n.right = new TreeNode(2);
n.right.left = new TreeNode(3);
List<Integer> expected = Arrays.asList(1, 2, 3);
assertEquals(expected, test.preorderTraversal(n));
}
} |
therealaldo/brassroots | app/actions/conversations/UpdateConversations/reducers.test.js | 'use strict';
/**
* @format
* @flow
*/
import reducer, {
initialState,
type State,
} from '../../../reducers/conversations';
import * as actions from '../UpdateConversations';
describe('update conversations reducer', () => {
it('returns initial state', () => {
expect(reducer(undefined, {})).toStrictEqual(initialState);
});
it('handles UPDATE_CONVERSATIONS', () => {
const updates: State = {newConversation: {text: 'foo', users: ['foo']}};
const newConversation = {...initialState.newConversation, text: 'foo', users: ['foo']};
const expectedState: State = {...initialState, newConversation};
expect(reducer(initialState, actions.updateConversations(updates))).toStrictEqual(expectedState);
});
}); |
kevinlq/Qt-Creator-Opensource-Study | qt-creator-opensource-src-4.6.1/src/plugins/cppeditor/cppuseselectionsupdater.cpp | /****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3 as published by the Free Software
** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
****************************************************************************/
#include "cppuseselectionsupdater.h"
#include "cppeditorwidget.h"
#include "cppeditordocument.h"
#include <cpptools/cpptoolsreuse.h>
#include <utils/textutils.h>
#include <QTextBlock>
#include <QTextCursor>
#include <utils/qtcassert.h>
enum { updateUseSelectionsInternalInMs = 500 };
namespace CppEditor {
namespace Internal {
CppUseSelectionsUpdater::CppUseSelectionsUpdater(TextEditor::TextEditorWidget *editorWidget)
    : m_editorWidget(editorWidget)
    , m_runnerRevision(-1) // -1 marks "no run started yet"
{
    // Debounce: collapse rapid scheduleUpdate() calls into a single update()
    // fired 500ms after the last one.
    m_timer.setSingleShot(true);
    m_timer.setInterval(updateUseSelectionsInternalInMs);
    connect(&m_timer, &QTimer::timeout, this, [this]() { update(); });
}
CppUseSelectionsUpdater::~CppUseSelectionsUpdater()
{
    // Abandon any in-flight lookup so no result is delivered after teardown.
    if (m_runnerWatcher)
        m_runnerWatcher->cancel();
}
void CppUseSelectionsUpdater::scheduleUpdate()
{
    // (Re)start the debounce timer; update() runs once it expires.
    m_timer.start();
}
void CppUseSelectionsUpdater::abortSchedule()
{
    // Cancel a pending (not yet fired) scheduled update.
    m_timer.stop();
}
CppUseSelectionsUpdater::RunnerInfo CppUseSelectionsUpdater::update(CallType callType)
{
    auto *cppEditorWidget = qobject_cast<CppEditorWidget *>(m_editorWidget);
    QTC_ASSERT(cppEditorWidget, return RunnerInfo::FailedToStart);

    auto *cppEditorDocument = qobject_cast<CppEditorDocument *>(cppEditorWidget->textDocument());
    QTC_ASSERT(cppEditorDocument, return RunnerInfo::FailedToStart);

    // Anchor the request at the start of the word under the cursor so that
    // moving within one identifier does not count as a new request.
    CppTools::CursorInfoParams params;
    params.semanticInfo = cppEditorWidget->semanticInfo();
    params.textCursor = Utils::Text::wordStartCursor(cppEditorWidget->textCursor());

    if (callType == CallType::Asynchronous) {
        // Same identifier at the same document revision: the selections
        // currently shown are already correct.
        if (isSameIdentifierAsBefore(params.textCursor))
            return RunnerInfo::AlreadyUpToDate;

        // Supersede any still-running request.
        if (m_runnerWatcher)
            m_runnerWatcher->cancel();

        m_runnerWatcher.reset(new QFutureWatcher<CursorInfo>);
        connect(m_runnerWatcher.data(), &QFutureWatcherBase::finished,
                this, &CppUseSelectionsUpdater::onFindUsesFinished);

        // Remember what this run was started for; onFindUsesFinished() drops
        // the result if the document or the cursor moved on in the meantime.
        m_runnerRevision = m_editorWidget->document()->revision();
        m_runnerWordStartPosition = params.textCursor.position();

        m_runnerWatcher->setFuture(cppEditorDocument->cursorInfo(params));
        return RunnerInfo::Started;
    } else { // synchronous case
        abortSchedule();

        const int startRevision = cppEditorDocument->document()->revision();
        QFuture<CursorInfo> future = cppEditorDocument->cursorInfo(params);
        if (future.isCanceled())
            return RunnerInfo::Invalid;

        // QFuture::waitForFinished seems to block completely, not even
        // allowing to process events from QLocalSocket.
        while (!future.isFinished()) {
            if (future.isCanceled())
                return RunnerInfo::Invalid;

            // The document must not change while we pump events below;
            // otherwise the result would not match the text it is applied to.
            QTC_ASSERT(startRevision == cppEditorDocument->document()->revision(),
                       return RunnerInfo::Invalid);
            QCoreApplication::processEvents(QEventLoop::ExcludeUserInputEvents);
        }

        processResults(future.result());
        return RunnerInfo::Invalid;
    }
}
bool CppUseSelectionsUpdater::isSameIdentifierAsBefore(const QTextCursor &cursorAtWordStart) const
{
    // No previous run yet.
    if (m_runnerRevision == -1)
        return false;

    // The document changed since the last run.
    if (m_runnerRevision != m_editorWidget->document()->revision())
        return false;

    // Same revision: an identical word-start position means the cursor is
    // still within the same identifier.
    return m_runnerWordStartPosition == cursorAtWordStart.position();
}
void CppUseSelectionsUpdater::processResults(const CursorInfo &result)
{
    // Selections for the symbol under the cursor; forwarded via the signal
    // below only when the use ranges belong to a local variable.
    ExtraSelections localVariableSelections;
    // Skip the (potentially costly) update when there is nothing to show and
    // nothing currently shown that would need clearing.
    if (!result.useRanges.isEmpty() || !currentUseSelections().isEmpty()) {
        ExtraSelections selections = updateUseSelections(result.useRanges);
        if (result.areUseRangesForLocalVariable)
            localVariableSelections = selections;
    }
    updateUnusedSelections(result.unusedVariablesRanges);
    emit selectionsForVariableUnderCursorUpdated(localVariableSelections);
    emit finished(result.localUses, true);
}
void CppUseSelectionsUpdater::onFindUsesFinished()
{
    QTC_ASSERT(m_runnerWatcher,
               emit finished(CppTools::SemanticInfo::LocalUseMap(), false); return);

    // Drop the result when it no longer matches reality: the run was
    // canceled, the document changed, or the cursor left the identifier.
    const bool canceled = m_runnerWatcher->isCanceled();
    const bool revisionChanged = m_runnerRevision != m_editorWidget->document()->revision();
    const bool cursorMoved = m_runnerWordStartPosition
            != Utils::Text::wordStartCursor(m_editorWidget->textCursor()).position();
    if (canceled || revisionChanged || cursorMoved) {
        emit finished(CppTools::SemanticInfo::LocalUseMap(), false);
        return;
    }

    processResults(m_runnerWatcher->result());

    m_runnerWatcher.reset();
}
CppUseSelectionsUpdater::ExtraSelections
CppUseSelectionsUpdater::toExtraSelections(const CursorInfo::Ranges &ranges,
                                           TextEditor::TextStyle style)
{
    CppUseSelectionsUpdater::ExtraSelections selections;
    selections.reserve(ranges.size());

    // Hoisted loop invariants: neither the document nor the character format
    // for the given style depends on the individual range.
    QTextDocument *document = m_editorWidget->document();
    const auto format
        = m_editorWidget->textDocument()->fontSettings().toTextCharFormat(style);

    for (const CursorInfo::Range &range : ranges) {
        // Convert the 1-based line/column into an absolute document position.
        const int position
            = document->findBlockByNumber(static_cast<int>(range.line) - 1).position()
                + static_cast<int>(range.column) - 1;
        const int anchor = position + static_cast<int>(range.length);

        QTextEdit::ExtraSelection sel;
        sel.format = format;
        sel.cursor = QTextCursor(document);
        sel.cursor.setPosition(anchor);
        sel.cursor.setPosition(position, QTextCursor::KeepAnchor);

        selections.append(sel);
    }

    return selections;
}
CppUseSelectionsUpdater::ExtraSelections
CppUseSelectionsUpdater::currentUseSelections() const
{
    // Selections currently shown for semantic "uses" highlighting.
    return m_editorWidget->extraSelections(TextEditor::TextEditorWidget::CodeSemanticsSelection);
}
CppUseSelectionsUpdater::ExtraSelections
CppUseSelectionsUpdater::updateUseSelections(const CursorInfo::Ranges &ranges)
{
    // Highlight all uses of the symbol under the cursor and return the
    // selections so callers can forward them (e.g. for local renaming).
    const ExtraSelections selections = toExtraSelections(ranges, TextEditor::C_OCCURRENCES);
    m_editorWidget->setExtraSelections(TextEditor::TextEditorWidget::CodeSemanticsSelection,
                                       selections);
    return selections;
}
void CppUseSelectionsUpdater::updateUnusedSelections(const CursorInfo::Ranges &ranges)
{
    // Mark unused variables with the dedicated "unused occurrence" style.
    const ExtraSelections selections = toExtraSelections(ranges, TextEditor::C_OCCURRENCES_UNUSED);
    m_editorWidget->setExtraSelections(TextEditor::TextEditorWidget::UnusedSymbolSelection,
                                       selections);
}
} // namespace Internal
} // namespace CppEditor
|
ltg001/word_game | final-project/word_game/player.h | #ifndef PLAYER_H
#define PLAYER_H
#include "include.h"
#include "person.cpp"
namespace Ui {
class Player;
}
/**
 * @brief Dialog for a logged-in player: entry point to games, question
 *        browsing, profile changes, avatar handling and bonus collection.
 */
class Player : public QDialog, public Person {
    Q_OBJECT

  public:
    /**
     * @brief Construct the player dialog.
     * @param parent optional parent widget
     */
    explicit Player( QWidget *parent = 0 );

    /** @brief Destroy the dialog and its UI. */
    ~Player();

    /** @brief Refresh the widgets from the current player state. */
    void update();

  public slots:
    /** @brief Grant the player's pending bonus. */
    void get_bonus();

  private slots:
    /** @brief Auto-connected handler for the "game" button. */
    void on_game_clicked();

    /** @brief Auto-connected handler for the "questions" button. */
    void on_questions_clicked();

    /** @brief Auto-connected handler for the "change" button. */
    void on_change_clicked();

    /** @brief Auto-connected handler for the unnamed push button. */
    void on_pushButton_clicked();

  private:
    Ui::Player *ui;      /**< Generated UI wrapper. */
    int bonus;           /**< Bonus amount — presumably points pending; TODO confirm. */
    QString last_failed; /**< Last failed item — presumably a word/question; TODO confirm. */

    /** @brief Level the player up when warranted by @p question_level. */
    void check_levelup( int question_level );

    /**
     * @brief Run one game round.
     * @return bool — presumably success/win; TODO confirm semantics.
     */
    bool play_game();

    /** @brief Set the avatar image from the file at @p path. */
    void add_avatar( QString path );

    // int get_level(QString s);
};
#endif // PLAYER_H
|
scgg/maplefetion | src/net/solosky/maplefetion/net/mina/MinaTransferFactory.java | <filename>src/net/solosky/maplefetion/net/mina/MinaTransferFactory.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Project : MapleFetion2
* Package : net.solosky.maplefetion.net.mina
* File : MinaTransferFactory.java
* Author : solosky < <EMAIL> >
* Created : 2010-6-19
* License : Apache License 2.0
*/
package net.solosky.maplefetion.net.mina;
import java.net.InetSocketAddress;
import java.util.concurrent.Executor;
import net.solosky.maplefetion.net.MutiConnectionTransferFactory;
import net.solosky.maplefetion.net.Port;
import net.solosky.maplefetion.net.Transfer;
import net.solosky.maplefetion.net.TransferException;
import org.apache.mina.core.future.ConnectFuture;
import org.apache.mina.transport.socket.nio.NioProcessor;
import org.apache.mina.transport.socket.nio.NioSocketConnector;
/**
 * Mina-based transfer factory.
 *
 * Sharing one Mina connector across clients improves efficiency when several
 * clients are logged in at the same time.
 *
 * @author solosky <<EMAIL>>
 */
public class MinaTransferFactory extends MutiConnectionTransferFactory
{
    /**
     * Shared NIO connector used to open all transfers.
     */
    private NioSocketConnector connector;

    /**
     * Creates a factory backed by the given executor.
     * @param executor thread executor driving the NIO processor
     */
    public MinaTransferFactory(Executor executor)
    {
        this.connector = new NioSocketConnector(new NioProcessor(executor));
        this.connector.setHandler(new MinaNioHandler());
    }

    /**
     * Creates a factory that reuses an existing NioSocketConnector.
     * @param connector
     */
    public MinaTransferFactory(NioSocketConnector connector)
    {
        this.connector = connector;
    }

    /* (non-Javadoc)
     * @see net.solosky.maplefetion.net.MutiConnectionTransferFactory#getLocalPort(net.solosky.maplefetion.net.Transfer)
     */
    @Override
    protected Port getLocalPort(Transfer transfer)
    {
        MinaTransfer ts = (MinaTransfer) transfer;
        InetSocketAddress addr = (InetSocketAddress) ts.getSession().getLocalAddress();
        return new Port(addr.getAddress(), addr.getPort());
    }

    /* (non-Javadoc)
     * @see net.solosky.maplefetion.net.TransferFactory#createTransfer(net.solosky.maplefetion.net.Port)
     */
    @Override
    public Transfer createTransfer(Port port) throws TransferException
    {
        ConnectFuture cf = connector.connect(new InetSocketAddress(port.getAddress(), port.getPort()));
        // Wait for the connect attempt to complete.
        try {
            cf.await();
        } catch (InterruptedException e) {
            throw new TransferException(e);
        }
        // Fail fast if the connection could not be established.
        if(!cf.isConnected())
            throw new TransferException("Connecting to "+port+" failed..");
        Transfer transfer = new MinaTransfer(cf.getSession());
        cf.getSession().setAttribute(MinaTransfer.class, transfer);
        return transfer;
    }

    /**
     * Really closes the factory, disposing the connector and its resources.
     *
     * Because Mina is used here to make multiple simultaneous clients cheap,
     * closing a single client does not close this factory; the caller must
     * invoke this method explicitly when all clients are done.
     */
    public void reallyCloseFactory()
    {
        this.connector.dispose();
    }
}
|
sudhan499/ignite | modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/InMemoryCachedDistributedMetaStorageBridge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.processors.metastorage.persistence;
import java.io.Serializable;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.function.BiConsumer;
import org.apache.ignite.IgniteCheckedException;
import org.jetbrains.annotations.Nullable;
import static org.apache.ignite.internal.processors.metastorage.persistence.DistributedMetaStorageUtil.unmarshal;
/** */
class InMemoryCachedDistributedMetaStorageBridge implements DistributedMetaStorageBridge {
    /** Owning distributed metastorage processor. */
    private DistributedMetaStorageImpl dms;

    /** In-memory key/value storage, kept sorted by key. */
    private final Map<String, byte[]> cache = new ConcurrentSkipListMap<>();

    /**
     * @param dms Distributed metastorage processor.
     */
    public InMemoryCachedDistributedMetaStorageBridge(DistributedMetaStorageImpl dms) {
        this.dms = dms;
    }

    /** {@inheritDoc} */
    @Override public Serializable read(String globalKey, boolean unmarshal) throws IgniteCheckedException {
        byte[] bytes = cache.get(globalKey);

        if (!unmarshal)
            return bytes;

        return unmarshal(dms.marshaller, bytes);
    }

    /** {@inheritDoc} */
    @Override public void iterate(
        String globalKeyPrefix,
        BiConsumer<String, ? super Serializable> cb,
        boolean unmarshal
    ) throws IgniteCheckedException {
        for (Map.Entry<String, byte[]> e : cache.entrySet()) {
            if (!e.getKey().startsWith(globalKeyPrefix))
                continue;

            Serializable val = unmarshal ? unmarshal(dms.marshaller, e.getValue()) : e.getValue();

            cb.accept(e.getKey(), val);
        }
    }

    /** {@inheritDoc} */
    @Override public void write(String globalKey, @Nullable byte[] valBytes) {
        if (valBytes != null)
            cache.put(globalKey, valBytes);
        else
            cache.remove(globalKey);
    }

    /** {@inheritDoc} */
    @Override public void onUpdateMessage(DistributedMetaStorageHistoryItem histItem) {
        dms.setVer(dms.getVer().nextVersion(histItem));
    }

    /** {@inheritDoc} */
    @Override public void removeHistoryItem(long ver) {
        // No-op for the in-memory bridge.
    }

    /** {@inheritDoc} */
    @Override public DistributedMetaStorageKeyValuePair[] localFullData() {
        return cache.entrySet().stream()
            .map(e -> new DistributedMetaStorageKeyValuePair(e.getKey(), e.getValue()))
            .toArray(DistributedMetaStorageKeyValuePair[]::new);
    }

    /**
     * Restores storage contents and version from node startup data, if present.
     *
     * @param startupExtras Data collected during node startup.
     */
    public void restore(StartupExtras startupExtras) {
        DistributedMetaStorageClusterNodeData nodeData = startupExtras.fullNodeData;

        if (nodeData == null)
            return;

        dms.setVer(nodeData.ver);

        for (DistributedMetaStorageKeyValuePair pair : nodeData.fullData)
            cache.put(pair.key, pair.valBytes);

        int histLen = nodeData.hist.length;

        for (int idx = 0; idx < histLen; idx++)
            dms.addToHistoryCache(dms.getVer().id + idx + 1 - histLen, nodeData.hist[idx]);
    }
}
|
laigukf/LaigukfSDK-iOS | Laigu-SDK-files/LGChatViewController/MessageModels/LGRichTextMessage.h | <reponame>laigukf/LaigukfSDK-iOS<gh_stars>0
//
// LGRichTextMessage.h
// Laigu-SDK-Demo
//
// Created by zhangshunxing on 16/6/14.
// Copyright © 2016年 Laigu. All rights reserved.
//
#import "LGBaseMessage.h"

/// Chat message model carrying rich-text ("card") content.
@interface LGRichTextMessage : LGBaseMessage

/// Thumbnail image reference shown with the message.
/// NOTE(review): presumably a URL string from the server payload — confirm.
@property (nonatomic, copy)NSString *thumbnail;

/// Short summary text of the rich content.
@property (nonatomic, copy)NSString *summary;

/// Full rich-text content body.
@property (nonatomic, copy)NSString *content;

/// Populates the message fields from a server-provided dictionary.
- (id)initWithDictionary:(NSDictionary *)dictionary;

@end
|
muddessir/framework | machine/qemu/sources/u-boot/drivers/watchdog/xilinx_tb_wdt.c | // SPDX-License-Identifier: GPL-2.0+
/*
* Xilinx AXI platforms watchdog timer driver.
*
* Author(s): <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
*
* Copyright (c) 2011-2018 Xilinx Inc.
*/
#include <common.h>
#include <dm.h>
#include <log.h>
#include <wdt.h>
#include <linux/err.h>
#include <linux/io.h>
#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status Mask */
#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state Mask */
#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 Mask*/
#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 Mask */

/* Memory-mapped register layout of the AXI timebase watchdog IP. */
struct watchdog_regs {
	u32 twcsr0; /* 0x0 - control/status register 0 */
	u32 twcsr1; /* 0x4 - control/status register 1 */
	u32 tbr; /* 0x8 - timebase register */
};

/* Per-device platform data decoded from the device tree. */
struct xlnx_wdt_plat {
	bool enable_once; /* true: watchdog cannot be stopped once started */
	struct watchdog_regs *regs; /* base address of the register block */
};
/*
 * Ping (kick) the watchdog so it does not expire.
 *
 * Only touches the hardware when one of the enable bits is set; writing
 * the WDS bit back presumably clears it (write-one-to-clear) — confirm
 * against the Xilinx timebase WDT manual.
 */
static int xlnx_wdt_reset(struct udevice *dev)
{
	u32 reg;
	struct xlnx_wdt_plat *plat = dev_get_plat(dev);

	debug("%s ", __func__);

	/* Read the current contents of TCSR0 */
	reg = readl(&plat->regs->twcsr0);

	/* Clear the watchdog WDS bit */
	if (reg & (XWT_CSR0_EWDT1_MASK | XWT_CSRX_EWDT2_MASK))
		writel(reg | XWT_CSR0_WDS_MASK, &plat->regs->twcsr0);

	return 0;
}
/*
 * Disable the watchdog by clearing both enable bits.
 *
 * Returns -EBUSY when the device tree configured the watchdog as
 * enable-once, i.e. it must never be stopped after starting.
 */
static int xlnx_wdt_stop(struct udevice *dev)
{
	u32 reg;
	struct xlnx_wdt_plat *plat = dev_get_plat(dev);

	if (plat->enable_once) {
		debug("Can't stop Xilinx watchdog.\n");
		return -EBUSY;
	}

	/* Read the current contents of TCSR0 */
	reg = readl(&plat->regs->twcsr0);

	/* Clear enable bit 1 in TCSR0 and enable bit 2 in TCSR1. */
	writel(reg & ~XWT_CSR0_EWDT1_MASK, &plat->regs->twcsr0);
	writel(~XWT_CSRX_EWDT2_MASK, &plat->regs->twcsr1);

	debug("Watchdog disabled!\n");

	return 0;
}
/*
 * Start the watchdog by setting both enable bits (and clearing the
 * status bits in TCSR0 at the same time).
 *
 * NOTE(review): the timeout and flags arguments are ignored — the period
 * is presumably fixed by the IP configuration; confirm against the HW docs.
 */
static int xlnx_wdt_start(struct udevice *dev, u64 timeout, ulong flags)
{
	struct xlnx_wdt_plat *plat = dev_get_plat(dev);

	debug("%s:\n", __func__);

	writel((XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK | XWT_CSR0_EWDT1_MASK),
	       &plat->regs->twcsr0);

	writel(XWT_CSRX_EWDT2_MASK, &plat->regs->twcsr1);

	return 0;
}
/* Nothing to initialize at probe time; configuration is done in of_to_plat. */
static int xlnx_wdt_probe(struct udevice *dev)
{
	debug("%s: Probing wdt%u\n", __func__, dev_seq(dev));

	return 0;
}
/*
 * Decode device-tree properties into the platform data:
 * register base and the optional "xlnx,wdt-enable-once" flag (default 0).
 *
 * NOTE(review): dev_read_addr() failures are checked with IS_ERR(); verify
 * this also covers the FDT_ADDR_T_NONE sentinel on this platform.
 */
static int xlnx_wdt_of_to_plat(struct udevice *dev)
{
	struct xlnx_wdt_plat *plat = dev_get_plat(dev);

	plat->regs = (struct watchdog_regs *)dev_read_addr(dev);
	if (IS_ERR(plat->regs))
		return PTR_ERR(plat->regs);

	plat->enable_once = dev_read_u32_default(dev, "xlnx,wdt-enable-once",
						 0);

	debug("%s: wdt-enable-once %d\n", __func__, plat->enable_once);

	return 0;
}
/* Watchdog uclass operations implemented by this driver. */
static const struct wdt_ops xlnx_wdt_ops = {
	.start = xlnx_wdt_start,
	.reset = xlnx_wdt_reset,
	.stop = xlnx_wdt_stop,
};

/* Device-tree compatible strings for supported IP versions. */
static const struct udevice_id xlnx_wdt_ids[] = {
	{ .compatible = "xlnx,xps-timebase-wdt-1.00.a", },
	{ .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
	{},
};

/* Driver-model registration. */
U_BOOT_DRIVER(xlnx_wdt) = {
	.name = "xlnx_wdt",
	.id = UCLASS_WDT,
	.of_match = xlnx_wdt_ids,
	.probe = xlnx_wdt_probe,
	.plat_auto = sizeof(struct xlnx_wdt_plat),
	.of_to_plat = xlnx_wdt_of_to_plat,
	.ops = &xlnx_wdt_ops,
};
|
zettadb/zettalib | src/vendor/mariadb-10.6.7/storage/mroonga/vendor/groonga/lib/request_canceler.c | <gh_stars>0
/* -*- c-basic-offset: 2 -*- */
/*
Copyright(C) 2014-2016 Brazil
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License version 2.1 as published by the Free Software Foundation.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*/
#include "grn_ctx.h"
#include "grn_ctx_impl.h"
#include "grn_request_canceler.h"
typedef struct _grn_request_canceler grn_request_canceler;

/* Global registry of cancelable requests, keyed by request ID string. */
struct _grn_request_canceler {
  grn_hash *entries; /* request ID -> grn_request_canceler_entry */
  grn_mutex mutex;   /* guards every access to entries */
};

typedef struct _grn_request_canceler_entry grn_request_canceler_entry;

/* One registered request: the context that is executing it. */
struct _grn_request_canceler_entry {
  grn_ctx *ctx;
};

/* Private context used for the canceler's own hash operations. */
static grn_ctx grn_the_request_canceler_ctx;
/* Process-wide singleton; NULL until grn_request_canceler_init() succeeds. */
static grn_request_canceler *grn_the_request_canceler = NULL;
/*
 * Initialize the process-wide request canceler singleton.
 *
 * Returns GRN_TRUE on success. On failure the global pointer is left
 * NULL so later calls can detect that initialization did not complete.
 */
grn_bool
grn_request_canceler_init(void)
{
  grn_ctx *ctx = &grn_the_request_canceler_ctx;
  grn_ctx_init(ctx, 0);
  grn_the_request_canceler = GRN_MALLOC(sizeof(grn_request_canceler));
  if (!grn_the_request_canceler) {
    ERR(GRN_NO_MEMORY_AVAILABLE,
        "[request-canceler] failed to allocate the global request canceler");
    return GRN_FALSE;
  }
  grn_the_request_canceler->entries =
    grn_hash_create(ctx, NULL, GRN_TABLE_MAX_KEY_SIZE,
                    sizeof(grn_request_canceler_entry), GRN_OBJ_KEY_VAR_SIZE);
  if (!grn_the_request_canceler->entries) {
    /* Fix: the original leaked the allocated canceler here and left the
     * global pointer non-NULL with a NULL entries table, which would make
     * later register/cancel calls dereference a NULL hash. */
    GRN_FREE(grn_the_request_canceler);
    grn_the_request_canceler = NULL;
    return GRN_FALSE;
  }
  MUTEX_INIT(grn_the_request_canceler->mutex);
  return GRN_TRUE;
}
/*
 * Associate ctx with request_id (size bytes, not NUL-terminated) so the
 * request can later be canceled by ID. Called when request execution starts.
 */
void
grn_request_canceler_register(grn_ctx *ctx,
                              const char *request_id, unsigned int size)
{
  MUTEX_LOCK(grn_the_request_canceler->mutex);
  {
    grn_hash *entries = grn_the_request_canceler->entries;
    grn_id id;
    void *value;
    /* All hash operations run in the canceler's private context, not the
     * caller's, so the caller's error state is never disturbed. */
    id = grn_hash_add(&grn_the_request_canceler_ctx,
                      entries, request_id, size, &value, NULL);
    if (id) {
      grn_request_canceler_entry *entry = value;
      entry->ctx = ctx;
    }
  }
  MUTEX_UNLOCK(grn_the_request_canceler->mutex);
}
/*
 * Remove the request_id mapping when request execution finishes.
 * If the request ended up canceled (ctx->rc == GRN_CANCEL), record a
 * notice on the caller's context.
 */
void
grn_request_canceler_unregister(grn_ctx *ctx,
                                const char *request_id, unsigned int size)
{
  MUTEX_LOCK(grn_the_request_canceler->mutex);
  {
    grn_hash *entries = grn_the_request_canceler->entries;
    grn_hash_delete(&grn_the_request_canceler_ctx,
                    entries, request_id, size, NULL);
  }
  MUTEX_UNLOCK(grn_the_request_canceler->mutex);
  /* Reported outside the lock: only touches the caller's ctx. */
  if (ctx->rc == GRN_CANCEL) {
    ERRSET(ctx, GRN_LOG_NOTICE, ctx->rc,
           "[request-canceler] a request is canceled: <%.*s>",
           size, request_id);
  }
}
/*
 * Mark the entry's context as canceled.
 *
 * Returns GRN_TRUE only when the context was still in GRN_SUCCESS state,
 * so a request is canceled at most once and error states are preserved.
 */
static grn_bool
grn_request_canceler_cancel_entry(grn_request_canceler_entry *entry)
{
  if (entry->ctx->rc == GRN_SUCCESS) {
    entry->ctx->rc = GRN_CANCEL;
    /* Stop the pending request timer, if any, so it does not fire for an
     * already-canceled request. Clear the id before unregistering. */
    if (entry->ctx->impl->current_request_timer_id) {
      void *timer_id = entry->ctx->impl->current_request_timer_id;
      entry->ctx->impl->current_request_timer_id = NULL;
      grn_request_timer_unregister(timer_id);
    }
    return GRN_TRUE;
  } else {
    return GRN_FALSE;
  }
}
/*
 * Cancel the request registered under request_id.
 *
 * Returns GRN_TRUE when a matching, not-yet-canceled request was found
 * and marked canceled; GRN_FALSE otherwise.
 */
grn_bool
grn_request_canceler_cancel(const char *request_id, unsigned int size)
{
  grn_bool canceled = GRN_FALSE;
  MUTEX_LOCK(grn_the_request_canceler->mutex);
  {
    grn_ctx *ctx = &grn_the_request_canceler_ctx;
    grn_hash *entries = grn_the_request_canceler->entries;
    void *value;
    if (grn_hash_get(ctx, entries, request_id, size, &value)) {
      grn_request_canceler_entry *entry = value;
      if (grn_request_canceler_cancel_entry(entry)) {
        canceled = GRN_TRUE;
      }
    }
  }
  MUTEX_UNLOCK(grn_the_request_canceler->mutex);
  return canceled;
}
/*
 * Cancel every currently registered request.
 *
 * Returns GRN_TRUE when at least one request was newly canceled.
 */
grn_bool
grn_request_canceler_cancel_all(void)
{
  grn_bool canceled = GRN_FALSE;
  MUTEX_LOCK(grn_the_request_canceler->mutex);
  {
    grn_ctx *ctx = &grn_the_request_canceler_ctx;
    grn_hash *entries = grn_the_request_canceler->entries;
    grn_hash_cursor *cursor;
    /* Full scan over all entries; no key restriction, no offset/limit. */
    cursor = grn_hash_cursor_open(ctx, entries,
                                  NULL, 0, NULL, 0,
                                  0, -1, 0);
    if (cursor) {
      while (grn_hash_cursor_next(ctx, cursor) != GRN_ID_NIL) {
        void *value;
        if (grn_hash_cursor_get_value(ctx, cursor, &value) > 0) {
          grn_request_canceler_entry *entry = value;
          if (grn_request_canceler_cancel_entry(entry)) {
            canceled = GRN_TRUE;
          }
        }
      }
      grn_hash_cursor_close(ctx, cursor);
    }
  }
  MUTEX_UNLOCK(grn_the_request_canceler->mutex);
  return canceled;
}
/*
 * Tear down the singleton: close the hash, destroy the mutex, free the
 * canceler and finalize its private context.
 * Must only be called after a successful grn_request_canceler_init() —
 * the singleton is dereferenced unconditionally.
 */
void
grn_request_canceler_fin(void)
{
  grn_ctx *ctx = &grn_the_request_canceler_ctx;
  grn_hash_close(ctx, grn_the_request_canceler->entries);
  MUTEX_FIN(grn_the_request_canceler->mutex);
  GRN_FREE(grn_the_request_canceler);
  grn_the_request_canceler = NULL;
  grn_ctx_fin(ctx);
}
|
410757864530-dead-salmonids/aaravosbot-geode | db/migrations/20190413221901_add_muted_users_table_to_database.rb | <filename>db/migrations/20190413221901_add_muted_users_table_to_database.rb
# Migration: AddMutedUsersTableToDatabase
# Creates the muted_users table used to track temporary mutes.
# NOTE(review): the table only stores an id and an expiry time — there is
# no column linking a row to a user; confirm the association is tracked
# elsewhere (e.g. in application code) before relying on this schema.
Sequel.migration do
  change do
    create_table(:muted_users) do
      primary_key :id
      # Timestamp at which the mute expires.
      Time :end_time
    end
  end
end
youngbloood/leetcode | june/slice.go | package june
/*
# Random Pick with Weight
# https://leetcode.com/explore/challenge/card/june-leetcoding-challenge/539/week-1-june-1st-june-7th/3351/
Given an array w of positive integers, where w[i] describes the weight of index i(0-indexed), write a function pickIndex which randomly picks an index in proportion to its weight.
For example, given an input list of values w = [2, 8], when we pick up a number out of it, the chance is that 8 times out of 10 we should pick the number 1 as the answer since it's the second element of the array (w[1] = 8).
Example 1:
Input
["Solution","pickIndex"]
[[[1]],[]]
Output
[null,0]
Explanation
Solution solution = new Solution([1]);
solution.pickIndex(); // return 0. Since there is only one single element on the array the only option is to return the first element.
Example 2:
Input
["Solution","pickIndex","pickIndex","pickIndex","pickIndex","pickIndex"]
[[[1,3]],[],[],[],[],[]]
Output
[null,1,1,1,1,0]
Explanation
Solution solution = new Solution([1, 3]);
solution.pickIndex(); // return 1. It's returning the second element (index = 1) that has probability of 3/4.
solution.pickIndex(); // return 1
solution.pickIndex(); // return 1
solution.pickIndex(); // return 1
solution.pickIndex(); // return 0. It's returning the first element (index = 0) that has probability of 1/4.
Since this is a randomization problem, multiple answers are allowed so the following outputs can be considered correct :
[null,1,1,1,1,0]
[null,1,1,1,1,1]
[null,1,1,1,0,0]
[null,1,1,1,0,1]
[null,1,0,1,0,0]
......
and so on.
Constraints:
1 <= w.length <= 10000
1 <= w[i] <= 10^5
pickIndex will be called at most 10000 times.
*/
// Solution implements the LeetCode "Random Pick with Weight" API.
// NOTE(review): this is an empty stub — no weights are stored, so
// PickIndex is only correct for the single-element case w = [1].
type Solution struct {
}

// Constructor builds a Solution from the weight array w.
// NOTE(review): w is currently ignored — TODO implement prefix sums.
func Constructor(w []int) Solution {
	return Solution{}
}

// PickIndex should return index i with probability w[i]/sum(w).
// NOTE(review): always returns 0; correct only when len(w) == 1.
func (this *Solution) PickIndex() int {
	return 0
}
/**
* Your Solution object will be instantiated and called as such:
* obj := Constructor(w);
* param_1 := obj.PickIndex();
*/
/*
# Coin Change 2
# https://leetcode.com/explore/challenge/card/june-leetcoding-challenge/539/week-1-june-1st-june-7th/3353/
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
Example 3:
Input: amount = 10, coins = [10]
Output: 1
Note:
You can assume that
0 <= amount <= 5000
1 <= coin <= 5000
the number of coins is less than 500
the answer is guaranteed to fit into signed 32-bit integer
*/
// CoinChange returns the number of distinct coin combinations that sum to
// amount, given unlimited supply of each coin denomination.
func CoinChange(amount int, coins []int) int {
	return coinChange(amount, coins)
}

// coinChange solves "Coin Change 2" with bottom-up dynamic programming.
//
// The original implementation counted coins for which coin%amount == 0,
// which is both the wrong divisibility test (backwards) and the wrong
// algorithm (it cannot count multi-coin combinations), and it panicked
// with a division by zero when amount == 0.
//
// dp[a] holds the number of combinations forming sum a using the coins
// considered so far; iterating coins in the outer loop counts each
// combination once regardless of coin order.
func coinChange(amount int, coins []int) int {
	dp := make([]int, amount+1)
	dp[0] = 1 // one way to form 0: choose nothing
	for _, coin := range coins {
		for a := coin; a <= amount; a++ {
			dp[a] += dp[a-coin]
		}
	}
	return dp[amount]
}
|
skavt/CRM | vuejs/src/store/modules/invitation/mutation-types.js | <gh_stars>0
// Vuex mutation types for the invitation module. Each constant is
// namespaced with the 'invitation/' prefix to avoid collisions with
// mutations of other store modules.
export const SHOW_INVITE_MODAL = 'invitation/SHOW_INVITE_MODAL';
export const HIDE_INVITE_MODAL = 'invitation/HIDE_INVITE_MODAL';
export const SET_INVITATION_DATA = 'invitation/SET_INVITATION_DATA';
export const ADD_NEW_INVITED_USER = 'invitation/ADD_NEW_INVITED_USER';
export const DELETE_INVITED_USER = 'invitation/DELETE_INVITED_USER';
Seitenbau/BauPlan-Client | src/containers/Search/tests/reducer.test.js | import { fromJS } from 'immutable';
import searchFieldReducer from '../reducer';

// The reducer must fall through to its initial immutable state when called
// with no prior state and an unrecognized (empty) action.
describe('searchFieldReducer', () => {
  it('returns the initial state', () => {
    expect(searchFieldReducer(undefined, {})).toEqual(
      fromJS({ focused: false, value: '' })
    );
  });
});
bitwizeshift/Alloy | lib/alloy-core/include/alloy/core/api.hpp | <filename>lib/alloy-core/include/alloy/core/api.hpp<gh_stars>1-10
/*****************************************************************************
* \file api.hpp
*
* \brief This header contains the import/export API macro guards for the
* Alloy::Core library
*****************************************************************************/
/*
The MIT License (MIT)
Copyright (c) 2019 <NAME> All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef ALLOY_CORE_API_HPP
#define ALLOY_CORE_API_HPP

// Guard against consumers (or build systems) defining the output macros
// before this header gets the chance to compute them.
#ifdef ALLOY_CORE_API_EXPORT
# error "ALLOY_CORE_API_EXPORT defined before inclusion of the config header"
#endif
#ifdef ALLOY_CORE_API_IMPORT
# error "ALLOY_CORE_API_IMPORT defined before inclusion of the config header"
#endif
#ifdef ALLOY_CORE_API
# error "ALLOY_CORE_API defined before inclusion of the config header"
#endif

// A static library neither exports nor imports symbols, so both flags set
// at once is a configuration error.
#if defined(ALLOY_CORE_EXPORT) && defined(ALLOY_CORE_STATIC)
# error "ALLOY_CORE_EXPORT and ALLOY_CORE_STATIC cannot both be set"
#endif

//! \def ALLOY_CORE_API_EXPORT
//!
//! \brief Portable symbol for exporting a symbol during build/link.

//! \def ALLOY_CORE_API_IMPORT
//!
//! \brief Portable symbol for importing a symbol during consumption.

//! \def ALLOY_CORE_API
//!
//! \brief Portable export/import macros used for building/consuming the
//!        library respectively.
#if defined(ALLOY_CORE_STATIC)
// Static builds: both macros expand to nothing.
# define ALLOY_CORE_API_EXPORT
# define ALLOY_CORE_API_IMPORT
#else
# if defined(WIN32) || defined(_WIN32)
#   define ALLOY_CORE_API_EXPORT __declspec(dllexport)
#   define ALLOY_CORE_API_IMPORT __declspec(dllimport)
#   ifdef _MSC_VER
      // C4251: dll-exported class member lacking a dll-interface; silenced
      // globally for MSVC consumers of this header.
#     pragma warning(disable: 4251)
#   endif
# elif defined(__GNUC__)
    // GCC/Clang: default visibility in both directions.
#   define ALLOY_CORE_API_EXPORT __attribute__ ((__visibility__ ("default")))
#   define ALLOY_CORE_API_IMPORT __attribute__ ((__visibility__ ("default")))
# endif
#endif

// Building the library defines ALLOY_CORE_EXPORT; consumers import.
#if defined(ALLOY_CORE_EXPORT)
# define ALLOY_CORE_API ALLOY_CORE_API_EXPORT
#else
# define ALLOY_CORE_API ALLOY_CORE_API_IMPORT
#endif
#endif /* ALLOY_CORE_API_HPP */ |
akkelyn/One | Best/Best/Classes/FriendTrends/Controller/OneRecommendViewController.h | <reponame>akkelyn/One<gh_stars>1-10
//
// OneRecommendViewController.h
// One
//
// Created by akkelyn on 16-4-22.
// Copyright (c) 2016年 akkelyn. All rights reserved.
//
#import <UIKit/UIKit.h>

/// View controller for the "recommend" screen of the friend-trends module.
@interface OneRecommendViewController : UIViewController

@end
|
rodavoce/weshareashare | server/tests/lifecycle.test.js | var sails = require('sails');
// Before running any tests...
before(function(done) {

  // Increase the Mocha timeout so that Sails has enough time to lift, even if you have a bunch of assets.
  // (Must be a regular function, not an arrow, so `this` is the Mocha context.)
  this.timeout(20000);

  // Run the test server on a fixed, non-default port.
  process.env.PORT = 9999;

  sails.lift({
    // We might want to skip the Grunt hook,
    // and disable all logs except errors and warnings:
    hooks: { grunt: false },
    log: { level: 'warn' },
  }, function(err) {
    if (err) { return done(err); }

    // here you can load fixtures, etc.
    // (for example, you might want to create some records in the database)

    return done();
  });
});

// After running tests: shut the Sails app down cleanly.
after(function(done) {
  sails.lower(done);
});
dodgelafnitz/ASCII | ASCII/projects/DcUtility/include/Math/Shapes2D/ForwardDeclarations.h | <filename>ASCII/projects/DcUtility/include/Math/Shapes2D/ForwardDeclarations.h<gh_stars>0
/*
 * Copyright 2021 <NAME>
 */

#ifndef DCUTILITY_MATH_SHAPES2D_FORWARDDECLARATIONS_H
#define DCUTILITY_MATH_SHAPES2D_FORWARDDECLARATIONS_H

// Forward declarations for the 2D shape types; include the individual
// shape headers for the full definitions.
class LineSegment;
class Line;
class Ray;
class Circle;
class Rect;
class Polygon;

#endif // DCUTILITY_MATH_SHAPES2D_FORWARDDECLARATIONS_H
cpp-pm/boost | boost/intrusive/treap_algorithms.hpp | /////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright <NAME> 2006-2014.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/intrusive for documentation.
//
/////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTRUSIVE_TREAP_ALGORITHMS_HPP
#define BOOST_INTRUSIVE_TREAP_ALGORITHMS_HPP
#include <boost/intrusive/detail/config_begin.hpp>
#include <boost/intrusive/intrusive_fwd.hpp>
#include <cstddef>
#include <boost/intrusive/detail/assert.hpp>
#include <boost/intrusive/detail/algo_type.hpp>
#include <boost/intrusive/bstree_algorithms.hpp>
#if defined(BOOST_HAS_PRAGMA_ONCE)
# pragma once
#endif
namespace boost {
namespace intrusive {
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
namespace detail
{
//Verification helper: wraps ExtraChecker and additionally asserts the treap
//heap invariant — neither child of a node may compare as strictly higher
//priority (per prio_comp_) than the node itself.
template<class ValueTraits, class NodePtrPrioCompare, class ExtraChecker>
struct treap_node_extra_checker
      : public ExtraChecker
{
   typedef ExtraChecker base_checker_t;
   typedef ValueTraits value_traits;
   typedef typename value_traits::node_traits node_traits;
   typedef typename node_traits::const_node_ptr const_node_ptr;

   typedef typename base_checker_t::return_type return_type;

   treap_node_extra_checker(const NodePtrPrioCompare& prio_comp, ExtraChecker extra_checker)
      : base_checker_t(extra_checker), prio_comp_(prio_comp)
   {}

   void operator () (const const_node_ptr& p,
                     const return_type& check_return_left, const return_type& check_return_right,
                     return_type& check_return)
   {
      //Heap invariant: a child must not outrank its parent.
      if (node_traits::get_left(p))
         BOOST_INTRUSIVE_INVARIANT_ASSERT(!prio_comp_(node_traits::get_left(p), p));
      if (node_traits::get_right(p))
         BOOST_INTRUSIVE_INVARIANT_ASSERT(!prio_comp_(node_traits::get_right(p), p));
      //Delegate the remaining checks to the wrapped checker.
      base_checker_t::operator()(p, check_return_left, check_return_right, check_return);
   }

   const NodePtrPrioCompare prio_comp_;
};
} // namespace detail
#endif //#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! treap_algorithms provides basic algorithms to manipulate
//! nodes forming a treap.
//!
//! (1) the header node is maintained with links not only to the root
//! but also to the leftmost node of the tree, to enable constant time
//! begin(), and to the rightmost node of the tree, to enable linear time
//! performance when used with the generic set algorithms (set_union,
//! etc.);
//!
//! (2) when a node being deleted has two children its successor node is
//! relinked into its place, rather than copied, so that the only
//! pointers invalidated are those referring to the deleted node.
//!
//! treap_algorithms is configured with a NodeTraits class, which encapsulates the
//! information about the node to be manipulated. NodeTraits must support the
//! following interface:
//!
//! <b>Typedefs</b>:
//!
//! <tt>node</tt>: The type of the node that forms the treap
//!
//! <tt>node_ptr</tt>: A pointer to a node
//!
//! <tt>const_node_ptr</tt>: A pointer to a const node
//!
//! <b>Static functions</b>:
//!
//! <tt>static node_ptr get_parent(const_node_ptr n);</tt>
//!
//! <tt>static void set_parent(node_ptr n, node_ptr parent);</tt>
//!
//! <tt>static node_ptr get_left(const_node_ptr n);</tt>
//!
//! <tt>static void set_left(node_ptr n, node_ptr left);</tt>
//!
//! <tt>static node_ptr get_right(const_node_ptr n);</tt>
//!
//! <tt>static void set_right(node_ptr n, node_ptr right);</tt>
template<class NodeTraits>
class treap_algorithms
#ifndef BOOST_INTRUSIVE_DOXYGEN_INVOKED
: public bstree_algorithms<NodeTraits>
#endif
{
public:
typedef NodeTraits node_traits;
typedef typename NodeTraits::node node;
typedef typename NodeTraits::node_ptr node_ptr;
typedef typename NodeTraits::const_node_ptr const_node_ptr;
/// @cond
private:
typedef bstree_algorithms<NodeTraits> bstree_algo;
   //Exception-safety guard used while rebalancing: unless release() is
   //called, the destructor rotates node p_ back up n_ levels so the tree
   //is restored if the priority comparison throws mid-operation.
   class rerotate_on_destroy
   {
      rerotate_on_destroy& operator=(const rerotate_on_destroy&);

      public:
      rerotate_on_destroy(node_ptr header, node_ptr p, std::size_t &n)
         : header_(header), p_(p), n_(n), remove_it_(true)
      {}

      ~rerotate_on_destroy()
      {
         if(remove_it_){
            rotate_up_n(header_, p_, n_);
         }
      }

      //Disarm the guard once the operation has committed successfully.
      void release()
      {  remove_it_ = false; }

      const node_ptr header_;
      const node_ptr p_;
      std::size_t &n_;
      bool remove_it_;
   };

   //Rotate node p up n levels towards the root. At each step a right or
   //left rotation is chosen depending on whether p is a left or right child.
   static void rotate_up_n(const node_ptr header, const node_ptr p, std::size_t n)
   {
      node_ptr p_parent(NodeTraits::get_parent(p));
      node_ptr p_grandparent(NodeTraits::get_parent(p_parent));
      while(n--){
         if(p == NodeTraits::get_left(p_parent)){ //p is left child
            bstree_algo::rotate_right(p_parent, p, p_grandparent, header);
         }
         else{ //p is right child
            bstree_algo::rotate_left(p_parent, p, p_grandparent, header);
         }
         //After the rotation the old grandparent is p's parent.
         p_parent = p_grandparent;
         p_grandparent = NodeTraits::get_parent(p_parent);
      }
   }
/// @endcond
public:
   //! This type is the information that will be
   //! filled by insert_unique_check
   struct insert_commit_data
      /// @cond
      : public bstree_algo::insert_commit_data
      /// @endcond
   {
      /// @cond
      //NOTE(review): presumably the rotation count recorded by the check
      //phase so commit/rollback (see rerotate_on_destroy) can undo or
      //replay them — confirm in rebalance_check_and_commit.
      std::size_t rotations;
      /// @endcond
   };
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree_algorithms::get_header(const const_node_ptr&)
static node_ptr get_header(const_node_ptr n);
//! @copydoc ::boost::intrusive::bstree_algorithms::begin_node
static node_ptr begin_node(const_node_ptr header);
//! @copydoc ::boost::intrusive::bstree_algorithms::end_node
static node_ptr end_node(const_node_ptr header);
//! @copydoc ::boost::intrusive::bstree_algorithms::swap_tree
static void swap_tree(node_ptr header1, node_ptr header2);
//! @copydoc ::boost::intrusive::bstree_algorithms::swap_nodes(node_ptr,node_ptr)
static void swap_nodes(node_ptr node1, node_ptr node2);
//! @copydoc ::boost::intrusive::bstree_algorithms::swap_nodes(node_ptr,node_ptr,node_ptr,node_ptr)
static void swap_nodes(node_ptr node1, node_ptr header1, node_ptr node2, node_ptr header2);
//! @copydoc ::boost::intrusive::bstree_algorithms::replace_node(node_ptr,node_ptr)
static void replace_node(node_ptr node_to_be_replaced, node_ptr new_node);
//! @copydoc ::boost::intrusive::bstree_algorithms::replace_node(node_ptr,node_ptr,node_ptr)
static void replace_node(node_ptr node_to_be_replaced, node_ptr header, node_ptr new_node);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
   //! @copydoc ::boost::intrusive::bstree_algorithms::unlink(node_ptr)
   template<class NodePtrPriorityCompare>
   static void unlink(node_ptr node, NodePtrPriorityCompare pcomp)
   {
      node_ptr x = NodeTraits::get_parent(node);
      if(x){
         //Walk parent links up to the header, then erase through the
         //header so the treap is rebalanced with pcomp.
         while(!bstree_algo::is_header(x))
            x = NodeTraits::get_parent(x);
         erase(x, node, pcomp);
      }
   }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree_algorithms::unlink_leftmost_without_rebalance
static node_ptr unlink_leftmost_without_rebalance(node_ptr header);
//! @copydoc ::boost::intrusive::bstree_algorithms::unique(const const_node_ptr&)
static bool unique(const_node_ptr node);
//! @copydoc ::boost::intrusive::bstree_algorithms::size(const const_node_ptr&)
static std::size_t size(const_node_ptr header);
//! @copydoc ::boost::intrusive::bstree_algorithms::next_node(const node_ptr&)
static node_ptr next_node(node_ptr node);
//! @copydoc ::boost::intrusive::bstree_algorithms::prev_node(const node_ptr&)
static node_ptr prev_node(node_ptr node);
//! @copydoc ::boost::intrusive::bstree_algorithms::init(node_ptr)
static void init(node_ptr node);
//! @copydoc ::boost::intrusive::bstree_algorithms::init_header(node_ptr)
static void init_header(node_ptr header);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
   //! @copydoc ::boost::intrusive::bstree_algorithms::erase(node_ptr,node_ptr)
   template<class NodePtrPriorityCompare>
   static node_ptr erase(node_ptr header, node_ptr z, NodePtrPriorityCompare pcomp)
   {
      //rebalance_for_erasure (defined later in this class) prepares the
      //treap so the plain bstree erase preserves the heap invariant.
      rebalance_for_erasure(header, z, pcomp);
      bstree_algo::erase(header, z);
      return z;
   }
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree_algorithms::clone(const const_node_ptr&,node_ptr,Cloner,Disposer)
template <class Cloner, class Disposer>
static void clone
(const_node_ptr source_header, node_ptr target_header, Cloner cloner, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree_algorithms::clear_and_dispose(const node_ptr&,Disposer)
template<class Disposer>
static void clear_and_dispose(node_ptr header, Disposer disposer);
//! @copydoc ::boost::intrusive::bstree_algorithms::lower_bound(const const_node_ptr&,const KeyType&,KeyNodePtrCompare)
template<class KeyType, class KeyNodePtrCompare>
static node_ptr lower_bound
(const_node_ptr header, const KeyType &key, KeyNodePtrCompare comp);
//! @copydoc ::boost::intrusive::bstree_algorithms::upper_bound(const const_node_ptr&,const KeyType&,KeyNodePtrCompare)
template<class KeyType, class KeyNodePtrCompare>
static node_ptr upper_bound
(const_node_ptr header, const KeyType &key, KeyNodePtrCompare comp);
//! @copydoc ::boost::intrusive::bstree_algorithms::find(const const_node_ptr&, const KeyType&,KeyNodePtrCompare)
template<class KeyType, class KeyNodePtrCompare>
static node_ptr find
(const_node_ptr header, const KeyType &key, KeyNodePtrCompare comp);
//! @copydoc ::boost::intrusive::bstree_algorithms::equal_range(const const_node_ptr&,const KeyType&,KeyNodePtrCompare)
template<class KeyType, class KeyNodePtrCompare>
static std::pair<node_ptr, node_ptr> equal_range
(const_node_ptr header, const KeyType &key, KeyNodePtrCompare comp);
//! @copydoc ::boost::intrusive::bstree_algorithms::bounded_range(const const_node_ptr&,const KeyType&,const KeyType&,KeyNodePtrCompare,bool,bool)
template<class KeyType, class KeyNodePtrCompare>
static std::pair<node_ptr, node_ptr> bounded_range
(const_node_ptr header, const KeyType &lower_key, const KeyType &upper_key, KeyNodePtrCompare comp
, bool left_closed, bool right_closed);
//! @copydoc ::boost::intrusive::bstree_algorithms::count(const const_node_ptr&,const KeyType&,KeyNodePtrCompare)
template<class KeyType, class KeyNodePtrCompare>
static std::size_t count(const_node_ptr header, const KeyType &key, KeyNodePtrCompare comp);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! <b>Requires</b>: "h" must be the header node of a tree.
//! NodePtrCompare is a function object that induces a strict weak
//! ordering compatible with the strict weak ordering used to create the
//! the tree. NodePtrCompare compares two node_ptrs.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts new_node into the tree before the upper bound
//! according to "comp" and rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Average complexity for insert element is at
//! most logarithmic.
//!
//! <b>Throws</b>: If "comp" throw or "pcomp" throw.
   template<class NodePtrCompare, class NodePtrPriorityCompare>
   static node_ptr insert_equal_upper_bound
      (node_ptr h, node_ptr new_node, NodePtrCompare comp, NodePtrPriorityCompare pcomp)
   {
      insert_commit_data commit_data;
      //Locate the upper-bound insertion point as in a plain BST...
      bstree_algo::insert_equal_upper_bound_check(h, new_node, comp, commit_data);
      //...then link the node and rotate it up according to its priority.
      rebalance_check_and_commit(h, new_node, pcomp, commit_data);
      return new_node;
   }
//! <b>Requires</b>: "h" must be the header node of a tree.
//! NodePtrCompare is a function object that induces a strict weak
//! ordering compatible with the strict weak ordering used to create the
//! the tree. NodePtrCompare compares two node_ptrs.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts new_node into the tree before the upper bound
//! according to "comp" and rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Average complexity for insert element is at
//! most logarithmic.
//!
//! <b>Throws</b>: If "comp" throws.
   template<class NodePtrCompare, class NodePtrPriorityCompare>
   static node_ptr insert_equal_lower_bound
      (node_ptr h, node_ptr new_node, NodePtrCompare comp, NodePtrPriorityCompare pcomp)
   {
      insert_commit_data commit_data;
      //Locate the lower-bound insertion point as in a plain BST...
      bstree_algo::insert_equal_lower_bound_check(h, new_node, comp, commit_data);
      //...then link the node and rotate it up according to its priority.
      rebalance_check_and_commit(h, new_node, pcomp, commit_data);
      return new_node;
   }
//! <b>Requires</b>: "header" must be the header node of a tree.
//! NodePtrCompare is a function object that induces a strict weak
//! ordering compatible with the strict weak ordering used to create the
//! the tree. NodePtrCompare compares two node_ptrs. "hint" is node from
//! the "header"'s tree.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts new_node into the tree, using "hint" as a hint to
//! where it will be inserted. If "hint" is the upper_bound
//! the insertion takes constant time (two comparisons in the worst case).
//! Rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Logarithmic in general, but it is amortized
//! constant time if new_node is inserted immediately before "hint".
//!
//! <b>Throws</b>: If "comp" throw or "pcomp" throw.
template<class NodePtrCompare, class NodePtrPriorityCompare>
static node_ptr insert_equal
   (node_ptr h, node_ptr hint, node_ptr new_node, NodePtrCompare comp, NodePtrPriorityCompare pcomp)
{
   //Hinted search for the insertion point (may throw via "comp"); amortized
   //constant when "hint" is the correct position...
   insert_commit_data data;
   bstree_algo::insert_equal_check(h, hint, new_node, comp, data);
   //...then count the required heap rotations, commit and rotate up.
   rebalance_check_and_commit(h, new_node, pcomp, data);
   return new_node;
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! "pos" must be a valid node of the tree (including header end) node.
//! "pos" must be a node pointing to the successor to "new_node"
//! once inserted according to the order of already inserted nodes. This function does not
//! check "pos" and this precondition must be guaranteed by the caller.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts new_node into the tree before "pos"
//! and rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Constant-time.
//!
//! <b>Throws</b>: If "pcomp" throws, strong guarantee.
//!
//! <b>Note</b>: If "pos" is not the successor of the newly inserted "new_node"
//! tree invariants might be broken.
template<class NodePtrPriorityCompare>
static node_ptr insert_before
   (node_ptr header, node_ptr pos, node_ptr new_node, NodePtrPriorityCompare pcomp)
{
   //The caller guarantees "pos" is the successor, so no key comparison is
   //needed: just record the position...
   insert_commit_data data;
   bstree_algo::insert_before_check(header, pos, data);
   //...then count the required heap rotations, commit and rotate up.
   rebalance_check_and_commit(header, new_node, pcomp, data);
   return new_node;
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! "new_node" must be, according to the used ordering no less than the
//! greatest inserted key.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts x into the tree in the last position
//! and rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Constant-time.
//!
//! <b>Throws</b>: If "pcomp" throws, strong guarantee.
//!
//! <b>Note</b>: If "new_node" is less than the greatest inserted key
//! tree invariants are broken. This function is slightly faster than
//! using "insert_before".
template<class NodePtrPriorityCompare>
static void push_back(node_ptr header, node_ptr new_node, NodePtrPriorityCompare pcomp)
{
   //Constant-time insertion at the rightmost position (caller guarantees
   //"new_node" is no less than the current maximum)...
   insert_commit_data data;
   bstree_algo::push_back_check(header, data);
   //...then count the required heap rotations, commit and rotate up.
   rebalance_check_and_commit(header, new_node, pcomp, data);
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! "new_node" must be, according to the used ordering, no greater than the
//! lowest inserted key.
//! NodePtrPriorityCompare is a priority function object that induces a strict weak
//! ordering compatible with the one used to create the
//! the tree. NodePtrPriorityCompare compares two node_ptrs.
//!
//! <b>Effects</b>: Inserts x into the tree in the first position
//! and rotates the tree according to "pcomp".
//!
//! <b>Complexity</b>: Constant-time.
//!
//! <b>Throws</b>: If "pcomp" throws, strong guarantee.
//!
//! <b>Note</b>: If "new_node" is greater than the lowest inserted key
//! tree invariants are broken. This function is slightly faster than
//! using "insert_before".
template<class NodePtrPriorityCompare>
static void push_front(node_ptr header, node_ptr new_node, NodePtrPriorityCompare pcomp)
{
   //Constant-time insertion at the leftmost position (caller guarantees
   //"new_node" is no greater than the current minimum)...
   insert_commit_data data;
   bstree_algo::push_front_check(header, data);
   //...then count the required heap rotations, commit and rotate up.
   rebalance_check_and_commit(header, new_node, pcomp, data);
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! KeyNodePtrCompare is a function object that induces a strict weak
//! ordering compatible with the strict weak ordering used to create the
//! the tree. NodePtrCompare compares KeyType with a node_ptr.
//!
//! <b>Effects</b>: Checks if there is an equivalent node to "key" in the
//! tree according to "comp" and obtains the needed information to realize
//! a constant-time node insertion if there is no equivalent node.
//!
//! <b>Returns</b>: If there is an equivalent value
//! returns a pair containing a node_ptr to the already present node
//! and false. If there is not equivalent key can be inserted returns true
//! in the returned pair's boolean and fills "commit_data" that is meant to
//! be used with the "insert_commit" function to achieve a constant-time
//! insertion function.
//!
//! <b>Complexity</b>: Average complexity is at most logarithmic.
//!
//! <b>Throws</b>: If "comp" throws.
//!
//! <b>Notes</b>: This function is used to improve performance when constructing
//! a node is expensive and the user does not want to have two equivalent nodes
//! in the tree: if there is an equivalent value
//! the constructed object must be discarded. Many times, the part of the
//! node that is used to impose the order is much cheaper to construct
//! than the node and this function offers the possibility to use that part
//! to check if the insertion will be successful.
//!
//! If the check is successful, the user can construct the node and use
//! "insert_commit" to insert the node in constant-time. This gives a total
//! logarithmic complexity to the insertion: check(O(log(N)) + commit(O(1)).
//!
//! "commit_data" remains valid for a subsequent "insert_unique_commit" only
//! if no more objects are inserted or erased from the set.
template<class KeyType, class KeyNodePtrCompare, class PrioType, class PrioNodePtrPrioCompare>
static std::pair<node_ptr, bool> insert_unique_check
   ( const_node_ptr header
   , const KeyType &key, KeyNodePtrCompare comp
   , const PrioType &prio, PrioNodePtrPrioCompare pcomp
   , insert_commit_data &commit_data)
{
   //Binary-search step: find the insertion point or an equivalent node.
   const std::pair<node_ptr, bool> bst_ret =
      bstree_algo::insert_unique_check(header, key, comp, commit_data);
   //Precompute the heap rotations only when the insertion can proceed, so
   //"commit_data" is fully ready for a later insert_unique_commit().
   if(bst_ret.second){
      rebalance_after_insertion_check
         (header, commit_data.node, prio, pcomp, commit_data.rotations);
   }
   return bst_ret;
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! KeyNodePtrCompare is a function object that induces a strict weak
//! ordering compatible with the strict weak ordering used to create the
//! the tree. NodePtrCompare compares KeyType with a node_ptr.
//! "hint" is node from the "header"'s tree.
//!
//! <b>Effects</b>: Checks if there is an equivalent node to "key" in the
//! tree according to "comp" using "hint" as a hint to where it should be
//! inserted and obtains the needed information to realize
//! a constant-time node insertion if there is no equivalent node.
//! If "hint" is the upper_bound the function has constant time
//! complexity (two comparisons in the worst case).
//!
//! <b>Returns</b>: If there is an equivalent value
//! returns a pair containing a node_ptr to the already present node
//! and false. If there is not equivalent key can be inserted returns true
//! in the returned pair's boolean and fills "commit_data" that is meant to
//! be used with the "insert_commit" function to achieve a constant-time
//! insertion function.
//!
//! <b>Complexity</b>: Average complexity is at most logarithmic, but it is
//! amortized constant time if new_node should be inserted immediately before "hint".
//!
//! <b>Throws</b>: If "comp" throws.
//!
//! <b>Notes</b>: This function is used to improve performance when constructing
//! a node is expensive and the user does not want to have two equivalent nodes
//! in the tree: if there is an equivalent value
//! the constructed object must be discarded. Many times, the part of the
//! node that is used to impose the order is much cheaper to construct
//! than the node and this function offers the possibility to use that part
//! to check if the insertion will be successful.
//!
//! If the check is successful, the user can construct the node and use
//! "insert_commit" to insert the node in constant-time. This gives a total
//! logarithmic complexity to the insertion: check(O(log(N)) + commit(O(1)).
//!
//! "commit_data" remains valid for a subsequent "insert_unique_commit" only
//! if no more objects are inserted or erased from the set.
template<class KeyType, class KeyNodePtrCompare, class PrioType, class PrioNodePtrPrioCompare>
static std::pair<node_ptr, bool> insert_unique_check
   ( const_node_ptr header, node_ptr hint
   , const KeyType &key, KeyNodePtrCompare comp
   , const PrioType &prio, PrioNodePtrPrioCompare pcomp
   , insert_commit_data &commit_data)
{
   //Hinted binary-search step: amortized constant when "hint" is correct.
   const std::pair<node_ptr, bool> bst_ret =
      bstree_algo::insert_unique_check(header, hint, key, comp, commit_data);
   //Precompute the heap rotations only when the insertion can proceed, so
   //"commit_data" is fully ready for a later insert_unique_commit().
   if(bst_ret.second){
      rebalance_after_insertion_check
         (header, commit_data.node, prio, pcomp, commit_data.rotations);
   }
   return bst_ret;
}
//! <b>Requires</b>: "header" must be the header node of a tree.
//! "commit_data" must have been obtained from a previous call to
//! "insert_unique_check". No objects should have been inserted or erased
//! from the set between the "insert_unique_check" that filled "commit_data"
//! and the call to "insert_commit".
//!
//!
//! <b>Effects</b>: Inserts new_node in the set using the information obtained
//! from the "commit_data" that a previous "insert_check" filled.
//!
//! <b>Complexity</b>: Constant time.
//!
//! <b>Throws</b>: Nothing.
//!
//! <b>Notes</b>: This function has only sense if a "insert_unique_check" has been
//! previously executed to fill "commit_data". No value should be inserted or
//! erased between the "insert_check" and "insert_commit" calls.
static void insert_unique_commit
   (node_ptr header, node_ptr new_node, const insert_commit_data &commit_data)
{
   //No-throw phase: link the node where the preceding check located it...
   bstree_algo::insert_unique_commit(header, new_node, commit_data);
   //...and restore the heap property with the precomputed rotation count.
   rotate_up_n(header, new_node, commit_data.rotations);
}
//! @copydoc ::boost::intrusive::bstree_algorithms::transfer_unique
template<class NodePtrCompare, class PrioNodePtrPrioCompare>
static bool transfer_unique
   (node_ptr header1, NodePtrCompare comp, PrioNodePtrPrioCompare pcomp, node_ptr header2, node_ptr z)
{
   //Check whether "z" fits in the destination tree, using the node itself
   //both as the key and as the priority.
   insert_commit_data commit_data;
   if(!insert_unique_check(header1, z, comp, z, pcomp, commit_data).second){
      return false;   //an equivalent node already exists in the destination
   }
   //Unlink "z" from the source tree, then commit it into the destination.
   erase(header2, z, pcomp);
   insert_unique_commit(header1, z, commit_data);
   return true;
}
//! @copydoc ::boost::intrusive::bstree_algorithms::transfer_equal
template<class NodePtrCompare, class PrioNodePtrPrioCompare>
static void transfer_equal
(node_ptr header1, NodePtrCompare comp, PrioNodePtrPrioCompare pcomp, node_ptr header2, node_ptr z)
{
//Moves node "z" from the tree under "header2" into the tree under
//"header1", inserting at the upper bound of its equivalent range.
//Statement order matters: both comparisons that can throw ("comp" in the
//bound check, "pcomp" in the rotation count) run before anything is
//unlinked, so a throw leaves both trees untouched.
insert_commit_data commit_data;
//Find the insertion point in the destination (uses "comp")
bstree_algo::insert_equal_upper_bound_check(header1, z, comp, commit_data);
//Precompute rotations needed after the insertion (uses "pcomp")
rebalance_after_insertion_check(header1, commit_data.node, z, pcomp, commit_data.rotations);
//Rotate "z" down to a leaf of the source tree and unlink it...
rebalance_for_erasure(header2, z, pcomp);
bstree_algo::erase(header2, z);
//...then link it into the destination and restore the heap property
bstree_algo::insert_unique_commit(header1, z, commit_data);
rotate_up_n(header1, z, commit_data.rotations);
}
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::bstree_algorithms::is_header
static bool is_header(const_node_ptr p);
#endif //#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
/// @cond
private:
template<class NodePtrPriorityCompare>
static void rebalance_for_erasure(node_ptr header, node_ptr z, NodePtrPriorityCompare pcomp)
{
//Rotates "z" down the tree until it becomes a leaf so it can be unlinked
//without breaking the heap property. Each step rotates "z" below the
//child chosen with "pcomp", which may throw.
std::size_t n = 0;
//Scope guard: "rb" tracks the "n" rotations already performed so that, if
//"pcomp" throws mid-loop, the tree shape can be restored on unwind (see
//rerotate_on_destroy); release() below disarms it on success.
rerotate_on_destroy rb(header, z, n);
node_ptr z_left = NodeTraits::get_left(z);
node_ptr z_right = NodeTraits::get_right(z);
while(z_left || z_right){
const node_ptr z_parent(NodeTraits::get_parent(z));
//Rotate towards the child that must replace "z" as subtree root: the
//left child when it exists and wins the priority comparison, else the
//right child.
if(!z_right || (z_left && pcomp(z_left, z_right))){
bstree_algo::rotate_right(z, z_left, z_parent, header);
}
else{
bstree_algo::rotate_left(z, z_right, z_parent, header);
}
++n;
z_left = NodeTraits::get_left(z);
z_right = NodeTraits::get_right(z);
}
//"z" is now a leaf: disarm the rollback guard.
rb.release();
}
template<class NodePtrPriorityCompare>
static void rebalance_check_and_commit
(node_ptr h, node_ptr new_node, NodePtrPriorityCompare pcomp, insert_commit_data &commit_data)
{
//Count the heap rotations first: "pcomp" may throw and at this point
//nothing has been linked, so the tree is unchanged on failure.
rebalance_after_insertion_check(h, commit_data.node, new_node, pcomp, commit_data.rotations);
//No-throw
bstree_algo::insert_unique_commit(h, new_node, commit_data);
//Rotate the newly linked node up to its priority-correct position.
rotate_up_n(h, new_node, commit_data.rotations);
}
template<class Key, class KeyNodePriorityCompare>
static void rebalance_after_insertion_check
(const_node_ptr header, const_node_ptr up, const Key &k
, KeyNodePriorityCompare pcomp, std::size_t &num_rotations)
{
//Walks from the future parent "up" towards the root counting how many
//ancestors lose the priority comparison against "k": that count is the
//number of rotate-up steps a later rotate_up_n() must perform.
const_node_ptr upnode(up);
//First check rotations since pcomp can throw
//"num_rotations" is zeroed up front (and only overwritten after the loop
//finishes) so the caller never observes a partial count if "pcomp" throws
//mid-walk.
num_rotations = 0;
std::size_t n = 0;
while(upnode != header && pcomp(k, upnode)){
++n;
upnode = NodeTraits::get_parent(upnode);
}
num_rotations = n;
}
template<class NodePtrPriorityCompare>
static bool check_invariant(const_node_ptr header, NodePtrPriorityCompare pcomp)
{
   //Verifies the treap heap property over an in-order traversal: no node
   //may win the priority comparison against its parent. The root (whose
   //parent is the header) is exempt.
   node_ptr stop = end_node(header);
   for(node_ptr cur = begin_node(header); cur != stop; cur = next_node(cur)){
      node_ptr parent = NodeTraits::get_parent(cur);
      if(parent != header && pcomp(cur, parent)){
         return false;
      }
   }
   return true;
}
/// @endcond
};
/// @cond
//Maps the TreapAlgorithms tag to its implementation so that generic code
//can select treap_algorithms through the get_algo metafunction.
template<class NodeTraits>
struct get_algo<TreapAlgorithms, NodeTraits>
{
typedef treap_algorithms<NodeTraits> type;
};
//Selects the node checker used for debug-mode structure verification of
//treaps: the generic bstree node checker is reused as-is.
template <class ValueTraits, class NodePtrCompare, class ExtraChecker>
struct get_node_checker<TreapAlgorithms, ValueTraits, NodePtrCompare, ExtraChecker>
{
typedef detail::bstree_node_checker<ValueTraits, NodePtrCompare, ExtraChecker> type;
};
/// @endcond
} //namespace intrusive
} //namespace boost
#include <boost/intrusive/detail/config_end.hpp>
#endif //BOOST_INTRUSIVE_TREAP_ALGORITHMS_HPP
|
hiyingnn/main | src/test/java/systemtests/AddMemberCommandSystemTest.java | package systemtests;
import static seedu.address.commons.core.Messages.MESSAGE_INVALID_COMMAND_FORMAT;
import static seedu.address.logic.commands.CommandTestUtil.MEMBER_LOYALTY_POINTS_DESC_AMY;
import static seedu.address.logic.commands.CommandTestUtil.MEMBER_LOYALTY_POINTS_DESC_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_EMAIL_DESC_AMY;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_EMAIL_DESC_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_INVALID_EMAIL_DESC;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_INVALID_NAME_DESC;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_INVALID_PHONE_DESC;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_NAME_DESC_AMY;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_NAME_DESC_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_PHONE_DESC_AMY;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_PHONE_DESC_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_VALID_EMAIL_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_VALID_NAME_BOB;
import static seedu.address.logic.commands.CommandTestUtil.PERSON_VALID_PHONE_BOB;
import static seedu.address.testutil.TypicalMembers.ALICE;
import static seedu.address.testutil.TypicalMembers.AMY;
import static seedu.address.testutil.TypicalMembers.BOB;
import static seedu.address.testutil.TypicalMembers.HOON;
import static seedu.address.testutil.TypicalMembers.IDA;
import static seedu.address.testutil.TypicalMembers.KEYWORD_MATCHING_MEIER;
import org.junit.Test;
import seedu.address.commons.core.Messages;
import seedu.address.logic.commands.RedoCommand;
import seedu.address.logic.commands.UndoCommand;
import seedu.address.logic.commands.member.AddMemberCommand;
import seedu.address.model.Model;
import seedu.address.model.person.Email;
import seedu.address.model.person.Name;
import seedu.address.model.person.Phone;
import seedu.address.model.person.member.Member;
import seedu.address.testutil.MemberBuilder;
import seedu.address.testutil.MemberUtil;
/**
 * System tests for the add-member command: executes real commands against the
 * running application and verifies the resulting model, storage and UI state.
 */
public class AddMemberCommandSystemTest extends RestaurantBookSystemTest {

    /**
     * Runs a fixed sequence of valid add operations (including undo/redo and a
     * filtered list) followed by invalid operations (duplicates, missing and
     * malformed fields). The cases share one application instance, so their
     * order matters.
     */
    @Test
    public void add() {
        Model model = getModel();

        /* ------------------------ Perform add operations on the shown unfiltered list ----------------------------- */

        /* Case: add a member to a non-empty address book, command with leading spaces and trailing spaces
         * -> added
         */
        Member toAdd = AMY;
        String command = " " + AddMemberCommand.COMMAND_WORD + " " + PERSON_NAME_DESC_AMY + " "
                + PERSON_PHONE_DESC_AMY + " " + PERSON_EMAIL_DESC_AMY + " " + MEMBER_LOYALTY_POINTS_DESC_AMY;
        assertCommandSuccess(command, toAdd);

        /* Case: undo adding Amy to the list -> Amy deleted */
        command = UndoCommand.COMMAND_WORD;
        String expectedResultMessage = UndoCommand.MESSAGE_SUCCESS;
        assertCommandSuccess(command, model, expectedResultMessage);

        /* Case: redo adding Amy to the list -> Amy added again */
        command = RedoCommand.COMMAND_WORD;
        model.addMember(toAdd);
        expectedResultMessage = RedoCommand.MESSAGE_SUCCESS;
        assertCommandSuccess(command, model, expectedResultMessage);

        /* Case: add a member with all fields same as another member in the address book except name -> added */
        toAdd = new MemberBuilder(AMY).withName(PERSON_VALID_NAME_BOB).build();
        command = AddMemberCommand.COMMAND_WORD + PERSON_NAME_DESC_BOB + PERSON_PHONE_DESC_AMY + PERSON_EMAIL_DESC_AMY
                + MEMBER_LOYALTY_POINTS_DESC_AMY;
        assertCommandSuccess(command, toAdd);

        /* Case: add a member with all fields same as another member in the address book except phone and email
         * -> added
         */
        toAdd = new MemberBuilder(AMY).withPhone(PERSON_VALID_PHONE_BOB).withEmail(PERSON_VALID_EMAIL_BOB).build();
        command = MemberUtil.getAddCommand(toAdd);
        assertCommandSuccess(command, toAdd);

        /* Case: add to empty address book -> added */
        deleteAllMembers();
        assertCommandSuccess(ALICE);

        /* Case: add a member, command with parameters in random order -> added */
        toAdd = BOB;
        command = AddMemberCommand.COMMAND_WORD + MEMBER_LOYALTY_POINTS_DESC_BOB + PERSON_PHONE_DESC_BOB
                + PERSON_NAME_DESC_BOB + PERSON_EMAIL_DESC_BOB;
        assertCommandSuccess(command, toAdd);

        /* Case: add a member -> added */
        assertCommandSuccess(HOON);

        /* -------------------------- Perform add operation on the shown filtered list ------------------------------ */

        /* Case: filters the member list before adding -> added */
        showMembersWithName(KEYWORD_MATCHING_MEIER);
        assertCommandSuccess(IDA);

        /* ----------------------------------- Perform invalid add operations --------------------------------------- */

        /* Case: add a duplicate member -> rejected */
        command = MemberUtil.getAddCommand(HOON);
        assertCommandFailure(command, AddMemberCommand.MESSAGE_DUPLICATE_MEMBER);

        /* Case: add a duplicate member except with different phone -> rejected */
        toAdd = new MemberBuilder(HOON).withPhone(PERSON_VALID_PHONE_BOB).build();
        command = MemberUtil.getAddCommand(toAdd);
        assertCommandFailure(command, AddMemberCommand.MESSAGE_DUPLICATE_MEMBER);

        /* Case: add a duplicate member except with different email -> rejected */
        toAdd = new MemberBuilder(HOON).withEmail(PERSON_VALID_EMAIL_BOB).build();
        command = MemberUtil.getAddCommand(toAdd);
        assertCommandFailure(command, AddMemberCommand.MESSAGE_DUPLICATE_MEMBER);

        /* Case: missing name -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_PHONE_DESC_AMY + PERSON_EMAIL_DESC_AMY;
        assertCommandFailure(command, String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddMemberCommand.MESSAGE_USAGE));

        /* Case: missing phone -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_NAME_DESC_AMY + PERSON_EMAIL_DESC_AMY;
        assertCommandFailure(command, String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddMemberCommand.MESSAGE_USAGE));

        /* Case: missing email -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_NAME_DESC_AMY + PERSON_PHONE_DESC_AMY;
        assertCommandFailure(command, String.format(MESSAGE_INVALID_COMMAND_FORMAT, AddMemberCommand.MESSAGE_USAGE));

        /* Case: invalid keyword -> rejected */
        command = "adds " + MemberUtil.getMemberDetails(toAdd);
        assertCommandFailure(command, Messages.MESSAGE_UNKNOWN_COMMAND);

        /* Case: invalid name -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_INVALID_NAME_DESC
                + PERSON_PHONE_DESC_AMY + PERSON_EMAIL_DESC_AMY;
        assertCommandFailure(command, Name.MESSAGE_CONSTRAINTS);

        /* Case: invalid phone -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_NAME_DESC_AMY
                + PERSON_INVALID_PHONE_DESC + PERSON_EMAIL_DESC_AMY;
        assertCommandFailure(command, Phone.MESSAGE_CONSTRAINTS);

        /* Case: invalid email -> rejected */
        command = AddMemberCommand.COMMAND_WORD + PERSON_NAME_DESC_AMY
                + PERSON_PHONE_DESC_AMY + PERSON_INVALID_EMAIL_DESC;
        assertCommandFailure(command, Email.MESSAGE_CONSTRAINTS);
    }

    /**
     * Executes the {@code AddMemberCommand} that adds {@code toAdd} to the model and asserts that the,<br>
     * 1. Command box displays an empty string.<br>
     * 2. Command box has the default style class.<br>
     * 3. Result display box displays the success message of executing {@code AddMemberCommand} with the details of
     * {@code toAdd}.<br>
     * 4. {@code Storage} and {@code MemberListPanel} equal to the corresponding components in
     * the current model added with {@code toAdd}.<br>
     * 5. Browser url and selected card remain unchanged.<br>
     * 6. Status bar's sync status changes.<br>
     * Verifications 1, 3 and 4 are performed by
     * {@code RestaurantBookSystemTest#assertApplicationDisplaysExpected(String, String, Model)}.<br>
     * @see RestaurantBookSystemTest#assertApplicationDisplaysExpected(String, String, Model)
     */
    private void assertCommandSuccess(Member toAdd) {
        assertCommandSuccess(MemberUtil.getAddCommand(toAdd), toAdd);
    }

    /**
     * Performs the same verification as {@code assertCommandSuccess(Member)}. Executes {@code command}
     * instead.
     * @see AddMemberCommandSystemTest#assertCommandSuccess(Member)
     */
    private void assertCommandSuccess(String command, Member toAdd) {
        Model expectedModel = getModel();
        expectedModel.addMember(toAdd);
        String expectedResultMessage = String.format(AddMemberCommand.MESSAGE_SUCCESS, toAdd);

        assertCommandSuccess(command, expectedModel, expectedResultMessage);
    }

    /**
     * Performs the same verification as {@code assertCommandSuccess(String, Member)} except asserts that
     * the,<br>
     * 1. Result display box displays {@code expectedResultMessage}.<br>
     * 2. {@code Storage} and {@code MemberListPanel} equal to the corresponding components in
     * {@code expectedModel}.<br>
     * @see AddMemberCommandSystemTest#assertCommandSuccess(String, Member)
     */
    private void assertCommandSuccess(String command, Model expectedModel, String expectedResultMessage) {
        executeCommand(command);
        assertApplicationDisplaysExpected("", expectedResultMessage, expectedModel);
        assertSelectedCardUnchanged();
        assertCommandBoxShowsDefaultStyle();
        assertStatusBarUnchangedExceptSyncStatus();
    }

    /**
     * Executes {@code command} and asserts that the,<br>
     * 1. Command box displays {@code command}.<br>
     * 2. Command box has the error style class.<br>
     * 3. Result display box displays {@code expectedResultMessage}.<br>
     * 4. {@code Storage} and {@code MemberListPanel} remain unchanged.<br>
     * 5. Browser url, selected card and status bar remain unchanged.<br>
     * Verifications 1, 3 and 4 are performed by
     * {@code RestaurantBookSystemTest#assertApplicationDisplaysExpected(String, String, Model)}.<br>
     * @see RestaurantBookSystemTest#assertApplicationDisplaysExpected(String, String, Model)
     */
    private void assertCommandFailure(String command, String expectedResultMessage) {
        Model expectedModel = getModel();

        executeCommand(command);
        assertApplicationDisplaysExpected(command, expectedResultMessage, expectedModel);
        assertSelectedCardUnchanged();
        assertCommandBoxShowsErrorStyle();
        assertStatusBarUnchanged();
    }
}
|
tipabu/ProxyFS | retryrpc/stress_test.go | <gh_stars>0
package retryrpc
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
/* DEBUG for pprof
_ "net/http/pprof"
*/
"github.com/stretchr/testify/assert"
"github.com/swiftstack/ProxyFS/retryrpc/rpctest"
)
// TestStress runs the stress scenarios sequentially, each against a fresh
// server instance: plain load, client-ack trimming, TTL trimming and
// large-payload RPCs.
func TestStress(t *testing.T) {

	/*
	 * DEBUG - used to debug memory leaks
	 * Run " go tool pprof http://localhost:12123/debug/pprof/heap"
	 * to look at memory inuse

	// Start the ws that listens for pprof requests
	go http.ListenAndServe("localhost:12123", nil)
	*/

	testLoop(t)
	testLoopClientAckTrim(t)
	testLoopTTLTrim(t)
	testSendLargeRPC(t)
}
// testLoop exercises the basic request/response path: it stands up a
// retryrpc server exposing the rpctest RPCs and fans out agentCount
// parallel clients, each sending sendCount RpcPing requests.
func testLoop(t *testing.T) {
	var (
		agentCount = 15  // number of parallel client agents
		sendCount  = 250 // RPCs sent per agent
	)
	assert := assert.New(t)
	zero := 0
	assert.Equal(0, zero)

	// Create new rpctest server - needed for calling
	// RPCs
	myJrpcfs := rpctest.NewServer()

	rrSvr, ipAddr, port := getNewServer(65*time.Second, false)
	assert.NotNil(rrSvr)

	// Register the Server - sets up the methods supported by the
	// server
	err := rrSvr.Register(myJrpcfs)
	assert.Nil(err)

	// Start listening for requests on the ipaddr/port
	startErr := rrSvr.Start()
	assert.Nil(startErr, "startErr is not nil")

	// Tell server to start accepting and processing requests
	rrSvr.Run()

	// Start up the agents
	parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)

	rrSvr.Close()
}
// testLoopClientAckTrim tests that we are correctly trimming messages
// based on the shorter term trimmer.  The shorter term trimmer relies
// on the client code saying "this is the highest consecutive sqn we have
// seen".  Then the server can throw away messages up to and including the
// highest consecutive sqn.
func testLoopClientAckTrim(t *testing.T) {
	var (
		agentCount = 15  // number of parallel client agents
		sendCount  = 250 // RPCs sent per agent
	)
	assert := assert.New(t)
	zero := 0
	assert.Equal(0, zero)

	// Create new rpctest server - needed for calling
	// RPCs
	myJrpcfs := rpctest.NewServer()

	// Short TTL so the second (time based) trimmer can fire quickly below.
	whenTTL := 10 * time.Millisecond
	rrSvr, ipAddr, port := getNewServer(whenTTL, true)
	assert.NotNil(rrSvr)

	// Register the Server - sets up the methods supported by the
	// server
	err := rrSvr.Register(myJrpcfs)
	assert.Nil(err)

	// Start listening for requests on the ipaddr/port
	startErr := rrSvr.Start()
	assert.Nil(startErr, "startErr is not nil")

	// Tell server to start accepting and processing requests
	rrSvr.Run()

	// Start up the agents
	parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)

	// Now for both trimmers to run
	tm := time.Now()

	// First the 100ms trimmer - this will leave 1 entry on completed request queue
	// for each agent since there is no remaining client request to say it is completed.
	//
	// We need the TTL timer to clean up the last entry
	rrSvr.trimCompleted(tm, false)
	assert.Equal(agentCount, cntNotTrimmed(rrSvr), "Should have agentCount messages remaining")

	// Make sure the queue messages will be old enough to be trimmed
	time.Sleep(whenTTL)

	// Now the TTL timer to cleanup the last
	tmTTL := time.Now()
	rrSvr.trimCompleted(tmTTL, true)

	// All messages should be trimmed at this point
	assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")

	/*
	 * DEBUG - allows user to use pprof to check for memory leaks

	// The caller of this test will block and we can check for memory leaks with pprof
	fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
	time.Sleep(5 * time.Minute)
	*/

	rrSvr.Close()
}
// testLoopTTLTrim verifies that the time-to-live trimmer alone removes
// every completed request once the requests are older than the server's
// configured TTL (no client-ack based trimming pass is run first).
func testLoopTTLTrim(t *testing.T) {
	var (
		agentCount = 15  // number of parallel client agents
		sendCount  = 250 // RPCs sent per agent
	)
	assert := assert.New(t)
	zero := 0
	assert.Equal(0, zero)

	// Create new rpctest server - needed for calling
	// RPCs
	myJrpcfs := rpctest.NewServer()

	// Short TTL so the trimmer can fire quickly below.
	whenTTL := 10 * time.Millisecond
	rrSvr, ipAddr, port := getNewServer(whenTTL, true)
	assert.NotNil(rrSvr)

	// Register the Server - sets up the methods supported by the
	// server
	err := rrSvr.Register(myJrpcfs)
	assert.Nil(err)

	// Start listening for requests on the ipaddr/port
	startErr := rrSvr.Start()
	assert.Nil(startErr, "startErr is not nil")

	// Tell server to start accepting and processing requests
	rrSvr.Run()

	// Start up the agents
	parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)

	// Use the TTL trimmer to remove all messages after guaranteeing we are
	// past time when they should be removed
	time.Sleep(whenTTL)
	tmTTL := time.Now()
	rrSvr.trimCompleted(tmTTL, true)

	assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")

	/*
	 * DEBUG - all time for pprof tool to be used for tracking down memory leaks

	// The caller of this test will block and we can check for memory leaks with pprof
	fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
	time.Sleep(5 * time.Minute)
	*/

	rrSvr.Close()
}
// testSendLargeRPC repeats the two-stage trimming scenario using the
// RpcPingLarge method, so that the trimmed completed-request entries carry
// large reply payloads.
func testSendLargeRPC(t *testing.T) {
	var (
		agentCount = 15  // number of parallel client agents
		sendCount  = 250 // RPCs sent per agent
	)
	assert := assert.New(t)
	zero := 0
	assert.Equal(0, zero)

	// Create new rpctest server - needed for calling
	// RPCs
	myJrpcfs := rpctest.NewServer()

	// Short TTL so the second (time based) trimmer can fire quickly below.
	whenTTL := 10 * time.Millisecond
	rrSvr, ipAddr, port := getNewServer(whenTTL, true)
	assert.NotNil(rrSvr)

	// Register the Server - sets up the methods supported by the
	// server
	err := rrSvr.Register(myJrpcfs)
	assert.Nil(err)

	// Start listening for requests on the ipaddr/port
	startErr := rrSvr.Start()
	assert.Nil(startErr, "startErr is not nil")

	// Tell server to start accepting and processing requests
	rrSvr.Run()

	// Start up the agents
	parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPingLarge", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)

	// Now for both trimmers to run
	tm := time.Now()

	// First the 100ms trimmer - this will leave 1 entry on completed request queue
	// for each agent since there is no remaining client request to say it is completed.
	//
	// We need the TTL timer to clean up the last entry
	rrSvr.trimCompleted(tm, false)
	assert.Equal(agentCount, cntNotTrimmed(rrSvr), "Should have agentCount messages remaining")

	// Make sure the queue messages will be old enough to be trimmed
	time.Sleep(whenTTL)

	// Now the TTL timer to cleanup the last
	tmTTL := time.Now()
	rrSvr.trimCompleted(tmTTL, true)

	/*
	 * DEBUG - sleep for a time for pprof tool to be used for tracking down memory leaks

	// The caller of this test will block and we can check for memory leaks with pprof
	fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
	time.Sleep(5 * time.Minute)
	*/

	// All messages should be trimmed at this point
	assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")

	rrSvr.Close()
}
// cntNotTrimmed returns the number of completed requests the server still
// retains, summed over all clients: for each client it counts the entries
// on completedRequest, falling back to the completedRequestLRU length when
// that map is empty.
func cntNotTrimmed(server *Server) (numItems int) {
	server.Lock()
	defer server.Unlock()
	for _, ci := range server.perClientInfo {
		ci.Lock()
		if cnt := len(ci.completedRequest); cnt != 0 {
			numItems += cnt
		} else {
			numItems += ci.completedRequestLRU.Len()
		}
		ci.Unlock()
	}
	return
}
// ping sends one RpcPing RPC and asserts the reply echoes the expected
// "pong <n> bytes" message; on mismatch it first dumps diagnostics for the
// failing agent.
func ping(t *testing.T, client *Client, i int, agentID uint64, assert *assert.Assertions) {
	// Send a ping RPC and print the results
	msg := fmt.Sprintf("Ping Me - %v", i)
	pingRequest := &rpctest.PingReq{Message: msg}
	pingReply := &rpctest.PingReply{}
	expectedReply := fmt.Sprintf("pong %d bytes", len(msg))
	err := client.Send("RpcPing", pingRequest, pingReply)
	assert.Nil(err, "client.Send() returned an error")
	if expectedReply != pingReply.Message {
		fmt.Printf("		 client - AGENTID: %v\n", agentID)
		fmt.Printf("		 client.Send(RpcPing) reply '%+v'\n", pingReply)
		fmt.Printf("		 client.Send(RpcPing) expected '%s' but received '%s'\n", expectedReply, pingReply.Message)
		fmt.Printf("		 client.Send(RpcPing) SENT: msg '%v' but received '%s'\n", msg, pingReply.Message)
		fmt.Printf("		 client.Send(RpcPing) len(pingRequest.Message): '%d' i: %v\n", len(pingRequest.Message), i)
	}
	assert.Equal(expectedReply, pingReply.Message, "Received different output then expected")
}
// pingLarge sends one RpcPingLarge RPC (the server replies with a large
// payload); only transport success is asserted, not the reply contents.
func pingLarge(t *testing.T, client *Client, i int, agentID uint64, assert *assert.Assertions) {
	pingRequest := &rpctest.PingReq{Message: fmt.Sprintf("Ping Me - %v", i)}
	pingReply := &rpctest.PingReply{}
	sendErr := client.Send("RpcPingLarge", pingRequest, pingReply)
	assert.Nil(sendErr, "client.Send() returned an error")
}
// sendIt dispatches a single RPC of the requested method and releases the
// WaitGroups used to coordinate the stress test.
//
// sendWg is released when this send completes.  prevWg covers every send
// except the last one, so the caller can hold back the final send until
// all earlier sends have finished (this exercises the short trimmer).
func sendIt(t *testing.T, client *Client, z int, sendCnt int, sendWg *sync.WaitGroup, prevWg *sync.WaitGroup, agentID uint64, method string, i int) {
	assert := assert.New(t)
	defer sendWg.Done()
	// Fix: removed the redundant "break" statements — Go switch cases
	// never fall through, so they were dead statements.
	switch method {
	case "RpcPing":
		ping(t, client, z, agentID, assert)
	case "RpcPingLarge":
		pingLarge(t, client, z, agentID, assert)
	}
	// The last send is blocked until the previous send has completed. This
	// is how we test the short trimmer.
	if i <= (sendCnt - 2) {
		prevWg.Done()
	}
}
type stressMyClient struct {
sync.Mutex
cond *sync.Cond // Signal that received Interrupt() callback
sawCallback bool // True if Interrupt() was called
interruptCnt int // Count of Interrupt() calls received (best effort)
}
func (cb *stressMyClient) Interrupt(payload []byte) {
cb.Lock()
cb.sawCallback = true
cb.interruptCnt++
cb.cond.Broadcast()
cb.Unlock()
return
}
// Represents a pfsagent - separate client
// pfsagent simulates one pfsagent client: it connects to rrSvr, launches
// sendCnt concurrent RPCs of the given method, periodically drops the
// connection to force retransmits, and waits for everything to complete.
// The final send is deliberately held back until all earlier sends have
// finished and client.highestConsecutive has caught up.
func pfsagent(t *testing.T, rrSvr *Server, ipAddr string, port int, agentID uint64, method string,
	agentWg *sync.WaitGroup, sendCnt int, rootCAx509CertificatePEM []byte) {
	defer agentWg.Done()
	cb := &stressMyClient{}
	cb.cond = sync.NewCond(&cb.Mutex)
	clientID := fmt.Sprintf("client - %v", agentID)
	clientConfig := &ClientConfig{MyUniqueID: clientID, IPAddr: ipAddr, Port: port,
		RootCAx509CertificatePEM: rootCAx509CertificatePEM, Callbacks: cb, DeadlineIO: 5 * time.Second}
	client, err := NewClient(clientConfig)
	if err != nil {
		// Best-effort: agent just bails out; agentWg is still released.
		fmt.Printf("Dial() failed with err: %v\n", err)
		return
	}
	defer client.Close()
	// WG to verify all messages sent
	var sendWg sync.WaitGroup
	// WG to verify all but the last send() has been sent and
	// received. This is needed to test the consecutive sequence
	// trimmer is working.
	var prevWg sync.WaitGroup
	var z, r int
	var msg1 []byte = []byte("server msg back to client")
	for i := 0; i < sendCnt; i++ {
		// z is an arbitrary, varying payload discriminator per send.
		z = (z + i) * 10
		if i == (sendCnt - 1) {
			// Give server time to process messages. This last
			// call gets us closer to highestConsecutive set to sendCnt - 1.
			prevWg.Wait()
			// The highest consecutive number is updated in the background with
			// a goroutine when send() returns.
			//
			// Therefore, we loop waiting for it to hit (sendCnt - 1)
			for {
				var currentHighest requestID
				client.Lock()
				currentHighest = client.highestConsecutive
				client.Unlock()
				if int(currentHighest) == (sendCnt - 1) {
					break
				}
				time.Sleep(10 * time.Millisecond)
			}
		} else {
			prevWg.Add(1)
		}
		sendWg.Add(1)
		go func(z int, i int) {
			sendIt(t, client, z, sendCnt, &sendWg, &prevWg, agentID, method, i)
			rrSvr.SendCallback(clientID, msg1)
		}(z, i)
		// Occasionally drop the connection to the server to
		// simulate retransmits
		r = i % 10
		if r == 0 && (i != 0) {
			rrSvr.CloseClientConn()
		}
	}
	sendWg.Wait()
}
// parallelAgentSenders launches agentCnt concurrent "pfsagents", each of
// which performs sendCnt parallel sends, and blocks until all finish.
func parallelAgentSenders(t *testing.T, rrSrv *Server, ipAddr string, port int, agentCnt int,
	method string, sendCnt int, rootCAx509CertificatePEM []byte) {
	var agentWg sync.WaitGroup
	// Derive a base agent ID from a fixed-seed RNG so runs are repeatable.
	seededRand := rand.New(rand.NewSource(99))
	baseID := seededRand.Uint64()
	for idx := 0; idx < agentCnt; idx++ {
		agentWg.Add(1)
		go pfsagent(t, rrSrv, ipAddr, port, baseID+uint64(idx), method, &agentWg, sendCnt,
			rootCAx509CertificatePEM)
	}
	agentWg.Wait()
}
|
mohdab98/cmps252_hw4.2 | src/cmps252/HW4_2/UnitTesting/record_3514.java | package cmps252.HW4_2.UnitTesting;
import static org.junit.jupiter.api.Assertions.*;
import java.io.FileNotFoundException;
import java.util.List;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import cmps252.HW4_2.Customer;
import cmps252.HW4_2.FileParser;
/**
 * Generated unit tests verifying every field of customer record 3514 as
 * parsed from {@code Configuration.CSV_File}.  The customer list is loaded
 * once in {@link #init()} and each test asserts a single column value.
 *
 * NOTE(review): the Email assertion compares against the placeholder
 * "&lt;EMAIL&gt;" left by data redaction -- confirm against the real fixture.
 */
@Tag("47")
class Record_3514 {
	private static List<Customer> customers;
	@BeforeAll
	public static void init() throws FileNotFoundException {
		customers = FileParser.getCustomers(Configuration.CSV_File);
	}
	@Test
	@DisplayName("Record 3514: FirstName is Gaylord")
	void FirstNameOfRecord3514() {
		assertEquals("Gaylord", customers.get(3513).getFirstName());
	}
	@Test
	@DisplayName("Record 3514: LastName is Szuba")
	void LastNameOfRecord3514() {
		assertEquals("Szuba", customers.get(3513).getLastName());
	}
	@Test
	@DisplayName("Record 3514: Company is Dantel Inc")
	void CompanyOfRecord3514() {
		assertEquals("Dantel Inc", customers.get(3513).getCompany());
	}
	@Test
	@DisplayName("Record 3514: Address is 400 Kaiser Dr")
	void AddressOfRecord3514() {
		assertEquals("400 Kaiser Dr", customers.get(3513).getAddress());
	}
	@Test
	@DisplayName("Record 3514: City is Folcroft")
	void CityOfRecord3514() {
		assertEquals("Folcroft", customers.get(3513).getCity());
	}
	@Test
	@DisplayName("Record 3514: County is Delaware")
	void CountyOfRecord3514() {
		assertEquals("Delaware", customers.get(3513).getCounty());
	}
	@Test
	@DisplayName("Record 3514: State is PA")
	void StateOfRecord3514() {
		assertEquals("PA", customers.get(3513).getState());
	}
	@Test
	@DisplayName("Record 3514: ZIP is 19032")
	void ZIPOfRecord3514() {
		assertEquals("19032", customers.get(3513).getZIP());
	}
	@Test
	@DisplayName("Record 3514: Phone is 610-876-3922")
	void PhoneOfRecord3514() {
		assertEquals("610-876-3922", customers.get(3513).getPhone());
	}
	@Test
	@DisplayName("Record 3514: Fax is 610-876-3705")
	void FaxOfRecord3514() {
		assertEquals("610-876-3705", customers.get(3513).getFax());
	}
	@Test
	@DisplayName("Record 3514: Email is <EMAIL>")
	void EmailOfRecord3514() {
		assertEquals("<EMAIL>", customers.get(3513).getEmail());
	}
	@Test
	@DisplayName("Record 3514: Web is http://www.gaylordszuba.com")
	void WebOfRecord3514() {
		assertEquals("http://www.gaylordszuba.com", customers.get(3513).getWeb());
	}
}
|
GeniusVentures/SuperGenius | src/crdt/globaldb/pubsub_broadcaster_ext.hpp | #ifndef SUPERGENIUS_CRDT_PUBSUB_BROADCASTER_EXT_HPP
#define SUPERGENIUS_CRDT_PUBSUB_BROADCASTER_EXT_HPP
#include <crdt/broadcaster.hpp>
#include <crdt/graphsync_dagsyncer.hpp>
#include <base/logger.hpp>
#include <ipfs_pubsub/gossip_pubsub_topic.hpp>
#include <queue>
namespace sgns::crdt
{
class CrdtDatastore;
    /**
     * Broadcaster implementation that publishes CRDT deltas over a gossip
     * pub/sub topic and hands received payloads to the local CRDT datastore
     * via a graphsync-backed DAG syncer.  Declarations only; definitions
     * live in the corresponding .cpp.
     */
    class PubSubBroadcasterExt : public Broadcaster
    {
    public:
        using GossipPubSub = sgns::ipfs_pubsub::GossipPubSub;
        using GossipPubSubTopic = sgns::ipfs_pubsub::GossipPubSubTopic;
        PubSubBroadcasterExt(
            std::shared_ptr<GossipPubSubTopic> pubSubTopic,
            std::shared_ptr<sgns::crdt::GraphsyncDAGSyncer> dagSyncer,
            libp2p::multi::Multiaddress dagSyncerMultiaddress);
        void SetLogger(const sgns::base::Logger& logger);
        void SetCrdtDataStore(CrdtDatastore* dataStore);
        /**
        * Send {@param buff} payload to other replicas.
        * @return outcome::success on success or outcome::failure on error
        */
        outcome::result<void> Broadcast(const base::Buffer& buff) override;
        /**
        * Obtain the next {@return} payload received from the network.
        * @return buffer value or outcome::failure on error
        */
        outcome::result<base::Buffer> Next() override;
    private:
        // Invoked for each message arriving on the subscribed topic.
        void OnMessage(boost::optional<const GossipPubSub::Message&> message);
        std::shared_ptr<GossipPubSubTopic> gossipPubSubTopic_;
        std::shared_ptr<sgns::crdt::GraphsyncDAGSyncer> dagSyncer_;
        // Non-owning pointer set via SetCrdtDataStore().
        CrdtDatastore* dataStore_;
        libp2p::multi::Multiaddress dagSyncerMultiaddress_;
        // Queue of (sender peer, payload) pairs drained by Next().
        std::queue<std::tuple<libp2p::peer::PeerId, std::string>> messageQueue_;
        // NOTE(review): two logger members exist -- logger_ (set via
        // SetLogger) and m_logger (created here).  Consider consolidating;
        // confirm which one the .cpp actually uses before removing either.
        sgns::base::Logger logger_ = nullptr;
        // Guards messageQueue_ -- presumably OnMessage runs on a pubsub
        // thread while Next() is called from the datastore; verify in .cpp.
        std::mutex mutex_;
        sgns::base::Logger m_logger = sgns::base::createLogger("PubSubBroadcasterExt");
    };
}
#endif // SUPERGENIUS_CRDT_PUBSUB_BROADCASTER_EXT_HPP |
Pioneer-Robotics/FTCPioneer2020-2021 | TeamCode/src/main/java/org/firstinspires/ftc/teamcode/Troubleshooting/TestingOps/TestingOpMode4.java | <filename>TeamCode/src/main/java/org/firstinspires/ftc/teamcode/Troubleshooting/TestingOps/TestingOpMode4.java
package org.firstinspires.ftc.teamcode.Troubleshooting.TestingOps;
import com.qualcomm.robotcore.eventloop.opmode.Autonomous;
import com.qualcomm.robotcore.eventloop.opmode.LinearOpMode;
import com.qualcomm.robotcore.util.ElapsedTime;
import org.firstinspires.ftc.robotcore.external.navigation.DistanceUnit;
import org.firstinspires.ftc.teamcode.Robot.Robot;
import org.firstinspires.ftc.teamcode.Robot.RobotWallTrack;
@Autonomous(name = "TestingOpMode4", group = "Trouble Shooting")
public class TestingOpMode4 extends LinearOpMode {
private Robot robot = new Robot();
private ElapsedTime deltaTime = new ElapsedTime();
private int last_bl;
private int last_br;
@Override
public void runOpMode() {
robot.init(this, true);
waitForStart();
while (opModeIsActive()) {
telemetry.addData("Group 90: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group90).getDistanceAverage(DistanceUnit.CM));
telemetry.addData("Group 90 Angle: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group90).getWallAngle());
telemetry.addData("Group 180: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group180).getDistanceAverage(DistanceUnit.CM));
telemetry.addData("Group 180 Angle: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group180).getWallAngle());
telemetry.addData("Group 270: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group270).getDistanceAverage(DistanceUnit.CM));
telemetry.addData("Group 270 Angle: ", robot.wallTrack.sensorIDGroupPairs.get(RobotWallTrack.groupID.Group270).getWallAngle());
telemetry.update();
}
robot.shutdown();
}
} |
yacoota/whale | src/main/java/com/meiyouframework/bigwhale/util/SpringContextUtils.java | package com.meiyouframework.bigwhale.util;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.stereotype.Component;
/**
 * Static holder for the Spring {@link ApplicationContext}, allowing bean
 * lookups from code that is not itself container-managed.  Spring injects
 * the context at startup via {@link ApplicationContextAware}.
 *
 * @author Suxy
 * @date 2019/8/29
 */
@Component
public class SpringContextUtils implements ApplicationContextAware {
    // Captured once by the container; read-only afterwards.
    private static ApplicationContext applicationContext;
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        SpringContextUtils.applicationContext = applicationContext;
    }
    /** Returns the application context captured at container startup. */
    public static ApplicationContext getApplicationContext() {
        return applicationContext;
    }
    /** Looks up a bean by name. */
    public static Object getBean(String beanName) throws BeansException {
        return applicationContext.getBean(beanName);
    }
    /** Looks up a bean by name, typed. */
    public static <T> T getBean(String beanName, Class<T> clazz) throws BeansException {
        return applicationContext.getBean(beanName, clazz);
    }
    /** Looks up the unique bean of the given type. */
    public static <T> T getBean(Class<T> clazz) throws BeansException {
        return applicationContext.getBean(clazz);
    }
    /** Looks up a bean by name with explicit constructor/factory args. */
    public static Object getBean(String beanName, Object... args) throws BeansException {
        return applicationContext.getBean(beanName, args);
    }
    /** Looks up a bean by type with explicit constructor/factory args. */
    public static <T> T getBean(Class<T> clazz, Object... args) throws BeansException {
        return applicationContext.getBean(clazz, args);
    }
}
|
SayanGhoshBDA/code-backup | java_backup/my java/Nigga Stole My Bike/sort2.java | <filename>java_backup/my java/Nigga Stole My Bike/sort2.java
import java.util.*;
// Reads n integers and rearranges them by visiting values from max down to
// min, swapping each occurrence toward positions alternating around a
// "middle" index md.  Appears intended to produce a centered/wave ordering.
// NOTE(review): for even n, md = n/2 + 1 and k = f*eo can push md+k past
// the array bounds for some inputs -- verify with boundary cases.
// NOTE(review): the println("f"+f) below looks like leftover debug output.
public class sort2
{
	public static void main(String args[])
	{
		Scanner sc=new Scanner(System.in);
		System.out.println("enter length");
		int n=sc.nextInt();
		int a[]=new int[n];
		for(int i=0;i<n;i++)
		{
			a[i]=sc.nextInt();
		}
		// Find the maximum and minimum of the input.
		int mx=a[0],mn=a[0];
		for(int i=0;i<n;i++)
		{
			if(mx<a[i])
				mx=a[i];
			if(mn>a[i])
				mn=a[i];
		}
		// md: center index around which elements are placed.
		int md=0;
		// eo alternates the placement side; f is the growing offset.
		int eo=1,f=-1;
		if(n%2==0)
		{
			md=n/2+1;
		}
		else if(n%2!=0)
		{
			md=n/2-1;
		}
		int c=0;
		// Visit candidate values from largest to smallest; each match is
		// swapped to position md + f*eo, alternating sides as c grows.
		for(int i=mx;i>=mn;i--)
		{
			for(int j=0;j<n;j++)
			{
				if(a[j]==i)
				{ c++;
					eo=eo*-1;
					if(c%2!=0||c==1)
						f++;
					System.out.println("f"+f);
					int k=f*eo;
					int tp=a[md+k];
					a[md+k]=a[j];
					a[j]=tp;
				}
			}
		}
		// Print the rearranged array.
		for(int i=0;i<n;i++)
		{
			System.out.print(a[i]+" , ");
		}
	}
}
|
jmhrpr/tor-prop-327 | src/ext/equix/hashx/src/virtual_memory.c | /* Copyright (c) 2020 tevador <<EMAIL>> */
/* See LICENSE for licensing information */
#include "virtual_memory.h"
#ifdef HASHX_WIN
#include <windows.h>
#else
#ifdef __APPLE__
#include <mach/vm_statistics.h>
#endif
#include <sys/types.h>
#include <sys/mman.h>
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#define PAGE_READONLY PROT_READ
#define PAGE_READWRITE (PROT_READ | PROT_WRITE)
#define PAGE_EXECUTE_READ (PROT_READ | PROT_EXEC)
#define PAGE_EXECUTE_READWRITE (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif
#ifdef HASHX_WIN
/* Enable (bEnable != 0) or disable the named privilege on the current
 * process token.  Returns 1 on success, 0 on failure.  Windows-only
 * helper used to obtain SeLockMemoryPrivilege for large-page allocation.
 * Fix: the token handle was previously leaked when LookupPrivilegeValue
 * failed; it is now closed on every exit path after OpenProcessToken. */
static int set_privilege(const char* pszPrivilege, BOOL bEnable) {
	HANDLE hToken;
	TOKEN_PRIVILEGES tp;
	BOOL status;
	DWORD error;
	if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES
		| TOKEN_QUERY, &hToken))
		return 0;
	if (!LookupPrivilegeValue(NULL, pszPrivilege, &tp.Privileges[0].Luid)) {
		CloseHandle(hToken); /* was leaked here before */
		return 0;
	}
	tp.PrivilegeCount = 1;
	if (bEnable)
		tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
	else
		tp.Privileges[0].Attributes = 0;
	status = AdjustTokenPrivileges(hToken, FALSE, &tp, 0,
		(PTOKEN_PRIVILEGES)NULL, 0);
	/* AdjustTokenPrivileges can succeed partially; check GetLastError. */
	error = GetLastError();
	CloseHandle(hToken);
	return status && (error == ERROR_SUCCESS);
}
#endif
/* Allocate `bytes` of read/write virtual memory (normal page size).
 * Returns NULL on failure. */
void* hashx_vm_alloc(size_t bytes) {
	void* mem;
#ifdef HASHX_WIN
	/* VirtualAlloc returns NULL on failure, matching our contract. */
	mem = VirtualAlloc(NULL, bytes, MEM_COMMIT, PAGE_READWRITE);
#else
	mem = mmap(NULL, bytes, PAGE_READWRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (mem == MAP_FAILED)
		return NULL; /* normalize mmap's MAP_FAILED to NULL */
#endif
	return mem;
}
/* Change the protection of [ptr, ptr+bytes) to `rules` (one of the
 * PAGE_* constants defined above).  Returns 1 on success, 0 on failure. */
static inline int page_protect(void* ptr, size_t bytes, int rules) {
#ifdef HASHX_WIN
	DWORD oldp; /* previous protection; required out-param, unused */
	if (!VirtualProtect(ptr, bytes, (DWORD)rules, &oldp)) {
		return 0;
	}
#else
	if (-1 == mprotect(ptr, bytes, rules))
		return 0;
#endif
	return 1;
}
/* Make the region read/write (e.g. before patching generated code). */
void hashx_vm_rw(void* ptr, size_t bytes) {
	page_protect(ptr, bytes, PAGE_READWRITE);
}
/* Make the region read/execute (W^X: writable mapping becomes runnable). */
void hashx_vm_rx(void* ptr, size_t bytes) {
	page_protect(ptr, bytes, PAGE_EXECUTE_READ);
}
/* Allocate `bytes` of read/write memory backed by huge/large pages.
 * Returns NULL on failure.  Each platform has its own path below. */
void* hashx_vm_alloc_huge(size_t bytes) {
	void* mem;
#ifdef HASHX_WIN
	/* Large pages require SeLockMemoryPrivilege; enable best-effort. */
	set_privilege("SeLockMemoryPrivilege", 1);
	SIZE_T page_min = GetLargePageMinimum();
	if (page_min > 0) {
		mem = VirtualAlloc(NULL, ALIGN_SIZE(bytes, page_min), MEM_COMMIT
			| MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE);
	}
	else {
		/* GetLargePageMinimum() == 0: large pages unsupported. */
		mem = NULL;
	}
#else
#ifdef __APPLE__
	mem = mmap(NULL, bytes, PAGE_READWRITE, MAP_PRIVATE | MAP_ANONYMOUS,
		VM_FLAGS_SUPERPAGE_SIZE_2MB, 0);
#elif defined(__FreeBSD__)
	mem = mmap(NULL, bytes, PAGE_READWRITE, MAP_PRIVATE | MAP_ANONYMOUS
		| MAP_ALIGNED_SUPER, -1, 0);
#elif defined(__OpenBSD__)
	mem = MAP_FAILED; // OpenBSD does not support huge pages
#else
	mem = mmap(NULL, bytes, PAGE_READWRITE, MAP_PRIVATE | MAP_ANONYMOUS
		| MAP_HUGETLB | MAP_POPULATE, -1, 0);
#endif
	if (mem == MAP_FAILED) {
		mem = NULL; /* normalize failure value */
	}
#endif
	return mem;
}
/* Release memory obtained from hashx_vm_alloc / hashx_vm_alloc_huge.
 * `bytes` is required by munmap; ignored by VirtualFree(MEM_RELEASE). */
void hashx_vm_free(void* ptr, size_t bytes) {
#ifdef HASHX_WIN
	VirtualFree(ptr, 0, MEM_RELEASE);
#else
	munmap(ptr, bytes);
#endif
}
|
ucarGroup/EserKnife | src/main/java/com/ucar/eser/core/bean/po/DataSourceMapping.java | <gh_stars>10-100
package com.ucar.eser.core.bean.po;
import java.util.Date;
/**
 *
 * Description: data-source mapping entity, linking a data source's local
 * name to its name on the data-exchange platform.
 * All Rights Reserved.
 * Created on 2017-3-10 4:29:12 PM
 */
public class DataSourceMapping {
    /**
     * Primary key
     */
    private Long id;
    /**
     * Name of the data source in this system
     */
    private String localName;
    /**
     * Name of the data source on the data-exchange platform
     */
    private String dataExchangeName;
    /**
     * Description
     */
    private String remark;
    /**
     * Operator (who made the change)
     */
    private String operator;
    /**
     * Operation time
     */
    private Date operateTime;
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
    public String getLocalName() {
        return localName;
    }
    public void setLocalName(String localName) {
        this.localName = localName;
    }
    public String getDataExchangeName() {
        return dataExchangeName;
    }
    public void setDataExchangeName(String dataExchangeName) {
        this.dataExchangeName = dataExchangeName;
    }
    public String getRemark() {
        return remark;
    }
    public void setRemark(String remark) {
        this.remark = remark;
    }
    public String getOperator() {
        return operator;
    }
    public void setOperator(String operator) {
        this.operator = operator;
    }
    public Date getOperateTime() {
        return operateTime;
    }
    public void setOperateTime(Date operateTime) {
        this.operateTime = operateTime;
    }
}
|
koenw/fullstack-hello | frontend/node_modules/.pnpm/@rsuite+icons@1.0.2_react-dom@17.0.2+react@17.0.2/node_modules/@rsuite/icons/lib/icons/legacy/Recycle.js | "use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
exports.__esModule = true;
exports["default"] = void 0;
var _createSvgIcon = _interopRequireDefault(require("../../createSvgIcon"));
var _Recycle = _interopRequireDefault(require("@rsuite/icon-font/lib/legacy/Recycle"));
// Generated by script, don't edit it please.
// Wraps the raw "Recycle" SVG glyph from @rsuite/icon-font in an
// accessible icon component via createSvgIcon (generated file).
var Recycle = (0, _createSvgIcon["default"])({
  as: _Recycle["default"],
  ariaLabel: 'recycle',
  category: 'legacy',
  displayName: 'Recycle'
});
var _default = Recycle;
exports["default"] = _default;
module.exports = exports.default; |
netarch/batfish | projects/question/src/main/java/org/batfish/question/edges/EdgesAnswerer.java | <gh_stars>1-10
package org.batfish.question.edges;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Multiset;
import com.google.common.graph.EndpointPair;
import com.google.common.graph.Network;
import com.google.common.graph.ValueGraph;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import org.batfish.common.Answerer;
import org.batfish.common.plugin.IBatfish;
import org.batfish.common.topology.Layer1Edge;
import org.batfish.common.topology.Layer1Topology;
import org.batfish.common.topology.Layer2Edge;
import org.batfish.common.topology.Layer2Topology;
import org.batfish.common.topology.TopologyUtil;
import org.batfish.datamodel.BgpPeerConfig;
import org.batfish.datamodel.BgpPeerConfigId;
import org.batfish.datamodel.BgpSessionProperties;
import org.batfish.datamodel.Configuration;
import org.batfish.datamodel.Edge;
import org.batfish.datamodel.EdgeType;
import org.batfish.datamodel.Interface;
import org.batfish.datamodel.InterfaceAddress;
import org.batfish.datamodel.Ip;
import org.batfish.datamodel.NetworkConfigurations;
import org.batfish.datamodel.RipNeighbor;
import org.batfish.datamodel.RipProcess;
import org.batfish.datamodel.Topology;
import org.batfish.datamodel.Vrf;
import org.batfish.datamodel.answers.AnswerElement;
import org.batfish.datamodel.answers.Schema;
import org.batfish.datamodel.bgp.BgpTopologyUtils;
import org.batfish.datamodel.collections.NodeInterfacePair;
import org.batfish.datamodel.eigrp.EigrpEdge;
import org.batfish.datamodel.eigrp.EigrpInterface;
import org.batfish.datamodel.eigrp.EigrpTopology;
import org.batfish.datamodel.isis.IsisEdge;
import org.batfish.datamodel.isis.IsisNode;
import org.batfish.datamodel.isis.IsisTopology;
import org.batfish.datamodel.ospf.OspfNeighbor;
import org.batfish.datamodel.ospf.OspfProcess;
import org.batfish.datamodel.ospf.OspfTopologyUtils;
import org.batfish.datamodel.pojo.Node;
import org.batfish.datamodel.questions.Question;
import org.batfish.datamodel.table.ColumnMetadata;
import org.batfish.datamodel.table.Row;
import org.batfish.datamodel.table.Row.RowBuilder;
import org.batfish.datamodel.table.TableAnswerElement;
import org.batfish.datamodel.table.TableMetadata;
public class EdgesAnswerer extends Answerer {
  // Column headers shared by the interface-pair edge tables.
  static final String COL_INTERFACE = "Interface";
  static final String COL_REMOTE_INTERFACE = "Remote_Interface";
  static final String COL_IPS = "IPs";
  static final String COL_REMOTE_IPS = "Remote_IPs";
  // BGP only
  static final String COL_NODE = "Node";
  static final String COL_REMOTE_NODE = "Remote_Node";
  static final String COL_AS_NUMBER = "AS_Number";
  static final String COL_REMOTE_AS_NUMBER = "Remote_AS_Number";
  static final String COL_IP = "IP";
  static final String COL_REMOTE_IP = "Remote_IP";
  // Layer 2
  static final String COL_VLAN = "VLAN";
  static final String COL_REMOTE_VLAN = "Remote_VLAN";
  EdgesAnswerer(Question question, IBatfish batfish) {
    super(question, batfish);
  }
  /**
   * Computes the edge table for the question's requested edge type, restricted
   * to edges whose endpoints match the node / remote-node specifiers.
   */
  @Override
  public AnswerElement answer() {
    EdgesQuestion question = (EdgesQuestion) _question;
    Map<String, Configuration> configurations = _batfish.loadConfigurations();
    // Resolve the node specifiers once; row generators filter against these.
    Set<String> includeNodes = question.getNodes().getMatchingNodes(_batfish);
    Set<String> includeRemoteNodes = question.getRemoteNodes().getMatchingNodes(_batfish);
    TableAnswerElement answer = new TableAnswerElement(getTableMetadata(question.getEdgeType()));
    Topology topology = _batfish.getEnvironmentTopology();
    answer.postProcessAnswer(
        _question,
        generateRows(
            configurations, topology, includeNodes, includeRemoteNodes, question.getEdgeType()));
    return answer;
  }
  /** Dispatches to the per-edge-type row generator; LAYER3 doubles as the default. */
  private Multiset<Row> generateRows(
      Map<String, Configuration> configurations,
      Topology topology,
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      EdgeType edgeType) {
    // IP-to-owning-node map, consumed by the BGP and RIP branches.
    Map<Ip, Set<String>> ipOwners = TopologyUtil.computeIpNodeOwners(configurations, true);
    switch (edgeType) {
      case BGP:
        ValueGraph<BgpPeerConfigId, BgpSessionProperties> bgpTopology =
            BgpTopologyUtils.initBgpTopology(configurations, ipOwners, false, false, null, null);
        return getBgpEdges(configurations, includeNodes, includeRemoteNodes, bgpTopology);
      case EIGRP:
        Network<EigrpInterface, EigrpEdge> eigrpTopology =
            EigrpTopology.initEigrpTopology(configurations, topology);
        return getEigrpEdges(includeNodes, includeRemoteNodes, eigrpTopology);
      case ISIS:
        Network<IsisNode, IsisEdge> isisTopology =
            IsisTopology.initIsisTopology(configurations, topology);
        return getIsisEdges(includeNodes, includeRemoteNodes, isisTopology);
      case OSPF:
        return getOspfEdges(configurations, includeNodes, includeRemoteNodes, topology);
      case RIP:
        // Side effect: resolves remote RIP neighbors before generating rows.
        _batfish.initRemoteRipNeighbors(configurations, ipOwners, topology);
        return getRipEdges(configurations, includeNodes, includeRemoteNodes);
      case LAYER1:
        Layer1Topology layer1Topology = _batfish.getLayer1Topology();
        return getLayer1Edges(includeNodes, includeRemoteNodes, layer1Topology);
      case LAYER2:
        Layer2Topology layer2Topology = _batfish.getLayer2Topology();
        return getLayer2Edges(includeNodes, includeRemoteNodes, layer2Topology);
      case LAYER3:
      default:
        return getLayer3Edges(configurations, includeNodes, includeRemoteNodes, topology);
    }
  }
@VisibleForTesting
static Multiset<Row> getEigrpEdges(
Set<String> includeNodes,
Set<String> includeRemoteNodes,
Network<EigrpInterface, EigrpEdge> eigrpTopology) {
return eigrpTopology
.edges()
.stream()
.filter(
eigrpEdge ->
includeNodes.contains(eigrpEdge.getNode1().getHostname())
&& includeRemoteNodes.contains(eigrpEdge.getNode2().getHostname()))
.map(EdgesAnswerer::eigrpEdgeToRow)
.collect(Collectors.toCollection(HashMultiset::create));
}
  /**
   * Builds one row per BGP session edge whose initiator matches {@code includeNodes} and whose
   * responder matches {@code includeRemoteNodes}. Sessions whose peer config cannot be resolved
   * on either side are skipped.
   */
  @VisibleForTesting
  static Multiset<Row> getBgpEdges(
      Map<String, Configuration> configurations,
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      ValueGraph<BgpPeerConfigId, BgpSessionProperties> bgpTopology) {
    Multiset<Row> rows = HashMultiset.create();
    for (EndpointPair<BgpPeerConfigId> session : bgpTopology.edges()) {
      BgpPeerConfigId bgpPeerConfigId = session.source();
      BgpPeerConfigId remoteBgpPeerConfigId = session.target();
      NetworkConfigurations nc = NetworkConfigurations.of(configurations);
      BgpPeerConfig bgpPeerConfig = nc.getBgpPeerConfig(bgpPeerConfigId);
      BgpPeerConfig remoteBgpPeerConfig = nc.getBgpPeerConfig(remoteBgpPeerConfigId);
      if (bgpPeerConfig == null || remoteBgpPeerConfig == null) {
        // Skip sessions with a missing peer configuration on either side.
        continue;
      }
      String hostname = bgpPeerConfigId.getHostname();
      String remoteHostname = remoteBgpPeerConfigId.getHostname();
      if (includeNodes.contains(hostname) && includeRemoteNodes.contains(remoteHostname)) {
        rows.add(
            getBgpEdgeRow(
                hostname,
                bgpPeerConfig.getLocalIp(),
                bgpPeerConfig.getLocalAs(),
                remoteHostname,
                remoteBgpPeerConfig.getLocalIp(),
                remoteBgpPeerConfig.getLocalAs()));
      }
    }
    return rows;
  }
@VisibleForTesting
static Multiset<Row> getIsisEdges(
Set<String> includeNodes,
Set<String> includeRemoteNodes,
Network<IsisNode, IsisEdge> isisTopology) {
return isisTopology
.edges()
.stream()
.filter(Objects::nonNull)
.filter(
isisEdge ->
includeNodes.contains(isisEdge.getNode1().getNode())
&& includeRemoteNodes.contains(isisEdge.getNode2().getNode()))
.map(EdgesAnswerer::isisEdgeToRow)
.collect(Collectors.toCollection(HashMultiset::create));
}
  /**
   * Returns one row per layer-1 edge matching the node filters. A null topology (layer-1 not
   * provided for the snapshot) yields an empty multiset.
   */
  @VisibleForTesting
  static Multiset<Row> getLayer1Edges(
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      @Nullable Layer1Topology layer1Topology) {
    if (layer1Topology == null) {
      return HashMultiset.create();
    }
    return layer1Topology
        .getGraph()
        .edges()
        .stream()
        .filter(
            layer1Edge ->
                includeNodes.contains(layer1Edge.getNode1().getHostname())
                    && includeRemoteNodes.contains(layer1Edge.getNode2().getHostname()))
        .map(EdgesAnswerer::layer1EdgeToRow)
        .collect(Collectors.toCollection(HashMultiset::create));
  }
  /**
   * Returns one row per layer-2 edge matching the node filters. A null topology yields an empty
   * multiset.
   */
  @VisibleForTesting
  static Multiset<Row> getLayer2Edges(
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      @Nullable Layer2Topology layer2Topology) {
    if (layer2Topology == null) {
      return HashMultiset.create();
    }
    return layer2Topology
        .getGraph()
        .edges()
        .stream()
        .filter(
            layer2Edge ->
                includeNodes.contains(layer2Edge.getNode1().getHostname())
                    && includeRemoteNodes.contains(layer2Edge.getNode2().getHostname()))
        .map(EdgesAnswerer::layer2EdgeToRow)
        .collect(Collectors.toCollection(HashMultiset::create));
  }
  /** Returns one row per layer-3 topology edge matching the node filters. */
  @VisibleForTesting
  static Multiset<Row> getLayer3Edges(
      Map<String, Configuration> configurations,
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      Topology topology) {
    return topology
        .getEdges()
        .stream()
        .filter(
            layer3Edge ->
                includeNodes.contains(layer3Edge.getNode1())
                    && includeRemoteNodes.contains(layer3Edge.getNode2()))
        .map(layer3edge -> layer3EdgeToRow(configurations, layer3edge))
        .collect(Collectors.toCollection(HashMultiset::create));
  }
  /**
   * Returns one row per OSPF neighbor pairing matching the node filters. Side effect: first
   * resolves remote OSPF neighbors over the given topology.
   */
  @VisibleForTesting
  static Multiset<Row> getOspfEdges(
      Map<String, Configuration> configurations,
      Set<String> includeNodes,
      Set<String> includeRemoteNodes,
      Topology topology) {
    Multiset<Row> rows = HashMultiset.create();
    OspfTopologyUtils.initRemoteOspfNeighbors(configurations, topology);
    for (Configuration c : configurations.values()) {
      String hostname = c.getHostname();
      for (Vrf vrf : c.getVrfs().values()) {
        OspfProcess proc = vrf.getOspfProcess();
        if (proc != null) {
          for (OspfNeighbor ospfNeighbor : proc.getOspfNeighbors().values()) {
            OspfNeighbor remoteOspfNeighbor = ospfNeighbor.getRemoteOspfNeighbor();
            // Neighbors without a resolved remote side are skipped.
            if (remoteOspfNeighbor != null) {
              Configuration remoteHost = remoteOspfNeighbor.getOwner();
              String remoteHostname = remoteHost.getHostname();
              if (includeNodes.contains(hostname) && includeRemoteNodes.contains(remoteHostname)) {
                rows.add(
                    getOspfEdgeRow(
                        hostname,
                        ospfNeighbor.getIface().getName(),
                        remoteHostname,
                        remoteOspfNeighbor.getIface().getName()));
              }
            }
          }
        }
      }
    }
    return rows;
  }
  /**
   * Returns one row per RIP neighbor pairing matching the node filters. Assumes remote RIP
   * neighbors were already resolved by the caller (see generateRows).
   */
  @VisibleForTesting
  static Multiset<Row> getRipEdges(
      Map<String, Configuration> configurations,
      Set<String> includeNodes,
      Set<String> includeRemoteNodes) {
    Multiset<Row> rows = HashMultiset.create();
    for (Configuration c : configurations.values()) {
      String hostname = c.getHostname();
      for (Vrf vrf : c.getVrfs().values()) {
        RipProcess proc = vrf.getRipProcess();
        if (proc != null) {
          for (RipNeighbor ripNeighbor : proc.getRipNeighbors().values()) {
            RipNeighbor remoteRipNeighbor = ripNeighbor.getRemoteRipNeighbor();
            // Neighbors without a resolved remote side are skipped.
            if (remoteRipNeighbor != null) {
              Configuration remoteHost = remoteRipNeighbor.getOwner();
              String remoteHostname = remoteHost.getHostname();
              if (includeNodes.contains(hostname) && includeRemoteNodes.contains(remoteHostname)) {
                rows.add(
                    getRipEdgeRow(
                        ripNeighbor.getOwner().getHostname(),
                        ripNeighbor.getIface().getName(),
                        remoteRipNeighbor.getOwner().getHostname(),
                        remoteRipNeighbor.getIface().getName()));
              }
            }
          }
        }
      }
    }
    return rows;
  }
  /** Converts an EIGRP adjacency into a local/remote interface row. */
  @VisibleForTesting
  static Row eigrpEdgeToRow(EigrpEdge eigrpEdge) {
    RowBuilder row = Row.builder();
    row.put(
            COL_INTERFACE,
            new NodeInterfacePair(
                eigrpEdge.getNode1().getHostname(), eigrpEdge.getNode1().getInterfaceName()))
        .put(
            COL_REMOTE_INTERFACE,
            new NodeInterfacePair(
                eigrpEdge.getNode2().getHostname(), eigrpEdge.getNode2().getInterfaceName()));
    return row.build();
  }
  /**
   * Builds a BGP edge row from the local and remote node names, local IPs, and local AS numbers.
   * IPs and AS numbers may be null when unresolved in the peer configs.
   */
  @VisibleForTesting
  static Row getBgpEdgeRow(
      String node,
      @Nullable Ip ip,
      @Nullable Long asNumber,
      String remoteNode,
      @Nullable Ip remoteIp,
      @Nullable Long remoteAsNumber) {
    RowBuilder row = Row.builder();
    row.put(COL_NODE, new Node(node))
        .put(COL_IP, ip)
        .put(COL_AS_NUMBER, asNumber)
        .put(COL_REMOTE_NODE, new Node(remoteNode))
        .put(COL_REMOTE_IP, remoteIp)
        .put(COL_REMOTE_AS_NUMBER, remoteAsNumber);
    return row.build();
  }
  /** Converts an IS-IS adjacency into a local/remote interface row. */
  static Row isisEdgeToRow(IsisEdge isisEdge) {
    RowBuilder row = Row.builder();
    row.put(
            COL_INTERFACE,
            new NodeInterfacePair(
                isisEdge.getNode1().getNode(), isisEdge.getNode1().getInterfaceName()))
        .put(
            COL_REMOTE_INTERFACE,
            new NodeInterfacePair(
                isisEdge.getNode2().getNode(), isisEdge.getNode2().getInterfaceName()));
    return row.build();
  }
  /** Converts a layer-1 edge into a local/remote interface row. */
  @VisibleForTesting
  static Row layer1EdgeToRow(Layer1Edge layer1Edge) {
    RowBuilder row = Row.builder();
    row.put(
            COL_INTERFACE,
            new NodeInterfacePair(
                layer1Edge.getNode1().getHostname(), layer1Edge.getNode1().getInterfaceName()))
        .put(
            COL_REMOTE_INTERFACE,
            new NodeInterfacePair(
                layer1Edge.getNode2().getHostname(), layer1Edge.getNode2().getInterfaceName()));
    return row.build();
  }
  /** Converts a layer-2 edge into an interface/VLAN row for both endpoints. */
  @VisibleForTesting
  static Row layer2EdgeToRow(Layer2Edge layer2Edge) {
    RowBuilder row = Row.builder();
    row.put(
            COL_INTERFACE,
            new NodeInterfacePair(
                layer2Edge.getNode1().getHostname(), layer2Edge.getNode1().getInterfaceName()))
        .put(COL_VLAN, layer2Edge.getNode1().getVlanId())
        .put(
            COL_REMOTE_INTERFACE,
            new NodeInterfacePair(
                layer2Edge.getNode2().getHostname(), layer2Edge.getNode2().getInterfaceName()))
        .put(COL_REMOTE_VLAN, layer2Edge.getNode2().getVlanId());
    return row.build();
  }
  /**
   * Converts a layer-3 edge into a row containing both endpoint interfaces and the set of IPs
   * configured on each interface (looked up in the given configurations).
   */
  @VisibleForTesting
  static Row layer3EdgeToRow(Map<String, Configuration> configurations, Edge edge) {
    Interface interface1 =
        configurations.get(edge.getNode1()).getAllInterfaces().get(edge.getInt1());
    Interface interface2 =
        configurations.get(edge.getNode2()).getAllInterfaces().get(edge.getInt2());
    // Collect the IPs of every address configured on each endpoint interface.
    Set<Ip> ips1 =
        interface1
            .getAllAddresses()
            .stream()
            .filter(Objects::nonNull)
            .map(InterfaceAddress::getIp)
            .collect(Collectors.toSet());
    Set<Ip> ips2 =
        interface2
            .getAllAddresses()
            .stream()
            .filter(Objects::nonNull)
            .map(InterfaceAddress::getIp)
            .collect(Collectors.toSet());
    RowBuilder row = Row.builder();
    row.put(COL_INTERFACE, new NodeInterfacePair(edge.getNode1(), edge.getInt1()))
        .put(COL_IPS, ips1)
        .put(COL_REMOTE_INTERFACE, new NodeInterfacePair(edge.getNode2(), edge.getInt2()))
        .put(COL_REMOTE_IPS, ips2);
    return row.build();
  }
@VisibleForTesting
static Row getOspfEdgeRow(String node, String iface, String remoteNode, String remoteIface) {
RowBuilder row = Row.builder();
row.put(COL_INTERFACE, new NodeInterfacePair(node, iface))
.put(COL_REMOTE_INTERFACE, new NodeInterfacePair(remoteNode, remoteIface));
return row.build();
}
static Row getRipEdgeRow(String node, String iface, String remoteNode, String remoteIface) {
RowBuilder row = Row.builder();
row.put(COL_INTERFACE, new NodeInterfacePair(node, iface))
.put(COL_REMOTE_INTERFACE, new NodeInterfacePair(remoteNode, remoteIface));
return row.build();
}
/**
 * Generate the table metadata based on the type of edge requested.
 *
 * <p>Every layout has an "originator" column (plus per-type extras such as IPs, VLANs, or AS
 * numbers) followed by the matching "remote" columns. The two trailing booleans passed to each
 * {@code ColumnMetadata} are identical for every column here; see the {@code ColumnMetadata}
 * constructor for their exact meaning.
 */
@VisibleForTesting
static TableMetadata getTableMetadata(EdgeType edgeType) {
  ImmutableList.Builder<ColumnMetadata> columnBuilder = ImmutableList.builder();
  switch (edgeType) {
    case LAYER3:
      // Layer-3 edges: interface on each side plus the IPs owned there.
      columnBuilder.add(
          new ColumnMetadata(
              COL_INTERFACE,
              Schema.INTERFACE,
              "Interface from which the edge originates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(COL_IPS, Schema.set(Schema.IP), "IPs", Boolean.FALSE, Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_INTERFACE,
              Schema.INTERFACE,
              "Interface at which the edge terminates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_IPS, Schema.set(Schema.IP), "Remote IPs", Boolean.FALSE, Boolean.TRUE));
      break;
    case LAYER2:
      // Layer-2 edges: interface plus VLAN on each side.
      columnBuilder.add(
          new ColumnMetadata(
              COL_INTERFACE,
              Schema.INTERFACE,
              "Interface from which the edge originates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_VLAN,
              Schema.STRING,
              "VLAN containing the originator",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_INTERFACE,
              Schema.INTERFACE,
              "Interface at which the edge terminates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_VLAN,
              Schema.STRING,
              "VLAN containing the remote node",
              Boolean.FALSE,
              Boolean.TRUE));
      break;
    case BGP:
      // BGP sessions are node-level (not interface-level): node, IP, and AS number per side.
      columnBuilder.add(
          new ColumnMetadata(
              COL_NODE,
              Schema.NODE,
              "Node from which the edge originates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_IP, Schema.IP, "IP at the side of originator", Boolean.FALSE, Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_AS_NUMBER,
              Schema.STRING,
              "AS Number at the side of originator",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_NODE,
              Schema.NODE,
              "Node at which the edge terminates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_IP,
              Schema.IP,
              "IP at the side of the responder",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_AS_NUMBER,
              Schema.STRING,
              "AS Number at the side of responder",
              Boolean.FALSE,
              Boolean.TRUE));
      break;
    case OSPF:
    case ISIS:
    case EIGRP:
    case RIP:
    case LAYER1:
    default:
      // All remaining edge types (and any future ones) share the plain
      // interface-pair layout with no extra columns.
      columnBuilder.add(
          new ColumnMetadata(
              COL_INTERFACE,
              Schema.INTERFACE,
              "Interface from which the edge originates",
              Boolean.FALSE,
              Boolean.TRUE));
      columnBuilder.add(
          new ColumnMetadata(
              COL_REMOTE_INTERFACE,
              Schema.INTERFACE,
              "Interface at which the edge terminates",
              Boolean.FALSE,
              Boolean.TRUE));
  }
  return new TableMetadata(columnBuilder.build(), "Display Edges");
}
}
|
wyz7155/SymDrive | s2e/qemu/target-s390x/helper.h | #include "def-helper.h"
/*
 * TCG helper declarations for the s390x target.
 *
 * Each DEF_HELPER_* line declares one helper: the first argument is the helper
 * name, followed by the return type and the argument types. The _FLAGS_
 * variants additionally carry TCG call flags (TCG_CALL_PURE / TCG_CALL_CONST).
 * The actual expansion depends on how "def-helper.h" is configured by the
 * including file.
 */
/* Exception raising, logical/string ops, and multiply/divide helpers. */
DEF_HELPER_1(exception, void, i32)
DEF_HELPER_3(nc, i32, i32, i64, i64)
DEF_HELPER_3(oc, i32, i32, i64, i64)
DEF_HELPER_3(xc, i32, i32, i64, i64)
DEF_HELPER_3(mvc, void, i32, i64, i64)
DEF_HELPER_3(clc, i32, i32, i64, i64)
DEF_HELPER_2(mvcl, i32, i32, i32)
/* Condition-code computation helpers (comparisons, adds, subtracts). */
DEF_HELPER_FLAGS_1(set_cc_comp_s32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
DEF_HELPER_FLAGS_1(set_cc_comp_s64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64)
DEF_HELPER_FLAGS_2(set_cc_icm, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32)
DEF_HELPER_3(clm, i32, i32, i32, i64)
DEF_HELPER_3(stcm, void, i32, i32, i64)
DEF_HELPER_2(mlg, void, i32, i64)
DEF_HELPER_2(dlg, void, i32, i64)
DEF_HELPER_FLAGS_3(set_cc_add64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
DEF_HELPER_FLAGS_3(set_cc_addu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
DEF_HELPER_FLAGS_3(set_cc_add32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
DEF_HELPER_FLAGS_3(set_cc_addu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
DEF_HELPER_FLAGS_3(set_cc_sub64, TCG_CALL_PURE|TCG_CALL_CONST, i32, s64, s64, s64)
DEF_HELPER_FLAGS_3(set_cc_subu64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
DEF_HELPER_FLAGS_3(set_cc_sub32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32, s32, s32)
DEF_HELPER_FLAGS_3(set_cc_subu32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
/* String search/compare/move and compare-and-swap helpers. */
DEF_HELPER_3(srst, i32, i32, i32, i32)
DEF_HELPER_3(clst, i32, i32, i32, i32)
DEF_HELPER_3(mvpg, void, i64, i64, i64)
DEF_HELPER_3(mvst, void, i32, i32, i32)
DEF_HELPER_3(csg, i32, i32, i64, i32)
DEF_HELPER_3(cdsg, i32, i32, i64, i32)
DEF_HELPER_3(cs, i32, i32, i64, i32)
DEF_HELPER_4(ex, i32, i32, i64, i64, i64)
DEF_HELPER_FLAGS_1(abs_i32, TCG_CALL_PURE|TCG_CALL_CONST, i32, s32)
DEF_HELPER_FLAGS_1(nabs_i32, TCG_CALL_PURE|TCG_CALL_CONST, s32, s32)
DEF_HELPER_FLAGS_1(abs_i64, TCG_CALL_PURE|TCG_CALL_CONST, i64, s64)
DEF_HELPER_FLAGS_1(nabs_i64, TCG_CALL_PURE|TCG_CALL_CONST, s64, s64)
DEF_HELPER_3(stcmh, void, i32, i64, i32)
DEF_HELPER_3(icmh, i32, i32, i64, i32)
DEF_HELPER_2(ipm, void, i32, i32)
DEF_HELPER_FLAGS_3(addc_u32, TCG_CALL_PURE|TCG_CALL_CONST, i32, i32, i32, i32)
DEF_HELPER_FLAGS_3(set_cc_addc_u64, TCG_CALL_PURE|TCG_CALL_CONST, i32, i64, i64, i64)
DEF_HELPER_3(stam, void, i32, i64, i32)
DEF_HELPER_3(lam, void, i32, i64, i32)
DEF_HELPER_3(mvcle, i32, i32, i64, i32)
DEF_HELPER_3(clcle, i32, i32, i64, i32)
DEF_HELPER_3(slb, i32, i32, i32, i32)
DEF_HELPER_4(slbg, i32, i32, i32, i64, i64)
/* Binary floating-point (BFP) conversion, arithmetic, and compare helpers. */
DEF_HELPER_2(cefbr, void, i32, s32)
DEF_HELPER_2(cdfbr, void, i32, s32)
DEF_HELPER_2(cxfbr, void, i32, s32)
DEF_HELPER_2(cegbr, void, i32, s64)
DEF_HELPER_2(cdgbr, void, i32, s64)
DEF_HELPER_2(cxgbr, void, i32, s64)
DEF_HELPER_2(adbr, i32, i32, i32)
DEF_HELPER_2(aebr, i32, i32, i32)
DEF_HELPER_2(sebr, i32, i32, i32)
DEF_HELPER_2(sdbr, i32, i32, i32)
DEF_HELPER_2(debr, void, i32, i32)
DEF_HELPER_2(dxbr, void, i32, i32)
DEF_HELPER_2(mdbr, void, i32, i32)
DEF_HELPER_2(mxbr, void, i32, i32)
DEF_HELPER_2(ldebr, void, i32, i32)
DEF_HELPER_2(ldxbr, void, i32, i32)
DEF_HELPER_2(lxdbr, void, i32, i32)
DEF_HELPER_2(ledbr, void, i32, i32)
DEF_HELPER_2(lexbr, void, i32, i32)
DEF_HELPER_2(lpebr, i32, i32, i32)
DEF_HELPER_2(lpdbr, i32, i32, i32)
DEF_HELPER_2(lpxbr, i32, i32, i32)
DEF_HELPER_2(ltebr, i32, i32, i32)
DEF_HELPER_2(ltdbr, i32, i32, i32)
DEF_HELPER_2(ltxbr, i32, i32, i32)
DEF_HELPER_2(lcebr, i32, i32, i32)
DEF_HELPER_2(lcdbr, i32, i32, i32)
DEF_HELPER_2(lcxbr, i32, i32, i32)
DEF_HELPER_2(aeb, void, i32, i32)
DEF_HELPER_2(deb, void, i32, i32)
DEF_HELPER_2(meeb, void, i32, i32)
DEF_HELPER_2(cdb, i32, i32, i64)
DEF_HELPER_2(adb, i32, i32, i64)
DEF_HELPER_2(seb, void, i32, i32)
DEF_HELPER_2(sdb, i32, i32, i64)
DEF_HELPER_2(mdb, void, i32, i64)
DEF_HELPER_2(ddb, void, i32, i64)
DEF_HELPER_FLAGS_2(cebr, TCG_CALL_PURE, i32, i32, i32)
DEF_HELPER_FLAGS_2(cdbr, TCG_CALL_PURE, i32, i32, i32)
DEF_HELPER_FLAGS_2(cxbr, TCG_CALL_PURE, i32, i32, i32)
DEF_HELPER_3(cgebr, i32, i32, i32, i32)
DEF_HELPER_3(cgdbr, i32, i32, i32, i32)
DEF_HELPER_3(cgxbr, i32, i32, i32, i32)
DEF_HELPER_1(lzer, void, i32)
DEF_HELPER_1(lzdr, void, i32)
DEF_HELPER_1(lzxr, void, i32)
DEF_HELPER_3(cfebr, i32, i32, i32, i32)
DEF_HELPER_3(cfdbr, i32, i32, i32, i32)
DEF_HELPER_3(cfxbr, i32, i32, i32, i32)
DEF_HELPER_2(axbr, i32, i32, i32)
DEF_HELPER_2(sxbr, i32, i32, i32)
DEF_HELPER_2(meebr, void, i32, i32)
DEF_HELPER_2(ddbr, void, i32, i32)
DEF_HELPER_3(madb, void, i32, i64, i32)
DEF_HELPER_3(maebr, void, i32, i32, i32)
DEF_HELPER_3(madbr, void, i32, i32, i32)
DEF_HELPER_3(msdbr, void, i32, i32, i32)
DEF_HELPER_2(ldeb, void, i32, i64)
DEF_HELPER_2(lxdb, void, i32, i64)
DEF_HELPER_FLAGS_2(tceb, TCG_CALL_PURE, i32, i32, i64)
DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_PURE, i32, i32, i64)
DEF_HELPER_FLAGS_2(tcxb, TCG_CALL_PURE, i32, i32, i64)
DEF_HELPER_2(flogr, i32, i32, i64)
DEF_HELPER_2(sqdbr, void, i32, i32)
/* Decimal conversion, translate, and miscellaneous helpers. */
DEF_HELPER_FLAGS_1(cvd, TCG_CALL_PURE|TCG_CALL_CONST, i64, s32)
DEF_HELPER_3(unpk, void, i32, i64, i64)
DEF_HELPER_3(tr, void, i32, i64, i64)
DEF_HELPER_2(servc, i32, i32, i64)
DEF_HELPER_3(diag, i64, i32, i64, i64)
/* Privileged / system-level helpers (PSW, timers, control regs, SIGP, TLB). */
DEF_HELPER_2(load_psw, void, i64, i64)
DEF_HELPER_1(program_interrupt, void, i32)
DEF_HELPER_FLAGS_1(stidp, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(spx, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(sck, TCG_CALL_CONST, i32, i64)
DEF_HELPER_1(stck, i32, i64)
DEF_HELPER_1(stcke, i32, i64)
DEF_HELPER_FLAGS_1(sckc, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(stckc, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(spt, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(stpt, TCG_CALL_CONST, void, i64)
DEF_HELPER_3(stsi, i32, i64, i32, i32)
DEF_HELPER_3(lctl, void, i32, i64, i32)
DEF_HELPER_3(lctlg, void, i32, i64, i32)
DEF_HELPER_3(stctl, void, i32, i64, i32)
DEF_HELPER_3(stctg, void, i32, i64, i32)
DEF_HELPER_FLAGS_2(tprot, TCG_CALL_CONST, i32, i64, i64)
DEF_HELPER_FLAGS_1(iske, TCG_CALL_PURE|TCG_CALL_CONST, i64, i64)
DEF_HELPER_FLAGS_2(sske, TCG_CALL_CONST, void, i32, i64)
DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_CONST, i32, i32, i64)
DEF_HELPER_2(csp, i32, i32, i32)
DEF_HELPER_3(mvcs, i32, i64, i64, i64)
DEF_HELPER_3(mvcp, i32, i64, i64, i64)
DEF_HELPER_3(sigp, i32, i64, i32, i64)
DEF_HELPER_1(sacf, void, i64)
DEF_HELPER_FLAGS_2(ipte, TCG_CALL_CONST, void, i64, i64)
DEF_HELPER_FLAGS_0(ptlb, TCG_CALL_CONST, void)
DEF_HELPER_2(lra, i32, i64, i32)
DEF_HELPER_2(stura, void, i64, i32)
DEF_HELPER_2(cksm, void, i32, i32)
DEF_HELPER_FLAGS_4(calc_cc, TCG_CALL_PURE|TCG_CALL_CONST,
                   i32, i32, i64, i64, i64)
/* Re-include to clean up the DEF_HELPER macro definitions (QEMU convention). */
#include "def-helper.h"
|
superxuzj/outteamManage | src/main/java/com/boliangshenghe/outteam/service/ResponseService.java | package com.boliangshenghe.outteam.service;
import java.util.Date;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.boliangshenghe.outteam.common.PageBean;
import com.boliangshenghe.outteam.entity.Company;
import com.boliangshenghe.outteam.entity.Hbplan;
import com.boliangshenghe.outteam.entity.HbplanDetail;
import com.boliangshenghe.outteam.entity.Response;
import com.boliangshenghe.outteam.entity.ResponseCompany;
import com.boliangshenghe.outteam.repository.CompanyMapper;
import com.boliangshenghe.outteam.repository.ResponseCompanyMapper;
import com.boliangshenghe.outteam.repository.ResponseMapper;
import com.boliangshenghe.outteam.util.CommonUtils;
import com.github.pagehelper.PageHelper;
/**
 * Service layer for {@link Response} records and their per-company links
 * ({@link ResponseCompany}).
 *
 * @author Administrator
 */
@Service
public class ResponseService {
    @Autowired
    ResponseMapper responseMapper;
    @Autowired
    ResponseCompanyMapper responseCompanyMapper;
    @Autowired
    CompanyMapper companyMapper;

    /** Inserts a response, writing only its non-null columns. Returns affected row count. */
    public int insertSelective(Response response) {
        return responseMapper.insertSelective(response);
    }

    /** Loads a response by primary key, or null if absent. */
    public Response selectByPrimaryKey(Integer id) {
        return responseMapper.selectByPrimaryKey(id);
    }

    /** Updates only the non-null fields of the given record. Returns affected row count. */
    public int updateByPrimaryKeySelective(Response record) {
        return responseMapper.updateByPrimaryKeySelective(record);
    }

    /** Deletes a response by primary key. Returns affected row count. */
    public int deleteByPrimaryKey(Integer id) {
        return responseMapper.deleteByPrimaryKey(id);
    }

    /** Lists responses matching the populated fields of the example record. */
    public List<Response> selectResponseList(Response record) {
        return responseMapper.selectResponseList(record);
    }

    /** Returns one page of responses matching the example record. */
    public PageBean<Response> getResponseByPage(Response record, Integer pageNo) {
        PageHelper.startPage(pageNo, CommonUtils.PAGESIZE);
        List<Response> list = this.responseMapper.selectResponseList(record);
        return new PageBean<Response>(list);
    }

    /**
     * Saves a response (insert when it has no id, update otherwise) and rebuilds its
     * response-company links from the comma-separated company-id list in
     * {@code resp.getCids()}. When no company ids are supplied, existing links are
     * left untouched (matching the original behavior for an empty list).
     */
    public void addDetail(Response resp) {
        if (null == resp.getId()) {
            responseMapper.insertSelective(resp);
        } else {
            responseMapper.updateByPrimaryKeySelective(resp);
        }
        String cids = resp.getCids();
        // Guard: the previous code dereferenced cids unconditionally and threw a
        // NullPointerException when no id list was provided.
        if (cids == null || cids.isEmpty()) {
            return;
        }
        String[] cidsArr = cids.split(",");
        // A degenerate list (e.g. just ",") yields no usable ids; also guards the
        // cidsArr[0] access that could previously throw ArrayIndexOutOfBoundsException.
        if (cidsArr.length == 0 || cidsArr[0].equals("")) {
            return;
        }
        ResponseCompany responseCompany = new ResponseCompany();
        responseCompany.setRid(resp.getId());
        // If this is an edit, remove the previously configured links first.
        responseCompanyMapper.deleteByResponseCompany(responseCompany);
        for (String cidtemp : cidsArr) {
            int cid = Integer.parseInt(cidtemp); // parse once; was parsed twice before
            ResponseCompany temp = new ResponseCompany();
            temp.setCid(cid);
            // Denormalize the company's province onto the link row.
            Company company = companyMapper.selectByPrimaryKey(cid);
            temp.setCompany(company.getProvince());
            temp.setRid(resp.getId());
            temp.setRname(resp.getName());
            temp.setState("1");
            responseCompanyMapper.insertSelective(temp);
        }
    }
}
|
nyla-solutions/nyla-formInjection | nyla.solutions.formInjection/src/main/java/nyla/solutions/formInjection/bre/OperationBluePrint.java | <filename>nyla.solutions.formInjection/src/main/java/nyla/solutions/formInjection/bre/OperationBluePrint.java
package nyla.solutions.formInjection.bre;
import java.io.Serializable;
import java.util.Collection;
import nyla.solutions.global.data.AbstractAudit;
import nyla.solutions.global.data.Criteria;
import nyla.solutions.global.exception.SetupException;
import nyla.solutions.global.patterns.servicefactory.ServiceFactory;
/**
 * <pre>
 * OperationBluePrint is a value object representation of the
 * OPERATION_TBL table and associated entities.
 * </pre>
 *
 * <p>A blueprint lazily materializes an {@link Operation}: either through the
 * {@link ServiceFactory} (looked up by blueprint name) or by reflection when an
 * explicit class name is configured, then initializes it with any configured
 * parameters.
 *
 * @author <NAME>
 * @version 1.0
 */
public class OperationBluePrint extends AbstractAudit
implements Serializable
{
   /**
    * Constructor for OperationBluePrint initializes internal data settings.
    */
   public OperationBluePrint()
   {
      super();
   }//--------------------------------------------
   /**
    * Constructor for OperationBluePrint initializes internal data settings.
    * @param aPK the primary key
    * @throws IllegalArgumentException if the key is invalid
    */
   public OperationBluePrint(int aPK) throws IllegalArgumentException
   {
      super(aPK);
   }//--------------------------------------------
   /**
    * Constructor for OperationBluePrint initializes internal data settings.
    * @param aPK the operation primary key criteria
    * @throws IllegalArgumentException if the key is invalid
    */
   public OperationBluePrint(Criteria aPK) throws IllegalArgumentException
   {
      super(aPK);
   }//--------------------------------------------
   /**
    * Constructor for OperationBluePrint initializes internal data settings.
    * @param aPK the primary key text
    * @throws IllegalArgumentException if the key is invalid
    */
   public OperationBluePrint(String aPK) throws IllegalArgumentException
   {
      super(aPK);
   }//--------------------------------------------
   /**
    * @return Returns the className (never null; empty when unset).
    */
   public String getClassName()
   {
      return className;
   }//--------------------------------------------
   /**
    * @param className The className to set; null is normalized to "".
    */
   public void setClassName(String className)
   {
      if (className == null)
         className = "";

      this.className = className;
   }//--------------------------------------------
   /**
    * Materializes the operation described by this blueprint.
    *
    * <p>When no class name is configured the operation is created through the
    * service factory using the blueprint name; otherwise the class is loaded
    * reflectively and instantiated via its no-arg constructor. Configured
    * parameters (if any) are then applied via {@link Operation#initialize}.
    *
    * @return the initialized operation
    * @throws SetupException wrapping any creation/initialization failure
    * @throws IllegalArgumentException when the blueprint name is missing
    */
   public Operation getOperation()
   {
      try
      {
         if (this.name == null || this.name.length() == 0)
            // BUG FIX: message previously read "Name set for operation ..."
            throw new IllegalArgumentException("Name not set for operation PK="+getPrimaryKey()+" name="+name);

         Operation operation = null;

         ServiceFactory serviceFactory = ServiceFactory.getInstance();

         // BUG FIX: setClassName normalizes null to "", so the old
         // `className == null` test could fall through to Class.forName("").
         // Treat an empty class name the same as an absent one.
         if (className == null || className.length() == 0)
         {
            operation = (Operation)serviceFactory.create(this.getName());
         }
         else
         {
            // Class.newInstance() is deprecated; the declared no-arg
            // constructor gives the same observable behavior here.
            operation = (Operation)Class.forName(className)
               .getDeclaredConstructor().newInstance();
         }

         if (this.operationParameters != null && !this.operationParameters.isEmpty())
         {
            // NOTE(review): PK/name are only propagated when parameters exist,
            // preserved as-is from the original — confirm this asymmetry is intended.
            operation.setPrimaryKey(this.getPrimaryKey());
            operation.setName(this.getName());

            // initialize operation inputs
            operation.initialize(getOperationParameterArray());
         }

         return operation;
      }
      catch (Exception e)
      {
         throw new SetupException(e);
      }
   }//--------------------------------------------
   /**
    * @return Returns the name.
    */
   public String getName()
   {
      return name;
   }//--------------------------------------------
   /**
    * @param name The name to set; null is normalized to "".
    */
   public void setName(String name)
   {
      if (name == null)
         name = "";

      this.name = name;
   }//--------------------------------------------
   /**
    * Copies the configured parameters into a typed array.
    * Collection.toArray(T[]) performs the same typed copy (including the
    * ArrayStoreException on mistyped elements) as the previous manual
    * System.arraycopy.
    */
   private OperationParameter[] getOperationParameterArray()
   {
      return this.operationParameters.toArray(new OperationParameter[0]);
   }//--------------------------------------------

   // Parameters used to initialize the operation; may be null or empty.
   private Collection<Object> operationParameters = null;
   // Blueprint name, used as the service-factory lookup key.
   private String name = null;
   // Optional fully-qualified class name for reflective instantiation.
   private String className = null;

   static final long serialVersionUID = OperationBluePrint.class.getName()
   .hashCode();
}
|
Half-Shot/sync-v3 | state/event_table_test.go | <gh_stars>0
package state
import (
"bytes"
"testing"
"github.com/jmoiron/sqlx"
"github.com/matrix-org/sync-v3/sqlutil"
)
// TestEventTable exercises the main EventTable surface against a live
// postgres: insert (including idempotent re-insert), the SelectBy* accessors,
// snapshot bookkeeping, and the stripped-event projections.
func TestEventTable(t *testing.T) {
	db, err := sqlx.Open("postgres", postgresConnectionString)
	if err != nil {
		t.Fatalf("failed to open SQL db: %s", err)
	}
	txn, err := db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	roomID := "!0:localhost"
	table := NewEventTable(db)
	events := []Event{
		{
			ID: "100",
			JSON: []byte(`{"event_id":"100", "foo":"bar", "type": "T1", "state_key":"S1", "room_id":"` + roomID + `"}`),
		},
		{
			ID: "101",
			JSON: []byte(`{"event_id":"101", "foo":"bar", "type": "T2", "state_key":"S2", "room_id":"` + roomID + `"}`),
		},
		{
			// ID is optional, it will pull event_id out if it's missing
			JSON: []byte(`{"event_id":"102", "foo":"bar", "type": "T3", "state_key":"", "room_id":"` + roomID + `"}`),
		},
	}
	numNew, err := table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != len(events) {
		t.Fatalf("wanted %d new events, got %d", len(events), numNew)
	}
	// NOTE(review): Commit errors are ignored throughout this test; acceptable
	// here since a failed commit surfaces as missing rows in later assertions.
	txn.Commit()
	txn, err = db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	defer txn.Rollback()
	// duplicate insert is ok
	numNew, err = table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != 0 {
		t.Fatalf("wanted 0 new events, got %d", numNew)
	}
	// pulling non-existent ids returns no error but a zero slice
	events, err = table.SelectByIDs(txn, false, []string{"101010101010"})
	if err != nil {
		t.Fatalf("SelectByIDs failed: %s", err)
	}
	if len(events) != 0 {
		t.Fatalf("SelectByIDs returned %d events, wanted none", len(events))
	}
	// pulling events by event_id is ok
	events, err = table.SelectByIDs(txn, true, []string{"100", "101", "102"})
	if err != nil {
		t.Fatalf("SelectByIDs failed: %s", err)
	}
	if len(events) != 3 {
		t.Fatalf("SelectByIDs returned %d events, want 3", len(events))
	}
	// pulling nids by event_id is ok
	gotnids, err := table.SelectNIDsByIDs(txn, []string{"100", "101", "102"})
	if err != nil {
		t.Fatalf("SelectNIDsByIDs failed: %s", err)
	}
	if len(gotnids) != 3 {
		t.Fatalf("SelectNIDsByIDs returned %d events, want 3", len(gotnids))
	}
	// set a snapshot ID on them
	var firstSnapshotID int64 = 55
	for _, nid := range gotnids {
		if err = table.UpdateBeforeSnapshotID(txn, nid, firstSnapshotID, 0); err != nil {
			t.Fatalf("UpdateSnapshotID: %s", err)
		}
	}
	// query the snapshot
	lastEventNID, _, snapID, err := table.BeforeStateSnapshotIDForEventNID(txn, roomID, gotnids[1])
	if err != nil {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: %s", err)
	}
	if snapID != firstSnapshotID {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: got snap ID %d want %d", snapID, firstSnapshotID)
	}
	// the queried position
	if lastEventNID != gotnids[1] {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: didn't return last inserted event, got %d want %d", lastEventNID, gotnids[1])
	}
	// try again with a much higher pos
	lastEventNID, _, snapID, err = table.BeforeStateSnapshotIDForEventNID(txn, roomID, 999999)
	if err != nil {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: %s", err)
	}
	if snapID != firstSnapshotID {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: got snap ID %d want %d", snapID, firstSnapshotID)
	}
	if lastEventNID != gotnids[2] {
		t.Fatalf("BeforeStateSnapshotIDForEventNID: didn't return last inserted event, got %d want %d", lastEventNID, gotnids[2])
	}
	// find max and query it
	var wantHighestNID int64
	for _, nid := range gotnids {
		if nid > wantHighestNID {
			wantHighestNID = nid
		}
	}
	gotHighestNID, err := table.SelectHighestNID()
	if err != nil {
		t.Fatalf("SelectHighestNID returned error: %s", err)
	}
	if wantHighestNID != gotHighestNID {
		t.Errorf("SelectHighestNID didn't select highest, got %d want %d", gotHighestNID, wantHighestNID)
	}
	latest, err := table.SelectLatestEventInRoom(txn, roomID, gotHighestNID)
	if err != nil {
		t.Errorf("SelectLatestEventInRoom returned error: %s", err)
	}
	if latest.ID != "102" {
		t.Errorf("SelectLatestEventInRoom returned unexpect latest event: %+v", latest)
	}
	// collect the NIDs of the three events fetched above
	var nids []int64
	for _, ev := range events {
		nids = append(nids, int64(ev.NID))
	}
	// pulling events by event nid is ok
	events, err = table.SelectByNIDs(txn, true, nids)
	if err != nil {
		t.Fatalf("SelectByNIDs failed: %s", err)
	}
	if len(events) != 3 {
		t.Fatalf("SelectByNIDs returned %d events, want 3", len(events))
	}
	// pulling non-existent nids returns no error but a zero slice if verifyAll=false
	events, err = table.SelectByNIDs(txn, false, []int64{9999999})
	if err != nil {
		t.Fatalf("SelectByNIDs failed: %s", err)
	}
	if len(events) != 0 {
		t.Fatalf("SelectByNIDs returned %d events, wanted none", len(events))
	}
	// pulling non-existent nids returns error if verifyAll=true
	events, err = table.SelectByNIDs(txn, true, []int64{9999999})
	if err == nil {
		t.Fatalf("SelectByNIDs did not fail, wanted to")
	}
	// pulling stripped events by NID is ok
	strippedEvents, err := table.SelectStrippedEventsByNIDs(txn, true, nids)
	if err != nil {
		t.Fatalf("SelectStrippedEventsByNIDs failed: %s", err)
	}
	if len(strippedEvents) != 3 {
		t.Fatalf("SelectStrippedEventsByNIDs returned %d events, want 3", len(strippedEvents))
	}
	// verifyStripped checks type/state_key/nid ordering against the fixtures.
	verifyStripped := func(stripped []Event) {
		wantTypes := []string{"T1", "T2", "T3"}
		wantKeys := []string{"S1", "S2", ""}
		for i := range stripped {
			if stripped[i].Type != wantTypes[i] {
				t.Errorf("Stripped event %d type mismatch: got %s want %s", i, stripped[i].Type, wantTypes[i])
			}
			if stripped[i].StateKey != wantKeys[i] {
				t.Errorf("Stripped event %d state_key mismatch: got %s want %s", i, stripped[i].StateKey, wantKeys[i])
			}
			if stripped[i].NID != nids[i] {
				t.Errorf("Stripped event %d nid mismatch: got %d want %d", i, stripped[i].NID, nids[i])
			}
		}
	}
	verifyStripped(strippedEvents)
	// pulling stripped events by ID is ok
	strippedEvents, err = table.SelectStrippedEventsByIDs(txn, true, []string{"100", "101", "102"})
	if err != nil {
		t.Fatalf("SelectStrippedEventsByIDs failed: %s", err)
	}
	if len(strippedEvents) != 3 {
		t.Fatalf("SelectStrippedEventsByIDs returned %d events, want 3", len(strippedEvents))
	}
	verifyStripped(strippedEvents)
}
// TestEventTableNullValue checks null-byte handling on insert: a real \u0000
// inside a JSON string value must be culled (postgres TEXT/JSONB cannot store
// NUL), while an escaped literal backslash-u0000 must survive byte-for-byte.
func TestEventTableNullValue(t *testing.T) {
	db, err := sqlx.Open("postgres", postgresConnectionString)
	if err != nil {
		t.Fatalf("failed to open SQL db: %s", err)
	}
	txn, err := db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	defer txn.Rollback()
	roomID := "!0:localhost"
	table := NewEventTable(db)
	// `state_key` has a null byte value, but `type` has an escaped literal "\u0000". Ensure the former is culled but the latter is not.
	originalJSON := []byte(`{"event_id":"nullevent", "state_key":"foo", "null":"\u0000", "type": "\\u0000", "room_id":"` + roomID + `"}`)
	events := []Event{
		{
			ID: "nullevent",
			JSON: originalJSON,
		},
	}
	numNew, err := table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != len(events) {
		t.Fatalf("wanted %d new events, got %d", len(events), numNew)
	}
	gotEvents, err := table.SelectByIDs(txn, true, []string{"nullevent"})
	if err != nil {
		t.Fatalf("SelectByIDs: %s", err)
	}
	if len(gotEvents) != 1 {
		t.Fatalf("SelectByIDs: got %d events want 1", len(gotEvents))
	}
	if gotEvents[0].Type != `\u0000` {
		t.Fatalf(`Escaped null byte didn't survive storage, got %s want \u0000`, gotEvents[0].Type)
	}
	// The stored JSON must round-trip unchanged.
	if !bytes.Equal(gotEvents[0].JSON, originalJSON) {
		t.Fatalf("event JSON was modified, \ngot %v \nwant %v", string(gotEvents[0].JSON), string(originalJSON))
	}
}
// TestEventTableDupeInsert verifies that re-inserting an already-stored event
// (in a later transaction) reports zero new rows and keeps the original NID
// stable.
func TestEventTableDupeInsert(t *testing.T) {
	db, err := sqlx.Open("postgres", postgresConnectionString)
	if err != nil {
		t.Fatalf("failed to open SQL db: %s", err)
	}
	// first insert
	txn, err := db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	roomID := "!TestEventTableDupeInsert:localhost"
	table := NewEventTable(db)
	originalJSON := []byte(`{"event_id":"dupeevent", "state_key":"foo", "type":"bar", "room_id":"` + roomID + `"}`)
	events := []Event{
		{
			JSON: originalJSON,
			RoomID: roomID,
		},
	}
	numNew, err := table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != len(events) {
		t.Fatalf("wanted %d new events, got %d", len(events), numNew)
	}
	// pull out the nid
	nids, err := table.SelectNIDsByIDs(txn, []string{"dupeevent"})
	if err != nil {
		t.Fatalf("SelectNIDsByIDs: %s", err)
	}
	nid := nids[0]
	txn.Commit()
	// now insert again
	txn, err = db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	events = []Event{
		{
			JSON: originalJSON,
			RoomID: roomID,
		},
	}
	numNew, err = table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != 0 {
		t.Fatalf("wanted 0 new events, got %d", numNew)
	}
	// pull out the nid
	nids, err = table.SelectNIDsByIDs(txn, []string{"dupeevent"})
	if err != nil {
		t.Fatalf("SelectNIDsByIDs: %s", err)
	}
	nid2 := nids[0]
	txn.Commit()
	// the duplicate insert must not have re-allocated a NID
	if nid != nid2 {
		t.Fatalf("nid mismatch, got %v want %v", nid2, nid)
	}
}
// TestEventTableSelectEventsBetween inserts five events (one in a different
// room) and then checks SelectEventsBetween's bound handling (known lower,
// known lower+upper, open-ended) and its limit parameter. Subtests run in
// parallel, each with its own read transaction.
func TestEventTableSelectEventsBetween(t *testing.T) {
	db, err := sqlx.Open("postgres", postgresConnectionString)
	if err != nil {
		t.Fatalf("failed to open SQL db: %s", err)
	}
	txn, err := db.Beginx()
	if err != nil {
		t.Fatalf("failed to start txn: %s", err)
	}
	table := NewEventTable(db)
	searchRoomID := "!0TestEventTableSelectEventsBetween:localhost"
	eventIDs := []string{
		"100TestEventTableSelectEventsBetween",
		"101TestEventTableSelectEventsBetween",
		"102TestEventTableSelectEventsBetween",
		"103TestEventTableSelectEventsBetween",
		"104TestEventTableSelectEventsBetween",
	}
	events := []Event{
		{
			JSON: []byte(`{"event_id":"` + eventIDs[0] + `","type": "T1", "state_key":"S1", "room_id":"` + searchRoomID + `"}`),
		},
		{
			JSON: []byte(`{"event_id":"` + eventIDs[1] + `","type": "T2", "state_key":"S2", "room_id":"` + searchRoomID + `"}`),
		},
		{
			JSON: []byte(`{"event_id":"` + eventIDs[2] + `","type": "T3", "state_key":"", "room_id":"` + searchRoomID + `"}`),
		},
		{
			// different room
			JSON: []byte(`{"event_id":"` + eventIDs[3] + `","type": "T4", "state_key":"", "room_id":"!1TestEventTableSelectEventsBetween:localhost"}`),
		},
		{
			JSON: []byte(`{"event_id":"` + eventIDs[4] + `","type": "T5", "state_key":"", "room_id":"` + searchRoomID + `"}`),
		},
	}
	numNew, err := table.Insert(txn, events)
	if err != nil {
		t.Fatalf("Insert failed: %s", err)
	}
	if numNew != len(events) {
		t.Fatalf("failed to insert events: got %d want %d", numNew, len(events))
	}
	// Commit so the parallel subtests' transactions can see the rows.
	txn.Commit()
	t.Run("selecting multiple events known lower bound", func(t *testing.T) {
		t.Parallel()
		txn2, err := db.Beginx()
		if err != nil {
			t.Fatalf("failed to start txn: %s", err)
		}
		defer txn2.Rollback()
		events, err := table.SelectByIDs(txn2, true, []string{eventIDs[0]})
		if err != nil || len(events) == 0 {
			t.Fatalf("failed to extract event for lower bound: %s", err)
		}
		events, err = table.SelectEventsBetween(txn2, searchRoomID, int64(events[0].NID), EventsEnd, 1000)
		if err != nil {
			t.Fatalf("failed to SelectEventsBetween: %s", err)
		}
		// 3 as 1 is from a different room
		if len(events) != 3 {
			t.Fatalf("wanted 3 events, got %d", len(events))
		}
	})
	t.Run("selecting multiple events known lower and upper bound", func(t *testing.T) {
		t.Parallel()
		txn3, err := db.Beginx()
		if err != nil {
			t.Fatalf("failed to start txn: %s", err)
		}
		defer txn3.Rollback()
		events, err := table.SelectByIDs(txn3, true, []string{eventIDs[0], eventIDs[2]})
		if err != nil || len(events) == 0 {
			t.Fatalf("failed to extract event for lower/upper bound: %s", err)
		}
		events, err = table.SelectEventsBetween(txn3, searchRoomID, int64(events[0].NID), int64(events[1].NID), 1000)
		if err != nil {
			t.Fatalf("failed to SelectEventsBetween: %s", err)
		}
		// eventIDs[1] and eventIDs[2]
		if len(events) != 2 {
			t.Fatalf("wanted 2 events, got %d", len(events))
		}
	})
	t.Run("selecting multiple events unknown bounds (all events)", func(t *testing.T) {
		t.Parallel()
		txn4, err := db.Beginx()
		if err != nil {
			t.Fatalf("failed to start txn: %s", err)
		}
		defer txn4.Rollback()
		gotEvents, err := table.SelectEventsBetween(txn4, searchRoomID, EventsStart, EventsEnd, 1000)
		if err != nil {
			t.Fatalf("failed to SelectEventsBetween: %s", err)
		}
		// one less as one event is for a different room
		if len(gotEvents) != (len(events) - 1) {
			t.Fatalf("wanted %d events, got %d", len(events)-1, len(gotEvents))
		}
	})
	t.Run("selecting multiple events hitting the limit", func(t *testing.T) {
		t.Parallel()
		txn5, err := db.Beginx()
		if err != nil {
			t.Fatalf("failed to start txn: %s", err)
		}
		defer txn5.Rollback()
		limit := 2
		gotEvents, err := table.SelectEventsBetween(txn5, searchRoomID, EventsStart, EventsEnd, limit)
		if err != nil {
			t.Fatalf("failed to SelectEventsBetween: %s", err)
		}
		if len(gotEvents) != limit {
			t.Fatalf("wanted %d events, got %d", limit, len(gotEvents))
		}
	})
}
// TestChunkify verifies sqlutil.Chunkify splits 100 dummy events into the
// expected chunk sizes for various per-statement/per-call parameter budgets,
// and that chunk contents stay in the original order.
func TestChunkify(t *testing.T) {
	// Make 100 dummy events
	events := make([]Event, 100)
	for i := 0; i < len(events); i++ {
		events[i] = Event{
			NID: int64(i),
		}
	}
	eventChunker := EventChunker(events)
	testCases := []struct {
		name string
		numParamsPerStmt int
		maxParamsPerCall int
		chunkSizes []int // length = number of chunks wanted, ints = events in that chunk
	}{
		{
			name: "below chunk limit returns 1 chunk",
			numParamsPerStmt: 3,
			maxParamsPerCall: 400,
			chunkSizes: []int{100},
		},
		{
			name: "just above chunk limit returns 2 chunks",
			numParamsPerStmt: 3,
			maxParamsPerCall: 297,
			chunkSizes: []int{99, 1},
		},
		{
			name: "way above chunk limit returns many chunks",
			numParamsPerStmt: 3,
			maxParamsPerCall: 30,
			chunkSizes: []int{10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
		},
		// NOTE(review): the next two cases share the same name, so failures are
		// ambiguous in test output — consider distinct names (e.g. "...298"/"...299").
		{
			name: "fractional division rounds down",
			numParamsPerStmt: 3,
			maxParamsPerCall: 298,
			chunkSizes: []int{99, 1},
		},
		{
			name: "fractional division rounds down",
			numParamsPerStmt: 3,
			maxParamsPerCall: 299,
			chunkSizes: []int{99, 1},
		},
	}
	for _, tc := range testCases {
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			chunks := sqlutil.Chunkify(testCase.numParamsPerStmt, testCase.maxParamsPerCall, eventChunker)
			if len(chunks) != len(testCase.chunkSizes) {
				t.Fatalf("got %d chunks, want %d", len(chunks), len(testCase.chunkSizes))
			}
			// Walk every chunk and confirm NIDs remain sequential across chunks.
			eventNID := int64(0)
			for i := 0; i < len(chunks); i++ {
				if chunks[i].Len() != testCase.chunkSizes[i] {
					t.Errorf("chunk %d got %d elements, want %d", i, chunks[i].Len(), testCase.chunkSizes[i])
				}
				eventChunk := chunks[i].(EventChunker)
				for j, ev := range eventChunk {
					if ev.NID != eventNID {
						t.Errorf("chunk %d got wrong event in position %d: got NID %d want NID %d", i, j, ev.NID, eventNID)
					}
					eventNID += 1
				}
			}
		})
	}
}
|
bugduino/alpha-wallet-android | app/src/main/java/com/alphawallet/app/viewmodel/AdvancedSettingsViewModelFactory.java | <gh_stars>1-10
package com.alphawallet.app.viewmodel;
import androidx.lifecycle.ViewModel;
import androidx.lifecycle.ViewModelProvider;
import androidx.annotation.NonNull;
import com.alphawallet.app.repository.CurrencyRepositoryType;
import com.alphawallet.app.repository.LocaleRepositoryType;
import com.alphawallet.app.service.AssetDefinitionService;
/**
 * {@link ViewModelProvider.Factory} that builds {@link AdvancedSettingsViewModel}
 * instances with their repository/service dependencies injected.
 */
public class AdvancedSettingsViewModelFactory implements ViewModelProvider.Factory {
    private final LocaleRepositoryType localeRepository;
    private final CurrencyRepositoryType currencyRepository;
    private final AssetDefinitionService assetDefinitionService;

    public AdvancedSettingsViewModelFactory(
            LocaleRepositoryType localeRepository,
            CurrencyRepositoryType currencyRepository,
            AssetDefinitionService assetDefinitionService) {
        this.localeRepository = localeRepository;
        this.currencyRepository = currencyRepository;
        this.assetDefinitionService = assetDefinitionService;
    }

    @NonNull
    @Override
    @SuppressWarnings("unchecked") // cast is guarded by the isAssignableFrom check below
    public <T extends ViewModel> T create(@NonNull Class<T> modelClass) {
        // Fail fast if this factory is asked for a ViewModel type it cannot build,
        // instead of heap-polluting and failing later with a ClassCastException.
        if (!modelClass.isAssignableFrom(AdvancedSettingsViewModel.class)) {
            throw new IllegalArgumentException("Unknown ViewModel class: " + modelClass.getName());
        }
        return (T) new AdvancedSettingsViewModel(
                localeRepository,
                currencyRepository,
                assetDefinitionService
        );
    }
}
|
majunbao/xue | react-flux/demo11/src/views/appLayout/Top.js | import {h, cloneElement} from 'preact';
import HeaderPage from '../HeaderPage';
// Full-bleed overlay coordinates with a dark background.
// NOTE(review): top/right/bottom/left only take effect on a positioned element —
// presumably the `.uk-view` class sets `position`; confirm in the stylesheet.
const style = {
  top: 0,
  right: 0,
  bottom: 0,
  left: 0,
  background: '#242424'
}
function Top(props) {
return (
<div className="uk-view" style={{...style, height: props.layout.top}}>
<HeaderPage {...props} />
</div>
)
}
export default Top; |
trngaje/mame-2003-plus-kaze | src/drivers/beathead.c | <gh_stars>100-1000
/***************************************************************************
Atari "Stella on Steroids" hardware
driver by <NAME>
Games supported:
* BeatHead
Known bugs:
* none known
****************************************************************************
Memory map
===================================================================================================
MAIN CPU
===================================================================================================
00000000-0001FFFFF R/W xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx Main RAM
01800000-01BFFFFFF R xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx Main ROM
40000000-4000007FF R/W -------- -------- -------- xxxxxxxx EEPROM
41000000 R -------- -------- -------- xxxxxxxx Data from sound board
41000000 W -------- -------- -------- xxxxxxxx Data to sound board
41000100 R -------- -------- -------- -----xxx Interrupt enables
-------- -------- -------- -----x-- (scanline int enable)
-------- -------- -------- ------x- (unknown int enable)
-------- -------- -------- -------x (unknown int enable)
41000100 W -------- -------- -------- -------- Interrupt acknowledge
41000104 W -------- -------- -------- -------- Unknown int disable
41000108 W -------- -------- -------- -------- Unknown int disable
4100010c W -------- -------- -------- -------- Scanline int disable
41000114 W -------- -------- -------- -------- Unknown int enable
41000118 W -------- -------- -------- -------- Unknown int enable
4100011c W -------- -------- -------- -------- Scanline int enable
41000200 R -------- -------- xxxx--xx xxxx--xx Player 2/3 inputs
R -------- -------- xxxx---- -------- (player 3 joystick UDLR)
R -------- -------- ------x- -------- (player 3 button 1)
R -------- -------- -------x -------- (player 3 button 2)
R -------- -------- -------- xxxx---- (player 2 joystick UDLR)
R -------- -------- -------- ------x- (player 2 button 1)
R -------- -------- -------- -------x (player 2 button 2)
41000204 R -------- -------- xxxx--xx xxxx--xx Player 1/4 inputs
R -------- -------- xxxx---- -------- (player 1 joystick UDLR)
R -------- -------- ------x- -------- (player 1 button 1)
R -------- -------- -------x -------- (player 1 button 2)
R -------- -------- -------- xxxx---- (player 4 joystick UDLR)
R -------- -------- -------- ------x- (player 4 button 1)
R -------- -------- -------- -------x (player 4 button 2)
41000208 W -------- -------- -------- -------- Sound /RESET assert
4100020C W -------- -------- -------- -------- Sound /RESET deassert
41000220 W -------- -------- -------- -------- Coin counter assert
41000224 W -------- -------- -------- -------- Coin counter deassert
41000300 R -------- -------- xxxxxxxx -xxx---- DIP switches/additional inputs
R -------- -------- xxxxxxxx -------- (debug DIP switches)
R -------- -------- -------- -x------ (service switch)
R -------- -------- -------- --x----- (sound output buffer full)
R -------- -------- -------- ---x---- (sound input buffer full)
41000304 R -------- -------- -------- xxxxxxxx Coin/service inputs
R -------- -------- -------- xxxx---- (service inputs: R,RC,LC,L)
R -------- -------- -------- ----xxxx (coin inputs: R,RC,LC,L)
41000400 W -------- -------- -------- -xxxxxxx Palette select
41000500 W -------- -------- -------- -------- EEPROM write enable
41000600 W -------- -------- -------- ----xxxx Finescroll, vertical SYNC flags
W -------- -------- -------- ----x--- (VBLANK)
W -------- -------- -------- -----x-- (VSYNC)
W -------- -------- -------- ------xx (fine scroll value)
41000700 W -------- -------- -------- -------- Watchdog reset
42000000-4201FFFF R/W -------- -------- xxxxxxxx xxxxxxxx Palette RAM
R/W -------- -------- x------- -------- (LSB of all three components)
R/W -------- -------- -xxxxx-- -------- (red component)
R/W -------- -------- ------xx xxx----- (green component)
R/W -------- -------- -------- ---xxxxx (blue component)
43000000 W -------- -------- ----xxxx xxxxxxxx HSYNC RAM address latch
W -------- -------- ----x--- -------- (counter enable)
W -------- -------- -----xxx xxxxxxxx (RAM address)
43000004 R/W -------- -------- -------- xxxxx--- HSYNC RAM data latch
R/W -------- -------- -------- x------- (generate IRQ)
R/W -------- -------- -------- -x------ (VRAM shift enable)
R/W -------- -------- -------- --x----- (HBLANK)
R/W -------- -------- -------- ---x---- (/HSYNC)
R/W -------- -------- -------- ----x--- (release wait for sync)
43000008 W -------- -------- -------- ---x-xx- HSYNC unknown control
8DF80000 R -------- -------- -------- -------- Unknown
8F380000-8F3FFFFF W -------- -------- -------- -------- VRAM latch address
8F900000-8F97FFFF W xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx VRAM transparent write
8F980000-8F9FFFFF R/W xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx VRAM standard read/write
8FB80000-8FBFFFFF W ----xxxx ----xxxx ----xxxx ----xxxx VRAM "bulk" write
W ----xxxx -------- -------- -------- (enable byte lanes for word 3?)
W -------- ----xxxx -------- -------- (enable byte lanes for word 2?)
W -------- -------- ----xxxx -------- (enable byte lanes for word 1?)
W -------- -------- -------- ----xxxx (enable byte lanes for word 0?)
8FFF8000 W xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx VRAM "bulk" data latch
9E280000-9E2FFFFF W -------- -------- -------- -------- VRAM copy destination address latch
===================================================================================================
***************************************************************************/
#include "driver.h"
#include "cpu/asap/asap.h"
#include "machine/atarigen.h"
#include "sndhrdw/atarijsa.h"
#include "vidhrdw/generic.h"
#include "beathead.h"
/*************************************
*
* Statics
*
*************************************/
/* pointers into the main CPU's RAM/ROM regions, filled in by the memory map below */
static data32_t * ram_base;
static data32_t * rom_base;
/* time before the end of a scanline at which the HBLANK-timed callback fires */
static double hblank_offset;
/* last level driven onto the ASAP IRQ0 line (ASSERT_LINE/CLEAR_LINE) */
static UINT8 irq_line_state;
/* per-source interrupt enable latches and pending states; index 2 is the scanline IRQ */
static UINT8 irq_enable[3];
static UINT8 irq_state[3];
/* nonzero after a write to 41000500 arms exactly one EEPROM data write */
static UINT8 eeprom_enabled;
/* total scanlines per frame (NTSC-style 262-line timing) */
#define MAX_SCANLINES 262
/*************************************
*
* Machine init
*
*************************************/
static void update_interrupts(void);

/* Per-scanline timer callback: updates video for the current line, releases
   any CPU halt at the top of frame, raises the scanline IRQ, and re-arms
   itself for the next line (offset into the line by hblank_offset). */
static void scanline_callback(int scanline)
{
	/* update the video */
	beathead_scanline_update(scanline);

	/* on scanline zero, clear any halt condition */
	if (scanline == 0)
		cpu_set_halt_line(0, CLEAR_LINE);

	/* wrap around at 262 */
	scanline++;
	if (scanline >= MAX_SCANLINES)
		scanline = 0;

	/* set the scanline IRQ (source 2) */
	irq_state[2] = 1;
	update_interrupts();

	/* set the timer for the next one */
	timer_set(cpu_getscanlinetime(scanline) - hblank_offset, scanline, scanline_callback);
}
/* Machine reset: resets the common Atari subsystems, maps the reset vector,
   computes the HBLANK timing and kicks off the scanline timer chain. */
static MACHINE_INIT( beathead )
{
	/* reset the common subsystems */
	atarigen_eeprom_reset();
	atarigen_interrupt_reset(update_interrupts);
	atarijsa_reset();

	/* the code is temporarily mapped at 0 at startup */
	/* just copying the first 0x40 bytes is sufficient */
	memcpy(ram_base, rom_base, 0x40);

	/* compute the timing of the HBLANK interrupt and set the first timer */
	/* NOTE: 455 total pixels/line, 336 visible + 25 border -- the remainder is HBLANK */
	hblank_offset = cpu_getscanlineperiod() * ((455. - 336. - 25.) / 455.);
	timer_set(cpu_getscanlinetime(0) - hblank_offset, 0, scanline_callback);

	/* reset IRQs */
	irq_line_state = CLEAR_LINE;
	irq_state[0] = irq_state[1] = irq_state[2] = 0;
	irq_enable[0] = irq_enable[1] = irq_enable[2] = 0;
}
/*************************************
*
* Interrupt handling
*
*************************************/
/* Combine the three (state AND enable) interrupt sources into a single level
   on the ASAP's IRQ0 line, and only touch the CPU when the level changes. */
static void update_interrupts(void)
{
	int gen_int;

	/* compute the combined interrupt signal */
	gen_int  = irq_state[0] & irq_enable[0];
	gen_int |= irq_state[1] & irq_enable[1];
	gen_int |= irq_state[2] & irq_enable[2];
	gen_int  = gen_int ? ASSERT_LINE : CLEAR_LINE;

	/* if it's changed since the last time, call through */
	if (irq_line_state != gen_int)
	{
		irq_line_state = gen_int;
		/* NOTE(review): asserts go through the scheduler (cpu_set_irq_line) but
		   clears poke the ASAP core directly (asap_set_irq_line) -- presumably
		   deliberate so clears take effect immediately; confirm against the
		   MAME ASAP core before changing. */
		if (irq_line_state != CLEAR_LINE)
			cpu_set_irq_line(0, ASAP_IRQ0, irq_line_state);
		else
			asap_set_irq_line(ASAP_IRQ0, irq_line_state);
	}
}
static WRITE32_HANDLER( interrupt_control_w )
{
int irq = offset & 3;
int control = (offset >> 2) & 1;
/* offsets 1-3 seem to be the enable latches for the IRQs */
if (irq != 0)
irq_enable[irq - 1] = control;
/* offset 0 seems to be the interrupt ack */
else
irq_state[0] = irq_state[1] = irq_state[2] = 0;
/* update the current state */
update_interrupts();
}
/* Reads from 41000100: returns the three enable latches packed into bits 0-2. */
static READ32_HANDLER( interrupt_control_r )
{
	/* return the enables as a bitfield */
	return (irq_enable[0]) | (irq_enable[1] << 1) | (irq_enable[2] << 2);
}
/*************************************
*
* EEPROM handling
*
*************************************/
/* EEPROM data write (40000000-400007ff): ignored unless armed by a prior hit
   on the enable latch; each enable permits exactly one write, and the mask is
   forced so only the low 8 bits of the word are affected. */
static WRITE32_HANDLER( eeprom_data_w )
{
	if (eeprom_enabled)
	{
		/* restrict the write to the low byte only */
		mem_mask |= 0xffffff00;
		COMBINE_DATA(&((data32_t *)generic_nvram)[offset]);
		/* one write per enable */
		eeprom_enabled = 0;
	}
}
/* EEPROM enable latch (41000500): arms the next data write above. */
static WRITE32_HANDLER( eeprom_enable_w )
{
	eeprom_enabled = 1;
}
/*************************************
*
* Input handling
*
*************************************/
/* Player 2/3 inputs (41000200). */
static READ32_HANDLER( input_0_r )
{
	return readinputport(0);
}
/* Player 1/4 inputs (41000204). */
static READ32_HANDLER( input_1_r )
{
	return readinputport(1);
}
/* DIP switches / status (41000300): bits 4 and 5 reflect the sound
   communication buffer-full flags, merged in live via XOR. */
static READ32_HANDLER( input_2_r )
{
	int result = readinputport(2);
	if (atarigen_sound_to_cpu_ready) result ^= 0x10;
	if (atarigen_cpu_to_sound_ready) result ^= 0x20;
	return result;
}
/* Coin/service inputs (41000304). */
static READ32_HANDLER( input_3_r )
{
	return readinputport(3);
}
/*************************************
*
* Sound communication
*
*************************************/
/* Read a byte from the sound board (41000000). */
static READ32_HANDLER( sound_data_r )
{
	return atarigen_sound_r(offset,0);
}
/* Write a byte to the sound board (41000000); only LSB-lane accesses count. */
static WRITE32_HANDLER( sound_data_w )
{
	if (ACCESSING_LSB32)
		atarigen_sound_w(offset, data, mem_mask);
}
/* Sound CPU /RESET control: offset 0 (41000208) asserts reset,
   offset 1 (4100020c) releases it. */
static WRITE32_HANDLER( sound_reset_w )
{
	log_cb(RETRO_LOG_DEBUG, LOGPRE "Sound reset = %d\n", !offset);
	cpu_set_reset_line(1, offset ? CLEAR_LINE : ASSERT_LINE);
}
/*************************************
*
* Misc other I/O
*
*************************************/
/* Coin counter control: offset 0 (41000220) asserts, offset 1 (41000224) releases. */
static WRITE32_HANDLER( coin_count_w )
{
	coin_counter_w(0, !offset);
}
/*************************************
*
* Main CPU memory handlers
*
*************************************/
/* Main CPU read map -- see the memory-map table at the top of this file. */
static MEMORY_READ32_START( readmem )
	{ 0x00000000, 0x0001ffff, MRA32_RAM },
	{ 0x01800000, 0x01bfffff, MRA32_ROM },
	{ 0x40000000, 0x400007ff, MRA32_RAM },
	{ 0x41000000, 0x41000003, sound_data_r },
	{ 0x41000100, 0x41000103, interrupt_control_r },
	{ 0x41000200, 0x41000203, input_0_r },
	{ 0x41000204, 0x41000207, input_1_r },
	{ 0x41000300, 0x41000303, input_2_r },
	{ 0x41000304, 0x41000307, input_3_r },
	{ 0x42000000, 0x4201ffff, MRA32_RAM },
	{ 0x43000000, 0x43000007, beathead_hsync_ram_r },
	{ 0x8df80000, 0x8df80003, MRA32_NOP },		/* noisy x4 during scanline int */
	{ 0x8f980000, 0x8f9fffff, MRA32_RAM },
MEMORY_END

/* Main CPU write map -- EEPROM writes are gated by eeprom_enable_w;
   the 8fxxxxxx ranges are the various VRAM access modes. */
static MEMORY_WRITE32_START( writemem )
	{ 0x00000000, 0x0001ffff, MWA32_RAM, &ram_base },
	{ 0x01800000, 0x01bfffff, MWA32_ROM, &rom_base },
	{ 0x40000000, 0x400007ff, eeprom_data_w, (data32_t **)&generic_nvram, &generic_nvram_size },
	{ 0x41000000, 0x41000003, sound_data_w },
	{ 0x41000100, 0x4100011f, interrupt_control_w },
	{ 0x41000208, 0x4100020f, sound_reset_w },
	{ 0x41000220, 0x41000227, coin_count_w },
	{ 0x41000400, 0x41000403, MWA32_RAM, &beathead_palette_select },
	{ 0x41000500, 0x41000503, eeprom_enable_w },
	{ 0x41000600, 0x41000603, beathead_finescroll_w },
	{ 0x41000700, 0x41000703, watchdog_reset32_w },
	{ 0x42000000, 0x4201ffff, beathead_palette_w, &paletteram32 },
	{ 0x43000000, 0x43000007, beathead_hsync_ram_w },
	{ 0x8f380000, 0x8f3fffff, beathead_vram_latch_w },
	{ 0x8f900000, 0x8f97ffff, beathead_vram_transparent_w },
	{ 0x8f980000, 0x8f9fffff, MWA32_RAM, &videoram32 },
	{ 0x8fb80000, 0x8fbfffff, beathead_vram_bulk_w },
	{ 0x8fff8000, 0x8fff8003, MWA32_RAM, &beathead_vram_bulk_latch },
	{ 0x9e280000, 0x9e2fffff, beathead_vram_copy_w },
MEMORY_END
/*************************************
*
* Port definitions
*
*************************************/
INPUT_PORTS_START( beathead )
	/* port 0: player 2 controls (low byte); read via input_0_r at 41000200 */
	PORT_BIT( 0x0001, IP_ACTIVE_LOW, IPT_BUTTON2 | IPF_PLAYER2 )
	/* NOTE(review): bit 0x0002 is declared twice (BUTTON1/P2 and START2) --
	   presumably the button doubles as start on this hardware; confirm. */
	PORT_BIT( 0x0002, IP_ACTIVE_LOW, IPT_BUTTON1 | IPF_PLAYER2 )
	PORT_BIT( 0x0002, IP_ACTIVE_LOW, IPT_START2 )
	PORT_BIT( 0x0004, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0008, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0010, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT | IPF_PLAYER2 )
	PORT_BIT( 0x0020, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT | IPF_PLAYER2 )
	PORT_BIT( 0x0040, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN | IPF_PLAYER2 )
	PORT_BIT( 0x0080, IP_ACTIVE_LOW, IPT_JOYSTICK_UP | IPF_PLAYER2 )
	PORT_BIT( 0xff00, IP_ACTIVE_LOW, IPT_UNUSED )

	/* port 1: player 1 controls (high byte); read via input_1_r at 41000204 */
	PORT_START
	PORT_BIT( 0x00ff, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0100, IP_ACTIVE_LOW, IPT_BUTTON2 | IPF_PLAYER1 )
	PORT_BIT( 0x0200, IP_ACTIVE_LOW, IPT_BUTTON1 | IPF_PLAYER1 )
	PORT_BIT( 0x0200, IP_ACTIVE_LOW, IPT_START1 )
	PORT_BIT( 0x0400, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0800, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x1000, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT | IPF_PLAYER1 )
	PORT_BIT( 0x2000, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT | IPF_PLAYER1 )
	PORT_BIT( 0x4000, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN | IPF_PLAYER1 )
	PORT_BIT( 0x8000, IP_ACTIVE_LOW, IPT_JOYSTICK_UP | IPF_PLAYER1 )

	/* port 2: service switch + sound-buffer status bits (merged by input_2_r) */
	PORT_START
	PORT_BIT( 0x000f, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0010, IP_ACTIVE_LOW, IPT_SPECIAL )
	PORT_BIT( 0x0020, IP_ACTIVE_LOW, IPT_SPECIAL )
	PORT_SERVICE( 0x0040, IP_ACTIVE_LOW )
	PORT_BIT( 0xff80, IP_ACTIVE_LOW, IPT_UNUSED )

	/* port 3: coin inputs */
	PORT_START
	PORT_BIT( 0x0001, IP_ACTIVE_LOW, IPT_COIN1 )
	PORT_BIT( 0x0006, IP_ACTIVE_LOW, IPT_UNUSED )
	PORT_BIT( 0x0008, IP_ACTIVE_LOW, IPT_COIN2 )
	PORT_BIT( 0xfff0, IP_ACTIVE_LOW, IPT_UNUSED )

	JSA_III_PORT	/* audio board port */
INPUT_PORTS_END
/*************************************
*
* Machine driver
*
*************************************/
/* Machine driver: one ASAP main CPU at 14MHz plus the JSA III mono sound board. */
static MACHINE_DRIVER_START( beathead )

	/* basic machine hardware */
	MDRV_CPU_ADD(ASAP, ATARI_CLOCK_14MHz)
	MDRV_CPU_MEMORY(readmem,writemem)

	MDRV_FRAMES_PER_SECOND(60)
	/* VBLANK spans the 22 non-visible lines of the 262-line frame */
	MDRV_VBLANK_DURATION((int)(((262. - 240.) / 262.) * 1000000. / 60.))

	MDRV_MACHINE_INIT(beathead)
	MDRV_NVRAM_HANDLER(generic_1fill)

	/* video hardware */
	MDRV_VIDEO_ATTRIBUTES(VIDEO_TYPE_RASTER | VIDEO_NEEDS_6BITS_PER_GUN | VIDEO_UPDATE_BEFORE_VBLANK)
	MDRV_SCREEN_SIZE(42*8, 30*8)
	MDRV_VISIBLE_AREA(0*8, 42*8-1, 0*8, 30*8-1)
	MDRV_PALETTE_LENGTH(32768)

	MDRV_VIDEO_START(beathead)
	MDRV_VIDEO_UPDATE(beathead)

	/* sound hardware */
	MDRV_IMPORT_FROM(jsa_iii_mono)
MACHINE_DRIVER_END
/*************************************
*
* ROM definition(s)
*
*************************************/
ROM_START( beathead )
	ROM_REGION( 0x20000, REGION_CPU1, 0 )		/* dummy ASAP region */

	ROM_REGION( 0x14000, REGION_CPU2, 0 )		/* 64k + 16k for 6502 code */
	/* first 16k loads at 0x10000, remainder continues at 0x04000 */
	ROM_LOAD( "bhsnd.bin",  0x10000, 0x4000, CRC(dfd33f02) SHA1(479a4838c89691d5a4654a4cd84b6433a9e86109) )
	ROM_CONTINUE(           0x04000, 0xc000 )

	ROM_REGION32_LE( 0x400000, REGION_USER1, ROMREGION_DISPOSE )	/* 4MB for ASAP code */
	ROM_LOAD32_BYTE( "bhprog0.bin", 0x000000, 0x80000, CRC(87975721) SHA1(862cb3a290c829aedea26ee7100c50a12e9517e7) )
	ROM_LOAD32_BYTE( "bhprog1.bin", 0x000001, 0x80000, CRC(25d89743) SHA1(9ff9a41355aa6914efc4a44909026e648a3c40f3) )
	ROM_LOAD32_BYTE( "bhprog2.bin", 0x000002, 0x80000, CRC(87722609) SHA1(dbd766fa57f4528702a98db28ae48fb5d2a7f7df) )
	ROM_LOAD32_BYTE( "bhprog3.bin", 0x000003, 0x80000, CRC(a795d616) SHA1(d3b201be62486f3b12e1b20c4694eeff0b4e3fca) )
	ROM_LOAD32_BYTE( "bhpics0.bin", 0x200000, 0x80000, CRC(926bf65d) SHA1(49f25a2844ca1cd940d17fc56c0d2698e95e0e1d) )
	ROM_LOAD32_BYTE( "bhpics1.bin", 0x200001, 0x80000, CRC(a8f12e41) SHA1(693cb7a2510f34af5442870a6ae4d19445d991f9) )
	ROM_LOAD32_BYTE( "bhpics2.bin", 0x200002, 0x80000, CRC(00b96481) SHA1(39daa46321c1d4f8bce8c25d0450b97f1f19dedb) )
	ROM_LOAD32_BYTE( "bhpics3.bin", 0x200003, 0x80000, CRC(99c4f1db) SHA1(aba4440c5cdf413f970a0c65457e2d1b37caf2d6) )

	ROM_REGION( 0x100000, REGION_SOUND1, 0 )	/* 1MB for ADPCM */
	ROM_LOAD( "bhpcm0.bin",  0x80000, 0x20000, CRC(609ca626) SHA1(9bfc913fc4c3453b132595f8553245376bce3a51) )
	ROM_LOAD( "bhpcm1.bin",  0xa0000, 0x20000, CRC(35511509) SHA1(41294b81e253db5d2f30f8589dd59729a31bb2bb) )
	ROM_LOAD( "bhpcm2.bin",  0xc0000, 0x20000, CRC(f71a840a) SHA1(09d045552704cd1434307f9a36ce03c5c06a8ff6) )
	ROM_LOAD( "bhpcm3.bin",  0xe0000, 0x20000, CRC(fedd4936) SHA1(430ed894fa4bfcd56ee5a8a8ef5e161246530e2d) )
ROM_END
/*************************************
*
* Driver speedups
*
*************************************/
/*
In-game hotspot @ 0180F8D8
*/
/* Idle-loop speedup: when the CPU is detected polling this RAM location from
   its known busy-wait (PC/register values below are specific to this ROM set --
   TODO confirm against the ROMs if they ever change), spin until the next IRQ
   instead of emulating the loop. */
static data32_t *speedup_data;
static READ32_HANDLER( speedup_r )
{
	int result = *speedup_data;
	if ((activecpu_get_previouspc() & 0xfffff) == 0x006f0 && result == activecpu_get_reg(ASAP_R3))
		cpu_spinuntil_int();
	return result;
}

/* Same idea for the attract-mode movie playback loop; the comparison mirrors
   the game's own frame-counting arithmetic (presumably 262 lines/frame). */
static data32_t *movie_speedup_data;
static READ32_HANDLER( movie_speedup_r )
{
	int result = *movie_speedup_data;
	if ((activecpu_get_previouspc() & 0xfffff) == 0x00a88 && (activecpu_get_reg(ASAP_R28) & 0xfffff) == 0x397c0 &&
		movie_speedup_data[4] == activecpu_get_reg(ASAP_R1))
	{
		UINT32 temp = (INT16)result + movie_speedup_data[4] * 262;
		if (temp - (UINT32)activecpu_get_reg(ASAP_R15) < (UINT32)activecpu_get_reg(ASAP_R23))
			cpu_spinuntil_int();
	}
	return result;
}
/*************************************
*
* Driver initialization
*
*************************************/
/* One-time driver init: sets up the common Atari systems, copies the program
   ROM into its mapped region, and installs the idle-loop speedup handlers. */
static DRIVER_INIT( beathead )
{
	/* initialize the common systems */
	atarigen_eeprom_default = NULL;
	atarijsa_init(1, 4, 2, 0x0040);
	atarijsa3_init_adpcm(REGION_SOUND1);
	atarigen_init_6502_speedup(1, 0x4321, 0x4339);

	/* copy the ROM data */
	memcpy(rom_base, memory_region(REGION_USER1), memory_region_length(REGION_USER1));

	/* prepare the speedups */
	speedup_data = install_mem_read32_handler(0, 0x00000ae8, 0x00000aeb, speedup_r);
	movie_speedup_data = install_mem_read32_handler(0, 0x00000804, 0x00000807, movie_speedup_r);
}
/*************************************
*
* Game driver(s)
*
*************************************/
GAME( 1993, beathead, 0, beathead, beathead, beathead, ROT0, "Atari Games", "BeatHead (prototype)" )
|
jasonfeihe/BitFunnel | src/Common/Utilities/src/ReadLines.cpp | // The MIT License (MIT)
// Copyright (c) 2016, Microsoft
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include <algorithm>
#include <functional>
#include <cctype>
#include <istream>
#include "BitFunnel/Configuration/IFileSystem.h"
#include "BitFunnel/Utilities/IsSpace.h"
#include "BitFunnel/Utilities/ReadLines.h"
namespace BitFunnel
{
class IFileSystem;
std::vector<std::string> ReadLines(IFileSystem& fileSystem,
char const * fileName)
{
auto input = fileSystem.OpenForRead(fileName, std::ios::in);
std::vector<std::string> lines;
std::string line;
while (std::getline(*input, line)) {
// Trim whitespace from right side. If we're reading in a file from
// Windows BitFunnel, we can have a stray \r character.
line.erase(std::find_if(line.rbegin(),
line.rend(),
std::not1(std::ptr_fun<int, bool>(IsSpace))).base(),
line.end());
// Trim leading whitespace.
line.erase(line.begin(),
std::find_if(line.begin(),
line.end(),
std::not1(std::ptr_fun<int, bool>(IsSpace))));
lines.push_back(std::move(line));
}
return lines;
}
}
|
ghsecuritylab/AliOS-Things_rel_1.3.1 | framework/protocol/linkkit/iotkit/hal-impl/rhino/HAL_TCP_rhino.c | <reponame>ghsecuritylab/AliOS-Things_rel_1.3.1
/*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#include <stdio.h>
#include <string.h>
#include <aos/network.h>
#include <aos/errno.h>
/* Remaining milliseconds until t_end as seen from t_now (0 if already past);
   implemented elsewhere in the platform layer. */
extern uint64_t aliot_platform_time_left(uint64_t t_end, uint64_t t_now);

/* Debug logging helpers; the trailing "" lets PLATFORM_RHINOSOCK_LOG accept a
   bare format string with no varargs. */
#define PLATFORM_RHINOSOCK_LOG(...) PLATFORM_LOG(__VA_ARGS__,"")
#define PLATFORM_LOG(format, ...) \
    do { \
        printf("RHINOSOCK %u %s() | "format"\n", __LINE__, __func__, __VA_ARGS__);\
        fflush(stdout);\
    }while(0);
#ifndef CONFIG_NO_TCPIP
/**
 * Establish a blocking IPv4 TCP connection to host:port.
 *
 * Resolves the host with getaddrinfo and tries each returned address in turn
 * until connect() succeeds.
 *
 * @param host  hostname or dotted-quad address string
 * @param port  TCP port number
 * @return connected socket descriptor on success, (uintptr_t)-1 on failure
 */
uintptr_t HAL_TCP_Establish(const char *host, uint16_t port)
{
    struct addrinfo hints;
    struct addrinfo *addrInfoList = NULL;
    struct addrinfo *cur = NULL;
    int fd = 0;
    int rc = -1;
    char service[6];

    memset(&hints, 0, sizeof(hints));

    PLATFORM_RHINOSOCK_LOG("establish tcp connection with server(host=%s port=%u)", host, port);

    hints.ai_family = AF_INET; /* only IPv4 */
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_protocol = IPPROTO_TCP;
    /* snprintf instead of sprintf: "65535" + NUL fits in 6 bytes, but keep the
       bound explicit so a future buffer resize cannot silently overflow */
    snprintf(service, sizeof(service), "%u", port);

    if ((rc = getaddrinfo(host, service, &hints, &addrInfoList)) != 0) {
        /* NOTE: getaddrinfo errors are not errno-based; gai_strerror(rc) would
           give the real reason, perror is kept for log-format compatibility */
        perror("getaddrinfo error");
        return -1;
    }

    /* try each resolved address until one connects */
    for (cur = addrInfoList; cur != NULL; cur = cur->ai_next) {
        if (cur->ai_family != AF_INET) {
            perror("socket type error");
            rc = -1;
            continue;
        }

        fd = socket(cur->ai_family, cur->ai_socktype, cur->ai_protocol);
        if (fd < 0) {
            perror("create socket error");
            rc = -1;
            continue;
        }

        if (connect(fd, cur->ai_addr, cur->ai_addrlen) == 0) {
            rc = fd;    /* success: hand the connected descriptor back */
            break;
        }

        close(fd);      /* this candidate failed; release it before trying the next */
        perror("connect error");
        rc = -1;
    }

    if (-1 == rc){
        PLATFORM_RHINOSOCK_LOG("fail to establish tcp");
    } else {
        PLATFORM_RHINOSOCK_LOG("success to establish tcp, fd=%d", rc);
    }

    freeaddrinfo(addrInfoList);

    return (uintptr_t)rc;
}
/**
 * Tear down a TCP connection: stop traffic in both directions, then release
 * the descriptor.
 *
 * @param fd  socket descriptor previously returned by HAL_TCP_Establish
 * @return 0 on success, -1 if either shutdown or close fails
 */
int32_t HAL_TCP_Destroy(uintptr_t fd)
{
    /* 2 == SHUT_RDWR: stop both send and receive before closing */
    if (shutdown((int)fd, 2) != 0) {
        perror("shutdown error");
        return -1;
    }

    if (close((int)fd) != 0) {
        perror("closesocket error");
        return -1;
    }

    return 0;
}
/**
 * Send up to len bytes on a connected socket within timeout_ms milliseconds.
 *
 * Uses select() to wait for writability and loops until all data is sent or
 * the deadline expires. With timeout_ms == 0 a single send() is attempted.
 *
 * @param fd          connected socket descriptor
 * @param buf         data to send
 * @param len         number of bytes to send
 * @param timeout_ms  overall deadline in milliseconds
 * @return number of bytes actually sent (may be less than len on timeout/error)
 */
int32_t HAL_TCP_Write(uintptr_t fd, const char *buf, uint32_t len, uint32_t timeout_ms)
{
    /* NOTE: the original code tracked an err_code that was never read; it has
       been removed -- the return value is always the byte count sent. */
    int ret;
    uint32_t len_sent;
    uint64_t t_end, t_left;
    fd_set sets;

    t_end = HAL_UptimeMs( ) + timeout_ms;
    len_sent = 0;
    ret = 1; /* send one time if timeout_ms is value 0 */

    do {
        t_left = aliot_platform_time_left(t_end, HAL_UptimeMs( ));

        if (0 != t_left) {
            struct timeval timeout;

            FD_ZERO( &sets );
            FD_SET(fd, &sets);

            timeout.tv_sec = t_left / 1000;
            timeout.tv_usec = (t_left % 1000) * 1000;

            ret = select(fd + 1, NULL, &sets, NULL, &timeout);
            if (ret > 0) {
                if (0 == FD_ISSET(fd, &sets)) {
                    PLATFORM_RHINOSOCK_LOG("Should NOT arrive");
                    /* If timeout in next loop, it will not sent any data */
                    ret = 0;
                    continue;
                }
            } else if (0 == ret) {
                /* select timed out; give up with whatever was sent so far */
                break;
            } else {
                if (EINTR == errno) {
                    PLATFORM_RHINOSOCK_LOG("EINTR be caught");
                    continue;   /* interrupted by a signal; retry the wait */
                }
                perror("select-write fail");
                break;
            }
        }

        if (ret > 0) {
            ret = send(fd, buf + len_sent, len - len_sent, 0);
            if (ret > 0) {
                len_sent += ret;
            } else if (0 == ret) {
                PLATFORM_RHINOSOCK_LOG("No data be sent");
            } else {
                if (EINTR == errno) {
                    PLATFORM_RHINOSOCK_LOG("EINTR be caught");
                    continue;
                }
                perror("send fail");
                break;
            }
        }
    } while((len_sent < len) && (aliot_platform_time_left(t_end, HAL_UptimeMs()) > 0));

    return len_sent;
}
/**
 * Receive up to len bytes from a connected socket within timeout_ms
 * milliseconds, using select() to wait for readability.
 *
 * @param fd          connected socket descriptor
 * @param buf         destination buffer (at least len bytes)
 * @param len         maximum number of bytes to read
 * @param timeout_ms  overall deadline in milliseconds
 * @return bytes received if any arrived; otherwise 0 on timeout,
 *         -1 if the peer closed the connection, -2 on socket error
 */
int32_t HAL_TCP_Read(uintptr_t fd, char *buf, uint32_t len, uint32_t timeout_ms)
{
    int ret, err_code;
    uint32_t len_recv;
    uint64_t t_end, t_left;
    fd_set sets;
    struct timeval timeout;

    t_end = HAL_UptimeMs( ) + timeout_ms;
    len_recv = 0;
    err_code = 0;

    do {
        t_left = aliot_platform_time_left(t_end, HAL_UptimeMs());
        if (0 == t_left) {
            break;  /* deadline reached */
        }

        FD_ZERO( &sets );
        FD_SET(fd, &sets);

        timeout.tv_sec = t_left / 1000;
        timeout.tv_usec = (t_left % 1000) * 1000;

        ret = select(fd + 1, &sets, NULL, NULL, &timeout);
        if (ret > 0) {
            ret = recv(fd, buf + len_recv, len - len_recv, 0);
            if (ret > 0) {
                len_recv += ret;
            } else if (0 == ret) {
                /* recv() == 0 means orderly shutdown by the peer */
                perror("connection is closed");
                err_code = -1;
                break;
            } else {
                if (EINTR == errno) {
                    PLATFORM_RHINOSOCK_LOG("EINTR be caught");
                    continue;
                }
                /* bug fix: this is the recv path -- the message used to say "send fail" */
                perror("recv fail");
                err_code = -2;
                break;
            }
        } else if (0 == ret) {
            break;  /* select timed out */
        } else {
            perror("select-recv fail");
            err_code = -2;
            break;
        }
    } while ((len_recv < len));

    /* priority to return data bytes if any data be received from TCP connection. */
    /* It will get error code on next calling */
    return (0 != len_recv) ? len_recv : err_code;
}
#else
/* No-op stubs used when the platform is built without a TCP/IP stack
   (CONFIG_NO_TCPIP); every operation reports "nothing done". */
uintptr_t HAL_TCP_Establish(const char *host, uint16_t port)
{
    return 0;
}
int32_t HAL_TCP_Destroy(uintptr_t fd)
{
    return 0;
}
int32_t HAL_TCP_Write(uintptr_t fd, const char *buf, uint32_t len, uint32_t timeout_ms)
{
    return 0;
}
int32_t HAL_TCP_Read(uintptr_t fd, char *buf, uint32_t len, uint32_t timeout_ms)
{
    return 0;
}
#endif
|
kk9923/StudyView | app/src/main/java/com/kx/studyview/views/drag/DragLayout.java | <filename>app/src/main/java/com/kx/studyview/views/drag/DragLayout.java
package com.kx.studyview.views.drag;
import android.content.Context;
import android.content.res.TypedArray;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.view.ViewCompat;
import android.support.v4.widget.ViewDragHelper;
import android.util.AttributeSet;
import android.view.GestureDetector;
import android.view.MotionEvent;
import android.view.View;
import android.widget.FrameLayout;
import com.kx.studyview.R;
/**
* Created by admin on 2018/11/8.
*/
/**
 * A FrameLayout whose second child can be vertically dragged between a
 * collapsed "fixed height" position at the bottom and a fully expanded
 * position, using ViewDragHelper. On release the child snaps to whichever
 * end is nearer.
 */
public class DragLayout extends FrameLayout {
    private View dragView;
    private ViewDragHelper mViewDragHelper;
    private GestureDetector mGestureDetector ;
    private int mChildBottom;
    public DragLayout(@NonNull Context context) {
        this(context,null);
    }
    public DragLayout(@NonNull Context context, @Nullable AttributeSet attrs) {
        this(context, attrs, 0);
    }
    public DragLayout(@NonNull Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        // Read the collapsed height from the DragLayout_fix_height style attribute.
        TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.DragLayout, 0, 0);
        mFixHeight = a.getDimensionPixelOffset(R.styleable.DragLayout_fix_height, mFixHeight);
        a.recycle();
        mGestureDetector = new GestureDetector(context,mGestureListener);
        mViewDragHelper = ViewDragHelper.create(this,1f,mCallback );
    }
    ViewDragHelper.Callback mCallback = new ViewDragHelper.Callback() {
        @Override
        public boolean tryCaptureView(@NonNull View child, int pointerId) {
            // Only the designated drag view may be captured.
            return child==dragView;
        }
        /**
         * Clamp the vertical drag range: the child may not rise above its
         * fully-expanded top nor sink below the collapsed (fixed-height) top.
         */
        @Override
        public int clampViewPositionVertical(@NonNull View child, int top, int dy) {
            if (top <= mHeight - child.getHeight()){
                top = mHeight - child.getHeight();
            }else if (top >= mHeight - mFixHeight ){
                top = mHeight - mFixHeight;
            }
            return top;
        }
        /**
         * On release, snap to whichever end of the travel is nearer the
         * current position (threshold is the layout height minus half the
         * child's height).
         */
        @Override
        public void onViewReleased(@NonNull View releasedChild, float xvel, float yvel) {
            int controlY = mHeight - releasedChild.getHeight() / 2 ;
            int top = releasedChild.getTop();
            // Above the threshold: animate up to the fully expanded position.
            if (top < controlY){
                mViewDragHelper.smoothSlideViewTo(releasedChild,0,mHeight - releasedChild.getHeight());
            }else if (top > controlY){
                // Below the threshold: animate down to the collapsed (fixed) height.
                mViewDragHelper.smoothSlideViewTo(releasedChild,0,mHeight -mFixHeight);
            }
            ViewCompat.postInvalidateOnAnimation(DragLayout.this);
        }
    };
    @Override
    public void computeScroll() {
        // Keep invalidating while the snap animation started above is running.
        if (mViewDragHelper.continueSettling(true)) {
            ViewCompat.postInvalidateOnAnimation(DragLayout.this);
        }
        super.computeScroll();
    }
    /**
     * Gesture listener (currently only tracks the down state; the scroll
     * handling below is commented out).
     */
    private GestureDetector.OnGestureListener mGestureListener = new GestureDetector.SimpleOnGestureListener() {
        // Whether the last event was a press; true after onDown, false otherwise.
        private boolean isDownTouch;
        @Override
        public boolean onDown(MotionEvent e) {
            isDownTouch = true;
            return super.onDown(e);
        }
        @Override
        public boolean onSingleTapUp(MotionEvent e) {
            System.out.println("onSingleTapUp");
            return true;
        }
        @Override
        public boolean onScroll(MotionEvent e1, MotionEvent e2, float distanceX, float distanceY) {
            if (isDownTouch) {
                // // If the gesture is a vertical scroll, take over the drag.
                // if (Math.abs(distanceY) > Math.abs(distanceX)) {
                //     _stopAllScroller();
                //     mDragHelper.captureChildView(mDragView, 0);
                //     mIsDrag = true;
                // }
                // isDownTouch = false;
            }
            return super.onScroll(e1, e2, distanceX, distanceY);
        }
    };
    @Override
    public boolean onTouchEvent(MotionEvent event) {
        mViewDragHelper.processTouchEvent(event);
        return true;
    }
    @Override
    public boolean onInterceptTouchEvent(MotionEvent ev) {
        // Call through to the superclass first to avoid a possible
        // IllegalArgumentException: pointerIndex out of range.
        super.onInterceptTouchEvent(ev);
        if (mViewDragHelper.isViewUnder(dragView, (int) ev.getX(), (int) ev.getY())){
            return true ;
        }
        return super.onInterceptTouchEvent(ev);
    }
    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        mHeight = h;
    }
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);
        if (mMaxHeight == 0) {
            // No max height configured: default to half the layout height.
            mMaxHeight = getMeasuredHeight() / 2;
        } else if (mMaxHeight > getMeasuredHeight()) {
            // Max height may not exceed the layout height.
            // NOTE(review): this branch also assigns half the measured height,
            // same as the default branch -- presumably it was meant to clamp to
            // getMeasuredHeight(); confirm before changing.
            mMaxHeight = getMeasuredHeight() / 2;
        }
        View childView = getChildAt(1);
        MarginLayoutParams lp = (MarginLayoutParams) childView.getLayoutParams();
        int childWidth = childView.getMeasuredWidth();
        int childHeight = childView.getMeasuredHeight();
        // Cap the draggable child's measured height at mMaxHeight.
        if (childHeight > mMaxHeight) {
            childView.measure(MeasureSpec.makeMeasureSpec(childWidth - lp.leftMargin - lp.rightMargin, MeasureSpec.EXACTLY), MeasureSpec.makeMeasureSpec(mMaxHeight - lp.topMargin - lp.bottomMargin, MeasureSpec.EXACTLY));
        }
    }
    @Override
    protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
        super.onLayout(changed, left, top, right, bottom);
        View childView = getChildAt(1);
        MarginLayoutParams lp = (MarginLayoutParams) childView.getLayoutParams();
        int childWidth = childView.getMeasuredWidth();
        int childHeight = childView.getMeasuredHeight();
        if (mFixHeight > childHeight){
            mFixHeight = childHeight;
        }
        // Initial placement: collapsed, with only mFixHeight visible above the bottom edge.
        int childLeft = lp.leftMargin;
        int childTop = bottom - mFixHeight;
        int childright = lp.rightMargin+ childWidth;
        if (mChildBottom==0){
            mChildBottom = bottom + childHeight - mFixHeight;
        }
        childView.layout(childLeft,childTop,childright, mChildBottom);
    }
    @Override
    protected void onFinishInflate() {
        super.onFinishInflate();
        // The second child in the XML is the draggable view.
        dragView = getChildAt(1);
    }
    // Total height of this layout (set in onSizeChanged).
    private int mHeight;
    // Visible (collapsed) height of the drag view.
    private int mFixHeight ;
    // Maximum height the drag view may expand to.
    private int mMaxHeight;
    // Top coordinate of the drag view.
    private int mDragViewTop = 0;
}
|
ModeenF/UltraDV | UltraDV/Headers/TAudioSourceView.h | //---------------------------------------------------------------------
//
// File: TAudioSourceView.h
//
// Author: <NAME>
//
// Date: 05.27.98
//
//
// Copyright ©1998 mediapede Software
//
//---------------------------------------------------------------------
#ifndef __TAUDIOSOURCEVIEW_H__
#define __TAUDIOSOURCEVIEW_H__
// Forward Declarations
class TAudioLevelsView;
class TLevelsSlider;
// Class Declarations
// View that presents the audio source controls: device/input selection menus,
// a level meter, and input/output volume sliders.
class TAudioSourceView: public BView
{
	public:
		// Member functions
		TAudioSourceView(BMessage *archive);		// construct from an archived BMessage
		void MessageReceived(BMessage *theMessage);	// BView message dispatch
		void AttachedToWindow();					// late setup once a window exists

		// Member variables

	private:
		// Member functions
		void Init();								// build and lay out child views

		// Member variables
		BMenuField 			*m_DeviceMenuField;		// audio device picker
		BMenuField 			*m_InputMenuField;		// input source picker
		TAudioLevelsView	*m_AudioLevelsView;		// live level meter
		BBox				*m_VolumeBox;			// frame around the sliders
		TLevelsSlider		*m_InputSlider;			// input gain
		TLevelsSlider		*m_OutputSlider;		// output volume
};

// Messages
// Four-char message codes for device/source selection menu items.
#define DEVICE_DEFAULT_MSG	'dDef'

#define SOURCE_NONE_MSG		'sNon'
#define SOURCE_MIC_MSG		'sMic'
#define SOURCE_MIC_20_MSG	'sMiB'		// mic with +20dB boost, presumably -- confirm
#define SOURCE_CD_MSG		'sCD '
#define SOURCE_AUX_MSG		'sAux'
|
michel19/soajs.dashboard | test/unit/lib/daemons/index.test.js | "use strict";
var assert = require("assert");
var helper = require("../../../helper.js");
var utils = helper.requireModule('./lib/daemons/index.js');
// Unit tests for lib/daemons/index.js: model initialization and
// addGroupConfig for the 'cron' and 'once' schedule types.
describe("testing daemons.js", function () {
	var daemon;
	// Minimal request/response doubles; inputmaskData is filled per test case.
	var req = {
		soajs: {
			inputmaskData: {}
		}
	};
	var res = {};
	it("Init model", function (done) {
		utils.init('mongo', function (error, body) {
			assert.ok(body);
			daemon = body;
			done();
		});
	});
	describe("addGroupConfig", function () {
		it("Success type cron", function (done) {
			req.soajs.inputmaskData = {
				type: 'cron',
				"groupName": "test group config 1",
				"daemon": "orderDaemon",
				"status": 0,
				"solo": true,
				"processing": "parallel",
				"jobs": {},
				"order": []
			};
			daemon.addGroupConfig({}, req, res, function (error, body) {
				// NOTE(review): success assertion is commented out, so this test
				// only verifies the callback fires -- confirm intent.
				// assert.ok(body);
				done();
			});
		});
		it("Success type once", function (done) {
			req.soajs.inputmaskData = {
				type: 'once',
				timeZone: '',
				"groupName": "test group config 1",
				"daemon": "orderDaemon",
				"status": 0,
				"solo": true,
				"processing": "parallel",
				"jobs": {},
				"order": []
			};
			daemon.addGroupConfig({}, req, res, function (error, body) {
				// NOTE(review): success assertion is commented out here as well.
				// assert.ok(body);
				done();
			});
		});
		it("Fail", function (done) {
			// Missing 'type' field: expected to produce a validation error.
			req.soajs.inputmaskData = {
				"groupName": "test group config 1",
				"daemon": "orderDaemon",
				"status": 0,
				"solo": true,
				"processing": "parallel",
				"jobs": {},
				"order": []
			};
			daemon.addGroupConfig({}, req, res, function (error, body) {
				assert.ok(error);
				done();
			});
		});
	});
	describe("testing init", function () {
		it("No Model Requested", function (done) {
			utils.init(null, function (error, body) {
				assert.ok(error);
				done();
			});
		});
		it("Model Name not found", function (done) {
			utils.init('anyName', function (error, body) {
				assert.ok(error);
				done();
			});
		});
	});
});
decilio4g/delicias-do-tchelo | node_modules/@styled-icons/heroicons-solid/ClipboardCheck/ClipboardCheck.esm.js | <filename>node_modules/@styled-icons/heroicons-solid/ClipboardCheck/ClipboardCheck.esm.js
import { __assign } from "tslib";
import * as React from 'react';
import { StyledIconBase } from '@styled-icons/styled-icon';

// Forward-ref SVG icon component for the Heroicons (solid) "clipboard-check"
// glyph, rendered through the shared StyledIconBase wrapper.
export var ClipboardCheck = React.forwardRef(function (props, ref) {
    var svgAttributes = {
        "fill": "currentColor",
        "xmlns": "http://www.w3.org/2000/svg",
    };
    // Merge the icon defaults with caller props; caller props win, and the
    // forwarded ref is attached last.
    var baseProps = __assign({ iconAttrs: svgAttributes, iconVerticalAlign: "middle", iconViewBox: "0 0 20 20" }, props, { ref: ref });
    var clipTab = React.createElement("path", { d: "M9 2a1 1 0 000 2h2a1 1 0 100-2H9z", key: "k0" });
    var boardWithCheck = React.createElement("path", { fillRule: "evenodd", d: "M4 5a2 2 0 012-2 3 3 0 003 3h2a3 3 0 003-3 2 2 0 012 2v11a2 2 0 01-2 2H6a2 2 0 01-2-2V5zm9.707 5.707a1 1 0 00-1.414-1.414L9 12.586l-1.293-1.293a1 1 0 00-1.414 1.414l2 2a1 1 0 001.414 0l4-4z", clipRule: "evenodd", key: "k1" });
    return React.createElement(StyledIconBase, baseProps, clipTab, boardWithCheck);
});
ClipboardCheck.displayName = 'ClipboardCheck';
export var ClipboardCheckDimensions = { height: 20, width: 20 };
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.