| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.05M |
| ext | string | 13 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 269 |
| max_stars_repo_name | string | length 5 to 109 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 9 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 269 |
| max_issues_repo_name | string | length 5 to 116 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 9 |
| max_issues_count | int64 | 1 to 48.5k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 269 |
| max_forks_repo_name | string | length 5 to 116 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 9 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 7 to 1.05M |
| avg_line_length | float64 | 1.21 to 330k |
| max_line_length | int64 | 6 to 990k |
| alphanum_fraction | float64 | 0.01 to 0.99 |
| author_id | string | length 2 to 40 |

| hexsha: 33417393672a3e71dca1c67200d26f6f5b120408 | size: 7,788 | ext: cc | lang: C++ |
| path: MELA/src/MELANCSpline_1D_fast.cc | repo: mostafa1993/JHUGenMELA @ a28f39043d98319f1f27493cf899a13646221eda | licenses: ["Apache-2.0"] |
| stars: null | issues: 8 (2020-10-02T20:02:44.000Z to 2022-01-19T15:41:05.000Z) | forks: 11 (2020-10-07T17:15:30.000Z to 2022-03-19T10:41:52.000Z) |

#include "MELANCSpline_1D_fast.h"
#include <cmath>
#include "TMath.h"
#include "Riostream.h"
#include "RooAbsReal.h"
using namespace TMath;
using namespace RooFit;
using namespace std;
using namespace TNumericUtil;
ClassImp(MELANCSpline_1D_fast)
MELANCSpline_1D_fast::MELANCSpline_1D_fast() :
MELANCSplineCore(),
bcBeginX(MELANCSplineCore::bcNaturalSpline), bcEndX(MELANCSplineCore::bcNaturalSpline)
{}
MELANCSpline_1D_fast::MELANCSpline_1D_fast(
const char* name,
const char* title
) :
MELANCSplineCore(name, title),
bcBeginX(MELANCSplineCore::bcNaturalSpline), bcEndX(MELANCSplineCore::bcNaturalSpline)
{}
MELANCSpline_1D_fast::MELANCSpline_1D_fast(
const char* name,
const char* title,
RooAbsReal& inXVar,
const std::vector<T>& inXList,
const std::vector<T>& inFcnList,
MELANCSplineCore::BoundaryCondition const bcBeginX_,
MELANCSplineCore::BoundaryCondition const bcEndX_,
Bool_t inUseFloor,
T inFloorEval,
T inFloorInt
) :
MELANCSplineCore(name, title, inXVar, inXList, inUseFloor, inFloorEval, inFloorInt),
bcBeginX(bcBeginX_), bcEndX(bcEndX_),
FcnList(inFcnList)
{
if (npointsX()>1){
int npoints;
vector<vector<MELANCSplineCore::T>> xA; getKappas(kappaX, 0); getAArray(kappaX, xA, bcBeginX, bcEndX);
npoints=kappaX.size();
TMatrix_t xAtrans(npoints, npoints);
for (int i=0; i<npoints; i++){ for (int j=0; j<npoints; j++){ xAtrans[i][j]=xA.at(i).at(j); } }
Double_t det=0;
TMatrix_t xAinv = xAtrans.Invert(&det);
if (det==0.){
coutE(InputArguments) << "MELANCSpline_1D_fast::MELANCSpline_1D_fast: Matrix xA could not be inverted. Something is wrong with the x coordinates of points!" << endl;
assert(0);
}
coefficients = getCoefficientsAlongDirection(kappaX, xAinv, FcnList, bcBeginX, bcEndX, -1);
}
else assert(0);
RooArgSet leafset;
getLeafDependents(theXVar, leafset);
addLeafDependents(leafset);
emptyFcnList();
}
MELANCSpline_1D_fast::MELANCSpline_1D_fast(
const MELANCSpline_1D_fast& other,
const char* name
) :
MELANCSplineCore(other, name),
bcBeginX(other.bcBeginX), bcEndX(other.bcEndX),
FcnList(other.FcnList),
kappaX(other.kappaX),
coefficients(other.coefficients)
{}
MELANCSplineCore::T MELANCSpline_1D_fast::interpolateFcn(Int_t code, const char* rangeName)const{
DefaultAccumulator<MELANCSplineCore::T> res;
if (verbosity==MELANCSplineCore::kVerbose) cout << "MELANCSpline_1D_fast(" << GetName() << ")::interpolateFcn begin with code: " << code << endl;
// Get bins
Int_t xbin=-1, xbinmin=-1, xbinmax=-1;
MELANCSplineCore::T tx=0, txmin=0, txmax=0;
if (code==0 || code%2!=0){ // Case to just compute the value at x
if (!testRangeValidity(theXVar)) return 0;
xbin = getWhichBin(theXVar, 0);
tx = getTVar(kappaX, theXVar, xbin, 0);
}
else{ // Case to integrate along x
MELANCSplineCore::T coordmin = theXVar.min(rangeName); cropValueForRange(coordmin);
MELANCSplineCore::T coordmax = theXVar.max(rangeName); cropValueForRange(coordmax);
xbinmin = getWhichBin(coordmin, 0);
txmin = getTVar(kappaX, coordmin, xbinmin, 0);
xbinmax = getWhichBin(coordmax, 0);
txmax = getTVar(kappaX, coordmax, xbinmax, 0);
}
int nxbins = (int)coefficients.size();
for (int ix=0; ix<nxbins; ix++){
if (
(xbin>=0 && ix!=xbin)
||
(xbinmin>=0 && xbinmax>=xbinmin && !(xbinmin<=ix && ix<=xbinmax))
) continue;
MELANCSplineCore::T txlow=0, txhigh=1;
if (code>0 && code%2==0){
if (ix==xbinmin) txlow=txmin;
if (ix==xbinmax) txhigh=txmax;
}
else txhigh=tx;
// Get the x coefficients at bin ix and evaluate value of spline at x
res += evalSplineSegment(coefficients.at(ix), kappaX.at(ix), txhigh, txlow, (code>0 && code%2==0));
}
return res;
}
void MELANCSpline_1D_fast::getKappas(vector<MELANCSplineCore::T>& kappas, const Int_t /*whichDirection*/){
kappas.clear();
MELANCSplineCore::T kappa=1;
Int_t npoints;
vector<MELANCSplineCore::T> const* coord;
npoints=npointsX();
coord=&XList;
for (Int_t j=0; j<npoints-1; j++){
MELANCSplineCore::T val_j = coord->at(j);
MELANCSplineCore::T val_jpo = coord->at(j+1);
MELANCSplineCore::T val_diff = (val_jpo-val_j);
if (fabs(val_diff)>MELANCSplineCore::T(0)) kappa = 1./val_diff;
else kappa = 0;
kappas.push_back(kappa);
}
kappas.push_back(kappa); // Push the same kappa_(N-1)=kappa_(N-2) at the end point
}
Int_t MELANCSpline_1D_fast::getWhichBin(const MELANCSplineCore::T& val, const Int_t /*whichDirection*/)const{
Int_t bin=-1;
MELANCSplineCore::T valj, valjpo;
Int_t npoints;
vector<MELANCSplineCore::T> const* coord;
coord=&XList;
npoints=npointsX();
if (npoints<=1) bin=0;
else{
valjpo = coord->at(0);
for (Int_t j=0; j<npoints-1; j++){
valj = coord->at(j);
valjpo = coord->at(j+1);
if (val<valjpo && val>=valj){ bin=j; break; }
}
if (bin==-1 && val>=valjpo) bin=npoints-2;
else if (bin==-1) bin=0;
}
return bin;
}
MELANCSplineCore::T MELANCSpline_1D_fast::getTVar(const vector<MELANCSplineCore::T>& kappas, const MELANCSplineCore::T& val, const Int_t& bin, const Int_t /*whichDirection*/)const{
const MELANCSplineCore::T& K=kappas.at(bin);
return (val-XList.at(bin))*K;
}
Double_t MELANCSpline_1D_fast::evaluate() const{
Double_t value = interpolateFcn(0);
if (useFloor && value<floorEval){
if (verbosity>=MELANCSplineCore::kError) coutE(Eval) << "MELANCSpline_1D_fast ERROR::MELANCSpline_1D_fast(" << GetName() << ") evaluation returned " << value << " at x = " << theXVar << endl;
value = floorEval;
}
if (verbosity==MELANCSplineCore::kVerbose){
cout << "MELANCSpline_1D_fast(" << GetName() << ")::evaluate = " << value << " at x = " << theXVar << endl;
RooArgSet Xdeps; theXVar.absArg()->leafNodeServerList(&Xdeps, 0, true);
TIterator* iter = Xdeps.createIterator();
RooAbsArg* var;
while ((var = (RooAbsArg*)iter->Next())){
cout << var->GetName() << " value = " << dynamic_cast<RooAbsReal*>(var)->getVal() << endl;
}
delete iter;
cout << endl;
}
return value;
}
Int_t MELANCSpline_1D_fast::getAnalyticalIntegral(RooArgSet& allVars, RooArgSet& analVars, const char* /*rangeName*/) const{
if (_forceNumInt) return 0;
Int_t code=0;
if (dynamic_cast<RooRealVar*>(theXVar.absArg())!=0){
if (matchArgs(allVars, analVars, theXVar)) code=2;
}
return code;
}
Double_t MELANCSpline_1D_fast::analyticalIntegral(Int_t code, const char* rangeName) const{
Double_t value = interpolateFcn(code, rangeName);
if (useFloor && value<floorInt){
if (verbosity>=MELANCSplineCore::kError) coutE(Integration) << "MELANCSpline_1D_fast ERROR::MELANCSpline_1D_fast(" << GetName() << ") integration returned " << value << " for code = " << code << endl;
value = floorInt;
}
if (verbosity==MELANCSplineCore::kVerbose){ cout << "MELANCSpline_1D_fast(" << GetName() << ")::analyticalIntegral = " << value << " for code = " << code << endl; }
return value;
}
Bool_t MELANCSpline_1D_fast::testRangeValidity(const T& val, const Int_t /*whichDirection*/) const{
const T* range[2];
range[0] = &rangeXmin;
range[1] = &rangeXmax;
return (*(range[0])>*(range[1]) || (val>=*(range[0]) && val<=*(range[1])));
}
void MELANCSpline_1D_fast::setRangeValidity(const T valmin, const T valmax, const Int_t /*whichDirection*/){
T* range[2];
range[0] = &rangeXmin;
range[1] = &rangeXmax;
*(range[0])=valmin;
*(range[1])=valmax;
}
void MELANCSpline_1D_fast::cropValueForRange(T& val, const Int_t /*whichDirection*/)const{
if (testRangeValidity(val)) return;
const T* range[2];
range[0] = &rangeXmin;
range[1] = &rangeXmax;
if (val<*(range[0])) val = *(range[0]);
if (val>*(range[1])) val = *(range[1]);
}
| avg_line_length: 34.157895 | max_line_length: 204 | alphanum_fraction: 0.681947 | author_id: mostafa1993 |

| hexsha: 334373b4db55f6022451cb91b33ee31d50e70b6e | size: 5,586 | ext: cpp | lang: C++ |
| path: iceoryx_utils/test/moduletests/test_index_queue_unique_index.cpp | repo: surendra210/iceoryx @ f964c2435f65585784b2d77af0ce39f88670fb36 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: 1 (2020-04-06T04:56:24.000Z to 2020-04-06T04:56:24.000Z) |

// Copyright (c) 2020 by Robert Bosch GmbH. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "test.hpp"
#include "iceoryx_utils/internal/concurrent/lockfree_queue/index_queue.hpp"
using namespace ::testing;
namespace
{
using iox::concurrent::IndexQueue;
// By design, we need an IndexQueue to construct valid UniqueIndices;
// this protects against certain errors but in turn requires
// an IndexQueue to test the UniqueIndex.
// Since the implementation detail of using the unique<T> template is subject to
// further change, it is not tested yet.
// However, except that it allows construction for general types T,
// it has similar semantics, so these tests could be adapted.
// The idea is that each resource of type T constructed in such a way is only movable
// and not copyable (maybe rename to move_only?).
class UniqueIndexTest : public ::testing::Test
{
public:
using Queue = IndexQueue<2>;
using UniqueIndex = Queue::UniqueIndex;
protected:
UniqueIndexTest()
{
}
~UniqueIndexTest()
{
}
void SetUp()
{
indexQueue.pop(); // discards the index 0
}
void TearDown()
{
}
Queue indexQueue{Queue::ConstructFull};
UniqueIndex acquireIndex()
{
// returns the index 1
// better for some tests due to the default zero initialization of some types,
// which makes false positives in tests much less likely
return indexQueue.pop();
}
void returnIndex(UniqueIndex& index)
{
indexQueue.push(index);
}
};
// we *cannot* acquire a valid unique index in any other way since the constructor
// is private and only accessible by the friend IndexQueue
// this is the main use case of the IndexQueue
TEST_F(UniqueIndexTest, indexQueueConstructsValidIndexWhenAvailable)
{
auto index1 = acquireIndex();
EXPECT_TRUE(index1.isValid());
EXPECT_EQ(index1, 1); // returned index has value 1 because SetUp already discarded index 0
// capacity exhausted, no valid indices left until we return one
auto index2 = acquireIndex();
EXPECT_FALSE(index2.isValid());
returnIndex(index1);
EXPECT_FALSE(index1.isValid());
auto index3 = acquireIndex();
EXPECT_TRUE(index3.isValid());
EXPECT_EQ(index3, 1);
}
TEST_F(UniqueIndexTest, explicitlyInvalidConstructedIndexIsInvalid)
{
UniqueIndex index(UniqueIndex::invalid);
EXPECT_FALSE(index.isValid());
}
TEST_F(UniqueIndexTest, moveInvalidatesValidIndex)
{
auto index1 = acquireIndex();
EXPECT_TRUE(index1.isValid());
UniqueIndex index2(std::move(index1));
EXPECT_TRUE(index2.isValid());
EXPECT_EQ(index2, 1);
EXPECT_FALSE(index1.isValid());
}
TEST_F(UniqueIndexTest, moveAssignmentInvalidatesValidIndex)
{
auto index1 = acquireIndex();
EXPECT_TRUE(index1.isValid());
UniqueIndex index2(UniqueIndex::invalid);
index2 = std::move(index1);
EXPECT_TRUE(index2.isValid());
EXPECT_EQ(index2, 1);
EXPECT_FALSE(index1.isValid());
}
TEST_F(UniqueIndexTest, selfMoveAssignmentDoesNotInvalidateValidIndex)
{
auto index = acquireIndex();
// this construct is used to prevent a self-move warning
[](UniqueIndex& a, UniqueIndex& b) { a = std::move(b); }(index, index);
EXPECT_TRUE(index.isValid());
EXPECT_EQ(index, 1);
}
TEST_F(UniqueIndexTest, selfMoveAssignedInvalidIndexStaysInvalid)
{
UniqueIndex index(UniqueIndex::invalid);
// this construct is used to prevent a self-move warning
[](UniqueIndex& a, UniqueIndex& b) { a = std::move(b); }(index, index);
EXPECT_FALSE(index.isValid());
}
TEST_F(UniqueIndexTest, movedInvalidIndexStaysInvalid)
{
UniqueIndex index1(UniqueIndex::invalid);
EXPECT_FALSE(index1.isValid());
UniqueIndex index2(std::move(index1));
EXPECT_FALSE(index2.isValid());
EXPECT_FALSE(index1.isValid());
}
TEST_F(UniqueIndexTest, moveAssignedInvalidIndexStaysInvalid)
{
UniqueIndex index1(UniqueIndex::invalid);
EXPECT_FALSE(index1.isValid());
UniqueIndex index2(UniqueIndex::invalid);
index2 = std::move(index1);
EXPECT_FALSE(index2.isValid());
EXPECT_FALSE(index1.isValid());
}
TEST_F(UniqueIndexTest, moveAssignmentOfInvalidIndexInvalidatesDestination)
{
UniqueIndex index1(UniqueIndex::invalid);
auto index2 = acquireIndex();
EXPECT_TRUE(index2.isValid());
index2 = std::move(index1);
EXPECT_FALSE(index2.isValid());
EXPECT_FALSE(index1.isValid());
}
TEST_F(UniqueIndexTest, readAccessDoesNotInvalidateIndex)
{
auto index = acquireIndex();
const auto& ref = *index;
EXPECT_EQ(ref, 1);
EXPECT_TRUE(index.isValid());
}
TEST_F(UniqueIndexTest, releaseInvalidatesIndex)
{
auto index = acquireIndex();
auto value = index.release();
EXPECT_EQ(value, 1);
EXPECT_FALSE(index.isValid());
}
TEST_F(UniqueIndexTest, conversionToValueTypeDoesNotInvalidateIndex)
{
auto index = acquireIndex();
UniqueIndex::value_t value{73};
value = index;
EXPECT_EQ(value, 1);
EXPECT_TRUE(index.isValid());
}
} // namespace
| avg_line_length: 26.102804 | max_line_length: 86 | alphanum_fraction: 0.712137 | author_id: surendra210 |

| hexsha: 334500b5c95f180949beb4ad0514e693500d552b | size: 5,210 | ext: hh | lang: C++ |
| path: source/neuropod/multiprocess/mq/ipc_message_queue.hh | repo: qiyanz/neuropod @ 13c2bc794b168583588b68269026ec4ed76163ed | licenses: ["Apache-2.0"] |
| stars: 887 (2020-06-08T16:10:28.000Z to 2022-03-27T21:55:43.000Z) | issues: 150 (2020-06-09T10:43:15.000Z to 2022-03-30T02:48:39.000Z) | forks: 70 (2020-06-08T18:43:12.000Z to 2022-03-18T20:37:51.000Z) |

/* Copyright (c) 2020 UATC, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include "neuropod/internal/blocking_spsc_queue.hh"
#include "neuropod/internal/error_utils.hh"
#include "neuropod/internal/memory_utils.hh"
#include "neuropod/multiprocess/mq/heartbeat.hh"
#include "neuropod/multiprocess/mq/wire_format.hh"
#include <boost/interprocess/ipc/message_queue.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <thread>
#include <unordered_map>
namespace ipc = boost::interprocess;
namespace neuropod
{
// Forward declare IPCMessageQueue
template <typename>
class IPCMessageQueue;
// This is the user-facing queue message type
// UserPayloadType should be an enum that specifies types of payloads
template <typename UserPayloadType>
class QueueMessage
{
private:
// A pointer to the underlying data
std::shared_ptr<detail::WireFormat<UserPayloadType>> data_;
// Note: The constructor is private and only used in `IPCMessageQueue`
template <typename>
friend class IPCMessageQueue;
// Constructor used when receiving messages
QueueMessage(std::shared_ptr<detail::WireFormat<UserPayloadType>> data) : data_(std::move(data)) {}
public:
~QueueMessage() = default;
// Get a payload of type `Payload` from this message
template <typename Payload>
void get(Payload &out)
{
detail::deserialize_payload(*data_, out);
}
// Get the type of the user-defined payload included in this message
// This should only be used when it is known that this message contains
// a user-defined payload
UserPayloadType get_payload_type() { return data_->payload_type; }
};
// The type of the process creating the message queue
enum ProcessType
{
WORKER_PROCESS,
MAIN_PROCESS,
};
// A bidirectional IPC message queue that supports cross-process moves or copies of payloads.
// Includes an implementation of heartbeats and message acknowledgement (in the form of DONE messages)
// This class starts a thread for reading from the underlying `recv_queue` and uses a `HeartbeatController`
// to start a thread for sending heartbeats.
template <typename UserPayloadType>
class IPCMessageQueue : public std::enable_shared_from_this<IPCMessageQueue<UserPayloadType>>
{
private:
using WireFormat = detail::WireFormat<UserPayloadType>;
// A queue to store output messages received by the read thread
BlockingSPSCQueue<std::unique_ptr<WireFormat>> out_queue_;
// Internal IPC queues to communicate with the other process
std::string control_queue_name_;
std::unique_ptr<ipc::message_queue> send_queue_;
std::unique_ptr<ipc::message_queue> recv_queue_;
// Responsible for periodically sending a heartbeat
friend class detail::HeartbeatController;
std::unique_ptr<detail::HeartbeatController> heartbeat_controller_;
// Responsible for keeping things in scope during cross-process moves
std::unique_ptr<detail::TransferrableController> transferrable_controller_;
// Whether or not a shutdown is in progress
bool shutdown_started_ = false;
// If we lost the heartbeat from the other process
std::atomic_bool lost_heartbeat_;
// A thread that handles incoming messages
std::thread read_worker_;
// The worker loop for the message reading thread
void read_worker_loop();
// Send a message to the other process
void send_message(const WireFormat &msg);
// Throw an error if we lost communication with the other process
void throw_if_lost_heartbeat();
public:
IPCMessageQueue(const std::string &control_queue_name, ProcessType type);
~IPCMessageQueue();
// Send a message with a payload
// Note: this is threadsafe
template <typename Payload>
void send_message(UserPayloadType payload_type, const Payload &payload);
// Send a message with a payload and ensure `payload` stays in
// scope until the other process is done using the message.
// Note: this is threadsafe
template <typename Payload>
void send_message_move(UserPayloadType payload_type, Payload payload);
// Send a message with just a payload_type
// Note: this is threadsafe
void send_message(UserPayloadType payload_type);
// Get a message. Blocks if the queue is empty.
// Note: this is _NOT_ threadsafe. There should only be one thread calling `recv_message`
// at a time.
QueueMessage<UserPayloadType> recv_message();
};
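// A minimal usage sketch (the enum, payload type, variable names and queue name
// below are hypothetical, and the payload must be something that
// detail::serialize_payload/deserialize_payload can handle):
//
//   enum MessageType { LOAD_MODEL, INFER, SHUTDOWN };
//
//   auto queue = std::make_shared<neuropod::IPCMessageQueue<MessageType>>(
//       "example_control_queue", neuropod::MAIN_PROCESS);
//
//   queue->send_message(MessageType::SHUTDOWN);            // payload_type only
//   queue->send_message(MessageType::LOAD_MODEL, payload); // with a payload
//
//   auto msg = queue->recv_message();                      // one consumer thread only
//   if (msg.get_payload_type() == MessageType::LOAD_MODEL)
//   {
//       SomePayload out;
//       msg.get(out);
//   }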
// Cleanup control channels for the queue with name `control_queue_name`
void cleanup_control_channels(const std::string &control_queue_name);
} // namespace neuropod
#include "neuropod/multiprocess/mq/ipc_message_queue_impl.hh"
| avg_line_length: 33.612903 | max_line_length: 107 | alphanum_fraction: 0.750864 | author_id: qiyanz |

| hexsha: 334701b8bda119ec30e0aa624ee8a57e77ca526a | size: 25,448 | ext: cpp | lang: C++ |
| path: src/libtsduck/dtv/tsNames.cpp | repo: ASTRO-Strobel/tsduck @ f1da3d49df35b3d9740fb2c8031c92d0f261829a | licenses: ["BSD-2-Clause"] |
| stars: null | issues: null | forks: null |

//----------------------------------------------------------------------------
//
// TSDuck - The MPEG Transport Stream Toolkit
// Copyright (c) 2005-2020, Thierry Lelegard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------
#include "tsNames.h"
#include "tsMPEG.h"
#include "tsSysUtils.h"
#include "tsFatal.h"
#include "tsCerrReport.h"
#include "tsTablesFactory.h"
TSDUCK_SOURCE;
//----------------------------------------------------------------------------
// Configuration instances.
//----------------------------------------------------------------------------
TS_DEFINE_SINGLETON(ts::NamesMain);
ts::NamesMain::NamesMain() : Names(u"tsduck.names", true) {}
TS_DEFINE_SINGLETON(ts::NamesOUI);
ts::NamesOUI::NamesOUI() : Names(u"tsduck.oui.names") {}
//----------------------------------------------------------------------------
// Descriptor ids: specific processing for table-specific descriptors.
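// The table-specific lookup key used below packs the table id into bits 40-47,
// forces bits 8-39 to all ones and puts the descriptor id in bits 0-7; for
// instance, tid 0x42 with did 0x41 gives the key 0x42FFFFFFFF41.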
//----------------------------------------------------------------------------
bool ts::names::HasTableSpecificName(uint8_t did, uint8_t tid)
{
return tid != TID_NULL &&
did < 0x80 &&
NamesMain::Instance()->nameExists(u"DescriptorId", (Names::Value(tid) << 40) | TS_UCONST64(0x000000FFFFFFFF00) | Names::Value(did));
}
ts::UString ts::names::DID(uint8_t did, uint32_t pds, uint8_t tid, Flags flags)
{
if (did >= 0x80 && pds != 0 && pds != PDS_NULL) {
// If this is a private descriptor, only consider the private value.
// Do not fallback because the same value with PDS == 0 can be different.
return NamesMain::Instance()->nameFromSection(u"DescriptorId", (Names::Value(pds) << 8) | Names::Value(did), flags, 8);
}
else if (tid != 0xFF) {
// Could be a table-specific descriptor.
const Names::Value fullValue = (Names::Value(tid) << 40) | TS_UCONST64(0x000000FFFFFFFF00) | Names::Value(did);
return NamesMain::Instance()->nameFromSectionWithFallback(u"DescriptorId", fullValue, Names::Value(did), flags, 8);
}
else {
return NamesMain::Instance()->nameFromSection(u"DescriptorId", Names::Value(did), flags, 8);
}
}
//----------------------------------------------------------------------------
// Public functions returning names.
//----------------------------------------------------------------------------
ts::UString ts::names::TID(uint8_t tid, uint16_t cas, Flags flags)
{
// Use version with CAS first, then without CAS.
return NamesMain::Instance()->nameFromSectionWithFallback(u"TableId", (Names::Value(CASFamilyOf(cas)) << 8) | Names::Value(tid), Names::Value(tid), flags, 8);
}
ts::UString ts::names::EDID(uint8_t edid, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DVBExtendedDescriptorId", Names::Value(edid), flags, 8);
}
ts::UString ts::names::StreamType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"StreamType", Names::Value(type), flags, 8);
}
ts::UString ts::names::Content(uint8_t x, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"ContentId", Names::Value(x), flags, 8);
}
ts::UString ts::names::PrivateDataSpecifier(uint32_t pds, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"PrivateDataSpecifier", Names::Value(pds), flags, 32);
}
ts::UString ts::names::CASFamily(ts::CASFamily cas)
{
return NamesMain::Instance()->nameFromSection(u"CASFamily", Names::Value(cas), NAME | DECIMAL);
}
ts::UString ts::names::CASId(uint16_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"CASystemId", Names::Value(id), flags, 16);
}
ts::UString ts::names::BouquetId(uint16_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"BouquetId", Names::Value(id), flags, 16);
}
ts::UString ts::names::OriginalNetworkId(uint16_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"OriginalNetworkId", Names::Value(id), flags, 16);
}
ts::UString ts::names::NetworkId(uint16_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"NetworkId", Names::Value(id), flags, 16);
}
ts::UString ts::names::PlatformId(uint32_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"PlatformId", Names::Value(id), flags, 24);
}
ts::UString ts::names::DataBroadcastId(uint16_t id, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DataBroadcastId", Names::Value(id), flags, 16);
}
ts::UString ts::names::OUI(uint32_t oui, Flags flags)
{
return NamesOUI::Instance()->nameFromSection(u"OUI", Names::Value(oui), flags, 24);
}
ts::UString ts::names::StreamId(uint8_t sid, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"StreamId", Names::Value(sid), flags, 8);
}
ts::UString ts::names::PESStartCode(uint8_t code, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"PESStartCode", Names::Value(code), flags, 8);
}
ts::UString ts::names::AspectRatio(uint8_t ar, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"AspectRatio", Names::Value(ar), flags, 8);
}
ts::UString ts::names::ChromaFormat(uint8_t cf, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"ChromaFormat", Names::Value(cf), flags, 8);
}
ts::UString ts::names::AVCUnitType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"AVCUnitType", Names::Value(type), flags, 8);
}
ts::UString ts::names::AVCProfile(int profile, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"AVCProfile", Names::Value(profile), flags, 8);
}
ts::UString ts::names::ServiceType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"ServiceType", Names::Value(type), flags, 8);
}
ts::UString ts::names::LinkageType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"LinkageType", Names::Value(type), flags, 8);
}
ts::UString ts::names::TeletextType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"TeletextType", Names::Value(type), flags, 8);
}
ts::UString ts::names::RunningStatus(uint8_t status, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"RunningStatus", Names::Value(status), flags, 8);
}
ts::UString ts::names::AudioType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"AudioType", Names::Value(type), flags, 8);
}
ts::UString ts::names::SubtitlingType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"SubtitlingType", Names::Value(type), flags, 8);
}
ts::UString ts::names::DTSSampleRateCode(uint8_t x, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DTSSampleRate", Names::Value(x), flags, 8);
}
ts::UString ts::names::DTSBitRateCode(uint8_t x, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DTSBitRate", Names::Value(x), flags, 8);
}
ts::UString ts::names::DTSSurroundMode(uint8_t x, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DTSSurroundMode", Names::Value(x), flags, 8);
}
ts::UString ts::names::DTSExtendedSurroundMode(uint8_t x, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"DTSExtendedSurroundMode", Names::Value(x), flags, 8);
}
ts::UString ts::names::ScramblingControl(uint8_t scv, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"ScramblingControl", Names::Value(scv), flags, 8);
}
ts::UString ts::names::T2MIPacketType(uint8_t type, Flags flags)
{
return NamesMain::Instance()->nameFromSection(u"T2MIPacketType", Names::Value(type), flags, 8);
}
//----------------------------------------------------------------------------
// Component Type (in Component Descriptor)
//----------------------------------------------------------------------------
ts::UString ts::names::ComponentType(uint16_t type, Flags flags)
{
// There is a special case here. The binary layout of the 16 bits is:
// stream_content_ext (4 bits)
// stream_content (4 bits)
// component_type (8 bits).
//
// In the beginning, stream_content_ext did not exist and, as a reserved
// field, was 0xF. Starting with stream_content > 8, stream_content_ext
// appeared and may have different values. Logically, stream_content_ext
// is a subsection of stream_content. So, the bit order for values in the
// name file is stream_content || stream_content_ext || component_type.
//
// We apply the following transformations:
// - To look up the name, we use stream_content || stream_content_ext || component_type.
// - To display the value, we use the real binary value where stream_content_ext
//   is forced to zero when stream_content is in the range 1 to 8.
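//
// For example, the 16-bit value 0xF301 (stream_content_ext = 0xF, stream_content = 0x3,
// component_type = 0x01) is looked up as nType = 0x3F01 and displayed as dType = 0x0301.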
// Stream content:
const uint16_t sc = (type & 0x0F00) >> 8;
// Value to use for name lookup:
const uint16_t nType = (sc >= 1 && sc <= 8 ? 0x0F00 : ((type & 0xF000) >> 4)) | uint16_t((type & 0x0F00) << 4) | (type & 0x00FF);
// Value to display:
const uint16_t dType = sc >= 1 && sc <= 8 ? (type & 0x0FFF) : type;
if ((nType & 0xFF00) == 0x3F00) {
return SubtitlingType(nType & 0x00FF, flags);
}
else if ((nType & 0xFF00) == 0x4F00) {
return AC3ComponentType(nType & 0x00FF, flags);
}
else {
return NamesMain::Instance()->nameFromSection(u"ComponentType", Names::Value(nType), flags | names::ALTERNATE, 16, dType);
}
}
//----------------------------------------------------------------------------
// AC-3 Component Type, field-based, no built-in list of values
//----------------------------------------------------------------------------
ts::UString ts::names::AC3ComponentType(uint8_t type, Flags flags)
{
ts::UString s((type & 0x80) != 0 ? u"Enhanced AC-3" : u"AC-3");
s += (type & 0x40) != 0 ? u", full" : u", combined";
switch (type & 0x38) {
case 0x00: s += u", complete main"; break;
case 0x08: s += u", music and effects"; break;
case 0x10: s += u", visually impaired"; break;
case 0x18: s += u", hearing impaired"; break;
case 0x20: s += u", dialogue"; break;
case 0x28: s += u", commentary"; break;
case 0x30: s += u", emergency"; break;
case 0x38: s += (type & 0x40) ? u", karaoke" : u", voiceover"; break;
default: assert(false); // unreachable
}
switch (type & 0x07) {
case 0: s += u", mono"; break;
case 1: s += u", 1+1 channel"; break;
case 2: s += u", 2 channels"; break;
case 3: s += u", 2 channels dolby surround"; break;
case 4: s += u", multichannel > 2"; break;
case 5: s += u", multichannel > 5.1"; break;
case 6: s += u", multiple substreams"; break;
case 7: s += u", reserved"; break;
default: assert(false); // unreachable
}
return Names::Formatted(type, s, flags, 8);
}
//----------------------------------------------------------------------------
// Constructor (load the configuration file).
//----------------------------------------------------------------------------
ts::Names::Names(const UString& fileName, bool mergeExtensions) :
_log(CERR),
_configFile(SearchConfigurationFile(fileName)),
_configErrors(0),
_sections()
{
// Locate the configuration file.
if (_configFile.empty()) {
// Cannot load configuration, names will not be available.
_log.error(u"configuration file '%s' not found", {fileName});
}
else {
loadFile(_configFile);
}
// Merge extensions if required.
if (mergeExtensions) {
// Get list of extension names.
UStringList files;
TablesFactory::Instance()->getRegisteredNamesFiles(files);
for (auto name = files.begin(); name != files.end(); ++name) {
const UString path(SearchConfigurationFile(*name));
if (path.empty()) {
_log.error(u"extension file '%s' not found", {*name});
}
else {
loadFile(path);
}
}
}
}
//----------------------------------------------------------------------------
// Load a configuration file and merge its content into this instance.
//----------------------------------------------------------------------------
void ts::Names::loadFile(const UString& fileName)
{
// Open configuration file.
std::ifstream strm(fileName.toUTF8().c_str());
if (!strm) {
_log.error(u"error opening file %s", {fileName});
return;
}
ConfigSection* section = nullptr;
UString line;
// Read configuration file line by line.
for (size_t lineNumber = 1; line.getLine(strm); ++lineNumber) {
// Remove leading and trailing spaces in line.
line.trim();
if (line.empty() || line[0] == UChar('#')) {
// Empty or comment line, ignore.
}
else if (line.front() == UChar('[') && line.back() == UChar(']')) {
// Handle beginning of section, get section name.
line.erase(0, 1);
line.pop_back();
line.convertToLower();
// Get or create associated section.
ConfigSectionMap::iterator it = _sections.find(line);
if (it != _sections.end()) {
section = it->second;
}
else {
// Create new section.
section = new ConfigSection;
CheckNonNull(section);
_sections.insert(std::make_pair(line, section));
}
}
else if (!decodeDefinition(line, section)) {
// Invalid line.
_log.error(u"%s: invalid line %d: %s", {fileName, lineNumber, line});
if (++_configErrors >= 20) {
// Give up after that number of errors
_log.error(u"%s: too many errors, giving up", {fileName});
break;
}
}
}
strm.close();
}
//----------------------------------------------------------------------------
// Decode a line as "first[-last] = name". Return true on success.
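// A hypothetical fragment of the file format handled here and in loadFile()
// above (the section name, bit width and names below are illustrative only):
//
//   [SomeSection]
//   bits = 8
//   0x00 = first name
//   0x01-0x0F = range sharing one name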
//----------------------------------------------------------------------------
bool ts::Names::decodeDefinition(const UString& line, ConfigSection* section)
{
// Check the presence of the '=' and in a valid section.
const size_t equal = line.find(UChar('='));
if (equal == 0 || equal == NPOS || section == nullptr) {
return false;
}
// Extract fields.
UString range(line, 0, equal);
range.trim();
UString value(line, equal + 1, line.length() - equal - 1);
value.trim();
// Special case: specification of size in bits of values in this section.
if (range.similar(u"bits")) {
return value.toInteger(section->bits);
}
// Decode "first[-last]"
Value first = 0;
Value last = 0;
const size_t dash = range.find(UChar('-'));
bool valid = false;
if (dash == NPOS) {
valid = range.toInteger(first);
last = first;
}
else {
valid = range.substr(0, dash).toInteger(first) && range.substr(dash + 1).toInteger(last) && last >= first;
}
// Add the definition.
if (valid) {
if (section->freeRange(first, last)) {
section->addEntry(first, last, value);
}
else {
_log.error(u"%s: range 0x%X-0x%X overlaps with an existing range", {_configFile, first, last});
valid = false;
}
}
return valid;
}
//----------------------------------------------------------------------------
// Destructor: free all resources.
//----------------------------------------------------------------------------
ts::Names::~Names()
{
// Deallocate all configuration sections.
for (ConfigSectionMap::iterator it = _sections.begin(); it != _sections.end(); ++it) {
delete it->second;
}
_sections.clear();
}
//----------------------------------------------------------------------------
// Configuration entry.
//----------------------------------------------------------------------------
ts::Names::ConfigEntry::ConfigEntry(Value l, const UString& n) :
last(l),
name(n)
{
}
//----------------------------------------------------------------------------
// Configuration section.
//----------------------------------------------------------------------------
ts::Names::ConfigSection::ConfigSection() :
bits(0),
entries()
{
}
ts::Names::ConfigSection::~ConfigSection()
{
// Deallocate all configuration entries.
for (ConfigEntryMap::iterator it = entries.begin(); it != entries.end(); ++it) {
delete it->second;
}
entries.clear();
}
//----------------------------------------------------------------------------
// Check if a range is free, i.e. no value is defined in the range.
//----------------------------------------------------------------------------
bool ts::Names::ConfigSection::freeRange(Value first, Value last) const
{
// Get an iterator pointing to the first element that is "not less" than 'first'.
ConfigEntryMap::const_iterator it = entries.lower_bound(first);
if (it != entries.end() && it->first <= last) {
// This is an existing range which starts inside [first..last].
assert(it->first >= first);
return false;
}
if (it != entries.begin() && (--it)->second->last >= first) {
// The previous range ends inside [first..last].
assert(it->first < first);
return false;
}
// No overlap found.
return true;
}
//----------------------------------------------------------------------------
// Add a new configuration entry.
//----------------------------------------------------------------------------
void ts::Names::ConfigSection::addEntry(Value first, Value last, const UString& name)
{
ConfigEntry* entry = new ConfigEntry(last, name);
CheckNonNull(entry);
entries.insert(std::make_pair(first, entry));
}
//----------------------------------------------------------------------------
// Get a name from a value, empty if not found.
//----------------------------------------------------------------------------
ts::UString ts::Names::ConfigSection::getName(Value val) const
{
// Eliminate trivial cases which would cause issues with code below.
if (entries.empty()) {
return UString();
}
// The key in the 'entries' map is the _first_ value of a range.
// Get an iterator pointing to the first element that is "not less" than 'val'.
ConfigEntryMap::const_iterator it = entries.lower_bound(val);
if (it == entries.end() || (it != entries.begin() && it->first != val)) {
// There is no entry with a value range starting at 'val'.
// Maybe 'val' is in the range of the previous entry.
--it;
}
assert(it != entries.end());
assert(it->second != nullptr);
return val >= it->first && val <= it->second->last ? it->second->name : UString();
}
//----------------------------------------------------------------------------
// Format helper
//----------------------------------------------------------------------------
// Compute the number of hexa digits needed for a field of 'bits' bits.
int ts::Names::HexaDigits(size_t bits)
{
return int((bits + 3) / 4);
}
// Compute the display mask
ts::Names::Value ts::Names::DisplayMask(size_t bits)
{
if (bits == 0 || bits >= 4 * sizeof(Value)) {
// Unspecified, keep all bits.
return ~Value(0);
}
else {
return ~Value(0) >> (8 * sizeof(Value) - bits);
}
}
//----------------------------------------------------------------------------
// Format a name.
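// For instance, with a resolved name of u"service_descriptor", value 0x48 and
// 8-bit formatting, the names::HEXA case below yields u"service_descriptor (0x48)"
// and the names::HEXA_FIRST case yields u"0x48 (service_descriptor)".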
//----------------------------------------------------------------------------
ts::UString ts::Names::Formatted(Value value, const UString& name, names::Flags flags, size_t bits, Value alternateValue)
{
// If neither decimal nor hexa are specified, hexa is the default.
if ((flags & (names::DECIMAL | names::HEXA)) == 0) {
flags |= names::HEXA;
}
// Actual value to display.
if ((flags & names::ALTERNATE) != 0) {
value = alternateValue;
}
// Display meaningful bits only.
value &= DisplayMask(bits);
// Default name.
const UString defaultName(u"unknown");
const UString* displayName = &name;
if (name.empty()) {
// Name not found, force value display.
flags |= names::VALUE;
displayName = &defaultName;
}
if ((flags & (names::VALUE | names::FIRST)) == 0) {
// Name only.
return *displayName;
}
switch (flags & (names::FIRST | names::DECIMAL | names::HEXA)) {
case names::DECIMAL:
return UString::Format(u"%s (%d)", {*displayName, value});
case names::HEXA:
return UString::Format(u"%s (0x%0*X)", {*displayName, HexaDigits(bits), value});
case names::BOTH:
return UString::Format(u"%s (0x%0*X, %d)", {*displayName, HexaDigits(bits), value, value});
case names::DECIMAL_FIRST:
return UString::Format(u"%d (%s)", {value, *displayName});
case names::HEXA_FIRST:
return UString::Format(u"0x%0*X (%s)", {HexaDigits(bits), value, *displayName});
case names::BOTH_FIRST:
return UString::Format(u"0x%0*X (%d, %s)", {HexaDigits(bits), value, value, *displayName});
case names::NAME:
case names::VALUE:
case names::FIRST:
case names::ALTERNATE:
default:
assert(false);
return UString();
}
}
//----------------------------------------------------------------------------
// Check if a name exists in a specified section.
//----------------------------------------------------------------------------
bool ts::Names::nameExists(const UString& sectionName, Value value) const
{
// Get the section, normalize the section name.
ConfigSectionMap::const_iterator it = _sections.find(sectionName.toTrimmed().toLower());
return it != _sections.end() && !it->second->getName(value).empty();
}
//----------------------------------------------------------------------------
// Get a name from a specified section.
//----------------------------------------------------------------------------
ts::UString ts::Names::nameFromSection(const UString& sectionName, Value value, names::Flags flags, size_t bits, Value alternateValue) const
{
// Get the section, normalize the section name.
ConfigSectionMap::const_iterator it = _sections.find(sectionName.toTrimmed().toLower());
const ConfigSection* section = it == _sections.end() ? nullptr : it->second;
if (section == nullptr) {
// Non-existent section, no name.
return Formatted(value, UString(), flags, bits, alternateValue);
}
else {
return Formatted(value, section->getName(value), flags, bits != 0 ? bits : section->bits, alternateValue);
}
}
//----------------------------------------------------------------------------
// Get a name from a specified section, with alternate fallback value.
//----------------------------------------------------------------------------
ts::UString ts::Names::nameFromSectionWithFallback(const UString& sectionName, Value value1, Value value2, names::Flags flags, size_t bits, Value alternateValue) const
{
// Get the section, normalize the section name.
ConfigSectionMap::const_iterator it = _sections.find(sectionName.toTrimmed().toLower());
const ConfigSection* section = it == _sections.end() ? nullptr : it->second;
if (section == nullptr) {
// Non-existent section, no name.
return Formatted(value1, UString(), flags, bits, alternateValue);
}
else {
const UString name(section->getName(value1));
if (!name.empty()) {
// value1 has a name
return Formatted(value1, name, flags, bits != 0 ? bits : section->bits, alternateValue);
}
else {
// value1 has no name, use value2.
return Formatted(value2, section->getName(value2), flags, bits != 0 ? bits : section->bits, alternateValue);
}
}
}
| avg_line_length: 35.591608 | max_line_length: 167 | alphanum_fraction: 0.562009 | author_id: ASTRO-Strobel |

| hexsha: 3348ba349334df652784ce461727725f95bf804c | size: 5,636 | ext: cc | lang: C++ |
| path: tpccbench/src/randomgenerator.cc | repo: mkatsa/PENVMTool @ c63de91036cd84d36cd8ac54f2033ea141a292dc | licenses: ["Apache-2.0"] |
| stars: 1 (2022-03-22T15:16:56.000Z to 2022-03-22T15:16:56.000Z) | issues: null | forks: null |

#include "randomgenerator.h"
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include "assert.h"
namespace tpcc
{
NURandC NURandC::makeRandom(RandomGenerator *generator)
{
NURandC c;
c.c_last_ = generator->number(0, 255);
c.c_id_ = generator->number(0, 1023);
c.ol_i_id_ = generator->number(0, 8191);
return c;
}
// Returns true if the C-Run value is valid. See TPC-C 2.1.6.1 (page 20).
static bool validCRun(int cRun, int cLoad)
{
int cDelta = abs(cRun - cLoad);
return 65 <= cDelta && cDelta <= 119 && cDelta != 96 && cDelta != 112;
}
NURandC NURandC::makeRandomForRun(RandomGenerator *generator, const NURandC &c_load)
{
NURandC c = makeRandom(generator);
while (!validCRun(c.c_last_, c_load.c_last_))
{
c.c_last_ = generator->number(0, 255);
}
assert(validCRun(c.c_last_, c_load.c_last_));
return c;
}
int RandomGenerator::numberExcluding(int lower, int upper, int excluding)
{
assert(lower < upper);
assert(lower <= excluding && excluding <= upper);
// Generate a number from a range that is one smaller than [lower, upper]
int num = number(lower, upper - 1);
// Shift values at or above 'excluding' up by one so that 'excluding' is skipped
if (num >= excluding)
{
num += 1;
}
assert(lower <= num && num <= upper && num != excluding);
return num;
}
static void generateString(RandomGenerator *generator, char *s, int lower_length, int upper_length,
char base_character, int num_characters)
{
int length = generator->number(lower_length, upper_length);
for (int i = 0; i < length; ++i)
{
s[i] = static_cast<char>(base_character + generator->number(0, num_characters - 1));
}
s[length] = '\0';
}
void RandomGenerator::astring(char *s, int lower_length, int upper_length)
{
generateString(this, s, lower_length, upper_length, 'a', 26);
}
void RandomGenerator::nstring(char *s, int lower_length, int upper_length)
{
generateString(this, s, lower_length, upper_length, '0', 10);
}
void RandomGenerator::lastName(char *c_last, int max_cid)
{
makeLastName(NURand(255, 0, std::min(999, max_cid - 1)), c_last);
}
float RandomGenerator::fixedPoint(int digits, float lower, float upper)
{
int multiplier = 1;
for (int i = 0; i < digits; ++i)
{
multiplier *= 10;
}
int int_lower = static_cast<int>(lower * static_cast<double>(multiplier) + 0.5);
int int_upper = static_cast<int>(upper * static_cast<double>(multiplier) + 0.5);
return (float)number(int_lower, int_upper) / (float)multiplier;
}
int RandomGenerator::NURand(int A, int x, int y)
{
int C = 0;
switch (A)
{
case 255:
C = c_values_.c_last_;
break;
case 1023:
C = c_values_.c_id_;
break;
case 8191:
C = c_values_.ol_i_id_;
break;
default:
fprintf(stderr, "Error: NURand: A = %d not supported\n", A);
exit(1);
}
return (((number(0, A) | number(x, y)) + C) % (y - x + 1)) + x;
}
int *RandomGenerator::makePermutation(int lower, int upper)
{
// initialize with consecutive values
int *array = new int[upper - lower + 1];
for (int i = 0; i <= upper - lower; ++i)
{
array[i] = lower + i;
}
for (int i = 0; i < upper - lower; ++i)
{
// choose a value to go into this position, including this position
int index = number(i, upper - lower);
int temp = array[i];
array[i] = array[index];
array[index] = temp;
}
return array;
}
// Defined by TPC-C 4.3.2.3.
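// For example, num = 371 selects syllables 3, 7 and 1, producing "PRICALLYOUGHT".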
void makeLastName(int num, char *name)
{
static const char *const SYLLABLES[] = {
"BAR",
"OUGHT",
"ABLE",
"PRI",
"PRES",
"ESE",
"ANTI",
"CALLY",
"ATION",
"EING",
};
static const int LENGTHS[] = {
3,
5,
4,
3,
4,
3,
4,
5,
5,
4,
};
assert(0 <= num && num <= 999);
int indicies[] = {num / 100, (num / 10) % 10, num % 10};
int offset = 0;
for (int i = 0; i < sizeof(indicies) / sizeof(*indicies); ++i)
{
assert(strlen(SYLLABLES[indicies[i]]) == LENGTHS[indicies[i]]);
memcpy(name + offset, SYLLABLES[indicies[i]], static_cast<size_t>(LENGTHS[indicies[i]]));
offset += LENGTHS[indicies[i]];
}
name[offset] = '\0';
}
RealRandomGenerator::RealRandomGenerator()
{
#ifdef HAVE_RANDOM_R
// Set the random state to zeros. glibc will attempt to access the old state if not NULL.
memset(&state, 0, sizeof(state));
int result = initstate_r(static_cast<unsigned int>(time(NULL)), state_array,
sizeof(state_array), &state);
assert(result == 0);
#else
seed(time(NULL));
#endif
}
int RealRandomGenerator::number(int lower, int upper)
{
int rand_int;
#ifdef HAVE_RANDOM_R
int error = random_r(&state, &rand_int);
assert(error == 0);
#else
rand_int = nrand48(state);
#endif
assert(0 <= rand_int && rand_int <= RAND_MAX);
// Select a number in [0, range_size-1]
int range_size = upper - lower + 1;
rand_int %= range_size;
assert(0 <= rand_int && rand_int < range_size);
// Shift the range to [lower, upper]
rand_int += lower;
assert(lower <= rand_int && rand_int <= upper);
return rand_int;
}
void RealRandomGenerator::seed(unsigned int seed)
{
#ifdef HAVE_RANDOM_R
int error = srandom_r(seed, &state);
assert(error == 0);
#else
memcpy(state, &seed, std::min(sizeof(seed), sizeof(state)));
#endif
}
} // namespace tpcc
| avg_line_length: 24.719298 | max_line_length: 99 | alphanum_fraction: 0.599006 | author_id: mkatsa |

| hexsha: 334b46165bd882a4a1837fc0dc4dcb4bf2cfeae8 | size: 3,782 | ext: hpp | lang: C++ |
| path: src/HF_pure.hpp | repo: ahmadadam96/Hackflight @ a733930e10aeb28085367c63a74d2161935f3caf | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |

/*
Core Hackflight class
Copyright (c) 2021 Simon D. Levy
MIT License
*/
#pragma once
#include "HF_sensor.hpp"
#include "HF_board.hpp"
#include "HF_pidtask.hpp"
#include "HF_pidcontroller.hpp"
#include "HF_receiver.hpp"
#include "HF_mixer.hpp"
#include "HF_state.hpp"
#include "HF_update_scheduler.hpp"
namespace hf {
class HackflightPure {
private:
// Timer task for PID controllers
PidTask _pidTask;
void checkSensors(State * state)
{
// Some sensors may need to know the current time
float time = _board->getTime();
for (uint8_t k=0; k<_sensor_count; ++k) {
if(_sensors[k]->ready(_board)){
unsigned int task_id = k + 3;
_update_scheduler.task_started(task_id);
printTaskTime(task_id, true);
_sensors[k]->modifyState((State *)state, time);
printTaskTime(task_id, false);
}
}
}
protected:
// Essentials
Board * _board = NULL;
Receiver * _receiver = NULL;
Mixer * _mixer = NULL;
State _state = {};
// Sensors
Sensor * _sensors[256] = {};
uint8_t _sensor_count = 0;
// Update scheduler with the sensor count
UpdateScheduler _update_scheduler;
public:
HackflightPure(Board * board, Receiver * receiver, Mixer * mixer)
{
_board = board;
_receiver = receiver;
_mixer = mixer;
_update_scheduler.init(1, 1520, &_pidTask, &_state);
_update_scheduler.set_task_period(_pidTask.task_id, 1000000 / _pidTask.FREQ);
_update_scheduler.set_task_criticality(_pidTask.task_id, _pidTask.criticality);
_update_scheduler.set_task_period(_receiver->task_id, 1000000 / _receiver->FREQ);
_update_scheduler.set_task_criticality(_receiver->task_id, _receiver->criticality);
_receiver->init(board, &_state, mixer);
_sensor_count = 0;
}
void begin(void)
{
_state.armed = true;
} // begin
void update(void)
{
if(_receiver->ready(_board)){
_update_scheduler.task_started(_receiver->task_id);
printTaskTime(_receiver->task_id, true);
if(_receiver->gotNewFrame()) {
_receiver->update();
}
printTaskTime(_receiver->task_id, false);
}
// Update PID controllers task
if (_pidTask.ready(_board)) {
_update_scheduler.task_started(_pidTask.task_id);
_pidTask.update(_board, _receiver, _mixer, &_state);
}
// Check sensors
checkSensors(&_state);
}
void addSensor(Sensor * sensor, unsigned int sensor_frequency)
{
_sensors[_sensor_count++] = sensor;
sensor->change_frequency(sensor_frequency);
_update_scheduler.add_sensor(sensor);
_update_scheduler.set_task_period(2 + _sensor_count, 1000000 / sensor_frequency);
_update_scheduler.set_task_criticality(2 + _sensor_count, 2);
}
void addPidController(PidController * controller)
{
_pidTask.addController(controller);
}
}; // class HackflightPure
} // namespace hf
| avg_line_length: 30.5 | max_line_length: 99 | alphanum_fraction: 0.52459 | author_id: ahmadadam96 |

| hexsha: 334c89eff71929f2cb27bc6978dfc403e26e8198 | size: 4,210 | ext: cpp | lang: C++ |
| path: ZibenEngine/Source/Renderer/VertexArray.cpp | repo: NavkaGleb/Ziben (listed as navkagleb/Ziben in the issues and forks columns) @ 6729a74a67aea8e780e958438642895e27043745 | licenses: ["MIT"] |
| stars: 3 (2021-06-10T05:59:09.000Z to 2021-06-10T18:29:02.000Z) | issues: null | forks: null |

#include "VertexArray.hpp"
namespace Ziben {
Ref<VertexArray> VertexArray::Create() {
return CreateRef<VertexArray>();
}
void VertexArray::Bind(const Ref<VertexArray>& vertexArray) {
ZIBEN_PROFILE_FUNCTION();
glBindVertexArray(vertexArray->m_Handle);
}
void VertexArray::Unbind() {
ZIBEN_PROFILE_FUNCTION();
glBindVertexArray(0);
}
VertexArray::VertexArray()
: m_Handle(0)
, m_VertexBufferIndex(0)
, m_IndexBuffer(nullptr) {
ZIBEN_PROFILE_FUNCTION();
glGenVertexArrays(1, &m_Handle);
}
VertexArray::~VertexArray() {
ZIBEN_PROFILE_FUNCTION();
glDeleteVertexArrays(1, &m_Handle);
}
void VertexArray::PushVertexBuffer(const Ref<VertexBuffer>& vertexBuffer) {
// Bind Current VertexArray
glBindVertexArray(m_Handle);
// Bind VertexBuffer
VertexBuffer::Bind(vertexBuffer);
m_VertexBuffers.push_back(vertexBuffer);
for (const auto& element : vertexBuffer->GetLayout()) {
// switch
// glEnableVertexAttribArray(m_VertexBufferIndex);
// glVertexAttribPointer(
// m_VertexBufferIndex++,
// ShaderData::GetCount(element.Type),
// ShaderData::ToNativeType(element.Type),
// element.IsNormalized,
// static_cast<GLsizei>(vertexBuffer->GetLayout().GetStride()),
// (const void*)element.Offset
// );
switch (element.Type) {
case ShaderData::Type::Float:
case ShaderData::Type::Float2:
case ShaderData::Type::Float3:
case ShaderData::Type::Float4: {
glEnableVertexAttribArray(m_VertexBufferIndex);
glVertexAttribPointer(
m_VertexBufferIndex++,
ShaderData::GetCount(element.Type),
ShaderData::ToNativeType(element.Type),
element.IsNormalized,
static_cast<GLsizei>(vertexBuffer->GetLayout().GetStride()),
reinterpret_cast<const void *>(element.Offset)
);
break;
}
case ShaderData::Type::Int:
case ShaderData::Type::Int2:
case ShaderData::Type::Int3:
case ShaderData::Type::Int4:
case ShaderData::Type::Bool: {
glEnableVertexAttribArray(m_VertexBufferIndex);
glVertexAttribIPointer(
m_VertexBufferIndex++,
ShaderData::GetCount(element.Type),
ShaderData::ToNativeType(element.Type),
static_cast<GLsizei>(vertexBuffer->GetLayout().GetStride()),
reinterpret_cast<const void *>(element.Offset)
);
break;
}
case ShaderData::Type::Mat3:
case ShaderData::Type::Mat4: {
int count = ShaderData::GetCount(element.Type);
for (int i = 0; i < count; ++i) {
glVertexAttribPointer(
m_VertexBufferIndex++,
count,
ShaderData::ToNativeType(element.Type),
element.IsNormalized,
static_cast<GLsizei>(vertexBuffer->GetLayout().GetStride()),
reinterpret_cast<const void *>(element.Offset + sizeof(float) * count * i)
);
glVertexAttribDivisor(m_VertexBufferIndex, 1);
}
break;
}
default: break;
}
}
}
void VertexArray::SetIndexBuffer(const Ref<IndexBuffer>& indexBuffer) {
// Bind Current VertexArray
glBindVertexArray(m_Handle);
// Bind IndexBuffer
IndexBuffer::Bind(indexBuffer);
m_IndexBuffer = indexBuffer;
}
} // namespace Ziben
| avg_line_length: 32.890625 | max_line_length: 102 | alphanum_fraction: 0.517102 | author_id: NavkaGleb |

| hexsha: 334ca67864c0900db9476eea85254248cb258a68 | size: 417 | ext: cpp | lang: C++ |
| path: C++/problem0766.cpp | repo: 1050669722/LeetCode-Answers @ c8f4d1ccaac09cda63b60d75144335347b06dc81 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |

// LeetCode 766: a matrix is Toeplitz if every top-left-to-bottom-right
// diagonal has the same value, i.e. matrix[i][j] == matrix[i - 1][j - 1].
#include <vector>
using std::vector;
class Solution {
public:
bool isToeplitzMatrix(vector<vector<int>>& matrix) {
int m = matrix.size(), n = matrix[0].size();
for (int i = 1; i < m; ++i)
{
for (int j = 1; j < n; ++j)
{
if (matrix[i][j] != matrix[i - 1][j - 1])
{
return false;
}
}
}
return true;
}
};
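// Minimal driver added for illustration; it is not part of the original LeetCode
// submission. Every down-right diagonal of the matrix below holds a single value,
// so the check prints "true".
#include <iostream>

int main() {
    std::vector<std::vector<int>> matrix = {
        {1, 2, 3, 4},
        {5, 1, 2, 3},
        {9, 5, 1, 2}
    };
    Solution s;
    std::cout << std::boolalpha << s.isToeplitzMatrix(matrix) << "\n"; // prints true
    return 0;
}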
| 23.166667 | 57 | 0.357314 | 1050669722 |
334e7d49f8db548a51cb913417dbf677b37471b2 | 2,718 | cc | C++ | src/atlas/grid/detail/pl/classic_gaussian/N400.cc | mlange05/atlas | d8198435a9e39fbf67bfc467fe734203414af608 | ["Apache-2.0"] | null | null | null |
src/atlas/grid/detail/pl/classic_gaussian/N400.cc | mlange05/atlas | d8198435a9e39fbf67bfc467fe734203414af608 | ["Apache-2.0"] | null | null | null |
src/atlas/grid/detail/pl/classic_gaussian/N400.cc | mlange05/atlas | d8198435a9e39fbf67bfc467fe734203414af608 | ["Apache-2.0"] | null | null | null |
// TL799
#include "N.h"
namespace atlas {
namespace grid {
namespace detail {
namespace pl {
namespace classic_gaussian {
DEFINE_POINTS_PER_LATITUDE(
400,
LIST( 18, 25, 32, 40, 45, 50, 60, 60, 72, 72, 75, 81, 90, 96, 100, 108, 120, 120, 125, 128, 144, 144, 150, 160, 160,
180, 180, 192, 192, 200, 200, 216, 216, 225, 240, 240, 240, 250, 250, 256, 270, 288, 288, 288, 300, 300, 320,
320, 320, 324, 360, 360, 360, 360, 360, 360, 375, 375, 384, 400, 400, 400, 405, 432, 432, 432, 432, 450, 450,
450, 480, 480, 480, 480, 480, 486, 500, 500, 512, 512, 540, 540, 540, 540, 540, 576, 576, 576, 576, 576, 576,
600, 600, 600, 600, 640, 640, 640, 640, 640, 640, 640, 648, 675, 675, 675, 675, 675, 720, 720, 720, 720, 720,
720, 720, 729, 729, 750, 750, 750, 750, 768, 768, 768, 800, 800, 800, 800, 800, 800, 810, 864, 864, 864, 864,
864, 864, 864, 864, 864, 864, 900, 900, 900, 900, 900, 900, 900, 960, 960, 960, 960, 960, 960, 960, 960, 960,
960, 960, 960, 972, 972, 1000, 1000, 1000, 1000, 1000, 1000, 1024, 1024, 1024, 1024, 1024, 1080, 1080, 1080,
1080, 1080, 1080, 1080, 1080, 1080, 1080, 1080, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1152,
1152, 1152, 1152, 1152, 1152, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1200, 1215, 1215,
1215, 1215, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280, 1280,
1296, 1296, 1296, 1296, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350, 1350,
1350, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440,
1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1440, 1458, 1458, 1458, 1458, 1458, 1458, 1458,
1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1500, 1536,
1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1600, 1600, 1600,
1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600,
1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600,
1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600,
1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600,
1600, 1600, 1600, 1600, 1600, 1600, 1600, 1600 ) )
} // namespace classic_gaussian
} // namespace pl
} // namespace detail
} // namespace grid
} // namespace atlas
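// Illustration only, not part of atlas: the list above gives the number of
// longitudes on each of the 400 latitude rows of one hemisphere; the classic
// reduced Gaussian grid mirrors the rows across the equator, so the total point
// count is twice the sum of the entries.
#include <numeric>
#include <vector>

inline long long total_points(const std::vector<int>& points_per_latitude) {
    // both hemispheres
    return 2LL * std::accumulate(points_per_latitude.begin(), points_per_latitude.end(), 0LL);
}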
| 67.95 | 120 | 0.598602 | mlange05 |
33506923fe1a77601d513acc3218f566d177d33a | 19,674 | hpp | C++ | nano/node/lmdb.hpp | grendias/NANO-1 | 98d04d2579214ac5215c01488cdba5c2b3f76ea7 | ["BSD-2-Clause"] | null | null | null |
nano/node/lmdb.hpp | grendias/NANO-1 | 98d04d2579214ac5215c01488cdba5c2b3f76ea7 | ["BSD-2-Clause"] | null | null | null |
nano/node/lmdb.hpp | grendias/NANO-1 | 98d04d2579214ac5215c01488cdba5c2b3f76ea7 | ["BSD-2-Clause"] | null | null | null |
#pragma once
#include <boost/filesystem.hpp>
#include <boost/optional.hpp>
#include <lmdb/libraries/liblmdb/lmdb.h>
#include <nano/lib/config.hpp>
#include <nano/lib/numbers.hpp>
#include <nano/node/logging.hpp>
#include <nano/secure/blockstore.hpp>
#include <nano/secure/common.hpp>
#include <thread>
namespace nano
{
class mdb_env;
class account_info_v13;
class mdb_txn : public transaction_impl
{
public:
mdb_txn (nano::mdb_env const &, bool = false);
mdb_txn (nano::mdb_txn const &) = delete;
mdb_txn (nano::mdb_txn &&) = default;
~mdb_txn ();
nano::mdb_txn & operator= (nano::mdb_txn const &) = delete;
nano::mdb_txn & operator= (nano::mdb_txn &&) = default;
operator MDB_txn * () const;
MDB_txn * handle;
};
/**
* RAII wrapper for MDB_env
*/
class mdb_env
{
public:
mdb_env (bool &, boost::filesystem::path const &, int max_dbs = 128, size_t map_size = 128ULL * 1024 * 1024 * 1024);
~mdb_env ();
operator MDB_env * () const;
nano::transaction tx_begin (bool = false) const;
MDB_txn * tx (nano::transaction const &) const;
MDB_env * environment;
};
/**
* Encapsulates MDB_val and provides uint256_union conversion of the data.
*/
class mdb_val
{
public:
mdb_val (nano::epoch = nano::epoch::unspecified);
mdb_val (nano::account_info const &);
mdb_val (nano::account_info_v13 const &);
mdb_val (nano::block_info const &);
mdb_val (MDB_val const &, nano::epoch = nano::epoch::unspecified);
mdb_val (nano::pending_info const &);
mdb_val (nano::pending_key const &);
mdb_val (nano::unchecked_info const &);
mdb_val (size_t, void *);
mdb_val (nano::uint128_union const &);
mdb_val (nano::uint256_union const &);
mdb_val (nano::endpoint_key const &);
mdb_val (std::shared_ptr<nano::block> const &);
mdb_val (std::shared_ptr<nano::vote> const &);
mdb_val (uint64_t);
void * data () const;
size_t size () const;
explicit operator nano::account_info () const;
explicit operator nano::account_info_v13 () const;
explicit operator nano::block_info () const;
explicit operator nano::pending_info () const;
explicit operator nano::pending_key () const;
explicit operator nano::unchecked_info () const;
explicit operator nano::uint128_union () const;
explicit operator nano::uint256_union () const;
explicit operator std::array<char, 64> () const;
explicit operator nano::endpoint_key () const;
explicit operator nano::no_value () const;
explicit operator std::shared_ptr<nano::block> () const;
explicit operator std::shared_ptr<nano::send_block> () const;
explicit operator std::shared_ptr<nano::receive_block> () const;
explicit operator std::shared_ptr<nano::open_block> () const;
explicit operator std::shared_ptr<nano::change_block> () const;
explicit operator std::shared_ptr<nano::state_block> () const;
explicit operator std::shared_ptr<nano::vote> () const;
explicit operator uint64_t () const;
operator MDB_val * () const;
operator MDB_val const & () const;
MDB_val value;
std::shared_ptr<std::vector<uint8_t>> buffer;
nano::epoch epoch{ nano::epoch::unspecified };
};
class block_store;
template <typename T, typename U>
class mdb_iterator : public store_iterator_impl<T, U>
{
public:
mdb_iterator (nano::transaction const & transaction_a, MDB_dbi db_a, nano::epoch = nano::epoch::unspecified);
mdb_iterator (std::nullptr_t, nano::epoch = nano::epoch::unspecified);
mdb_iterator (nano::transaction const & transaction_a, MDB_dbi db_a, MDB_val const & val_a, nano::epoch = nano::epoch::unspecified);
mdb_iterator (nano::mdb_iterator<T, U> && other_a);
mdb_iterator (nano::mdb_iterator<T, U> const &) = delete;
~mdb_iterator ();
nano::store_iterator_impl<T, U> & operator++ () override;
std::pair<nano::mdb_val, nano::mdb_val> * operator-> ();
bool operator== (nano::store_iterator_impl<T, U> const & other_a) const override;
bool is_end_sentinal () const override;
void fill (std::pair<T, U> &) const override;
void clear ();
nano::mdb_iterator<T, U> & operator= (nano::mdb_iterator<T, U> && other_a);
nano::store_iterator_impl<T, U> & operator= (nano::store_iterator_impl<T, U> const &) = delete;
MDB_cursor * cursor;
std::pair<nano::mdb_val, nano::mdb_val> current;
private:
MDB_txn * tx (nano::transaction const &) const;
};
/**
* Iterates the key/value pairs of two stores merged together
*/
template <typename T, typename U>
class mdb_merge_iterator : public store_iterator_impl<T, U>
{
public:
mdb_merge_iterator (nano::transaction const &, MDB_dbi, MDB_dbi);
mdb_merge_iterator (std::nullptr_t);
mdb_merge_iterator (nano::transaction const &, MDB_dbi, MDB_dbi, MDB_val const &);
mdb_merge_iterator (nano::mdb_merge_iterator<T, U> &&);
mdb_merge_iterator (nano::mdb_merge_iterator<T, U> const &) = delete;
~mdb_merge_iterator ();
nano::store_iterator_impl<T, U> & operator++ () override;
std::pair<nano::mdb_val, nano::mdb_val> * operator-> ();
bool operator== (nano::store_iterator_impl<T, U> const &) const override;
bool is_end_sentinal () const override;
void fill (std::pair<T, U> &) const override;
void clear ();
nano::mdb_merge_iterator<T, U> & operator= (nano::mdb_merge_iterator<T, U> &&) = default;
nano::mdb_merge_iterator<T, U> & operator= (nano::mdb_merge_iterator<T, U> const &) = delete;
private:
nano::mdb_iterator<T, U> & least_iterator () const;
std::unique_ptr<nano::mdb_iterator<T, U>> impl1;
std::unique_ptr<nano::mdb_iterator<T, U>> impl2;
};
class logging;
/**
* mdb implementation of the block store
*/
class mdb_store : public block_store
{
friend class nano::block_predecessor_set;
public:
mdb_store (bool &, nano::logging &, boost::filesystem::path const &, int lmdb_max_dbs = 128, bool drop_unchecked = false, size_t batch_size = 512);
nano::transaction tx_begin_write () override;
nano::transaction tx_begin_read () override;
nano::transaction tx_begin (bool write = false) override;
void initialize (nano::transaction const &, nano::genesis const &) override;
void block_put (nano::transaction const &, nano::block_hash const &, nano::block const &, nano::block_sideband const &, nano::epoch version = nano::epoch::epoch_0) override;
nano::block_hash block_successor (nano::transaction const &, nano::block_hash const &) const override;
void block_successor_clear (nano::transaction const &, nano::block_hash const &) override;
std::shared_ptr<nano::block> block_get (nano::transaction const &, nano::block_hash const &, nano::block_sideband * = nullptr) const override;
std::shared_ptr<nano::block> block_random (nano::transaction const &) override;
void block_del (nano::transaction const &, nano::block_hash const &) override;
bool block_exists (nano::transaction const &, nano::block_hash const &) override;
bool block_exists (nano::transaction const &, nano::block_type, nano::block_hash const &) override;
nano::block_counts block_count (nano::transaction const &) override;
bool root_exists (nano::transaction const &, nano::uint256_union const &) override;
bool source_exists (nano::transaction const &, nano::block_hash const &) override;
nano::account block_account (nano::transaction const &, nano::block_hash const &) const override;
void frontier_put (nano::transaction const &, nano::block_hash const &, nano::account const &) override;
nano::account frontier_get (nano::transaction const &, nano::block_hash const &) const override;
void frontier_del (nano::transaction const &, nano::block_hash const &) override;
void account_put (nano::transaction const &, nano::account const &, nano::account_info const &) override;
bool account_get (nano::transaction const &, nano::account const &, nano::account_info &) override;
void account_del (nano::transaction const &, nano::account const &) override;
bool account_exists (nano::transaction const &, nano::account const &) override;
size_t account_count (nano::transaction const &) override;
void confirmation_height_clear (nano::transaction const &, nano::account const & account, nano::account_info const & account_info) override;
void confirmation_height_clear (nano::transaction const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_v0_begin (nano::transaction const &, nano::account const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_v0_begin (nano::transaction const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_v0_end () override;
nano::store_iterator<nano::account, nano::account_info> latest_v1_begin (nano::transaction const &, nano::account const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_v1_begin (nano::transaction const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_v1_end () override;
nano::store_iterator<nano::account, nano::account_info> latest_begin (nano::transaction const &, nano::account const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_begin (nano::transaction const &) override;
nano::store_iterator<nano::account, nano::account_info> latest_end () override;
void pending_put (nano::transaction const &, nano::pending_key const &, nano::pending_info const &) override;
void pending_del (nano::transaction const &, nano::pending_key const &) override;
bool pending_get (nano::transaction const &, nano::pending_key const &, nano::pending_info &) override;
bool pending_exists (nano::transaction const &, nano::pending_key const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v0_begin (nano::transaction const &, nano::pending_key const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v0_begin (nano::transaction const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v0_end () override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v1_begin (nano::transaction const &, nano::pending_key const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v1_begin (nano::transaction const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_v1_end () override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_begin (nano::transaction const &, nano::pending_key const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_begin (nano::transaction const &) override;
nano::store_iterator<nano::pending_key, nano::pending_info> pending_end () override;
bool block_info_get (nano::transaction const &, nano::block_hash const &, nano::block_info &) const override;
nano::uint128_t block_balance (nano::transaction const &, nano::block_hash const &) override;
nano::epoch block_version (nano::transaction const &, nano::block_hash const &) override;
nano::uint128_t representation_get (nano::transaction const &, nano::account const &) override;
void representation_put (nano::transaction const &, nano::account const &, nano::uint128_t const &) override;
void representation_add (nano::transaction const &, nano::account const &, nano::uint128_t const &) override;
nano::store_iterator<nano::account, nano::uint128_union> representation_begin (nano::transaction const &) override;
nano::store_iterator<nano::account, nano::uint128_union> representation_end () override;
void unchecked_clear (nano::transaction const &) override;
void unchecked_put (nano::transaction const &, nano::unchecked_key const &, nano::unchecked_info const &) override;
void unchecked_put (nano::transaction const &, nano::block_hash const &, std::shared_ptr<nano::block> const &) override;
std::vector<nano::unchecked_info> unchecked_get (nano::transaction const &, nano::block_hash const &) override;
void unchecked_del (nano::transaction const &, nano::unchecked_key const &) override;
nano::store_iterator<nano::unchecked_key, nano::unchecked_info> unchecked_begin (nano::transaction const &) override;
nano::store_iterator<nano::unchecked_key, nano::unchecked_info> unchecked_begin (nano::transaction const &, nano::unchecked_key const &) override;
nano::store_iterator<nano::unchecked_key, nano::unchecked_info> unchecked_end () override;
size_t unchecked_count (nano::transaction const &) override;
// Return latest vote for an account from store
std::shared_ptr<nano::vote> vote_get (nano::transaction const &, nano::account const &) override;
// Populate vote with the next sequence number
std::shared_ptr<nano::vote> vote_generate (nano::transaction const &, nano::account const &, nano::raw_key const &, std::shared_ptr<nano::block>) override;
std::shared_ptr<nano::vote> vote_generate (nano::transaction const &, nano::account const &, nano::raw_key const &, std::vector<nano::block_hash>) override;
// Return either vote or the stored vote with a higher sequence number
std::shared_ptr<nano::vote> vote_max (nano::transaction const &, std::shared_ptr<nano::vote>) override;
// Return latest vote for an account considering the vote cache
std::shared_ptr<nano::vote> vote_current (nano::transaction const &, nano::account const &) override;
void flush (nano::transaction const &) override;
nano::store_iterator<nano::account, std::shared_ptr<nano::vote>> vote_begin (nano::transaction const &) override;
nano::store_iterator<nano::account, std::shared_ptr<nano::vote>> vote_end () override;
void online_weight_put (nano::transaction const &, uint64_t, nano::amount const &) override;
void online_weight_del (nano::transaction const &, uint64_t) override;
nano::store_iterator<uint64_t, nano::amount> online_weight_begin (nano::transaction const &) override;
nano::store_iterator<uint64_t, nano::amount> online_weight_end () override;
size_t online_weight_count (nano::transaction const &) const override;
void online_weight_clear (nano::transaction const &) override;
std::mutex cache_mutex;
std::unordered_map<nano::account, std::shared_ptr<nano::vote>> vote_cache_l1;
std::unordered_map<nano::account, std::shared_ptr<nano::vote>> vote_cache_l2;
void version_put (nano::transaction const &, int) override;
int version_get (nano::transaction const &) const override;
// Requires a write transaction
nano::raw_key get_node_id (nano::transaction const &) const override;
/** Deletes the node ID from the store */
void delete_node_id (nano::transaction const &) override;
void peer_put (nano::transaction const & transaction_a, nano::endpoint_key const & endpoint_a) override;
bool peer_exists (nano::transaction const & transaction_a, nano::endpoint_key const & endpoint_a) const override;
void peer_del (nano::transaction const & transaction_a, nano::endpoint_key const & endpoint_a) override;
size_t peer_count (nano::transaction const & transaction_a) const override;
void peer_clear (nano::transaction const & transaction_a) override;
nano::store_iterator<nano::endpoint_key, nano::no_value> peers_begin (nano::transaction const & transaction_a) override;
nano::store_iterator<nano::endpoint_key, nano::no_value> peers_end () override;
uint64_t block_account_height (nano::transaction const & transaction_a, nano::block_hash const & hash_a) const override;
bool full_sideband (nano::transaction const &) const;
MDB_dbi get_account_db (nano::epoch epoch_a) const;
size_t block_successor_offset (nano::transaction const &, MDB_val, nano::block_type) const;
nano::logging & logging;
nano::mdb_env env;
/**
* Maps head block to owning account
* nano::block_hash -> nano::account
*/
MDB_dbi frontiers{ 0 };
/**
 * Maps account v0 to account information, head, rep, open, balance, timestamp and block count.
* nano::account -> nano::block_hash, nano::block_hash, nano::block_hash, nano::amount, uint64_t, uint64_t
*/
MDB_dbi accounts_v0{ 0 };
/**
 * Maps account v1 to account information, head, rep, open, balance, timestamp and block count.
* nano::account -> nano::block_hash, nano::block_hash, nano::block_hash, nano::amount, uint64_t, uint64_t
*/
MDB_dbi accounts_v1{ 0 };
/**
* Maps block hash to send block.
* nano::block_hash -> nano::send_block
*/
MDB_dbi send_blocks{ 0 };
/**
* Maps block hash to receive block.
* nano::block_hash -> nano::receive_block
*/
MDB_dbi receive_blocks{ 0 };
/**
* Maps block hash to open block.
* nano::block_hash -> nano::open_block
*/
MDB_dbi open_blocks{ 0 };
/**
* Maps block hash to change block.
* nano::block_hash -> nano::change_block
*/
MDB_dbi change_blocks{ 0 };
/**
* Maps block hash to v0 state block.
* nano::block_hash -> nano::state_block
*/
MDB_dbi state_blocks_v0{ 0 };
/**
* Maps block hash to v1 state block.
* nano::block_hash -> nano::state_block
*/
MDB_dbi state_blocks_v1{ 0 };
/**
* Maps min_version 0 (destination account, pending block) to (source account, amount).
* nano::account, nano::block_hash -> nano::account, nano::amount
*/
MDB_dbi pending_v0{ 0 };
/**
* Maps min_version 1 (destination account, pending block) to (source account, amount).
* nano::account, nano::block_hash -> nano::account, nano::amount
*/
MDB_dbi pending_v1{ 0 };
/**
* Maps block hash to account and balance.
* block_hash -> nano::account, nano::amount
*/
MDB_dbi blocks_info{ 0 };
/**
* Representative weights.
* nano::account -> nano::uint128_t
*/
MDB_dbi representation{ 0 };
/**
* Unchecked bootstrap blocks info.
* nano::block_hash -> nano::unchecked_info
*/
MDB_dbi unchecked{ 0 };
/**
* Highest vote observed for account.
* nano::account -> uint64_t
*/
MDB_dbi vote{ 0 };
/**
* Samples of online vote weight
* uint64_t -> nano::amount
*/
MDB_dbi online_weight{ 0 };
/**
* Meta information about block store, such as versions.
* nano::uint256_union (arbitrary key) -> blob
*/
MDB_dbi meta{ 0 };
/*
* Endpoints for peers
* nano::endpoint_key -> no_value
*/
MDB_dbi peers{ 0 };
private:
nano::network_params network_params;
bool entry_has_sideband (MDB_val, nano::block_type) const;
nano::account block_account_computed (nano::transaction const &, nano::block_hash const &) const;
nano::uint128_t block_balance_computed (nano::transaction const &, nano::block_hash const &) const;
MDB_dbi block_database (nano::block_type, nano::epoch);
template <typename T>
std::shared_ptr<nano::block> block_random (nano::transaction const &, MDB_dbi);
MDB_val block_raw_get (nano::transaction const &, nano::block_hash const &, nano::block_type &) const;
boost::optional<MDB_val> block_raw_get_by_type (nano::transaction const &, nano::block_hash const &, nano::block_type &) const;
void block_raw_put (nano::transaction const &, MDB_dbi, nano::block_hash const &, MDB_val);
void clear (MDB_dbi);
void do_upgrades (nano::transaction const &, size_t);
void upgrade_v1_to_v2 (nano::transaction const &);
void upgrade_v2_to_v3 (nano::transaction const &);
void upgrade_v3_to_v4 (nano::transaction const &);
void upgrade_v4_to_v5 (nano::transaction const &);
void upgrade_v5_to_v6 (nano::transaction const &);
void upgrade_v6_to_v7 (nano::transaction const &);
void upgrade_v7_to_v8 (nano::transaction const &);
void upgrade_v8_to_v9 (nano::transaction const &);
void upgrade_v9_to_v10 (nano::transaction const &);
void upgrade_v10_to_v11 (nano::transaction const &);
void upgrade_v11_to_v12 (nano::transaction const &);
void upgrade_v12_to_v13 (nano::transaction const &, size_t);
void upgrade_v13_to_v14 (nano::transaction const &);
MDB_dbi get_pending_db (nano::epoch epoch_a) const;
};
class wallet_value
{
public:
wallet_value () = default;
wallet_value (nano::mdb_val const &);
wallet_value (nano::uint256_union const &, uint64_t);
nano::mdb_val val () const;
nano::private_key key;
uint64_t work;
};
}
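// Usage sketch added for illustration; only the declarations above come from the
// header, the database file name is an assumption. Both mdb_env and the
// transaction wrapper are RAII types, so the environment and the transaction are
// released by their destructors when they leave scope.
inline void mdb_usage_sketch ()
{
	bool error (false);
	nano::mdb_env env (error, boost::filesystem::path ("data.ldb"));
	if (!error)
	{
		auto transaction (env.tx_begin (true)); // true -> write transaction
		MDB_txn * raw (env.tx (transaction));   // raw handle for direct lmdb calls
		(void)raw;
	} // the transaction is finished by its wrapper's destructor here
}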
| 45.753488 | 174 | 0.742045 | grendias |
3353afd776b2d103053be541684e276474234714 | 1,481 | hpp | C++ | interface/libraymarching_interfaceexception.hpp | dabastynator/LibRayMarching | 190c3f0f649896c04ba6ea00ebf13edbd4754c84 | ["MIT"] | null | null | null |
interface/libraymarching_interfaceexception.hpp | dabastynator/LibRayMarching | 190c3f0f649896c04ba6ea00ebf13edbd4754c84 | ["MIT"] | null | null | null |
interface/libraymarching_interfaceexception.hpp | dabastynator/LibRayMarching | 190c3f0f649896c04ba6ea00ebf13edbd4754c84 | ["MIT"] | null | null | null |
/*++
Copyright (C) 2019 PrimeDevelopers
All rights reserved.
This file has been generated by the Automatic Component Toolkit (ACT) version 1.6.0.
Abstract: This is an autogenerated C++ Header file with the basic internal
exception type in order to allow an easy use of Ray Marching Library
Interface version: 2.0.0
*/
#ifndef __LIBRAYMARCHING_INTERFACEEXCEPTION_HEADER
#define __LIBRAYMARCHING_INTERFACEEXCEPTION_HEADER
#include <exception>
#include <stdexcept>
#include "libraymarching_types.hpp"
/*************************************************************************************************************************
Class ELibRayMarchingInterfaceException
**************************************************************************************************************************/
class ELibRayMarchingInterfaceException : public std::exception {
protected:
/**
* Error code for the Exception.
*/
LibRayMarchingResult m_errorCode;
/**
* Error message for the Exception.
*/
std::string m_errorMessage;
public:
/**
* Exception Constructor.
*/
ELibRayMarchingInterfaceException(LibRayMarchingResult errorCode);
/**
* Custom Exception Constructor.
*/
ELibRayMarchingInterfaceException(LibRayMarchingResult errorCode, std::string errorMessage);
/**
* Returns error code
*/
LibRayMarchingResult getErrorCode();
/**
* Returns error message
*/
const char* what() const noexcept override;
};
#endif // __LIBRAYMARCHING_INTERFACEEXCEPTION_HEADER
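// Usage sketch added for illustration; it is not part of the generated header.
// The concrete LibRayMarchingResult codes live in libraymarching_types.hpp, so
// the error code is taken as a parameter here and the message is a made-up example.
#include <iostream>

inline void reportFailure(LibRayMarchingResult errorCode)
{
	try {
		throw ELibRayMarchingInterfaceException(errorCode, "radius must be positive");
	} catch (const ELibRayMarchingInterfaceException& e) {
		std::cerr << "libraymarching error: " << e.what() << std::endl;
	}
}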
| 24.278689 | 123 | 0.647535 | dabastynator |
3355cea5478a9e036a3b11c83108daf1aec61d36 | 16,910 | cpp | C++ | openstudiocore/src/resultsviewer/TableView.cpp | OpenStudioThailand/OpenStudio | 4e2173955e687ef1b934904acc10939ac0bed52f | ["MIT"] | 1 | 2017-10-13T09:23:04.000Z | 2017-10-13T09:23:04.000Z |
openstudiocore/src/resultsviewer/TableView.cpp | OpenStudioThailand/OpenStudio | 4e2173955e687ef1b934904acc10939ac0bed52f | ["MIT"] | null | null | null |
openstudiocore/src/resultsviewer/TableView.cpp | OpenStudioThailand/OpenStudio | 4e2173955e687ef1b934904acc10939ac0bed52f | ["MIT"] | 1 | 2022-03-20T13:19:42.000Z | 2022-03-20T13:19:42.000Z |
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2017, Alliance for Sustainable Energy, LLC. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
* following disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote
* products derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative
* works may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without
* specific prior written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER, THE UNITED STATES GOVERNMENT, OR ANY CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************************************************************/
#include "TableView.hpp"
#include "../utilities/sql/SqlFileEnums.hpp"
#include <QHeaderView>
#include <QMouseEvent>
using openstudio::toString;
using openstudio::toQString;
using openstudio::ReportingFrequency;
using namespace openstudio;
namespace resultsviewer{
TableView::TableView( QWidget* parent):QTableWidget(0,6,parent)
{
setSortingEnabled(true);
setContextMenuPolicy(Qt::CustomContextMenu);
horizontalHeader()->setSortIndicator(0,Qt::AscendingOrder);
horizontalHeader()->setSortIndicatorShown(true);
horizontalHeader()->setSectionsClickable(true);
horizontalHeader()->setSectionsMovable(true);
setSelectionBehavior(QAbstractItemView::SelectRows);
setHorizontalScrollBarPolicy(Qt::ScrollBarAsNeeded);
m_slHeaders << tr("Variable Name") << tr("Key Value") << tr("Reporting Frequency") << tr("Alias") << tr("Environment Period") << tr("File");
setHorizontalHeaderLabels( m_slHeaders );
horizontalHeader()->setStretchLastSection(true);
verticalHeader()->hide();
setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Expanding);
setEditTriggers(QAbstractItemView::NoEditTriggers); // no editing
setDragEnabled(true);
}
void TableView::mouseMoveEvent(QMouseEvent *e)
{
if (e->buttons() & Qt::LeftButton) {
int distance = (e->pos()-m_startPos).manhattanLength();
if (distance >= QApplication::startDragDistance()) {
performResultsViewerPlotDataDrag();
}
}
QTableWidget::mouseMoveEvent(e);
}
void TableView::mousePressEvent(QMouseEvent *e)
{
if (e->button() == Qt::LeftButton) {
m_startPos = e->pos();
}
QTableWidget::mousePressEvent(e);
}
void TableView::performResultsViewerPlotDataDrag()
{
std::vector<resultsviewer::ResultsViewerPlotData> rvPlotDataVec;
std::vector<int> selectedRows;
if (selectedItems().count() > 0)
{
for (QTableWidgetItem *item : selectedItems())
{
int row = item->row();
if ( std::find(selectedRows.begin(), selectedRows.end(), row) == selectedRows.end() )
{
selectedRows.push_back(row);
rvPlotDataVec.push_back(resultsViewerPlotDataFromTableItem(item));
}
}
}
emit(signalDragResultsViewerPlotData(rvPlotDataVec));
}
void TableView::removeFile(const QString& filename)
{
for (int row=rowCount()-1; row>-1; row--)
{
if ( item(row, m_slHeaders.indexOf(tr("File")))->text().toUpper() == filename.toUpper() ) removeRow(row);
}
}
bool TableView::addFile(const QString& alias, openstudio::SqlFile sqlFile)
{
if (alias.isEmpty() || !sqlFile.connectionOpen()) return false;
setSortingEnabled(false);
detail::DataDictionaryTable ddTable = sqlFile.dataDictionary();
detail::DataDictionaryTable::iterator iter;
for (iter=ddTable.begin();iter!=ddTable.end();++iter)
{
// skip runPeriod
if (sqlFile.reportingFrequencyFromDB((*iter).reportingFrequency)
&& *(sqlFile.reportingFrequencyFromDB((*iter).reportingFrequency)) != ReportingFrequency::RunPeriod)
{
int row = addRow();
item(row, m_slHeaders.indexOf("Alias"))->setText(alias);
item(row, m_slHeaders.indexOf("File"))->setText(openstudio::toQString(sqlFile.energyPlusSqliteFile()));
item(row, m_slHeaders.indexOf("Environment Period"))->setText(openstudio::toQString((*iter).envPeriod));
item(row, m_slHeaders.indexOf("Reporting Frequency"))->setText(openstudio::toQString((*iter).reportingFrequency));
item(row, m_slHeaders.indexOf("Key Value"))->setText(openstudio::toQString((*iter).keyValue));
item(row, m_slHeaders.indexOf("Variable Name"))->setText(openstudio::toQString((*iter).name));
item(row, m_slHeaders.indexOf("File"))->setData(Qt::UserRole, RVD_TIMESERIES);
} // end skip runPeriod
}
/* illuminance maps */
std::vector<std::string> mapNames(sqlFile.illuminanceMapNames());
std::vector<std::string>::iterator nameIter;
for (nameIter=mapNames.begin(); nameIter!=mapNames.end(); ++nameIter)
{
// retrieve mapIndex for map name to retrieve environment period and zone name
QString envPeriod = "";
QString keyValue = "";
boost::optional<int> mapIndex = sqlFile.illuminanceMapIndex(*nameIter);
if (mapIndex)
{
boost::optional<std::string> strValue;
boost::optional<int> intValue;
std::stringstream s;
s << "select Environment from daylightmaps where MapNumber=" << *mapIndex;
strValue = sqlFile.execAndReturnFirstString(s.str());
if (strValue) envPeriod = openstudio::toQString(*strValue);
s.str("");
s << "select Zone from daylightmaps where MapNumber=" << *mapIndex;
intValue = sqlFile.execAndReturnFirstInt(s.str());
if (intValue)
{
s.str("");
s << "select ZoneName from zones where ZoneIndex=" << *intValue;
strValue = sqlFile.execAndReturnFirstString(s.str());
if (strValue) keyValue = openstudio::toQString(*strValue);
}
}
int row = addRow();
item(row, m_slHeaders.indexOf("Alias"))->setText(alias);
item(row, m_slHeaders.indexOf("File"))->setText(openstudio::toQString(sqlFile.energyPlusSqliteFile()));
item(row, m_slHeaders.indexOf("Environment Period"))->setText(envPeriod); // environment period
/* update based on email from Dan 8/10/10
item(row, 3)->setText("Illuminance"); // reporting frequency
item(row, 4)->setText(keyValue); // illuminance zone?
item(row, 5)->setText(openstudio::toQString(*nameIter));
*/
item(row, m_slHeaders.indexOf("Reporting Frequency"))->setText("Hourly"); // reporting frequency
item(row, m_slHeaders.indexOf("Key Value"))->setText(keyValue); // illuminance zone
item(row, m_slHeaders.indexOf("Variable Name"))->setText("Illuminance Map"); // Variable Name
item(row, m_slHeaders.indexOf("File"))->setData(Qt::UserRole, RVD_ILLUMINANCEMAP);
item(row, m_slHeaders.indexOf("Alias"))->setData(Qt::UserRole, openstudio::toQString(*nameIter)); // map name for retrieving from database
}
resizeColumnToContents(m_slHeaders.indexOf("Alias"));
hideColumn(m_slHeaders.indexOf("File"));
resizeColumnToContents(m_slHeaders.indexOf("Variable Name"));
resizeColumnToContents(m_slHeaders.indexOf("Key Value"));
resizeColumnToContents(m_slHeaders.indexOf("Reporting Frequency"));
// resizeColumnToContents(5);
setSortingEnabled(true);
emit( fileAdded() );
return true;
}
int TableView::addRow()
{
int row = rowCount();
insertRow(row);
auto aliasCol = new QTableWidgetItem;
aliasCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("Alias"),aliasCol);
auto fileCol = new QTableWidgetItem;
fileCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("File"),fileCol);
auto envPerCol = new QTableWidgetItem;
envPerCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("Environment Period"),envPerCol);
auto timestepCol = new QTableWidgetItem;
timestepCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("Reporting Frequency"),timestepCol);
auto zoneNameCol = new QTableWidgetItem;
zoneNameCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("Key Value"),zoneNameCol);
auto varNameCol = new QTableWidgetItem;
varNameCol->setTextAlignment(Qt::AlignLeft | Qt::AlignVCenter);
setItem(row,m_slHeaders.indexOf("Variable Name"),varNameCol);
return row;
}
int TableView::selectedRowCount()
{
std::vector<int> selectedRows;
if (selectedItems().count() > 0)
{
for (QTableWidgetItem *item : selectedItems())
{
int row = item->row();
if ( std::find(selectedRows.begin(), selectedRows.end(), row) == selectedRows.end() )
{
selectedRows.push_back(row);
}
} // foreach
}
return selectedRows.size();
}
std::vector<int> TableView::selectedRows()
{
std::vector<int> selectedRows;
if (selectedItems().count() > 0)
{
for (QTableWidgetItem *item : selectedItems())
{
int row = item->row();
if ( std::find(selectedRows.begin(), selectedRows.end(), row) == selectedRows.end() )
{
selectedRows.push_back(row);
}
} // foreach
}
return selectedRows;
}
std::vector<resultsviewer::ResultsViewerPlotData> TableView::generateResultsViewerPlotData()
{
std::vector<resultsviewer::ResultsViewerPlotData> resultsViewerPlotDataVec;
std::vector<int> selectedRows;
for (QTableWidgetItem *item : selectedItems())
{
int row = item->row();
if ( std::find(selectedRows.begin(), selectedRows.end(), row) == selectedRows.end() )
{
selectedRows.push_back(row);
resultsviewer::ResultsViewerPlotData resultsViewerPlotData = resultsViewerPlotDataFromTableItem(item);
resultsViewerPlotDataVec.push_back(resultsViewerPlotData);
}
} // foreach
return resultsViewerPlotDataVec;
}
void TableView::generateLinePlotData()
{
std::vector<resultsviewer::ResultsViewerPlotData> lpVec = generateResultsViewerPlotData();
signalAddLinePlot(lpVec);
}
void TableView::generateFloodPlotData()
{
std::vector<resultsviewer::ResultsViewerPlotData> fpVec = generateResultsViewerPlotData();
signalAddFloodPlot(fpVec);
}
void TableView::generateIlluminancePlotData()
{
std::vector<resultsviewer::ResultsViewerPlotData> fpVec = generateResultsViewerPlotData();
signalAddIlluminancePlot(fpVec);
}
void TableView::generateIlluminancePlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> ipVec = generateResultsViewerPlotData();
signalAddIlluminancePlotComparison(ipVec);
}
void TableView::generateReverseIlluminancePlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> ipVec = generateResultsViewerPlotData();
std::vector<resultsviewer::ResultsViewerPlotData> ipVecRev(ipVec.rbegin(),ipVec.rend());
signalAddIlluminancePlotComparison(ipVecRev);
}
void TableView::generateLinePlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> lpVec = generateResultsViewerPlotData();
signalAddLinePlotComparison(lpVec);
}
void TableView::generateReverseLinePlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> lpVec = generateResultsViewerPlotData();
std::vector<resultsviewer::ResultsViewerPlotData> lpVecRev(lpVec.rbegin(),lpVec.rend());
signalAddLinePlotComparison(lpVecRev);
}
void TableView::generateFloodPlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> fpVec = generateResultsViewerPlotData();
signalAddFloodPlotComparison(fpVec);
}
void TableView::generateReverseFloodPlotComparisonData()
{
std::vector<resultsviewer::ResultsViewerPlotData> fpVec = generateResultsViewerPlotData();
std::vector<resultsviewer::ResultsViewerPlotData> fpVecRev(fpVec.rbegin(),fpVec.rend());
signalAddFloodPlotComparison(fpVecRev);
}
resultsviewer::ResultsViewerPlotData TableView::resultsViewerPlotDataFromTableRow(int row)
{
resultsviewer::ResultsViewerPlotData rvPlotData;
if ((row > -1) && (row < rowCount()))
{
rvPlotData = resultsViewerPlotDataFromTableItem(item(row,0));
}
return rvPlotData;
}
resultsviewer::ResultsViewerPlotData TableView::resultsViewerPlotDataFromTableItem(QTableWidgetItem* tableItem)
{
// m_slHeaders << tr("Variable Name") << tr("Key Value") << tr("Reporting Frequency") << tr("Alias") << tr("Environment Period") << tr("File");
QString keyValue = "", variableName = "", envPeriod = "", filename = "", reportFreq = "", s = "";
resultsviewer::ResultsViewerPlotData rvPlotData;
if (tableItem)
{
int row = tableItem->row();
rvPlotData.keyName = item(row, m_slHeaders.indexOf("Key Value") )->text();
rvPlotData.variableName = item(row, m_slHeaders.indexOf("Variable Name") )->text();
rvPlotData.reportFreq = item(row, m_slHeaders.indexOf("Reporting Frequency") )->text();
rvPlotData.envPeriod = item(row, m_slHeaders.indexOf("Environment Period") )->text();
rvPlotData.filename = item(row, m_slHeaders.indexOf("File") )->text();
rvPlotData.alias = item(row, m_slHeaders.indexOf("Alias") )->text();
rvPlotData.dataType = item(row, m_slHeaders.indexOf("File") )->data(Qt::UserRole).toInt();
rvPlotData.dbIdentifier = item(row, m_slHeaders.indexOf("Alias") )->data(Qt::UserRole).toString();
}
return rvPlotData;
}
bool TableView::updateFileAlias(const QString& alias, const QString& filename)
{
setSortingEnabled(false);
for (int i=0; i<rowCount();i++)
{
if (item(i,m_slHeaders.indexOf("File"))->text().toUpper() == filename.toUpper()) item(i,m_slHeaders.indexOf("Alias"))->setText(alias);
}
setSortingEnabled(true);
return true;
}
void TableView::applyFilter(QString& filterText)
{
// QRegExp regExp(filterText); //strict regular expression matching
QRegExp regExp(filterText, Qt::CaseInsensitive, QRegExp::Wildcard); // text wild card *, ? matching
std::vector<int> rowsToShow;
for (int row=0;row<rowCount();row++)
{
if ( (regExp.exactMatch(item(row,m_slHeaders.indexOf("Variable Name"))->text()))
|| (regExp.exactMatch(item(row,m_slHeaders.indexOf("Key Value"))->text()))
|| (regExp.exactMatch(item(row,m_slHeaders.indexOf("Reporting Frequency"))->text()))
|| (regExp.exactMatch(item(row,m_slHeaders.indexOf("Environment Period"))->text()))
// || (regExp.exactMatch(item(row,m_slHeaders.indexOf("File"))->text()))
|| (regExp.exactMatch(item(row,m_slHeaders.indexOf("Alias"))->text())) ) rowsToShow.push_back(row);
}
for (int i=0; i<rowCount();i++)
{
if ( std::find(rowsToShow.begin(), rowsToShow.end(), i) == rowsToShow.end() )
hideRow(i);
else
showRow(i);
}
}
void TableView::clearFilter()
{
for (int i=0; i<rowCount();i++) showRow(i);
}
void TableView::goToFile(const QString& filename)
{
for (int i=0;i<rowCount();i++)
{
if ( (!isRowHidden(i)) && (item(i,m_slHeaders.indexOf("File"))->text().toUpper() == filename.toUpper()) )
{
scrollToItem(item(i,0), QAbstractItemView::PositionAtTop);
break;
}
}
}
};
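// Illustration only, not part of TableView.cpp: applyFilter uses QRegExp in
// wildcard mode, so a filter string such as "*temperature*" matches any row whose
// variable name, key value, reporting frequency, environment period or alias
// contains "temperature" (case-insensitively). The filter string below is an
// arbitrary example.
#include <QRegExp>

inline bool exampleFilterMatch()
{
  QRegExp regExp("*temperature*", Qt::CaseInsensitive, QRegExp::Wildcard);
  return regExp.exactMatch("Zone Mean Air Temperature"); // true
}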
| 39.053118 | 149 | 0.680662 | OpenStudioThailand |
33580d980d54fcab81497d57da9d64051c6a83fb | 6,813 | cpp | C++ | boxModel.cpp | subhajit1412/cs675-iitb | 91bd8d9338005117ce8804a32d413ea2342ad48c | ["MIT"] | 1 | 2017-09-29T18:55:18.000Z | 2017-09-29T18:55:18.000Z |
boxModel.cpp | subhajit1412/cs675-iitb | 91bd8d9338005117ce8804a32d413ea2342ad48c | ["MIT"] | null | null | null |
boxModel.cpp | subhajit1412/cs675-iitb | 91bd8d9338005117ce8804a32d413ea2342ad48c | ["MIT"] | null | null | null |
#include <iostream>
#include <cstdio>   // FILE, fopen, fread, fclose used by the BMP loader below
#include <GL/gl.h>  // GLfloat, GLuint and the fixed-function GL calls used below
class box_t
{
public:
GLfloat x;
GLfloat y;
GLfloat z;
GLfloat tx;
GLfloat ty;
GLfloat tz;
GLfloat yl;
int theta;
GLuint tex12;
box_t(GLfloat width, GLfloat height, GLfloat depth, GLfloat thickness)
{
x=width;
y=height;
z=-depth;
tx=thickness;
ty=thickness;
tz=-thickness;
yl=height/3;
theta=0;  // lid starts closed
tex12=0;  // texture object is created later in initTex()
};
// Courtesy http://www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/#Loading__BMP_images_yourself (on 8th Oct 2013)
void loadBMP_custom(const char * imagepath)
{
// Data read from the header of the BMP file
unsigned char header[54],tmp; // Each BMP file begins by a 54-bytes header
unsigned int dataPos; // Position in the file where the actual data begins
unsigned int width, height;
unsigned int imageSize; // = width*height*3
// Actual RGB data
unsigned char * data;
// Open the file
FILE * file = fopen(imagepath,"rb");
if (!file)
{
std::cout<<"Image could not be opened\n";
return;
}
if ( fread(header, 1, 54, file)!=54 ){ // If not 54 bytes read : problem
std::cout<<"Not a correct BMP file\n";
fclose(file);
return;
}
if ( header[0]!='B' || header[1]!='M' ){
std::cout<<"Not a correct BMP file\n";
fclose(file);
return;
}
// Read ints from the byte array
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize==0) imageSize=width*height*3; // 3 : one byte for each Red, Green and Blue component
if (dataPos==0) dataPos=54; // The BMP header is done that way
// Create a buffer
data = new unsigned char [imageSize];
// Read the actual data from the file into the buffer
fread(data,1,imageSize,file);
//Everything is in memory now, the file can be closed
fclose(file);
// BMP stores pixels as BGR; swap each pixel's first and third byte to get RGB
for(unsigned int i=0;i<imageSize;i=i+3)
{
tmp=data[i];
data[i]=data[i+2];
data[i+2]=tmp;
}
// Create one OpenGL texture
//GLuint textureID;
glGenTextures(1, &tex12);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, tex12);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// glTexImage2D has copied the pixels into the texture; free the CPU-side buffer
delete [] data;
}
void initTex()
{
loadBMP_custom("wood2.bmp");
}
void drawHalfBox(GLfloat x, GLfloat y, GLfloat z, GLfloat tx, GLfloat ty, GLfloat tz)
{
glBegin(GL_QUADS);
//front face
glNormal3f(0,-0.1,1);
glTexCoord2f(0,0); glVertex3f(0,0,0);
glTexCoord2f(0,1); glVertex3f(x,0,0);
glTexCoord2f(1,1); glVertex3f(x,y,0);
glTexCoord2f(1,0); glVertex3f(0,y,0);
//right side face
glNormal3f(1,0,0);
glTexCoord2f(0,0); glVertex3f(x,0,0);
glTexCoord2f(0,1); glVertex3f(x,0,z);
glTexCoord2f(1,1); glVertex3f(x,y,z);
glTexCoord2f(1,0); glVertex3f(x,y,0);
//back face
glNormal3f(0,0,-1);
glTexCoord2f(0,0); glVertex3f(0,0,z);
glTexCoord2f(0,1); glVertex3f(x,0,z);
glTexCoord2f(1,1); glVertex3f(x,y,z);
glTexCoord2f(1,0); glVertex3f(0,y,z);
//left side face
glNormal3f(-1,0,0);
glTexCoord2f(0,0); glVertex3f(0,0,z);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(0,y,0);
glTexCoord2f(1,0); glVertex3f(0,y,z);
//bottom face
glNormal3f(0,-1,0);
glTexCoord2f(0,0); glVertex3f(0,0,0);
glTexCoord2f(0,1); glVertex3f(x,0,0);
glTexCoord2f(1,1); glVertex3f(x,0,z);
glTexCoord2f(1,0); glVertex3f(0,0,z);
//inner bottom face
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(0+tx,0+ty,0+tz);
glTexCoord2f(0,1); glVertex3f(x-tx,0+ty,0+tz);
glTexCoord2f(1,1); glVertex3f(x-tx,0+ty,z-tz);
glTexCoord2f(1,0); glVertex3f(0+tx,0+ty,z-tz);
//inner left side face
glNormal3f(1,0,0);
glTexCoord2f(0,0); glVertex3f(0+tx,0+ty,z-tz);
glTexCoord2f(0,1); glVertex3f(0+tx,0+ty,0+tz);
glTexCoord2f(1,1); glVertex3f(0+tx,y,0+tz);
glTexCoord2f(1,0); glVertex3f(0+tx,y,z-tz);
//inner back face
glNormal3f(0,0,1);
glTexCoord2f(0,0); glVertex3f(0+tx,0+ty,z-tz);
glTexCoord2f(0,1); glVertex3f(x-tx,0+ty,z-tz);
glTexCoord2f(1,1); glVertex3f(x-tx,y,z-tz);
glTexCoord2f(1,0); glVertex3f(0+tx,y,z-tz);
//inner right side face
glNormal3f(-1,0,0);
glTexCoord2f(0,0); glVertex3f(x-tx,0+ty,0+tz);
glTexCoord2f(0,1); glVertex3f(x-tx,0+ty,z-tz);
glTexCoord2f(1,1); glVertex3f(x-tx,y,z-tz);
glTexCoord2f(1,0); glVertex3f(x-tx,y,0+tz);
//front rim
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(0,y,0);
glTexCoord2f(0,1); glVertex3f(x,y,0);
glTexCoord2f(1,1); glVertex3f(x-tx,y,0+tz);
glTexCoord2f(1,0); glVertex3f(0+tx,y,0+tz);
//right side rim
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(x,y,0);
glTexCoord2f(0,1); glVertex3f(x,y,z);
glTexCoord2f(1,1); glVertex3f(x-tx,y,z-tz);
glTexCoord2f(1,0); glVertex3f(x-tx,y,0+tz);
//back rim
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(x,y,z);
glTexCoord2f(0,1); glVertex3f(0,y,z);
glTexCoord2f(1,1); glVertex3f(0+tx,y,z-tz);
glTexCoord2f(1,0); glVertex3f(x-tx,y,z-tz);
//left side rim
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(0,y,z);
glTexCoord2f(0,1); glVertex3f(0,y,0);
glTexCoord2f(1,1); glVertex3f(0+tx,y,0+tz);
glTexCoord2f(1,0); glVertex3f(0+tx,y,z-tz);
glEnd();
}
void drawStage(GLfloat x,GLfloat y,GLfloat z)
{
glBegin(GL_QUADS);
//top
glNormal3f(0,1,0);
glTexCoord2f(0,0); glVertex3f(0,y,0);
glTexCoord2f(0,1); glVertex3f(x,y,0);
glTexCoord2f(1,1); glVertex3f(x,y,z);
glTexCoord2f(1,0); glVertex3f(0,y,z);
//bottom
glNormal3f(0,-1,0);
glTexCoord2f(0,0); glVertex3f(0,0,0);
glTexCoord2f(0,1); glVertex3f(x,0,0);
glTexCoord2f(1,1); glVertex3f(x,0,z);
glTexCoord2f(1,0); glVertex3f(0,0,z);
//right
glNormal3f(1,0,0);
glTexCoord2f(0,0); glVertex3f(x,y,0);
glTexCoord2f(0,1); glVertex3f(x,y,z);
glTexCoord2f(1,1); glVertex3f(x,0,z);
glTexCoord2f(1,0); glVertex3f(x,0,0);
//left
glNormal3f(-1,0,0);
glTexCoord2f(0,0); glVertex3f(0,0,z);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(0,y,0);
glTexCoord2f(1,0); glVertex3f(0,y,z);
glEnd();
}
void drawBox()
{
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex12);
glTranslatef(-x/2, -y/2, -z/2);
drawHalfBox(x, y, z, tx, ty, tz);
glPushMatrix();
glTranslatef(11, 5, -1);
drawStage(10, 1, -26);
glPopMatrix();
glTranslatef(0,y,0);
glTranslatef(0,0,z);
glRotatef(theta, 1,0,0);
glTranslatef(0,0,-z);
glTranslatef(x/2, yl/2, z/2);
glRotatef(180, 1,0,0);
glTranslatef(-x/2, -yl/2, -z/2);
drawHalfBox(x, yl, z, tx, ty, tz);
glDisable(GL_TEXTURE_2D);
}
}box(30, 15, 30, 1);
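// Hypothetical GLUT scaffold added for illustration; it is not part of the
// original assignment. Window size, camera position and the 60-degree lid angle
// are arbitrary choices, and wood2.bmp is assumed to be present. drawBox() draws
// the lower half, the raised stage, and then the lid: the lid is translated to
// the top-back edge, rotated by `theta` about the x axis (the hinge), and flipped
// 180 degrees so both halves reuse drawHalfBox(). Compile with -DBOXMODEL_DEMO to
// build this as a standalone demo.
#ifdef BOXMODEL_DEMO
#include <GL/glut.h>

static void demoDisplay() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    gluLookAt(0.0, 25.0, 70.0,  0.0, 0.0, -15.0,  0.0, 1.0, 0.0);
    box.theta = 60;                    // open the lid by 60 degrees
    box.drawBox();
    glutSwapBuffers();
}

static void demoReshape(int w, int h) {
    glViewport(0, 0, w, h ? h : 1);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(60.0, (double)w / (double)(h ? h : 1), 1.0, 200.0);
    glMatrixMode(GL_MODELVIEW);
}

int main(int argc, char** argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(800, 600);
    glutCreateWindow("boxModel demo");
    glEnable(GL_DEPTH_TEST);
    box.initTex();                     // loads wood2.bmp; needs a current GL context
    glutDisplayFunc(demoDisplay);
    glutReshapeFunc(demoReshape);
    glutMainLoop();
    return 0;
}
#endif // BOXMODEL_DEMO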
| 25.327138 | 138 | 0.659621 | subhajit1412 |
335b0bce896b8532508fdb932ef53baa80455ac1 | 12,235 | cpp | C++ | src/turtlebot2_src/src/orocos-bayesian-filtering/orocos_bfl/src/filter/particlefilter.cpp | alexoterno/turtlebot2_with_head | ac714f77379dd0f47ddb76d83896fdabee269a03 | ["MIT"] | null | null | null |
src/turtlebot2_src/src/orocos-bayesian-filtering/orocos_bfl/src/filter/particlefilter.cpp | alexoterno/turtlebot2_with_head | ac714f77379dd0f47ddb76d83896fdabee269a03 | ["MIT"] | null | null | null |
src/turtlebot2_src/src/orocos-bayesian-filtering/orocos_bfl/src/filter/particlefilter.cpp | alexoterno/turtlebot2_with_head | ac714f77379dd0f47ddb76d83896fdabee269a03 | ["MIT"] | null | null | null |
// Copyright (C) 2003 Klaas Gadeyne <first dot last at gmail dot com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation; either version 2.1 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
// $Id$
#include "particlefilter.h"
#include "../pdf/mcpdf.h"
#define SV StateVar
#define MV MeasVar
#define MeasModel MeasurementModel
#define STATE_AND_MEAS_VAR_DIFFERENT
template <typename SV, typename MV>
ParticleFilter<SV,MV>::ParticleFilter(MCPdf<SV> * prior,
ConditionalPdf<SV,SV> * proposal,
int resampleperiod,
double resamplethreshold,
int resamplescheme)
: Filter<SV,MV>(prior)
, _proposal(proposal)
, _sample(WeightedSample<SV>(prior->DimensionGet()))
, _resampleScheme(resamplescheme)
, _created_post(true)
{
/* Initialize Post, at time = 0, post = prior
To be more clean, this should be done in the filter base class,
but this is impossible because of the pure virtuals...
*/
this->_post = new MCPdf<SV>(prior->NumSamplesGet(),prior->DimensionGet());
// Post is equal to prior at timestep 1
/* Note: Dirty cast should be avoided by not demanding an MCPdf as
prior and just sample from the prior instead :-(
*/
bool ret = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesSet(prior->ListOfSamplesGet());
assert(ret);
// Initialise lists of samples
_old_samples = (prior->ListOfSamplesGet());
_new_samples = _old_samples;
// You have to choose for dynamic resampling by specifying a threshold != 0 OR give me a fixed resample period != 0
assert(!(resampleperiod == 0 && resamplethreshold == 0));
assert(!(resampleperiod != 0 && resamplethreshold != 0));
// dynamic resampling
if (resampleperiod == 0)
_dynamicResampling = true;
// fixed period resampling
else
_dynamicResampling = false;
_resamplePeriod = resampleperiod;
_resampleThreshold = resamplethreshold;
}
template <typename SV, typename MV>
ParticleFilter<SV,MV>::ParticleFilter(MCPdf<SV> * prior,
MCPdf<SV> * post,
ConditionalPdf<SV,SV> * proposal,
int resampleperiod,
double resamplethreshold,
int resamplescheme)
: Filter<SV,MV>(prior),
_proposal(proposal),
_resampleScheme(resamplescheme),
_created_post(false)
{
this->_post = post;
// Post is equal to prior at timestep 1
/* Note: Dirty cast should be avoided by not demanding an MCPdf as
prior and just sample from the prior instead :-(
*/
bool ret = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesSet(prior->ListOfSamplesGet());
assert(ret);
// Initialise lists of samples
_old_samples = (prior->ListOfSamplesGet());
_new_samples = _old_samples;
// You have to choose for dynamic resampling by specifying a threshold != 0 OR give me a fixed resample period != 0
assert(!(resampleperiod == 0 && resamplethreshold == 0));
assert(!(resampleperiod != 0 && resamplethreshold != 0));
// dynamic resampling
if (resampleperiod == 0)
_dynamicResampling = true;
// fixed period resampling
else
_dynamicResampling = false;
_resamplePeriod = resampleperiod;
_resampleThreshold = resamplethreshold;
}
template <typename SV, typename MV>
ParticleFilter<SV,MV>::~ParticleFilter()
{
if (_created_post)
delete this->_post;
}
template <typename SV, typename MV>
ParticleFilter<SV,MV>::ParticleFilter(const ParticleFilter<SV,MV> & filter)
: Filter<SV,MV>(filter),
_created_post(true)
{
// Copy constructor of MCPdf
// Probably a bug...
this->_post = new MCPdf<SV>(dynamic_cast<MCPdf<SV> *>(filter._post));
}
template <typename SV, typename MV> void
ParticleFilter<SV,MV>::ProposalSet(ConditionalPdf<SV,SV> * const cpdf)
{
_proposal = cpdf;
}
template <typename SV, typename MV> ConditionalPdf<SV,SV> *
ParticleFilter<SV,MV>::ProposalGet()
{
return _proposal;
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::ProposalStepInternal(SystemModel<SV> * const sysmodel,
const SV & u,
MeasurementModel<MV,SV> * const measmodel,
const MV & z,
const SV & s)
{
// Get all samples from the current post through proposal density
_old_samples = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesGet();
_ns_it = _new_samples.begin();
for ( _os_it=_old_samples.begin(); _os_it != _old_samples.end() ; _os_it++)
{
const SV& x_old = _os_it->ValueGet();
_proposal->ConditionalArgumentSet(0,x_old);
if (!sysmodel->SystemWithoutInputs())
{
_proposal->ConditionalArgumentSet(1,u);
if (this->_proposal_depends_on_meas)
{
#ifndef STATE_AND_MEAS_VAR_DIFFERENT
_proposal->ConditionalArgumentSet(2,z);
if (!measmodel->SystemWithoutSensorParams())
_proposal->ConditionalArgumentSet(3,s);
#endif
}
}
else // System without inputs
{
if (this->_proposal_depends_on_meas)
{
#ifndef STATE_AND_MEAS_VAR_DIFFERENT
_proposal->ConditionalArgumentSet(1,z);
if (!measmodel->SystemWithoutSensorParams())
_proposal->ConditionalArgumentSet(2,s);
#endif
}
}
// Bug, make sampling method a parameter!
_proposal->SampleFrom(_sample, SampleMthd::DEFAULT,NULL);
_ns_it->ValueSet(_sample.ValueGet());
_ns_it->WeightSet(_os_it->WeightGet());
_ns_it++;
}
(this->_timestep)++;
// Update the list of samples
return (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesUpdate(_new_samples);
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::UpdateWeightsInternal(SystemModel<SV> * const sysmodel,
const SV & u,
MeasurementModel<MV,SV> * const measmodel,
const MV & z,
const SV & s)
{
Probability weightfactor = 1;
// Update the weights
// Same remarks as for the system update!
_new_samples = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesGet();
_os_it = _old_samples.begin();
for ( _ns_it=_new_samples.begin(); _ns_it != _new_samples.end() ; _ns_it++)
{
const SV& x_new = _ns_it->ValueGet();
const SV& x_old = _os_it->ValueGet();
if (sysmodel == NULL)
{
if (measmodel->SystemWithoutSensorParams() == true)
weightfactor = measmodel->ProbabilityGet(z,x_new);
else
weightfactor = measmodel->ProbabilityGet(z,x_new,s);
}
else // We do have a system model
{
_proposal->ConditionalArgumentSet(0,x_old);
if (measmodel->SystemWithoutSensorParams() == true)
{
weightfactor = measmodel->ProbabilityGet(z,x_new);
if (sysmodel->SystemWithoutInputs() == false)
{
_proposal->ConditionalArgumentSet(1,u);
if (this->_proposal_depends_on_meas){
#ifndef STATE_AND_MEAS_VAR_DIFFERENT
_proposal->ConditionalArgumentSet(2,z);
#endif
}
if (_proposal->ProbabilityGet(x_new) != 0)
weightfactor = weightfactor * ( sysmodel->ProbabilityGet(x_new,x_old,u) / _proposal->ProbabilityGet(x_new) );
else weightfactor = 0;
}
else // we do have a system without inputs
{
if (this->_proposal_depends_on_meas){
#ifndef STATE_AND_MEAS_VAR_DIFFERENT
_proposal->ConditionalArgumentSet(1,z);
#endif
}
if ( _proposal->ProbabilityGet(x_new) != 0)
weightfactor = weightfactor * ( sysmodel->ProbabilityGet(x_new,_os_it->ValueGet()) / _proposal->ProbabilityGet(x_new) );
else weightfactor = 0;
}
}
else // System with sensor Parameters
{
weightfactor = measmodel->ProbabilityGet(z,x_new,s);
}
}
_ns_it->WeightSet(_ns_it->WeightGet() * weightfactor);
_os_it++;
}
// Update the sample list of post the SumofWeights of the pdf
return (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesUpdate(_new_samples);
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::DynamicResampleStep()
{
// Resampling?
bool resampling = false;
double sum_sq_weigths = 0.0;
// Resampling if necessary
if ( this->_dynamicResampling)
{
// Check if the effective sample size N_eff = 1 / \sum{(w_i_normalised)^2} < threshold
// This is the criterion proposed by Liu
// BUG foresee other methods of approximation/calculating
// effective sample size. Let the user implement this in !
_new_samples = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesGet();
for ( _ns_it=_new_samples.begin(); _ns_it != _new_samples.end() ; _ns_it++)
{
sum_sq_weigths += pow(_ns_it->WeightGet(),2);
}
if ((1.0 / sum_sq_weigths) < _resampleThreshold)
{
// #define __RESAMPLE_DEBUG__
#ifdef __RESAMPLE_DEBUG__
cout << "resampling now: " << this->_timestep
<< "\tN_eff: " << (1.0 / sum_sq_weigths) << endl;
#endif // __RESAMPLE_DEBUG__
resampling = true;
}
}
if (resampling == true)
return this->Resample();
else
return true;
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::StaticResampleStep()
{
// Resampling if necessary
if ( (!this->_dynamicResampling) && (((this->_timestep) % _resamplePeriod) == 0) && (this->_timestep != 0))
return this->Resample();
return true;
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::UpdateInternal(SystemModel<StateVar>* const sysmodel,
const StateVar& u,
MeasurementModel<MeasVar,StateVar>* const measmodel,
const MeasVar& z,
const StateVar& s)
{
bool result = true;
  // Proposal sampling only makes sense if there is a system model
  // Bug: not completely true, but should do for now...
if (sysmodel != NULL)
{
result = result && this->StaticResampleStep();
result = result && this->ProposalStepInternal(sysmodel,u,measmodel,z,s);
}
// Updating the weights only makes sense using a measurement model
if (measmodel != NULL)
{
result = result && this->UpdateWeightsInternal(sysmodel,u,measmodel,z,s);
result = result && this->DynamicResampleStep();
}
return result;
}
template <typename SV, typename MV> bool
ParticleFilter<SV,MV>::Resample()
{
int NumSamples = (dynamic_cast<MCPdf<SV> *>(this->_post))->NumSamplesGet();
// #define __PARTICLEFILTER_DEBUG__
#ifdef __PARTICLEFILTER_DEBUG__
cout << "PARTICLEFILTER: resampling now" << endl;
_new_samples= (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesGet();
for ( _ns_it=_new_samples.begin(); _ns_it != _new_samples.end() ; _ns_it++)
{
cout << "PF: Old samples:\n";
cout << _ns_it->ValueGet() << _ns_it->WeightGet() << endl;
}
#endif // __PARTICLEFILTER_DEBUG
switch(_resampleScheme)
{
case MULTINOMIAL_RS:
{
(dynamic_cast<MCPdf<SV> *>(this->_post))->SampleFrom(_new_samples_unweighted, NumSamples,SampleMthd::RIPLEY,NULL);
break;
}
    case SYSTEMATIC_RS:{break;}   // not implemented in this version
    case STRATIFIED_RS:{break;}   // not implemented in this version
    case RESIDUAL_RS:{break;}     // not implemented in this version
default:
{
cerr << "Sampling method not supported" << endl;
break;
}
}
bool result = (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesUpdate(_new_samples_unweighted);
#ifdef __PARTICLEFILTER_DEBUG__
cout << "PARTICLEFILTER: after resampling" << endl;
_new_samples= (dynamic_cast<MCPdf<SV> *>(this->_post))->ListOfSamplesGet();
for ( _ns_it=_new_samples.begin(); _ns_it != _new_samples.end() ; _ns_it++)
{
cout << "PF: New samples:\n";
cout << _ns_it->ValueGet() << _ns_it->WeightGet() << endl;
}
#endif // __PARTICLEFILTER_DEBUG
return result;
}
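// Illustrative sketch (not part of the original source): the SYSTEMATIC_RS
// branch above is a placeholder. Systematic resampling draws a single uniform
// offset and then steps through the cumulative weight distribution at equally
// spaced positions. A minimal standalone sketch, assuming normalised weights,
// a caller-supplied uniform draw u0 in [0,1), and that <vector> is available
// as elsewhere in the library:
static std::vector<std::size_t> SystematicResampleIndices(
    const std::vector<double>& weights, double u0)
{
  const std::size_t n = weights.size();
  std::vector<std::size_t> indices(n);
  if (n == 0) return indices;
  double cumulative = weights[0];
  std::size_t j = 0;
  for (std::size_t i = 0; i < n; ++i)
  {
    const double u = (u0 + static_cast<double>(i)) / static_cast<double>(n);
    while (u > cumulative && j + 1 < n)      // advance through the CDF
    {
      ++j;
      cumulative += weights[j];
    }
    indices[i] = j;                          // particle j is replicated
  }
  return indices;
}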
template<typename SV, typename MV> MCPdf<SV> *
ParticleFilter<SV,MV>::PostGet()
{
return (MCPdf<SV>*)Filter<SV,MV>::PostGet();
}
| 31.053299
| 126
| 0.673886
|
alexoterno
|
335ba765ebc4cbca9627f4f5183e65c281a038c3
| 308
|
cpp
|
C++
|
extras2/Masina/Dacia.cpp
|
stefanaciudin/CommandDefense
|
cae223bf72019b2098764664be5de2f052d57d6c
|
[
"MIT"
] | null | null | null |
extras2/Masina/Dacia.cpp
|
stefanaciudin/CommandDefense
|
cae223bf72019b2098764664be5de2f052d57d6c
|
[
"MIT"
] | null | null | null |
extras2/Masina/Dacia.cpp
|
stefanaciudin/CommandDefense
|
cae223bf72019b2098764664be5de2f052d57d6c
|
[
"MIT"
] | null | null | null |
#include "Dacia.h"
string Dacia::GetName()
{
return "Dacia";
}
int Dacia::getCapacitate()
{
return capacitate;
}
string Dacia::getCuloare()
{
return culoare;
}
void Dacia::setCapacitate(int c)
{
this->capacitate = c;
}
void Dacia::setCuloare(string color)
{
this->culoare = color;
}
| 11.407407
| 36
| 0.649351
|
stefanaciudin
|
335d52e2fddea25865a41314668a64fba42ff76b
| 20,904
|
cpp
|
C++
|
src/maskgeneration.cpp
|
3d-pli/PLImig
|
db4075ddd0f6f8d82b3f61f645002c788fa9a46c
|
[
"MIT"
] | null | null | null |
src/maskgeneration.cpp
|
3d-pli/PLImig
|
db4075ddd0f6f8d82b3f61f645002c788fa9a46c
|
[
"MIT"
] | null | null | null |
src/maskgeneration.cpp
|
3d-pli/PLImig
|
db4075ddd0f6f8d82b3f61f645002c788fa9a46c
|
[
"MIT"
] | 1
|
2022-02-13T03:59:40.000Z
|
2022-02-13T03:59:40.000Z
|
/*
MIT License
Copyright (c) 2021 Forschungszentrum Jülich / Jan André Reuter.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "maskgeneration.h"
PLImg::MaskGeneration::MaskGeneration(std::shared_ptr<cv::Mat> retardation, std::shared_ptr<cv::Mat> transmittance) :
m_retardation(std::move(retardation)), m_transmittance(std::move(transmittance)), m_tref(nullptr), m_tback(nullptr),
m_rthres(nullptr), m_tthres(nullptr), m_whiteMask(nullptr), m_grayMask(nullptr), m_probabilityMask(nullptr) {
if(m_transmittance) {
cv::minMaxIdx(*m_transmittance, &m_minTransmittance, &m_maxTransmittance);
m_minTransmittance = fmax(m_minTransmittance, 0.0f);
} else {
m_minTransmittance = 0;
m_maxTransmittance = 1;
}
if(m_retardation) {
cv::minMaxIdx(*m_retardation, &m_minRetardation, &m_maxRetardation);
m_minRetardation = fmax(m_minRetardation, 0.0f);
} else {
m_minRetardation = 0;
m_maxRetardation = 1;
}
}
void PLImg::MaskGeneration::setModalities(std::shared_ptr<cv::Mat> retardation, std::shared_ptr<cv::Mat> transmittance) {
this->m_retardation = std::move(retardation);
this->m_transmittance = std::move(transmittance);
resetParameters();
if(m_transmittance) {
cv::minMaxIdx(*m_transmittance, &m_minTransmittance, &m_maxTransmittance);
m_minTransmittance = fmax(m_minTransmittance, 0.0f);
} else {
m_minTransmittance = 0;
m_maxTransmittance = 1;
}
if(m_retardation) {
cv::minMaxIdx(*m_retardation, &m_minRetardation, &m_maxRetardation);
m_minRetardation = fmax(m_minRetardation, 0.0f);
} else {
m_minRetardation = 0;
m_maxRetardation = 1;
}
}
void PLImg::MaskGeneration::resetParameters() {
this->m_tref = nullptr;
this->m_tback = nullptr;
this->m_rthres = nullptr;
this->m_tthres = nullptr;
this->m_whiteMask = nullptr;
this->m_grayMask = nullptr;
this->m_fullMask = nullptr;
this->m_probabilityMask = nullptr;
}
void PLImg::MaskGeneration::removeBackground() {
    auto transmittanceThreshold = this->T_back();
    // Compute the background mask once, before the transmittance is modified,
    // so that both modalities are masked with exactly the same pixels.
    cv::Mat backgroundMask = *m_transmittance > transmittanceThreshold;
    m_transmittance->setTo(m_maxTransmittance, backgroundMask);
    m_retardation->setTo(m_minRetardation, backgroundMask);
}
void PLImg::MaskGeneration::set_tback(float tMax) {
this->m_tback = std::make_unique<float>(tMax);
}
void PLImg::MaskGeneration::set_tref(float tMin) {
this->m_tref = std::make_unique<float>(tMin);
}
void PLImg::MaskGeneration::set_rthres(float tRet) {
this->m_rthres = std::make_unique<float>(tRet);
}
void PLImg::MaskGeneration::set_tthres(float tTra) {
this->m_tthres = std::make_unique<float>(tTra);
}
float PLImg::MaskGeneration::T_thres() {
if(!m_tthres) {
float temp_tTra = T_ref();
// Generate histogram for potential correction of tMin for tTra
cv::Mat hist = PLImg::cuda::histogram(*m_transmittance, m_minTransmittance, m_maxTransmittance, MAX_NUMBER_OF_BINS);
int startPosition = temp_tTra / (float(m_maxTransmittance) - float(m_minTransmittance)) * float(MAX_NUMBER_OF_BINS);
int endPosition = T_back() / (float(m_maxTransmittance) - float(m_minTransmittance)) * float(MAX_NUMBER_OF_BINS);
if(startPosition > endPosition) {
int tmp = startPosition;
startPosition = endPosition;
endPosition = tmp;
}
auto peaks = Histogram::peaks(hist, startPosition, endPosition);
if(peaks.size() > 0) {
endPosition = std::min_element(hist.begin<float>() + startPosition, hist.begin<float>() + peaks.at(0)) - hist.begin<float>();
float stepSize = (m_maxTransmittance - m_minTransmittance) / MAX_NUMBER_OF_BINS;
auto kappa = Histogram::curvature(hist, m_minTransmittance, m_maxTransmittance);
auto kappaPeaks = PLImg::Histogram::peaks(kappa, startPosition, endPosition);
if(kappaPeaks.empty()) {
this->m_tthres = std::make_unique<float>(m_minTransmittance + startPosition * stepSize);
} else {
this->m_tthres = std::make_unique<float>(m_minTransmittance + kappaPeaks.at(0) * stepSize);
}
} else {
this->m_tthres = std::make_unique<float>(temp_tTra);
}
}
return *this->m_tthres;
}
float PLImg::MaskGeneration::R_thres() {
if(!m_rthres) {
cv::Mat intHist = PLImg::cuda::histogram(*m_retardation, m_minRetardation + 1e-15, m_maxRetardation, MAX_NUMBER_OF_BINS);
cv::Mat hist;
intHist.convertTo(hist, CV_32FC1);
cv::normalize(hist, hist, 0.0f, 1.0f, cv::NORM_MINMAX, CV_32FC1);
float temp_tRet = 0;
int histogramMinimalBin = 0;
float histogramMinimalValue = m_minRetardation;
auto peaks = PLImg::Histogram::peaks(hist, 0, MAX_NUMBER_OF_BINS / 2);
int startPosition, endPosition;
startPosition = 0;
if(!peaks.empty()) {
histogramMinimalBin = peaks.at(peaks.size() - 1);
histogramMinimalValue = histogramMinimalBin * (m_maxRetardation - m_minRetardation)/MAX_NUMBER_OF_BINS + m_minRetardation;
}
hist(cv::Range(histogramMinimalBin, MAX_NUMBER_OF_BINS), cv::Range(0, 1)).copyTo(hist);
int width = Histogram::peakWidth(hist, startPosition, 1);
endPosition = ceil(MIN_NUMBER_OF_BINS * 20.0f * width / MAX_NUMBER_OF_BINS);
for(unsigned NUMBER_OF_BINS = MIN_NUMBER_OF_BINS; NUMBER_OF_BINS <= MAX_NUMBER_OF_BINS; NUMBER_OF_BINS *= 2) {
hist = PLImg::cuda::histogram(*m_retardation, histogramMinimalValue, m_maxRetardation, NUMBER_OF_BINS);
cv::normalize(hist, hist, 0.0f, 1.0f, cv::NORM_MINMAX, CV_32FC1);
auto kappa = Histogram::curvature(hist, histogramMinimalValue, m_maxRetardation);
cv::normalize(kappa, kappa, 0.0f, 1.0f, cv::NORM_MINMAX, CV_32FC1);
// If more than one prominent peak is in the histogram, start at the second peak and not at the beginning
auto peaks = PLImg::Histogram::peaks(hist, startPosition, endPosition);
if(!peaks.empty()) {
startPosition = peaks.at(peaks.size() - 1);
}
int resultingBin;
auto kappaPeaks = PLImg::Histogram::peaks(kappa, startPosition, endPosition);
if(kappaPeaks.empty()) {
resultingBin = std::max_element(kappa.begin<float>() + startPosition, kappa.begin<float>() + endPosition) - kappa.begin<float>();
} else {
resultingBin = kappaPeaks.at(0);
}
float stepSize = float(m_maxRetardation - histogramMinimalValue) / float(NUMBER_OF_BINS);
temp_tRet = histogramMinimalValue + float(resultingBin) * stepSize;
// If our next step would be still in bounds for our histogram.
startPosition = fmax(0, (resultingBin - 2) * 2 - 1);
endPosition = fmin((resultingBin + 2) * 2 + 1, NUMBER_OF_BINS << 1);
}
this->m_rthres = std::make_unique<float>(temp_tRet);
}
return *this->m_rthres;
}
float PLImg::MaskGeneration::T_ref() {
if(!m_tref) {
cv::Mat backgroundMask = *m_retardation > 0 & *m_transmittance > 0 & *m_transmittance < T_back();
cv::Mat mask = cuda::labeling::largestAreaConnectedComponents(*m_retardation, backgroundMask);
cv::Scalar mean = cv::mean(*m_transmittance, mask);
m_tref = std::make_unique<float>(mean[0]);
}
return *this->m_tref;
}
float PLImg::MaskGeneration::T_back() {
if(!m_tback) {
cv::Mat fullHist = PLImg::cuda::histogram(*m_transmittance, m_minTransmittance, m_maxTransmittance, MAX_NUMBER_OF_BINS);
fullHist.convertTo(fullHist, CV_32FC1);
// Determine start and end on full histogram
int startPosition, endPosition;
startPosition = MAX_NUMBER_OF_BINS / 3;
endPosition = std::max_element(fullHist.begin<float>() + startPosition, fullHist.end<float>()) - fullHist.begin<float>();
float histMaximum = endPosition * (m_maxTransmittance - m_minTransmittance) / MAX_NUMBER_OF_BINS + m_minTransmittance;
fullHist = PLImg::cuda::histogram(*m_transmittance, m_minTransmittance,
histMaximum,
MAX_NUMBER_OF_BINS);
fullHist.convertTo(fullHist, CV_32FC1);
endPosition = MAX_NUMBER_OF_BINS - 1;
auto peaks = PLImg::Histogram::peaks(fullHist, MAX_NUMBER_OF_BINS / 2, endPosition-1);
if(!peaks.empty()) {
startPosition = std::min_element(fullHist.begin<float>() + peaks.at(peaks.size() - 1),
fullHist.begin<float>() + endPosition) - fullHist.begin<float>();
} else {
int width = Histogram::peakWidth(fullHist, endPosition, -1);
startPosition = endPosition - 10 * width;
}
//If the transmittance was masked, we should see a large maxCurvature with 0 values after the highest peak
if(endPosition - startPosition < 2) {
float stepSize = float(histMaximum - m_minTransmittance) / float(MAX_NUMBER_OF_BINS);
this->m_tback = std::make_unique<float>(m_minTransmittance + startPosition * stepSize);
}
//Else do the normal calculation
else {
// Convert from 256 to 64 bins
endPosition = MIN_NUMBER_OF_BINS;
startPosition = fmin(endPosition - 1, MIN_NUMBER_OF_BINS * float(startPosition)/MAX_NUMBER_OF_BINS);
float temp_tMax;
for(unsigned NUMBER_OF_BINS = MIN_NUMBER_OF_BINS; NUMBER_OF_BINS <= MAX_NUMBER_OF_BINS; NUMBER_OF_BINS = NUMBER_OF_BINS << 1) {
cv::Mat hist = PLImg::cuda::histogram(*m_transmittance, m_minTransmittance,
histMaximum,
NUMBER_OF_BINS);
cv::normalize(hist, hist, 0, 1, cv::NORM_MINMAX, CV_32FC1);
float stepSize = float(histMaximum - m_minTransmittance) / float(NUMBER_OF_BINS);
auto kappa = Histogram::curvature(hist, m_minTransmittance, histMaximum);
auto kappaPeaks = Histogram::peaks(kappa, startPosition+1, endPosition-1);
int resultingBin;
if (kappaPeaks.empty()) {
resultingBin = std::max_element(kappa.begin<float>() + startPosition,
kappa.begin<float>() + endPosition) - kappa.begin<float>();
} else {
resultingBin = kappaPeaks.at(kappaPeaks.size() - 1);
}
temp_tMax = m_minTransmittance + resultingBin * stepSize;
startPosition = fmax(0, (resultingBin * 2 - 1));
endPosition = NUMBER_OF_BINS << 1;
}
this->m_tback = std::make_unique<float>(temp_tMax);
}
}
return *this->m_tback;
}
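// Illustrative sketch (not part of the original source): the threshold
// routines above search for peaks in the "curvature" of the histogram. The
// actual Histogram::curvature implementation is not shown in this file; as an
// assumption, a common discrete definition is kappa_i = y''_i / (1 + y'_i^2)^(3/2)
// with central differences scaled by the bin width:
static cv::Mat curvatureSketch(const cv::Mat& hist, float binWidth) {
    cv::Mat kappa = cv::Mat::zeros(hist.rows, 1, CV_32FC1);
    for(int i = 1; i + 1 < hist.rows; ++i) {
        float y_prev = hist.at<float>(i - 1);
        float y_curr = hist.at<float>(i);
        float y_next = hist.at<float>(i + 1);
        float d1 = (y_next - y_prev) / (2.0f * binWidth);                      // first derivative
        float d2 = (y_next - 2.0f * y_curr + y_prev) / (binWidth * binWidth);  // second derivative
        kappa.at<float>(i) = d2 / powf(1.0f + d1 * d1, 1.5f);
    }
    return kappa;
}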
std::shared_ptr<cv::Mat> PLImg::MaskGeneration::grayMask() {
if(!m_grayMask) {
cv::Mat mask = (*m_transmittance >= T_thres()) & (*m_transmittance <= T_back()) & (*m_retardation <= R_thres());
m_grayMask = std::make_shared<cv::Mat>(mask);
}
return m_grayMask;
}
std::shared_ptr<cv::Mat> PLImg::MaskGeneration::whiteMask() {
if(!m_whiteMask) {
cv::Mat mask = ((*m_transmittance < T_thres()) & (*m_transmittance > 0)) | (*m_retardation > R_thres());
m_whiteMask = std::make_shared<cv::Mat>(mask);
}
return m_whiteMask;
}
std::shared_ptr<cv::Mat> PLImg::MaskGeneration::fullMask() {
if(!m_fullMask) {
if (!m_whiteMask) whiteMask();
if (!m_grayMask) grayMask();
cv::Mat mask(m_whiteMask->rows, m_whiteMask->cols, CV_8UC1);
mask.setTo(0);
mask.setTo(200, *whiteMask());
mask.setTo(100, *grayMask());
m_fullMask = std::make_shared<cv::Mat>(mask);
}
return m_fullMask;
}
std::shared_ptr<cv::Mat> PLImg::MaskGeneration::noNerveFiberMask() {
cv::Mat backgroundMask;
cv::Scalar mean, stddev;
cv::bitwise_not(*fullMask(), backgroundMask);
cv::meanStdDev(*m_retardation, mean, stddev, backgroundMask);
cv::Mat mask = *m_retardation < mean[0] + 2*stddev[0] & *grayMask();
return std::make_shared<cv::Mat>(mask);
}
std::shared_ptr<cv::Mat> PLImg::MaskGeneration::probabilityMask() {
if(!m_probabilityMask) {
std::vector<float> above_rthres;
std::vector<float> below_rthres;
std::vector<float> above_tthres;
std::vector<float> below_tthres;
m_probabilityMask = std::make_shared<cv::Mat>(m_retardation->rows, m_retardation->cols, CV_32FC1);
        // We're trying to calculate the maximum possible number of threads that can be used simultaneously to calculate multiple iterations at once.
float predictedMemoryUsage = PLImg::cuda::getHistogramMemoryEstimation(Image::randomizedModalities(m_transmittance, m_retardation, 0.5f)[0], MAX_NUMBER_OF_BINS);
// Calculate the number of threads that will be used based on the free memory and the maximum number of threads
int numberOfThreads;
#pragma omp parallel
numberOfThreads = omp_get_num_threads();
numberOfThreads = fmax(1, fmin(numberOfThreads, uint(float(PLImg::cuda::getFreeMemory()) / predictedMemoryUsage)));
std::cout << "OpenMP version used during compilation (doesn't have to match the executing OpenMP version): " << _OPENMP << std::endl;
#if _OPENMP < 201611
omp_set_nested(true);
#endif
#ifdef __GNUC__
auto omp_levels = omp_get_max_active_levels();
omp_set_max_active_levels(3);
#endif
ushort numberOfFinishedIterations = 0;
#pragma omp parallel shared(numberOfThreads, above_rthres, below_rthres, above_tthres, below_tthres, numberOfFinishedIterations)
{
#pragma omp single
{
std::cout << "Computing " << numberOfThreads << " iterations in parallel with max. " << omp_get_max_threads() / numberOfThreads << " threads per iteration." << std::endl;
}
#ifdef __GNUC__
omp_set_num_threads(omp_get_max_threads() / numberOfThreads);
#endif
// Only work with valid threads. The other threads won't do any work.
if(omp_get_thread_num() < numberOfThreads) {
std::shared_ptr<cv::Mat> small_retardation;
std::shared_ptr<cv::Mat> small_transmittance;
MaskGeneration generation(small_retardation, small_transmittance);
float r_thres, t_thres;
unsigned int ownNumberOfIterations = PROBABILITY_MASK_ITERATIONS / numberOfThreads;
int overhead = PROBABILITY_MASK_ITERATIONS % numberOfThreads;
if (overhead > 0 && omp_get_thread_num() < overhead) {
++ownNumberOfIterations;
}
for (unsigned int i = 0; i < ownNumberOfIterations; ++i) {
auto small_modalities = Image::randomizedModalities(m_transmittance, m_retardation, 0.5f);
small_transmittance = std::make_shared<cv::Mat>(small_modalities[0]);
small_retardation = std::make_shared<cv::Mat>(small_modalities[1]);
generation.setModalities(small_retardation, small_transmittance);
generation.set_tref(this->T_ref());
generation.set_tback(this->T_back());
r_thres = generation.R_thres();
if (r_thres >= this->R_thres()) {
#pragma omp critical
above_rthres.push_back(r_thres);
} else if (r_thres <= this->R_thres()) {
#pragma omp critical
below_rthres.push_back(r_thres);
}
t_thres = generation.T_thres();
if (t_thres >= this->T_thres()) {
#pragma omp critical
above_tthres.push_back(t_thres);
} else if (t_thres <= this->T_thres() && t_thres > 0) {
#pragma omp critical
below_tthres.push_back(t_thres);
}
#pragma omp critical
{
++numberOfFinishedIterations;
std::cout << "\rProbability Mask Generation: Iteration " << numberOfFinishedIterations << " of "
<< PROBABILITY_MASK_ITERATIONS;
std::flush(std::cout);
};
}
small_transmittance = nullptr;
small_retardation = nullptr;
generation.setModalities(nullptr, nullptr);
}
}
#ifdef __GNUC__
omp_set_max_active_levels(omp_levels);
#endif
#if _OPENMP < 201611
omp_set_nested(false);
#endif
std::cout << std::endl;
float diff_rthres_p, diff_rthres_m, diff_tthres_p, diff_tthres_m;
if (above_rthres.empty()) {
diff_rthres_p = R_thres();
} else {
diff_rthres_p = std::accumulate(above_rthres.begin(), above_rthres.end(), 0.0f) / above_rthres.size();
}
if (below_rthres.empty()) {
diff_rthres_m = R_thres();
} else {
diff_rthres_m = std::accumulate(below_rthres.begin(), below_rthres.end(), 0.0f) / below_rthres.size();
}
if (above_tthres.empty()) {
diff_tthres_p = T_thres();
} else {
diff_tthres_p = std::accumulate(above_tthres.begin(), above_tthres.end(), 0.0f) / above_tthres.size();
}
if (below_tthres.empty()) {
diff_tthres_m = T_thres();
} else {
diff_tthres_m = std::accumulate(below_tthres.begin(), below_tthres.end(), 0.0f) / below_tthres.size();
}
std::cout << "Probability parameters: R+:" << diff_rthres_p << ", R-:" << diff_rthres_m <<
", T+:" << diff_tthres_p << ", T-:" << diff_tthres_m
<< std::endl;
float diffTra = 0.0f;
float diffRet = 0.0f;
        // Get raw pointers from the OpenCV matrices to prevent overflow errors when the image has more pixels than UINT_MAX
float* probabilityMaskPtr = (float*) m_probabilityMask->data;
const float* transmittancePtr = (float*) m_transmittance->data;
const float* retardationPtr = (float*) m_retardation->data;
// Calculate probability mask
#pragma omp parallel for private(diffTra, diffRet) default(shared) schedule(static)
for(unsigned long long idx = 0; idx < ((unsigned long long) m_probabilityMask->rows * m_probabilityMask->cols); ++idx) {
diffTra = transmittancePtr[idx];
if(diffTra < T_thres()) {
diffTra = (diffTra - T_thres()) / diff_tthres_m;
} else {
diffTra = (diffTra - T_thres()) / diff_tthres_p;
}
diffRet = retardationPtr[idx];
if(diffRet < R_thres()) {
diffRet = (diffRet - R_thres()) / diff_rthres_m;
} else {
diffRet = (diffRet - R_thres()) / diff_rthres_p;
}
probabilityMaskPtr[idx] =
(-erf(cos(3.0f * M_PI / 4.0f - atan2f(diffTra, diffRet)) *
sqrtf(diffTra * diffTra + diffRet * diffRet) * 2) + 1) / 2.0f;
}
}
return m_probabilityMask;
}
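// Illustrative sketch (not part of the original source): the inner loop of
// probabilityMask() maps the normalised distances of a pixel from the two
// thresholds onto a value in [0, 1]. The same formula factored out as a
// standalone helper (no new behaviour):
static float probabilityFromDistances(float diffTra, float diffRet) {
    // Project the 2D offset onto the diagonal direction (3*pi/4) and squash the
    // signed, scaled distance with the error function into the range [0, 1].
    float angle = atan2f(diffTra, diffRet);
    float radius = sqrtf(diffTra * diffTra + diffRet * diffRet);
    float signedDistance = cos(3.0f * M_PI / 4.0f - angle) * radius;
    return (-erf(signedDistance * 2.0f) + 1.0f) / 2.0f;
}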
| 45.641921
| 186
| 0.615098
|
3d-pli
|
335ea74d325299934fac2d0bc5ad9f85dc464677
| 27,950
|
cpp
|
C++
|
src/BundleAdjustment2Viewes.cpp
|
cashiwamochi/SimpleBundleAdjustment
|
2b4e5249835c5a798fc1ddd03f101e8579d70213
|
[
"MIT"
] | 27
|
2018-08-01T08:53:46.000Z
|
2021-12-16T11:58:35.000Z
|
src/BundleAdjustment2Viewes.cpp
|
cashiwamochi/SimpleBundleAdjustment
|
2b4e5249835c5a798fc1ddd03f101e8579d70213
|
[
"MIT"
] | 4
|
2019-04-12T10:34:13.000Z
|
2022-03-26T06:10:08.000Z
|
src/BundleAdjustment2Viewes.cpp
|
cashiwamochi/SimpleBundleAdjustment
|
2b4e5249835c5a798fc1ddd03f101e8579d70213
|
[
"MIT"
] | 8
|
2019-05-06T08:54:57.000Z
|
2021-04-11T21:48:41.000Z
|
#include "BundleAdjustment2Viewes.hpp"
#include <cmath>
#include <random>
namespace BA2Viewes {
Optimizer::Optimizer(const PoseAndStructure _pose_and_structure, const BAMode _mode)
: m_pose_and_structure(_pose_and_structure), me_mode(_mode)
{
mb_verbose = false;
mpm_images = std::make_pair(cv::noArray().getMat(), cv::noArray().getMat());
}
cv::Mat Optimizer::ComputeJ(const std::vector<cv::Mat>& vm_data_for_process, const PoseAndStructure& _pose_and_structure) {
cv::Mat J;
const cv::Mat K = _pose_and_structure.m_Kd;
switch(me_mode)
{
case BA2Viewes::POSE : {
assert( vm_data_for_process.size() == 2 );
const int N_points = _pose_and_structure.m_point3d.cols;
const int N_cameras = 2;
J = cv::Mat::zeros(2*N_points*N_cameras, 6*N_cameras, CV_64F);
std::vector<cv::Mat> vm_poses{vm_data_for_process[0], vm_data_for_process[1]};
cv::Mat point3d_homo = cv::Mat::ones(4, N_points, CV_64F);
_pose_and_structure.m_point3d.copyTo(point3d_homo.rowRange(0,3));
// Jacobian
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
cv::Mat tmp = vm_poses[i]*point3d_homo;
double x = tmp.at<double>(0,j);
double y = tmp.at<double>(1,j);
double z = tmp.at<double>(2,j);
cv::Mat _j = (cv::Mat_<double>(2, 6)
<< K.at<double>(0,0)/z, 0.0, - K.at<double>(0,0)*x/(z*z), - K.at<double>(0,0)*x*y/(z*z), K.at<double>(0,0)*( 1.0 + ((x*x)/(z*z)) ), - K.at<double>(0,0)*y/z,
0.0, K.at<double>(1,1)/z, - K.at<double>(1,1)*y/(z*z), - K.at<double>(1,1)*( 1.0 + ((y*y)/(z*z)) ), K.at<double>(1,1)*((x*y)/(z*z)), K.at<double>(1,1)*x/z);
_j.copyTo(J.rowRange(i*N_points*2 + 2*j, i*N_points*2 + 2*j + 2).colRange(6*i, 6*i + 6));
}
}
} break;
case BA2Viewes::STRUCTURE : {
assert( vm_data_for_process.size() == 1 );
//const cv::Mat point3d_homo = vm_data_for_process[0];
cv::Mat point3d_homo = cv::Mat::ones(4, vm_data_for_process[0].cols, CV_64F);
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
std::vector<cv::Mat> vm_poses{_pose_and_structure.vp_pose_and_structure[0].first, _pose_and_structure.vp_pose_and_structure[1].first};
const int N_points = _pose_and_structure.m_point3d.cols;
      const int N_cameras = 2; // this is constant because this system is used for BA in two views.
J = cv::Mat::zeros(2*N_points*N_cameras, 3*N_points, CV_64F);
// Jacobian
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
cv::Mat tmp = vm_poses[i]*point3d_homo;
double x = tmp.at<double>(0,j);
double y = tmp.at<double>(1,j);
double z = tmp.at<double>(2,j);
cv::Mat _j = (cv::Mat_<double>(2, 3)
<< K.at<double>(0,0)/z, 0.0, - K.at<double>(0,0)*x/(z*z),
0.0, K.at<double>(1,1)/z, - K.at<double>(1,1)*y/(z*z));
_j *= vm_poses[i].colRange(0, 3);
_j.copyTo(J.rowRange(i*N_points*2 + 2*j, i*N_points*2 + 2*j + 2).colRange(3*j, 3*j + 3));
}
}
} break;
case BA2Viewes::FULL : {
assert( vm_data_for_process.size() == 3 );
cv::Mat point3d_homo = cv::Mat::ones(4, vm_data_for_process[0].cols, CV_64F);
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
std::vector<cv::Mat> vm_poses{vm_data_for_process[1], vm_data_for_process[2]};
const int N_points = _pose_and_structure.m_point3d.cols;
      const int N_cameras = 2; // both cameras project points, but only the 2nd camera's pose is optimized (the 1st is fixed)
J = cv::Mat::zeros(2*N_points*N_cameras, 5 + 3*N_points, CV_64F);
// Jacobian
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
cv::Mat tmp = vm_poses[i]*point3d_homo;
double x = tmp.at<double>(0,j);
double y = tmp.at<double>(1,j);
double z = tmp.at<double>(2,j);
cv::Mat _j_structure = (cv::Mat_<double>(2, 3)
<< K.at<double>(0,0)/z, 0.0, - K.at<double>(0,0)*x/(z*z),
0.0, K.at<double>(1,1)/z, - K.at<double>(1,1)*y/(z*z));
_j_structure *= vm_poses[i].colRange(0, 3);
_j_structure.copyTo(J.rowRange(i*N_points*2 + 2*j, i*N_points*2 + 2*j + 2).colRange(5 + 3*j, 5 + 3*j + 3));
          /*
          For the Hessian matrix to be full rank, the degrees of freedom must be reduced.
          That is, the first view is fixed to [I 0] and one of the six parameters of the
          second view is fixed as well; here the x component of the second view's
          translation is fixed. Hence the second view has 5 degrees of freedom.
          */
if(i == 1) {
cv::Mat _j_cam = (cv::Mat_<double>(2, 5)
<< 0.0, - K.at<double>(0,0)*x/(z*z), - K.at<double>(0,0)*x*y/(z*z), K.at<double>(0,0)*( 1.0 + ((x*x)/(z*z)) ), - K.at<double>(0,0)*y/z,
K.at<double>(1,1)/z, - K.at<double>(1,1)*y/(z*z), - K.at<double>(1,1)*( 1.0 + ((y*y)/(z*z)) ), K.at<double>(1,1)*((x*y)/(z*z)), K.at<double>(1,1)*x/z);
_j_cam.copyTo(J.rowRange(i*N_points*2 + 2*j, i*N_points*2 + 2*j + 2).colRange(0, 5));
}
}
}
} break;
}
return J;
}
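// Illustrative sketch (not part of the original source): the 2x3 blocks used
// above for the STRUCTURE and FULL cases are the derivative of the pinhole
// projection u = fx*x/z, v = fy*y/z with respect to the camera-frame point
// (x, y, z). The same block factored out as a standalone helper:
static cv::Mat ProjectionJacobianWrtPoint(const cv::Mat& K, double x, double y, double z) {
  const double fx = K.at<double>(0,0);
  const double fy = K.at<double>(1,1);
  return (cv::Mat_<double>(2, 3)
          << fx/z, 0.0, - fx*x/(z*z),
             0.0, fy/z, - fy*y/(z*z));
}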
double Optimizer::ComputeReprojectionError(cv::Mat& mat_reprojection_error, const std::vector<cv::Mat> vm_data_for_process, const PoseAndStructure& _pose_and_structure) {
double error = -1.0;
const cv::Mat K = _pose_and_structure.m_Kd;
switch(me_mode)
{
case BA2Viewes::POSE : {
assert( vm_data_for_process.size() == 2 );
double reprojection_error = 0.0;
const int N_points = _pose_and_structure.m_point3d.cols;
const int N_cameras = 2;
cv::Mat point3d_homo = cv::Mat::ones(4, N_points, CV_64F);
_pose_and_structure.m_point3d.copyTo(point3d_homo.rowRange(0,3));
// Reprojection Error (b)
mat_reprojection_error = cv::Mat::zeros(2*N_cameras*N_points, 1, CV_64F);
std::vector<cv::Mat> vm_point2d_noise(2);
vm_point2d_noise[0] = K * vm_data_for_process[0] * point3d_homo;
vm_point2d_noise[1] = K * vm_data_for_process[1] * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
double d_x = (vm_point2d_noise[i].at<double>(0,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].x);
double d_y = (vm_point2d_noise[i].at<double>(1,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].y);
mat_reprojection_error.at<double>(i*N_points*2 + 2*j, 0) = d_x;
mat_reprojection_error.at<double>(i*N_points*2 + 2*j + 1, 0) = d_y;
reprojection_error += sqrt(d_x*d_x + d_y*d_y);
}
}
error = reprojection_error/(double)(N_cameras*N_points);
} break;
case BA2Viewes::STRUCTURE: {
assert( vm_data_for_process.size() == 1 );
double reprojection_error = 0.0;
const int N_points = _pose_and_structure.m_point3d.cols;
const int N_cameras = 2;
cv::Mat point3d_homo = cv::Mat::ones(4,vm_data_for_process[0].cols,CV_64F);
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
// Reprojection Error (b)
mat_reprojection_error = cv::Mat::zeros(2*N_cameras*N_points, 1, CV_64F);
std::vector<cv::Mat> vm_point2d_noise(2);
vm_point2d_noise[0] = K * _pose_and_structure.vp_pose_and_structure[0].first * point3d_homo;
vm_point2d_noise[1] = K * _pose_and_structure.vp_pose_and_structure[1].first * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
double d_x = (vm_point2d_noise[i].at<double>(0,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].x);
double d_y = (vm_point2d_noise[i].at<double>(1,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].y);
mat_reprojection_error.at<double>(i*N_points*2 + 2*j, 0) = d_x;
mat_reprojection_error.at<double>(i*N_points*2 + 2*j + 1, 0) = d_y;
reprojection_error += sqrt(d_x*d_x + d_y*d_y);
}
}
error = reprojection_error/(double)(N_cameras*N_points);
} break;
case BA2Viewes::FULL: {
assert( vm_data_for_process.size() == 3 );
double reprojection_error = 0.0;
const int N_points = _pose_and_structure.m_point3d.cols;
const int N_cameras = 2;
cv::Mat point3d_homo = cv::Mat::ones(4, N_points, CV_64F);
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
// Reprojection Error (b)
mat_reprojection_error = cv::Mat::zeros(2*N_cameras*N_points, 1, CV_64F);
std::vector<cv::Mat> vm_point2d_noise(2);
vm_point2d_noise[0] = K * vm_data_for_process[1] * point3d_homo;
vm_point2d_noise[1] = K * vm_data_for_process[2] * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
for(int i = 0; i < N_cameras; i++) {
for(int j = 0; j < N_points; j++) {
double d_x = (vm_point2d_noise[i].at<double>(0,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].x);
double d_y = (vm_point2d_noise[i].at<double>(1,j) - _pose_and_structure.vp_pose_and_structure[i].second[j].y);
mat_reprojection_error.at<double>(i*N_points*2 + 2*j, 0) = d_x;
mat_reprojection_error.at<double>(i*N_points*2 + 2*j + 1, 0) = d_y;
reprojection_error += sqrt(d_x*d_x + d_y*d_y);
}
}
error = reprojection_error/(double)(N_cameras*N_points);
} break;
}
return error;
}
cv::Mat Optimizer::ComputeUpdateParams(const cv::Mat& J, const cv::Mat& mat_reprojection_error) {
cv::Mat delta_x;
cv::Mat minus_b = -1.0 * mat_reprojection_error;
cv::Mat H = J.t() * J;
delta_x = H.inv() * J.t() * minus_b;
return delta_x;
}
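// Illustrative note (not part of the original source): ComputeUpdateParams()
// solves the Gauss-Newton normal equations (J^T J) dx = -J^T b by forming the
// explicit inverse. Solving the linear system directly is usually more robust
// numerically; a minimal alternative sketch with the same inputs:
static cv::Mat ComputeUpdateParamsViaSolve(const cv::Mat& J, const cv::Mat& mat_reprojection_error) {
  cv::Mat delta_x;
  cv::Mat minus_b = -1.0 * mat_reprojection_error;
  // DECOMP_CHOLESKY assumes J^T J is symmetric positive definite;
  // cv::DECOMP_SVD is a slower but safer fallback for near-singular systems.
  cv::solve(J.t() * J, J.t() * minus_b, delta_x, cv::DECOMP_CHOLESKY);
  return delta_x;
}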
std::vector<cv::Mat> Optimizer::UpdateParams(const std::vector<cv::Mat>& vm_data_for_process, const cv::Mat& _delta_x) {
switch(me_mode) {
case BA2Viewes::POSE: {
std::vector<cv::Mat> vm_poses = vm_data_for_process;
const int N_cameras = (int)vm_data_for_process.size();
for(int i = 0; i < N_cameras; i++) {
// params on se3 are mapped to SE3
const cv::Mat t = _delta_x.rowRange(i*6, i*6 + 3);
const cv::Mat w = _delta_x.rowRange(i*6 + 3, i*6 + 6);
cv::Mat w_x = (cv::Mat_<double>(3,3) << 0.0, -w.at<double>(2,0), w.at<double>(1,0),
w.at<double>(2,0), 0.0, -w.at<double>(0,0),
-w.at<double>(1,0), w.at<double>(0,0), 0.0);
const double theta = sqrt(w.at<double>(0,0)*w.at<double>(0,0) + w.at<double>(1,0)*w.at<double>(1,0) + w.at<double>(2,0)*w.at<double>(2,0));
cv::Mat e_w_x = cv::Mat::eye(3,3,CV_64F) + (std::sin(theta)/theta)*w_x + ((1.0-std::cos(theta))/(theta*theta))*w_x*w_x;
cv::Mat V = cv::Mat::eye(3,3,CV_64F) + ((1.0-std::cos(theta))/(theta*theta))*w_x + ((theta - std::sin(theta))/(theta*theta*theta))*w_x*w_x;
cv::Mat Vt = V * t;
cv::Mat delta_SE3 = cv::Mat::eye(3,4,CV_64F);
e_w_x.copyTo(delta_SE3.rowRange(0,3).colRange(0,3));
Vt.copyTo(delta_SE3.rowRange(0,3).col(3));
cv::Mat current_pose = cv::Mat::eye(4,4,CV_64F);
vm_poses[i].copyTo(current_pose.rowRange(0,3).colRange(0,4));
// Update!
// vm_poses[i] = vm_poses[i] * delta_SE3;
vm_poses[i] = delta_SE3*current_pose;
}
return vm_poses;
} break;
case BA2Viewes::STRUCTURE: {
cv::Mat _point3d = vm_data_for_process[0];
const int N_points = _point3d.cols;
cv::Mat new_point3d = _point3d.clone();
for(int i = 0; i < N_points; i++) {
new_point3d.at<double>(0,i) += _delta_x.at<double>(3*i,0);
new_point3d.at<double>(1,i) += _delta_x.at<double>(3*i+1,0);
new_point3d.at<double>(2,i) += _delta_x.at<double>(3*i+2,0);
}
return std::vector<cv::Mat>{new_point3d};
} break;
case BA2Viewes::FULL: {
std::vector<cv::Mat> vm_poses{vm_data_for_process[1], vm_data_for_process[2]};
// params on se3 are mapped to SE3
const cv::Mat t = (cv::Mat_<double>(3,1) << 0.0, _delta_x.at<double>(0) , _delta_x.at<double>(1));
const cv::Mat w = _delta_x.rowRange(2, 5);
cv::Mat w_x = (cv::Mat_<double>(3,3) << 0.0, -w.at<double>(2,0), w.at<double>(1,0),
w.at<double>(2,0), 0.0, -w.at<double>(0,0),
-w.at<double>(1,0), w.at<double>(0,0), 0.0);
const double theta = sqrt(w.at<double>(0,0)*w.at<double>(0,0) + w.at<double>(1,0)*w.at<double>(1,0) + w.at<double>(2,0)*w.at<double>(2,0));
cv::Mat e_w_x = cv::Mat::eye(3,3,CV_64F) + (std::sin(theta)/theta)*w_x + ((1.0-std::cos(theta))/(theta*theta))*w_x*w_x;
cv::Mat V = cv::Mat::eye(3,3,CV_64F) + ((1.0-std::cos(theta))/(theta*theta))*w_x + ((theta - std::sin(theta))/(theta*theta*theta))*w_x*w_x;
cv::Mat Vt = V * t;
cv::Mat delta_SE3 = cv::Mat::eye(3,4,CV_64F);
e_w_x.copyTo(delta_SE3.rowRange(0,3).colRange(0,3));
Vt.copyTo(delta_SE3.rowRange(0,3).col(3));
cv::Mat current_pose = cv::Mat::eye(4,4,CV_64F);
vm_poses[1].copyTo(current_pose.rowRange(0,3).colRange(0,4));
// Update!
// vm_poses[1] = vm_poses[1] * delta_SE3;
vm_poses[1] = delta_SE3*current_pose;
cv::Mat _point3d = vm_data_for_process[0];
const int N_points = _point3d.cols;
cv::Mat new_point3d = _point3d.clone();
      // Update!
for(int i = 0; i < N_points; i++) {
new_point3d.at<double>(0,i) += _delta_x.at<double>(5 + 3*i,0);
new_point3d.at<double>(1,i) += _delta_x.at<double>(5 + 3*i+1,0);
new_point3d.at<double>(2,i) += _delta_x.at<double>(5 + 3*i+2,0);
}
return std::vector<cv::Mat>{new_point3d, vm_poses[0], vm_poses[1]};
} break;
}
cv::Mat dummy;
return std::vector<cv::Mat>{dummy};
}
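// Illustrative sketch (not part of the original source): UpdateParams() maps an
// se(3) increment (t, w) to SE(3) with the exponential map. The closed-form
// coefficients sin(theta)/theta and (1-cos(theta))/theta^2 divide by theta and
// break down as theta -> 0; a guarded variant of the same mapping:
static cv::Mat ExpSE3(const cv::Mat& t, const cv::Mat& w) {
  const double wx = w.at<double>(0,0), wy = w.at<double>(1,0), wz = w.at<double>(2,0);
  const double theta = std::sqrt(wx*wx + wy*wy + wz*wz);
  cv::Mat w_x = (cv::Mat_<double>(3,3) << 0.0, -wz, wy,
                                          wz, 0.0, -wx,
                                          -wy, wx, 0.0);
  // Small-angle limits of the series expansions.
  double A = 1.0, B = 0.5, C = 1.0/6.0;
  if(theta > 1e-8) {
    A = std::sin(theta)/theta;
    B = (1.0 - std::cos(theta))/(theta*theta);
    C = (theta - std::sin(theta))/(theta*theta*theta);
  }
  cv::Mat I = cv::Mat::eye(3,3,CV_64F);
  cv::Mat R = I + A*w_x + B*w_x*w_x;
  cv::Mat V = I + B*w_x + C*w_x*w_x;
  cv::Mat T = cv::Mat::eye(3,4,CV_64F);
  R.copyTo(T.rowRange(0,3).colRange(0,3));
  cv::Mat Vt = V * t;
  Vt.copyTo(T.rowRange(0,3).col(3));
  return T;
}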
// This returns |reprojection error|^2
double Optimizer::Run() {
if(mb_verbose) {
cv::namedWindow(ms_window_name);
}
double error = -1.0;
switch(me_mode)
{
case BA2Viewes::POSE : {
error = OptimizeOnlyPose();
} break;
case BA2Viewes::STRUCTURE : {
error = OptimizeOnlyStructure();
} break;
case BA2Viewes::FULL : {
error = OptimizeAll();
} break;
}
return error;
}
double Optimizer::OptimizeOnlyPose () {
double error = -1.0;
std::vector<cv::Mat> vm_poses_for_process{mpm_noised_poses.first, mpm_noised_poses.second};
for(int iter = 0; iter < m_MAXITER; iter++) {
cv::Mat J = ComputeJ(vm_poses_for_process, m_pose_and_structure);
cv::Mat mat_reprojection_error;
double current_error = ComputeReprojectionError(mat_reprojection_error, std::vector<cv::Mat>{mpm_noised_poses.first, mpm_noised_poses.second}, m_pose_and_structure);
if(IsConverged(current_error, error)) {
break;
}
error = current_error;
std::cout << "Reprojection Error : " << error << " ( iter_num = " << iter << " )"<< std::endl;
cv::Mat delta_x = ComputeUpdateParams(J, mat_reprojection_error);
vm_poses_for_process = UpdateParams(vm_poses_for_process, delta_x);
if(mb_verbose) {
ShowProcess(vm_poses_for_process, m_pose_and_structure);
}
}
return error;
}
double Optimizer::OptimizeOnlyStructure () {
double error = -1.0;
cv::Mat m_structure_for_process = mm_noised_structure.clone();
for(int iter = 0; iter < m_MAXITER; iter++) {
cv::Mat J = ComputeJ(std::vector<cv::Mat>{m_structure_for_process}, m_pose_and_structure);
cv::Mat mat_reprojection_error;
double current_error = ComputeReprojectionError(mat_reprojection_error, std::vector<cv::Mat>{m_structure_for_process}, m_pose_and_structure);
if(IsConverged(current_error, error)) {
break;
}
error = current_error;
std::cout << "Reprojection Error : " << error << " ( iter_num = " << iter << " )"<< std::endl;
cv::Mat delta_x = ComputeUpdateParams(J, mat_reprojection_error);
m_structure_for_process = UpdateParams(std::vector<cv::Mat>{m_structure_for_process}, delta_x)[0];
if(mb_verbose) {
ShowProcess(std::vector<cv::Mat>{m_structure_for_process}, m_pose_and_structure);
}
}
return error;
}
double Optimizer::OptimizeAll () {
double error = -1.0;
std::vector<cv::Mat> vm_data_for_process{mm_noised_structure, mpm_noised_poses.first, mpm_noised_poses.second};
for(int iter = 0; iter < m_MAXITER; iter++) {
cv::Mat J = ComputeJ(vm_data_for_process, m_pose_and_structure);
cv::Mat mat_reprojection_error;
double current_error = ComputeReprojectionError(mat_reprojection_error, vm_data_for_process, m_pose_and_structure);
if(IsConverged(current_error, error)) {
break;
}
error = current_error;
std::cout << "Reprojection Error : " << error << " ( iter_num = " << iter << " )"<< std::endl;
cv::Mat delta_x = ComputeUpdateParams(J, mat_reprojection_error);
vm_data_for_process = UpdateParams(vm_data_for_process, delta_x);
if(mb_verbose) {
ShowProcess(vm_data_for_process, m_pose_and_structure);
}
}
return error;
}
void Optimizer::SetImagePair(const std::pair<cv::Mat,cv::Mat> _pm_images) {
mpm_images = _pm_images;
return;
}
void Optimizer::SetTargetData(const std::vector<cv::Mat>& _vm_noised_data) {
switch (me_mode) {
case BA2Viewes::POSE: {
/*
* _vm_noised_data = [1st-camera, 2nd-camera]
*/
mm_noised_structure = cv::Mat::zeros(3,3,CV_64F);
mpm_noised_poses = std::make_pair(_vm_noised_data[0],_vm_noised_data[1]);
}break;
case BA2Viewes::STRUCTURE: {
/*
* _vm_noised_data = [3d-point]
*/
mm_noised_structure = _vm_noised_data[0].clone();
mpm_noised_poses = std::make_pair(cv::Mat::eye(3,4,CV_64F),cv::Mat::eye(3,4,CV_64F));
}break;
case BA2Viewes::FULL: {
/*
* _vm_noised_data = [3d-point, 1st-camera(not noised), 2nd-camera]
*/
mm_noised_structure = _vm_noised_data[0].clone();
mpm_noised_poses = std::make_pair(_vm_noised_data[1],_vm_noised_data[2]);
}
}
}
bool Optimizer::IsConverged(const double current_error, const double previous_error) {
bool b_stop_optimization = false;
if(previous_error < 0.0) {
// This case is 1st optimization
// do nothing
return false;
}
if(previous_error <= current_error + 0.0000001) {
b_stop_optimization = true;
}
return b_stop_optimization;
}
void Optimizer::ShowProcess(const std::vector<cv::Mat> vm_data_for_process, const PoseAndStructure& _pose_and_structure) {
const cv::Mat K = _pose_and_structure.m_Kd;
std::vector<cv::Mat> vm_point2d_noise(2);
switch(me_mode)
{
case BA2Viewes::POSE : {
assert( vm_data_for_process.size() == 2 );
const int N_points = _pose_and_structure.m_point3d.cols;
cv::Mat point3d_homo = cv::Mat::ones(4, N_points, CV_64F);
_pose_and_structure.m_point3d.copyTo(point3d_homo.rowRange(0,3));
vm_point2d_noise[0] = K * vm_data_for_process[0] * point3d_homo;
vm_point2d_noise[1] = K * vm_data_for_process[1] * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
} break;
case BA2Viewes::STRUCTURE: {
assert( vm_data_for_process.size() == 1 );
cv::Mat point3d_homo = cv::Mat::ones(4,vm_data_for_process[0].cols,CV_64F);
const int N_points = _pose_and_structure.m_point3d.cols;
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
vm_point2d_noise[0] = K * _pose_and_structure.vp_pose_and_structure[0].first * point3d_homo;
vm_point2d_noise[1] = K * _pose_and_structure.vp_pose_and_structure[1].first * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
} break;
case BA2Viewes::FULL: {
assert( vm_data_for_process.size() == 3 );
const int N_points = _pose_and_structure.m_point3d.cols;
cv::Mat point3d_homo = cv::Mat::ones(4, N_points, CV_64F);
vm_data_for_process[0].copyTo(point3d_homo.rowRange(0,3));
vm_point2d_noise[0] = K * vm_data_for_process[1] * point3d_homo;
vm_point2d_noise[1] = K * vm_data_for_process[2] * point3d_homo;
for(int i = 0; i < (int)vm_point2d_noise[0].cols; i++) {
vm_point2d_noise[0].at<double>(0,i) = vm_point2d_noise[0].at<double>(0,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(1,i) = vm_point2d_noise[0].at<double>(1,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[0].at<double>(2,i) = vm_point2d_noise[0].at<double>(2,i)/vm_point2d_noise[0].at<double>(2,i);
vm_point2d_noise[1].at<double>(0,i) = vm_point2d_noise[1].at<double>(0,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(1,i) = vm_point2d_noise[1].at<double>(1,i)/vm_point2d_noise[1].at<double>(2,i);
vm_point2d_noise[1].at<double>(2,i) = vm_point2d_noise[1].at<double>(2,i)/vm_point2d_noise[1].at<double>(2,i);
}
} break;
}
std::pair< std::vector<cv::Point2d>,std::vector<cv::Point2d> > pv_point2d;
pv_point2d.first.reserve(vm_point2d_noise[0].cols);
pv_point2d.second.reserve(vm_point2d_noise[1].cols);
for(int i = 0; i < vm_point2d_noise[0].cols; i++) {
pv_point2d.first.push_back(cv::Point2d{vm_point2d_noise[0].at<double>(0,i),vm_point2d_noise[0].at<double>(1,i)});
pv_point2d.second.push_back(cv::Point2d{vm_point2d_noise[1].at<double>(0,i),vm_point2d_noise[1].at<double>(1,i)});
}
cv::Mat m_image0 = mpm_images.first.clone();
cv::Mat m_image1 = mpm_images.second.clone();
for(int i = 0; i < vm_point2d_noise[0].cols; i++) {
cv::drawMarker(m_image0, _pose_and_structure.vp_pose_and_structure[0].second[i],
cv::Scalar(255, 0, 0), cv::MARKER_CROSS, 10, 2);
cv::drawMarker(m_image0, pv_point2d.first[i],
cv::Scalar(0, 255, 0), cv::MARKER_CROSS, 10, 2);
cv::drawMarker(m_image1, _pose_and_structure.vp_pose_and_structure[1].second[i],
cv::Scalar(255, 0, 0), cv::MARKER_CROSS, 10, 2);
cv::drawMarker(m_image1, pv_point2d.second[i],
cv::Scalar(0, 255, 0), cv::MARKER_CROSS, 10, 2);
}
cv::Mat mat_for_viewer;
cv::hconcat(m_image0, m_image1, mat_for_viewer);
cv::putText( mat_for_viewer, "BLUE : Feature Points", cv::Point{10,20}, cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(255,0,0),2);
cv::putText( mat_for_viewer, "GREEN : Reprojected Points", cv::Point{10,45}, cv::FONT_HERSHEY_SIMPLEX, 0.7, cv::Scalar(0,255,0),2);
cv::resize(mat_for_viewer, mat_for_viewer,cv::Size(m_image0.cols,m_image0.rows/2));
cv::imshow(ms_window_name, mat_for_viewer);
cv::waitKey(1);
#if 0
static int count = 1;
cv::imwrite(std::to_string(count)+".png", mat_for_viewer);
count = count + 1;
#endif
return;
}
void Optimizer::SetVerbose(const bool b_verbose) {
if(b_verbose) {
if( (!mpm_images.first.empty()) and (!mpm_images.second.empty()) ) {
mb_verbose = b_verbose;
}
else {
      std::cout << "[WARN] : The pair of images is empty, but it is needed for the verbose/debug view.\n";
std::cout << " Please use SetImagePair()." << std::endl;
mb_verbose = false;
return;
}
}
mb_verbose = b_verbose;
return;
}
void Optimizer::Spin() {
if(mb_verbose) {
while(1) {
if(cv::waitKey(1)=='q') {
break;
}
}
}
}
} // namespace
| 44.506369
| 173
| 0.607549
|
cashiwamochi
|
33646ccc21fa4e3063f45dde7e41b10ec981ac77
| 12,868
|
cpp
|
C++
|
code/steelth_renderer_opengl_bake.cpp
|
jessecalvert/steelth
|
6f1e4db5bdfb5b519708aace7c133dadc71dd540
|
[
"MIT"
] | null | null | null |
code/steelth_renderer_opengl_bake.cpp
|
jessecalvert/steelth
|
6f1e4db5bdfb5b519708aace7c133dadc71dd540
|
[
"MIT"
] | null | null | null |
code/steelth_renderer_opengl_bake.cpp
|
jessecalvert/steelth
|
6f1e4db5bdfb5b519708aace7c133dadc71dd540
|
[
"MIT"
] | null | null | null |
/*@H
* File: steelth_renderer_opengl_bake.cpp
* Author: Jesse Calvert
* Created: April 23, 2019, 16:15
* Last modified: April 24, 2019, 20:46
*/
internal u32
HashSurfel(opengl_baked_data *BakedData, v3i PositionIndex, u32 PrincipalNormal)
{
u32 HashValue = 2*(u32)(PositionIndex.x) +
3*(u32)(PositionIndex.y) +
7*(u32)(PositionIndex.z) +
13*PrincipalNormal;
HashValue = HashValue % ArrayCount(BakedData->SurfelGridHash);
return HashValue;
}
internal u32
GetPrincipalDirIndex(v3 Vector)
{
u32 PrincipalDirIndex = (Vector.x > 0.0f) ? 0 : 1;
f32 MaxNormalExtentSq = Square(Vector.x);
f32 yExtent = Square(Vector.y);
f32 zExtent = Square(Vector.z);
if(yExtent > MaxNormalExtentSq)
{
MaxNormalExtentSq = yExtent;
PrincipalDirIndex = (Vector.y > 0.0f) ? 2 : 3;
}
if(zExtent > MaxNormalExtentSq)
{
PrincipalDirIndex = (Vector.z > 0.0f) ? 4 : 5;
}
return PrincipalDirIndex;
}
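// Illustrative note (not part of the original source): GetPrincipalDirIndex
// maps a direction onto one of six axis-aligned bins, chosen by the component
// with the largest magnitude: +X=0, -X=1, +Y=2, -Y=3, +Z=4, -Z=5. For example
// (hypothetical inputs):
//   GetPrincipalDirIndex(V3( 0.9f, 0.1f,  0.2f)) == 0   // dominated by +X
//   GetPrincipalDirIndex(V3(-0.1f, 0.2f, -0.8f)) == 5   // dominated by -Z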
internal v3i
GetPositionIndex(opengl_baked_data *BakedData, v3 P)
{
v3i PositionIndex = V3i(FloorReal32ToInt32(P.x/BakedData->GridCellDim),
FloorReal32ToInt32(P.y/BakedData->GridCellDim),
FloorReal32ToInt32(P.z/BakedData->GridCellDim));
return PositionIndex;
}
internal surfel_grid_cell *
GetSurfelGridCell(opengl_baked_data *BakedData, surface_element *Surfel)
{
v3i PositionIndex = GetPositionIndex(BakedData, Surfel->P);
u32 PrincipalNormal = GetPrincipalDirIndex(Surfel->Normal);
u32 HashValue = HashSurfel(BakedData, PositionIndex, PrincipalNormal);
surfel_grid_cell *Result = BakedData->SurfelGridHash[HashValue];
while(Result)
{
if((Result->PositionIndex == PositionIndex) &&
(Result->PrincipalNormal == PrincipalNormal))
{
break;
}
Result = Result->NextInHash;
}
if(!Result)
{
u32 Index = BakedData->SurfelCellCount++;
Assert(Index < BakedData->MaxSurfelCells);
Result = BakedData->SurfelCells + Index;
Result->Index = Index;
Result->PositionIndex = PositionIndex;
Result->PrincipalNormal = PrincipalNormal;
DLIST_INIT(&Result->Sentinel);
Result->NextInHash = BakedData->SurfelGridHash[HashValue];
BakedData->SurfelGridHash[HashValue] = Result;
}
return Result;
}
internal u32
AddSurfelToCell(opengl_baked_data *BakedData, memory_region *TempRegion, surface_element *Surfel)
{
surfel_link *Link = PushStruct(TempRegion, surfel_link);
Link->Surfel = Surfel;
surfel_grid_cell *Cell = GetSurfelGridCell(BakedData, Surfel);
DLIST_INSERT(&Cell->Sentinel, Link);
return Cell->Index;
}
internal opengl_baked_data *
InitOpenGLBakedData(opengl_state *State)
{
opengl_baked_data *BakedData = State->BakedData = PushStruct(&State->Region, opengl_baked_data);
BakedData->GridCellDim = 1.0f;
BakedData->Resolution = V2i(32, 32);
BakedData->TotalResolution = V2i(6*BakedData->Resolution.x, BakedData->Resolution.y);
opengl_framebuffer *ProbeGBuffer = &BakedData->ProbeGBuffer;
texture_settings Settings[] =
{
TextureSettings(0, 0, 3, 4, Texture_Float), // Position
TextureSettings(0, 0, 1, 4, Texture_Float|Texture_DepthBuffer), // Depth
TextureSettings(0, 0, 3, 4, Texture_Float), // Normal
TextureSettings(0, 0, 3, 1, 0), // Albedo
TextureSettings(0, 0, 1, 4, Texture_Float), // Emission
};
u32 TextureCount = ArrayCount(Settings);
OpenGLCreateFramebuffer(ProbeGBuffer, Settings, TextureCount, BakedData->TotalResolution);
return BakedData;
}
internal void
OpenGLBakeLightProbes(opengl_state *State,
renderer_mesh StaticGeometry, renderer_texture Texture,
v3 *ProbePositions, u32 ProbeCount,
lighting_solution *Solution)
{
TIMED_FUNCTION();
Assert(ProbeCount < MAX_PROBES);
Assert(Solution);
memory_region TempRegion = {};
opengl_baked_data *BakedData = State->BakedData;
if(!BakedData)
{
BakedData = InitOpenGLBakedData(State);
}
rectangle3 SurfelExtent = InvertedInfinityRectangle3();
for(u32 ProbeIndex = 0;
ProbeIndex < ProbeCount;
++ProbeIndex)
{
v3 P = ProbePositions[ProbeIndex];
if(P.x < SurfelExtent.Min.x) {SurfelExtent.Min.x = P.x;}
if(P.y < SurfelExtent.Min.y) {SurfelExtent.Min.y = P.y;}
if(P.z < SurfelExtent.Min.z) {SurfelExtent.Min.z = P.z;}
if(P.x > SurfelExtent.Max.x) {SurfelExtent.Max.x = P.x;}
if(P.y > SurfelExtent.Max.y) {SurfelExtent.Max.y = P.y;}
if(P.z > SurfelExtent.Max.z) {SurfelExtent.Max.z = P.z;}
}
v3 ExtentDim = Dim(SurfelExtent);
v3i GridDim = V3i((s32)(ExtentDim.x/BakedData->GridCellDim),
(s32)(ExtentDim.y/BakedData->GridCellDim),
(s32)(ExtentDim.z/BakedData->GridCellDim));
BakedData->MaxSurfelCells = (1 + GridDim.x)*(1 + GridDim.y)*(1 + GridDim.z)*6;
BakedData->SurfelCells = PushArray(&TempRegion, BakedData->MaxSurfelCells, surfel_grid_cell);
light_probe_baking *Probes = PushArray(&TempRegion, ProbeCount, light_probe_baking);
camera ProbeCamera = {};
ProbeCamera.Type = Camera_Perspective;
ChangeCameraSettings(&ProbeCamera, 0.5f*Pi32, 1.0f, 0.01f, 100.0f);
v3 ViewRay = V3(1, 1, -1);
v3 XAxis = V3(1,0,0);
v3 YAxis = V3(0,1,0);
v3 ZAxis = V3(0,0,1);
quaternion Rotations[] =
{
RotationQuaternion(ZAxis, YAxis, -XAxis),
RotationQuaternion(-ZAxis, YAxis, XAxis),
RotationQuaternion(XAxis, ZAxis, -YAxis),
RotationQuaternion(XAxis, -ZAxis, YAxis),
RotationQuaternion(-XAxis, YAxis, -ZAxis),
NoRotation(),
};
v2i Resolution = BakedData->Resolution;
v2i TotalResolution = BakedData->TotalResolution;
u32 SurfelCount = TotalResolution.x*TotalResolution.y;
u32 BufferSize = SurfelCount*sizeof(v4);
u32 DepthBufferSize = SurfelCount*sizeof(f32);
u32 MaxSurfelRefs = TotalResolution.x*TotalResolution.y;
f32 PixelSolidAngle = 1.0f / (3*Resolution.x*Resolution.y);
v4 *Positions = (v4 *)PushSize(&TempRegion, BufferSize);
f32 *Depths = (f32 *)PushSize(&TempRegion, DepthBufferSize);
v4 *Normals = (v4 *)PushSize(&TempRegion, BufferSize);
v4 *Albedos = (v4 *)PushSize(&TempRegion, BufferSize);
v4 *Emissions = (v4 *)PushSize(&TempRegion, BufferSize);
u32 SurfelsAllocated = 0;
u32 TotalRefCount = 0;
for(u32 ProbeIndex = 0;
ProbeIndex < ProbeCount;
++ProbeIndex)
{
TIMED_BLOCK("Process probe");
light_probe_baking *Probe = Probes + ProbeIndex;
Probe->P = ProbePositions[ProbeIndex];
Probe->SurfelRefs = PushArray(&TempRegion, MaxSurfelRefs, u32);
MoveCamera(&ProbeCamera, Probe->P);
shader_bake_probe *ProbeShader = (shader_bake_probe *) BeginShader(State, Shader_BakeProbe);
glDepthFunc(GL_LEQUAL);
glCullFace(GL_BACK);
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glEnable(GL_SCISSOR_TEST);
opengl_framebuffer *ProbeGBuffer = &BakedData->ProbeGBuffer;
OpenGLBindFramebuffer(State, ProbeGBuffer);
f32 MaxDepth = 1.0f;
f32 Emission = 0.0f;
glClearBufferfv(GL_DEPTH, 0, &MaxDepth);
glClearBufferfv(GL_COLOR, 0, V3(0,0,0).E);
glClearBufferfv(GL_COLOR, 1, V3(0,0,0).E);
glClearBufferfv(GL_COLOR, 2, V3(0,0,0).E);
glClearBufferfv(GL_COLOR, 3, &Emission);
for(u32 DirIndex = 0;
DirIndex < ArrayCount(Rotations);
++DirIndex)
{
RotateCamera(&ProbeCamera, Rotations[DirIndex]);
CameraSetMatrices(&ProbeCamera);
rectangle2i ClipRect =
Rectangle2i(V2i(DirIndex * Resolution.x, 0), Resolution + V2i(DirIndex * Resolution.x, 0));
OpenGLBindFramebuffer(State, ProbeGBuffer, ClipRect);
SetUniform(ProbeShader->Projection, ProbeCamera.Projection_);
SetUniform(ProbeShader->View, ProbeCamera.View_);
SetUniform(ProbeShader->FarPlane, ProbeCamera.Far);
SetUniform(ProbeShader->Orthogonal, false);
BindTexture(0, State->TextureArray, GL_TEXTURE_2D_ARRAY);
SetUniform(ProbeShader->MeshTextureIndex, Texture.Index);
DrawMesh(State, StaticGeometry.Index);
}
BindTexture(0, 0, GL_TEXTURE_2D_ARRAY);
glDisable(GL_SCISSOR_TEST);
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
EndShader(State);
glGetTextureImage(ProbeGBuffer->Textures[0], 0, GL_RGBA, GL_FLOAT, BufferSize, Positions);
glGetTextureImage(ProbeGBuffer->Textures[1], 0, GL_DEPTH_COMPONENT, GL_FLOAT, DepthBufferSize, Depths);
glGetTextureImage(ProbeGBuffer->Textures[2], 0, GL_RGBA, GL_FLOAT, BufferSize, Normals);
glGetTextureImage(ProbeGBuffer->Textures[3], 0, GL_RGBA, GL_FLOAT, BufferSize, Albedos);
glGetTextureImage(ProbeGBuffer->Textures[4], 0, GL_RGBA, GL_FLOAT, BufferSize, Emissions);
for(s32 Y = 0;
Y < Resolution.y;
Y++)
{
for(s32 X = 0;
X < Resolution.x;
++X)
{
u32 PixelIndex = Y * (TotalResolution.x) + X;
for(u32 DirIndex = 0;
DirIndex < ArrayCount(ProbeDirections);
++DirIndex)
{
f32 Depth = Depths[PixelIndex];
if(Depth < 1.0f)
{
surface_element *Surfel = PushStruct(&TempRegion, surface_element);
++SurfelsAllocated;
Surfel->P = Positions[PixelIndex].xyz;
Surfel->Normal = Normals[PixelIndex].xyz;
Surfel->Albedo = Albedos[PixelIndex].xyz;
Surfel->Emission = Emissions[PixelIndex].x;
u32 CellIndex = AddSurfelToCell(BakedData, &TempRegion, Surfel);
b32 Found = false;
for(u32 Index = 0;
!Found && (Index < Probe->SurfelCount);
++Index)
{
Found = (Probe->SurfelRefs[Index] == CellIndex);
}
if(!Found)
{
Probe->SurfelRefs[Probe->SurfelCount++] = CellIndex;
++TotalRefCount;
}
}
else
{
v2 TexCoords = V2((f32)X/(f32)Resolution.x, (f32)Y/(f32)Resolution.y);
TexCoords = 2.0f*TexCoords - V2(1.0f, 1.0f);
v3 SurfelDirection = NOZ(RotateBy(
Hadamard(ViewRay, V3(TexCoords, 1.0f)),
Rotations[DirIndex]));
// TODO: Pixels close to the cube borders should have a smaller solid angle.
Probe->SkyVisibility[(SurfelDirection.x > 0.0f) ? 0 : 1] +=
AbsoluteValue(SurfelDirection.x) * PixelSolidAngle * (6.17f/Pi32);
Probe->SkyVisibility[(SurfelDirection.y > 0.0f) ? 2 : 3] +=
AbsoluteValue(SurfelDirection.y) * PixelSolidAngle * (6.17f/Pi32);
Probe->SkyVisibility[(SurfelDirection.z > 0.0f) ? 4 : 5] +=
AbsoluteValue(SurfelDirection.z) * PixelSolidAngle * (6.17f/Pi32);
}
PixelIndex += (Resolution.x);
}
}
}
for(u32 DirectionIndex = 0;
DirectionIndex < ArrayCount(ProbeDirections);
++DirectionIndex)
{
Probe->SkyVisibility[DirectionIndex] = Clamp01(Probe->SkyVisibility[DirectionIndex]);
}
}
Solution->SurfelCount = BakedData->SurfelCellCount;
Solution->ProbeCount = ProbeCount;
Solution->SurfelRefCount = TotalRefCount;
Solution->Surfels = PushArray(&State->Region, Solution->SurfelCount, surface_element);
Solution->Probes = PushArray(&State->Region, Solution->ProbeCount, light_probe);
Solution->SurfelRefs = PushArray(&State->Region, Solution->SurfelRefCount, u32);
for(u32 SurfelIndex = 0;
SurfelIndex < Solution->SurfelCount;
++SurfelIndex)
{
surface_element *Surfel = Solution->Surfels + SurfelIndex;
surfel_grid_cell *Cell = BakedData->SurfelCells + SurfelIndex;
u32 LinkCount = 0;
for(surfel_link *Link = Cell->Sentinel.Next;
Link != &Cell->Sentinel;
Link = Link->Next)
{
surface_element *ContainedSurfel = Link->Surfel;
Surfel->P += ContainedSurfel->P;
Surfel->Normal += ContainedSurfel->Normal;
Surfel->Albedo += ContainedSurfel->Albedo;
Surfel->Emission += ContainedSurfel->Emission;
++LinkCount;
}
f32 InvSurfelCount = (1.0f/LinkCount);
Surfel->P = InvSurfelCount*Surfel->P;
Surfel->Normal = NOZ(InvSurfelCount*Surfel->Normal);
Surfel->Albedo = InvSurfelCount*Surfel->Albedo;
Surfel->Emission = InvSurfelCount*Surfel->Emission;
Surfel->Area = Square(BakedData->GridCellDim);
Assert(Surfel->Albedo != V3(0,0,0));
f32 MinDistSq = Real32Maximum;
for(u32 ProbeIndex = 0;
ProbeIndex < ProbeCount;
++ProbeIndex)
{
v3 P = ProbePositions[ProbeIndex];
v3 ProbeRelP = P - Surfel->P;
if(Inner(Surfel->Normal, ProbeRelP) > 0.0f)
{
f32 DistSq = LengthSq(ProbeRelP);
if(DistSq < MinDistSq)
{
MinDistSq = DistSq;
Surfel->ClosestProbe = ProbeIndex;
}
}
}
}
u32 CurrentRef = 0;
for(u32 ProbeIndex = 0;
ProbeIndex < Solution->ProbeCount;
++ProbeIndex)
{
light_probe *Probe = Solution->Probes + ProbeIndex;
light_probe_baking *ProbeBaking = Probes + ProbeIndex;
Probe->P = ProbeBaking->P;
Probe->LightContributions[0].w = ProbeBaking->SkyVisibility[0];
Probe->LightContributions[1].w = ProbeBaking->SkyVisibility[1];
Probe->LightContributions[2].w = ProbeBaking->SkyVisibility[2];
Probe->LightContributions[3].w = ProbeBaking->SkyVisibility[3];
Probe->LightContributions[4].w = ProbeBaking->SkyVisibility[4];
Probe->LightContributions[5].w = ProbeBaking->SkyVisibility[5];
Probe->FirstSurfel = CurrentRef;
Probe->OnePastLastSurfel = Probe->FirstSurfel + ProbeBaking->SurfelCount;
for(u32 RefIndex = 0;
RefIndex < ProbeBaking->SurfelCount;
++RefIndex)
{
Solution->SurfelRefs[CurrentRef++] = ProbeBaking->SurfelRefs[RefIndex];
}
}
Clear(&TempRegion);
Solution->Valid = true;
}
| 30.932692
| 105
| 0.711532
|
jessecalvert
|
336486c02e42eb1da9c8bc33c1c3874ae9270cd1
| 1,626
|
cpp
|
C++
|
043_ofShader/src/ofApp.cpp
|
KangWeon/openFrameworksTutorialSeries
|
de396349bf895f7bc8205deeb25feb155d1e3bcc
|
[
"MIT"
] | 168
|
2015-04-15T09:17:44.000Z
|
2022-03-29T13:41:06.000Z
|
043_ofShader/src/ofApp.cpp
|
KangWeon/openFrameworksTutorialSeries
|
de396349bf895f7bc8205deeb25feb155d1e3bcc
|
[
"MIT"
] | 1
|
2020-04-23T16:55:45.000Z
|
2020-04-23T16:56:07.000Z
|
043_ofShader/src/ofApp.cpp
|
KangWeon/openFrameworksTutorialSeries
|
de396349bf895f7bc8205deeb25feb155d1e3bcc
|
[
"MIT"
] | 41
|
2015-04-26T15:48:41.000Z
|
2022-03-16T03:44:03.000Z
|
#include "ofApp.h"
//--------------------------------------------------------------
void ofApp::setup(){
shader.load("shader");
}
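// Editor's note (assumption about the openFrameworks convention, not stated in this tutorial):
// shader.load("shader") looks for "shader.vert" and "shader.frag" (plus "shader.geom" if
// present) in the app's bin/data folder. The pair can also be passed explicitly, which makes
// the file dependency obvious:
//
//     shader.load("shader.vert", "shader.frag");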
//--------------------------------------------------------------
void ofApp::update(){
}
//--------------------------------------------------------------
void ofApp::draw(){
shader.begin();
ofDrawRectangle(0, 0, ofGetWidth(), ofGetHeight());
shader.end();
}
//--------------------------------------------------------------
void ofApp::keyPressed(int key){
}
//--------------------------------------------------------------
void ofApp::keyReleased(int key){
}
//--------------------------------------------------------------
void ofApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void ofApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void ofApp::mouseEntered(int x, int y){
}
//--------------------------------------------------------------
void ofApp::mouseExited(int x, int y){
}
//--------------------------------------------------------------
void ofApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
void ofApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
void ofApp::dragEvent(ofDragInfo dragInfo){
}
| 21.972973
| 64
| 0.292743
|
KangWeon
|
33649d5d61f6bab4541246776ac60d87ebc6bfe7
| 35,658
|
cpp
|
C++
|
src/cpp/fastdds/publisher/DataWriterImpl.cpp
|
mspiegel/Fast-RTPS
|
82fd67e58a5a18f22c19209936b15356c063d3aa
|
[
"Apache-2.0"
] | null | null | null |
src/cpp/fastdds/publisher/DataWriterImpl.cpp
|
mspiegel/Fast-RTPS
|
82fd67e58a5a18f22c19209936b15356c063d3aa
|
[
"Apache-2.0"
] | null | null | null |
src/cpp/fastdds/publisher/DataWriterImpl.cpp
|
mspiegel/Fast-RTPS
|
82fd67e58a5a18f22c19209936b15356c063d3aa
|
[
"Apache-2.0"
] | null | null | null |
// Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* DataWriterImpl.cpp
*
*/
#include <fastdds/publisher/DataWriterImpl.hpp>
#include <fastdds/dds/topic/TypeSupport.hpp>
#include <fastdds/dds/publisher/DataWriter.hpp>
#include <fastrtps/attributes/TopicAttributes.h>
#include <fastdds/publisher/PublisherImpl.hpp>
#include <fastdds/dds/publisher/Publisher.hpp>
#include <fastdds/rtps/writer/RTPSWriter.h>
#include <fastdds/rtps/writer/StatefulWriter.h>
#include <fastdds/dds/domain/DomainParticipant.hpp>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/RTPSDomain.h>
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/utils/TimeConversion.h>
#include <fastdds/rtps/resources/ResourceEvent.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <functional>
#include <iostream>
using namespace eprosima::fastrtps;
using namespace eprosima::fastrtps::rtps;
using namespace std::chrono;
namespace eprosima {
namespace fastdds {
namespace dds {
DataWriterImpl::DataWriterImpl(
PublisherImpl* p,
TypeSupport type,
Topic* topic,
const DataWriterQos& qos,
DataWriterListener* listen)
: publisher_(p)
, type_(type)
, topic_(topic)
, qos_(&qos == &DATAWRITER_QOS_DEFAULT ? publisher_->get_default_datawriter_qos() : qos)
, history_(get_topic_attributes(qos_, *topic_, type_), type_->m_typeSize
#if HAVE_SECURITY
    // In future v2 the change pool lives in the writer, and the writer sets this value on the cache change pool.
    + 20 /*SecureDataHeader*/ + 4 + ((2 * 16) /*EVP_MAX_IV_LENGTH max block size*/ - 1 ) /* SecureDataBody*/
+ 16 + 4 /*SecureDataTag*/
#endif
, qos_.endpoint().history_memory_policy)
, listener_(listen)
#pragma warning (disable : 4355 )
, writer_listener_(this)
, high_mark_for_frag_(0)
, deadline_duration_us_(qos_.deadline().period.to_ns() * 1e-3)
, lifespan_duration_us_(qos_.lifespan().duration.to_ns() * 1e-3)
{
}
ReturnCode_t DataWriterImpl::enable()
{
assert(writer_ == nullptr);
WriterAttributes w_att;
w_att.throughputController = qos_.throughput_controller();
w_att.endpoint.durabilityKind = qos_.durability().durabilityKind();
w_att.endpoint.endpointKind = WRITER;
w_att.endpoint.multicastLocatorList = qos_.endpoint().multicast_locator_list;
w_att.endpoint.reliabilityKind = qos_.reliability().kind == RELIABLE_RELIABILITY_QOS ? RELIABLE : BEST_EFFORT;
w_att.endpoint.topicKind = type_->m_isGetKeyDefined ? WITH_KEY : NO_KEY;
w_att.endpoint.unicastLocatorList = qos_.endpoint().unicast_locator_list;
w_att.endpoint.remoteLocatorList = qos_.endpoint().remote_locator_list;
w_att.mode = qos_.publish_mode().kind == SYNCHRONOUS_PUBLISH_MODE ? SYNCHRONOUS_WRITER : ASYNCHRONOUS_WRITER;
w_att.endpoint.properties = qos_.properties();
if (qos_.endpoint().entity_id > 0)
{
w_att.endpoint.setEntityID(static_cast<uint8_t>(qos_.endpoint().entity_id));
}
if (qos_.endpoint().user_defined_id > 0)
{
w_att.endpoint.setUserDefinedID(static_cast<uint8_t>(qos_.endpoint().user_defined_id));
}
w_att.times = qos_.reliable_writer_qos().times;
w_att.liveliness_kind = qos_.liveliness().kind;
w_att.liveliness_lease_duration = qos_.liveliness().lease_duration;
w_att.liveliness_announcement_period = qos_.liveliness().announcement_period;
w_att.matched_readers_allocation = qos_.writer_resource_limits().matched_subscriber_allocation;
// TODO(Ricardo) Remove in future
// Insert topic_name and partitions
Property property;
property.name("topic_name");
property.value(topic_->get_name().c_str());
w_att.endpoint.properties.properties().push_back(std::move(property));
if (publisher_->get_qos().partition().names().size() > 0)
{
property.name("partitions");
std::string partitions;
for (auto partition : publisher_->get_qos().partition().names())
{
partitions += partition + ";";
}
property.value(std::move(partitions));
w_att.endpoint.properties.properties().push_back(std::move(property));
}
if (qos_.reliable_writer_qos().disable_positive_acks.enabled &&
qos_.reliable_writer_qos().disable_positive_acks.duration != c_TimeInfinite)
{
w_att.disable_positive_acks = true;
w_att.keep_duration = qos_.reliable_writer_qos().disable_positive_acks.duration;
}
RTPSWriter* writer = RTPSDomain::createRTPSWriter(
publisher_->rtps_participant(),
w_att,
static_cast<WriterHistory*>(&history_),
static_cast<WriterListener*>(&writer_listener_));
if (writer == nullptr)
{
logError(DATA_WRITER, "Problem creating associated Writer");
return ReturnCode_t::RETCODE_ERROR;
}
writer_ = writer;
deadline_timer_ = new TimedEvent(publisher_->get_participant()->get_resource_event(),
[&]() -> bool
{
return deadline_missed();
},
qos_.deadline().period.to_ns() * 1e-6);
lifespan_timer_ = new TimedEvent(publisher_->get_participant()->get_resource_event(),
[&]() -> bool
{
return lifespan_expired();
},
qos_.lifespan().duration.to_ns() * 1e-6);
// REGISTER THE WRITER
WriterQos wqos = qos_.get_writerqos(get_publisher()->get_qos(), topic_->get_qos());
publisher_->rtps_participant()->registerWriter(writer_, get_topic_attributes(qos_, *topic_, type_), wqos);
return ReturnCode_t::RETCODE_OK;
}
void DataWriterImpl::disable()
{
set_listener(nullptr);
if (writer_ != nullptr)
{
writer_->set_listener(nullptr);
}
}
DataWriterImpl::~DataWriterImpl()
{
delete lifespan_timer_;
delete deadline_timer_;
if (writer_ != nullptr)
{
logInfo(PUBLISHER, guid().entityId << " in topic: " << type_->getName());
RTPSDomain::removeRTPSWriter(writer_);
}
delete user_datawriter_;
}
bool DataWriterImpl::write(
void* data)
{
if (writer_ == nullptr)
{
return false;
}
logInfo(DATA_WRITER, "Writing new data");
return create_new_change(ALIVE, data);
}
bool DataWriterImpl::write(
void* data,
fastrtps::rtps::WriteParams& params)
{
if (writer_ == nullptr)
{
return false;
}
logInfo(DATA_WRITER, "Writing new data with WriteParams");
return create_new_change_with_params(ALIVE, data, params);
}
ReturnCode_t DataWriterImpl::write(
void* data,
const fastrtps::rtps::InstanceHandle_t& handle)
{
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
InstanceHandle_t instance_handle;
if (type_.get()->m_isGetKeyDefined)
{
bool is_key_protected = false;
#if HAVE_SECURITY
is_key_protected = writer_->getAttributes().security_attributes().is_key_protected;
#endif
type_.get()->getKey(data, &instance_handle, is_key_protected);
}
//Check if the Handle is different from the special value HANDLE_NIL and
//does not correspond with the instance referred by the data
if (handle.isDefined() && handle.value != instance_handle.value)
{
return ReturnCode_t::RETCODE_PRECONDITION_NOT_MET;
}
logInfo(DATA_WRITER, "Writing new data with Handle");
WriteParams wparams;
if (create_new_change_with_params(ALIVE, data, wparams, instance_handle))
{
return ReturnCode_t::RETCODE_OK;
}
return ReturnCode_t::RETCODE_ERROR;
}
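// Editor's note: a minimal, hypothetical usage sketch of the public DataWriter API that funnels
// into the write() overloads above (HelloWorld and writer are user-side assumptions, not part of
// this file):
//
//     HelloWorld sample;
//     writer->write(&sample);                         // plain write
//     eprosima::fastrtps::rtps::WriteParams params;
//     writer->write(&sample, params);                 // write with explicit WriteParams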
fastrtps::rtps::InstanceHandle_t DataWriterImpl::register_instance(
void* key)
{
/// Preconditions
if (writer_ == nullptr)
{
return c_InstanceHandle_Unknown;
}
if (key == nullptr)
{
logError(PUBLISHER, "Data pointer not valid");
return c_InstanceHandle_Unknown;
}
if (!type_->m_isGetKeyDefined)
{
logError(PUBLISHER, "Topic is NO_KEY, operation not permitted");
return c_InstanceHandle_Unknown;
}
InstanceHandle_t instance_handle = c_InstanceHandle_Unknown;
bool is_key_protected = false;
#if HAVE_SECURITY
is_key_protected = writer_->getAttributes().security_attributes().is_key_protected;
#endif
type_->getKey(key, &instance_handle, is_key_protected);
// Block lowlevel writer
auto max_blocking_time = std::chrono::steady_clock::now() +
std::chrono::microseconds(::TimeConv::Time_t2MicroSecondsInt64(qos_.reliability().max_blocking_time));
#if HAVE_STRICT_REALTIME
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex(), std::defer_lock);
if (lock.try_lock_until(max_blocking_time))
#else
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
#endif
{
if (history_.register_instance(instance_handle, lock, max_blocking_time))
{
return instance_handle;
}
}
return c_InstanceHandle_Unknown;
}
ReturnCode_t DataWriterImpl::unregister_instance(
void* instance,
const InstanceHandle_t& handle,
bool dispose)
{
/// Preconditions
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
if (instance == nullptr)
{
logError(PUBLISHER, "Data pointer not valid");
return ReturnCode_t::RETCODE_BAD_PARAMETER;
}
if (!type_->m_isGetKeyDefined)
{
logError(PUBLISHER, "Topic is NO_KEY, operation not permitted");
return ReturnCode_t::RETCODE_PRECONDITION_NOT_MET;
}
ReturnCode_t returned_value = ReturnCode_t::RETCODE_ERROR;
InstanceHandle_t ih = handle;
#if !defined(NDEBUG)
if (c_InstanceHandle_Unknown == ih)
#endif
{
bool is_key_protected = false;
#if HAVE_SECURITY
is_key_protected = writer_->getAttributes().security_attributes().is_key_protected;
#endif
type_->getKey(instance, &ih, is_key_protected);
}
#if !defined(NDEBUG)
if (c_InstanceHandle_Unknown != handle && ih != handle)
{
logError(PUBLISHER, "handle differs from data's key.");
return ReturnCode_t::RETCODE_BAD_PARAMETER;
}
#endif
if (history_.is_key_registered(ih))
{
WriteParams wparams;
ChangeKind_t change_kind =
dispose ? NOT_ALIVE_DISPOSED : (
qos_.writer_data_lifecycle().autodispose_unregistered_instances ?
NOT_ALIVE_DISPOSED_UNREGISTERED :
NOT_ALIVE_UNREGISTERED
);
if (create_new_change_with_params(change_kind, instance, wparams, ih))
{
returned_value = ReturnCode_t::RETCODE_OK;
}
}
else
{
returned_value = ReturnCode_t::RETCODE_PRECONDITION_NOT_MET;
}
return returned_value;
}
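// Editor's note: for keyed topics the typical call sequence on the public API is
// register_instance() once per key, write() for each update, and unregister_instance()
// (optionally disposing, depending on writer_data_lifecycle) when the instance is retired;
// NO_KEY topics skip this entirely, as the precondition checks above enforce.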
bool DataWriterImpl::create_new_change(
ChangeKind_t changeKind,
void* data)
{
WriteParams wparams;
return create_new_change_with_params(changeKind, data, wparams);
}
bool DataWriterImpl::check_new_change_preconditions(
ChangeKind_t change_kind,
void* data)
{
// Preconditions
if (data == nullptr)
{
logError(PUBLISHER, "Data pointer not valid");
return false;
}
if (change_kind == NOT_ALIVE_UNREGISTERED
|| change_kind == NOT_ALIVE_DISPOSED
|| change_kind == NOT_ALIVE_DISPOSED_UNREGISTERED)
{
if (!type_->m_isGetKeyDefined)
{
logError(PUBLISHER, "Topic is NO_KEY, operation not permitted");
return false;
}
}
return true;
}
bool DataWriterImpl::perform_create_new_change(
ChangeKind_t change_kind,
void* data,
WriteParams& wparams,
const InstanceHandle_t& handle)
{
// Block lowlevel writer
auto max_blocking_time = steady_clock::now() +
microseconds(::TimeConv::Time_t2MicroSecondsInt64(qos_.reliability().max_blocking_time));
#if HAVE_STRICT_REALTIME
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex(), std::defer_lock);
if (lock.try_lock_until(max_blocking_time))
#else
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
#endif
{
CacheChange_t* ch = writer_->new_change(type_->getSerializedSizeProvider(data), change_kind, handle);
if (ch != nullptr)
{
if (change_kind == ALIVE)
{
                // If these two checks pass, we assume the cache change is valid and we can write to it.
if (!type_->serialize(data, &ch->serializedPayload))
{
                    logWarning(RTPS_WRITER, "RTPSWriter: Serialization returns false");
history_.release_Cache(ch);
return false;
}
}
            // TODO(Ricardo): Move this logic into a class so that users of the RTPS layer can reuse it.
if (high_mark_for_frag_ == 0)
{
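                // The fragmentation threshold is computed lazily, once: take the most restrictive
                // of the writer's maximum data size, the writer's throughput-controller budget and
                // the participant's throughput-controller budget, then round it down to a multiple
                // of 4 bytes (the "&= ~3" below).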
RTPSParticipant* part = publisher_->rtps_participant();
uint32_t max_data_size = writer_->getMaxDataSize();
uint32_t writer_throughput_controller_bytes =
writer_->calculateMaxDataSize(qos_.throughput_controller().bytesPerPeriod);
uint32_t participant_throughput_controller_bytes =
writer_->calculateMaxDataSize(
part->getRTPSParticipantAttributes().throughputController.bytesPerPeriod);
high_mark_for_frag_ =
max_data_size > writer_throughput_controller_bytes ?
writer_throughput_controller_bytes :
(max_data_size > participant_throughput_controller_bytes ?
participant_throughput_controller_bytes :
max_data_size);
high_mark_for_frag_ &= ~3;
}
uint32_t final_high_mark_for_frag = high_mark_for_frag_;
            // If inline QoS is needed for related_sample_identity, subtract the inline QoS size from the final fragment size.
if (wparams.related_sample_identity() != SampleIdentity::unknown())
{
final_high_mark_for_frag -= 32;
}
// If it is big data, fragment it.
if (ch->serializedPayload.length > final_high_mark_for_frag)
{
// Fragment the data.
// Set the fragment size to the cachechange.
ch->setFragmentSize(static_cast<uint16_t>(
(std::min)(final_high_mark_for_frag, RTPSMessageGroup::get_max_fragment_payload_size())));
}
if (!this->history_.add_pub_change(ch, wparams, lock, max_blocking_time))
{
history_.release_Cache(ch);
return false;
}
if (qos_.deadline().period != c_TimeInfinite)
{
if (!history_.set_next_deadline(
ch->instanceHandle,
steady_clock::now() + duration_cast<system_clock::duration>(deadline_duration_us_)))
{
logError(PUBLISHER, "Could not set the next deadline in the history");
}
else
{
if (timer_owner_ == handle || timer_owner_ == InstanceHandle_t())
{
if (deadline_timer_reschedule())
{
deadline_timer_->cancel_timer();
deadline_timer_->restart_timer();
}
}
}
}
if (qos_.lifespan().duration != c_TimeInfinite)
{
lifespan_duration_us_ = duration<double, std::ratio<1, 1000000> >(
qos_.lifespan().duration.to_ns() * 1e-3);
lifespan_timer_->update_interval_millisec(qos_.lifespan().duration.to_ns() * 1e-6);
lifespan_timer_->restart_timer();
}
return true;
}
}
return false;
}
bool DataWriterImpl::create_new_change_with_params(
ChangeKind_t changeKind,
void* data,
WriteParams& wparams)
{
if (!check_new_change_preconditions(changeKind, data))
{
return false;
}
InstanceHandle_t handle;
if (type_->m_isGetKeyDefined)
{
bool is_key_protected = false;
#if HAVE_SECURITY
is_key_protected = writer_->getAttributes().security_attributes().is_key_protected;
#endif
type_->getKey(data, &handle, is_key_protected);
}
return perform_create_new_change(changeKind, data, wparams, handle);
}
bool DataWriterImpl::create_new_change_with_params(
ChangeKind_t changeKind,
void* data,
WriteParams& wparams,
const fastrtps::rtps::InstanceHandle_t& handle)
{
if (!check_new_change_preconditions(changeKind, data))
{
return false;
}
return perform_create_new_change(changeKind, data, wparams, handle);
}
bool DataWriterImpl::remove_min_seq_change()
{
return history_.removeMinChange();
}
ReturnCode_t DataWriterImpl::clear_history(
size_t* removed)
{
return (history_.removeAllChange(removed) ? ReturnCode_t::RETCODE_OK : ReturnCode_t::RETCODE_ERROR);
}
const GUID_t& DataWriterImpl::guid() const
{
return writer_ ? writer_->getGuid() : c_Guid_Unknown;
}
InstanceHandle_t DataWriterImpl::get_instance_handle() const
{
return guid();
}
void DataWriterImpl::publisher_qos_updated()
{
if (writer_ != nullptr)
{
//NOTIFY THE BUILTIN PROTOCOLS THAT THE WRITER HAS CHANGED
WriterQos wqos = qos_.get_writerqos(get_publisher()->get_qos(), topic_->get_qos());
publisher_->rtps_participant()->updateWriter(writer_, get_topic_attributes(qos_, *topic_, type_), wqos);
}
}
ReturnCode_t DataWriterImpl::set_qos(
const DataWriterQos& qos)
{
bool enabled = writer_ != nullptr;
const DataWriterQos& qos_to_set = (&qos == &DATAWRITER_QOS_DEFAULT) ?
publisher_->get_default_datawriter_qos() : qos;
// Default qos is always considered consistent
if (&qos != &DATAWRITER_QOS_DEFAULT)
{
ReturnCode_t ret_val = check_qos(qos_to_set);
if (!ret_val)
{
return ret_val;
}
if (publisher_->get_participant()->get_qos().allocation().data_limits.max_user_data != 0 &&
publisher_->get_participant()->get_qos().allocation().data_limits.max_user_data <
qos_to_set.user_data().getValue().size())
{
return ReturnCode_t::RETCODE_INCONSISTENT_POLICY;
}
}
if (enabled && !can_qos_be_updated(qos_, qos_to_set))
{
return ReturnCode_t::RETCODE_IMMUTABLE_POLICY;
}
set_qos(qos_, qos_to_set, !enabled);
if (enabled)
{
//Notify the participant that a Writer has changed its QOS
fastrtps::TopicAttributes topic_att = get_topic_attributes(qos_, *topic_, type_);
WriterQos wqos = qos_.get_writerqos(get_publisher()->get_qos(), topic_->get_qos());
publisher_->rtps_participant()->updateWriter(writer_, topic_att, wqos);
// Deadline
if (qos_.deadline().period != c_TimeInfinite)
{
deadline_duration_us_ =
duration<double, std::ratio<1, 1000000> >(qos_.deadline().period.to_ns() * 1e-3);
deadline_timer_->update_interval_millisec(qos_.deadline().period.to_ns() * 1e-6);
}
else
{
deadline_timer_->cancel_timer();
}
// Lifespan
if (qos_.lifespan().duration != c_TimeInfinite)
{
lifespan_duration_us_ =
duration<double, std::ratio<1, 1000000> >(qos_.lifespan().duration.to_ns() * 1e-3);
lifespan_timer_->update_interval_millisec(qos_.lifespan().duration.to_ns() * 1e-6);
}
else
{
lifespan_timer_->cancel_timer();
}
}
return ReturnCode_t::RETCODE_OK;
}
const DataWriterQos& DataWriterImpl::get_qos() const
{
return qos_;
}
ReturnCode_t DataWriterImpl::set_listener(
DataWriterListener* listener)
{
listener_ = listener;
return ReturnCode_t::RETCODE_OK;
}
const DataWriterListener* DataWriterImpl::get_listener() const
{
return listener_;
}
Topic* DataWriterImpl::get_topic() const
{
return topic_;
}
const Publisher* DataWriterImpl::get_publisher() const
{
return publisher_->get_publisher();
}
void DataWriterImpl::InnerDataWriterListener::onWriterMatched(
RTPSWriter* /*writer*/,
const PublicationMatchedStatus& info)
{
if (data_writer_->listener_ != nullptr)
{
data_writer_->listener_->on_publication_matched(
data_writer_->user_datawriter_, info);
}
data_writer_->publisher_->publisher_listener_.on_publication_matched(data_writer_->user_datawriter_, info);
}
void DataWriterImpl::InnerDataWriterListener::onWriterChangeReceivedByAll(
RTPSWriter* /*writer*/,
CacheChange_t* ch)
{
if (data_writer_->type_->m_isGetKeyDefined &&
(NOT_ALIVE_UNREGISTERED == ch->kind ||
NOT_ALIVE_DISPOSED_UNREGISTERED == ch->kind))
{
data_writer_->history_.remove_instance_changes(ch->instanceHandle, ch->sequenceNumber);
}
else if (data_writer_->qos_.durability().kind == VOLATILE_DURABILITY_QOS)
{
data_writer_->history_.remove_change_g(ch);
}
}
void DataWriterImpl::InnerDataWriterListener::on_liveliness_lost(
fastrtps::rtps::RTPSWriter* /*writer*/,
const fastrtps::LivelinessLostStatus& status)
{
if (data_writer_->listener_ != nullptr)
{
data_writer_->listener_->on_liveliness_lost(data_writer_->user_datawriter_, status);
}
data_writer_->publisher_->publisher_listener_.on_liveliness_lost(data_writer_->user_datawriter_, status);
}
ReturnCode_t DataWriterImpl::wait_for_acknowledgments(
const Duration_t& max_wait)
{
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
if (writer_->wait_for_all_acked(max_wait))
{
return ReturnCode_t::RETCODE_OK;
}
return ReturnCode_t::RETCODE_ERROR;
}
bool DataWriterImpl::deadline_timer_reschedule()
{
assert(qos_.deadline().period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
steady_clock::time_point next_deadline_us;
if (!history_.get_next_deadline(timer_owner_, next_deadline_us))
{
logError(PUBLISHER, "Could not get the next deadline from the history");
return false;
}
auto interval_ms = duration_cast<milliseconds>(next_deadline_us - steady_clock::now());
deadline_timer_->update_interval_millisec(static_cast<double>(interval_ms.count()));
return true;
}
bool DataWriterImpl::deadline_missed()
{
assert(qos_.deadline().period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
deadline_missed_status_.total_count++;
deadline_missed_status_.total_count_change++;
deadline_missed_status_.last_instance_handle = timer_owner_;
if (listener_ != nullptr)
{
listener_->on_offered_deadline_missed(user_datawriter_, deadline_missed_status_);
}
publisher_->publisher_listener_.on_offered_deadline_missed(user_datawriter_, deadline_missed_status_);
deadline_missed_status_.total_count_change = 0;
if (!history_.set_next_deadline(
timer_owner_,
steady_clock::now() + duration_cast<system_clock::duration>(deadline_duration_us_)))
{
logError(PUBLISHER, "Could not set the next deadline in the history");
return false;
}
return deadline_timer_reschedule();
}
ReturnCode_t DataWriterImpl::get_offered_deadline_missed_status(
OfferedDeadlineMissedStatus& status)
{
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
status = deadline_missed_status_;
deadline_missed_status_.total_count_change = 0;
return ReturnCode_t::RETCODE_OK;
}
bool DataWriterImpl::lifespan_expired()
{
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
CacheChange_t* earliest_change;
while (history_.get_earliest_change(&earliest_change))
{
auto source_timestamp = system_clock::time_point() + nanoseconds(earliest_change->sourceTimestamp.to_ns());
auto now = system_clock::now();
// Check that the earliest change has expired (the change which started the timer could have been removed from the history)
if (now - source_timestamp < lifespan_duration_us_)
{
auto interval = source_timestamp - now + lifespan_duration_us_;
lifespan_timer_->update_interval_millisec(static_cast<double>(duration_cast<milliseconds>(interval).count()));
return true;
}
// The earliest change has expired
history_.remove_change_pub(earliest_change);
// Set the timer for the next change if there is one
if (!history_.get_earliest_change(&earliest_change))
{
return false;
}
// Calculate when the next change is due to expire and restart
source_timestamp = system_clock::time_point() + nanoseconds(earliest_change->sourceTimestamp.to_ns());
now = system_clock::now();
auto interval = source_timestamp - now + lifespan_duration_us_;
if (interval.count() > 0)
{
lifespan_timer_->update_interval_millisec(static_cast<double>(duration_cast<milliseconds>(interval).count()));
return true;
}
}
return false;
}
ReturnCode_t DataWriterImpl::get_liveliness_lost_status(
LivelinessLostStatus& status)
{
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
std::unique_lock<RecursiveTimedMutex> lock(writer_->getMutex());
status.total_count = writer_->liveliness_lost_status_.total_count;
status.total_count_change = writer_->liveliness_lost_status_.total_count_change;
writer_->liveliness_lost_status_.total_count_change = 0u;
return ReturnCode_t::RETCODE_OK;
}
ReturnCode_t DataWriterImpl::assert_liveliness()
{
if (writer_ == nullptr)
{
return ReturnCode_t::RETCODE_NOT_ENABLED;
}
if (!publisher_->rtps_participant()->wlp()->assert_liveliness(
writer_->getGuid(),
writer_->get_liveliness_kind(),
writer_->get_liveliness_lease_duration()))
{
logError(DATAWRITER, "Could not assert liveliness of writer " << writer_->getGuid());
return ReturnCode_t::RETCODE_ERROR;
}
if (qos_.liveliness().kind == MANUAL_BY_TOPIC_LIVELINESS_QOS)
{
// As described in the RTPS specification, if liveliness kind is manual a heartbeat must be sent
// This only applies to stateful writers, as stateless writers do not send heartbeats
StatefulWriter* stateful_writer = dynamic_cast<StatefulWriter*>(writer_);
if (stateful_writer != nullptr)
{
stateful_writer->send_periodic_heartbeat(true, true);
}
}
return ReturnCode_t::RETCODE_OK;
}
fastrtps::TopicAttributes DataWriterImpl::get_topic_attributes(
const DataWriterQos& qos,
const Topic& topic,
const TypeSupport& type)
{
fastrtps::TopicAttributes topic_att;
topic_att.historyQos = qos.history();
topic_att.resourceLimitsQos = qos.resource_limits();
topic_att.topicName = topic.get_name();
topic_att.topicDataType = topic.get_type_name();
topic_att.topicKind = type->m_isGetKeyDefined ? WITH_KEY : NO_KEY;
topic_att.auto_fill_type_information = type->auto_fill_type_information();
topic_att.auto_fill_type_object = type->auto_fill_type_object();
if (type->type_identifier())
{
topic_att.type_id = *type->type_identifier();
}
if (type->type_object())
{
topic_att.type = *type->type_object();
}
if (type->type_information())
{
topic_att.type_information = *type->type_information();
}
return topic_att;
}
void DataWriterImpl::set_qos(
DataWriterQos& to,
const DataWriterQos& from,
bool is_default)
{
if (is_default && !(to.durability() == from.durability()))
{
to.durability() = from.durability();
to.durability().hasChanged = true;
}
if (is_default && !(to.durability_service() == from.durability_service()))
{
to.durability_service() = from.durability_service();
to.durability_service().hasChanged = true;
}
if (!(to.deadline() == from.deadline()))
{
to.deadline() = from.deadline();
to.deadline().hasChanged = true;
}
if (!(to.latency_budget() == from.latency_budget()))
{
to.latency_budget() = from.latency_budget();
to.latency_budget().hasChanged = true;
}
if (is_default && !(to.liveliness() == from.liveliness()))
{
to.liveliness() = from.liveliness();
to.liveliness().hasChanged = true;
}
if (is_default && !(to.reliability() == from.reliability()))
{
to.reliability() = from.reliability();
to.reliability().hasChanged = true;
}
if (is_default && !(to.destination_order() == from.destination_order()))
{
to.destination_order() = from.destination_order();
to.destination_order().hasChanged = true;
}
if (is_default && !(to.history() == from.history()))
{
to.history() = from.history();
to.history().hasChanged = true;
}
if (is_default && !(to.resource_limits() == from.resource_limits()))
{
to.resource_limits() = from.resource_limits();
to.resource_limits().hasChanged = true;
}
if (!(to.transport_priority() == from.transport_priority()))
{
to.transport_priority() = from.transport_priority();
to.transport_priority().hasChanged = true;
}
if (!(to.lifespan() == from.lifespan()))
{
to.lifespan() = from.lifespan();
to.lifespan().hasChanged = true;
}
if (!(to.user_data() == from.user_data()))
{
to.user_data() = from.user_data();
to.user_data().hasChanged = true;
}
if (is_default && !(to.ownership() == from.ownership()))
{
to.ownership() = from.ownership();
to.ownership().hasChanged = true;
}
if (!(to.ownership_strength() == from.ownership_strength()))
{
to.ownership_strength() = from.ownership_strength();
to.ownership_strength().hasChanged = true;
}
if (!(to.writer_data_lifecycle() == from.writer_data_lifecycle()))
{
to.writer_data_lifecycle() = from.writer_data_lifecycle();
}
if (is_default && !(to.publish_mode() == from.publish_mode()))
{
to.publish_mode() = from.publish_mode();
}
if (!(to.representation() == from.representation()))
{
to.representation() = from.representation();
to.representation().hasChanged = true;
}
if (is_default && !(to.properties() == from.properties()))
{
to.properties() = from.properties();
}
if (is_default && !(to.reliable_writer_qos() == from.reliable_writer_qos()))
{
to.reliable_writer_qos() = from.reliable_writer_qos();
}
if (is_default && !(to.endpoint() == from.endpoint()))
{
to.endpoint() = from.endpoint();
}
if (is_default && !(to.writer_resource_limits() == from.writer_resource_limits()))
{
to.writer_resource_limits() = from.writer_resource_limits();
}
if (is_default && !(to.throughput_controller() == from.throughput_controller()))
{
to.throughput_controller() = from.throughput_controller();
}
}
ReturnCode_t DataWriterImpl::check_qos(
const DataWriterQos& qos)
{
if (qos.durability().kind == PERSISTENT_DURABILITY_QOS)
{
logError(RTPS_QOS_CHECK, "PERSISTENT Durability not supported");
return ReturnCode_t::RETCODE_UNSUPPORTED;
}
if (qos.destination_order().kind == BY_SOURCE_TIMESTAMP_DESTINATIONORDER_QOS)
{
logError(RTPS_QOS_CHECK, "BY SOURCE TIMESTAMP DestinationOrder not supported");
return ReturnCode_t::RETCODE_UNSUPPORTED;
}
if (qos.reliability().kind == BEST_EFFORT_RELIABILITY_QOS && qos.ownership().kind == EXCLUSIVE_OWNERSHIP_QOS)
{
logError(RTPS_QOS_CHECK, "BEST_EFFORT incompatible with EXCLUSIVE ownership");
return ReturnCode_t::RETCODE_INCONSISTENT_POLICY;
}
if (qos.liveliness().kind == AUTOMATIC_LIVELINESS_QOS ||
qos.liveliness().kind == MANUAL_BY_PARTICIPANT_LIVELINESS_QOS)
{
if (qos.liveliness().lease_duration < eprosima::fastrtps::c_TimeInfinite &&
qos.liveliness().lease_duration <= qos.liveliness().announcement_period)
{
logError(RTPS_QOS_CHECK, "WRITERQOS: LeaseDuration <= announcement period.");
return ReturnCode_t::RETCODE_INCONSISTENT_POLICY;
}
}
return ReturnCode_t::RETCODE_OK;
}
bool DataWriterImpl::can_qos_be_updated(
const DataWriterQos& to,
const DataWriterQos& from)
{
bool updatable = true;
if (to.durability().kind != from.durability().kind)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Durability kind cannot be changed after the creation of a DataWriter.");
}
if (to.liveliness().kind != from.liveliness().kind)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Liveliness Kind cannot be changed after the creation of a DataWriter.");
}
if (to.liveliness().lease_duration != from.liveliness().lease_duration)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Liveliness lease duration cannot be changed after the creation of a DataWriter.");
}
if (to.liveliness().announcement_period != from.liveliness().announcement_period)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Liveliness announcement cannot be changed after the creation of a DataWriter.");
}
if (to.reliability().kind != from.reliability().kind)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Reliability Kind cannot be changed after the creation of a DataWriter.");
}
if (to.ownership().kind != from.ownership().kind)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Ownership Kind cannot be changed after the creation of a DataWriter.");
}
if (to.destination_order().kind != from.destination_order().kind)
{
updatable = false;
logWarning(RTPS_QOS_CHECK, "Destination order Kind cannot be changed after the creation of a DataWriter.");
}
return updatable;
}
} // namespace dds
} // namespace fastdds
} // namespace eprosima
| 32.564384
| 131
| 0.656318
|
mspiegel
|
33656921ce406ddcc199e4e400ea5e37f99da666
| 15,470
|
hpp
|
C++
|
ql/experimental/shortrate/generalizedhullwhite.hpp
|
SoftwareIngenieur/QuantLib
|
7a59dd749869f7a679536df322482bf9c6531d38
|
[
"BSD-3-Clause"
] | null | null | null |
ql/experimental/shortrate/generalizedhullwhite.hpp
|
SoftwareIngenieur/QuantLib
|
7a59dd749869f7a679536df322482bf9c6531d38
|
[
"BSD-3-Clause"
] | null | null | null |
ql/experimental/shortrate/generalizedhullwhite.hpp
|
SoftwareIngenieur/QuantLib
|
7a59dd749869f7a679536df322482bf9c6531d38
|
[
"BSD-3-Clause"
] | null | null | null |
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (C) 2010 SunTrust Bank
Copyright (C) 2010, 2014 Cavit Hafizoglu
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<quantlib-dev@lists.sf.net>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
*/
/*! \file generalizedhullwhite.hpp
\brief generalized Hull-White model
*/
#ifndef quantlib_generalized_hull_white_hpp
#define quantlib_generalized_hull_white_hpp
#include <ql/models/shortrate/onefactormodel.hpp>
#include <ql/experimental/shortrate/generalizedornsteinuhlenbeckprocess.hpp>
#include <ql/processes/ornsteinuhlenbeckprocess.hpp>
#include <ql/math/interpolation.hpp>
namespace QuantLib {
//! Parameter that holds an interpolation object
class InterpolationParameter : public Parameter {
private:
class Impl : public Parameter::Impl {
public:
Real value(const Array&, Time t) const override { return interpolator_(t); }
void reset(const Interpolation& interp) { interpolator_ = interp; }
private:
Interpolation interpolator_;
};
public:
explicit InterpolationParameter(
Size count,
const Constraint& constraint = NoConstraint())
: Parameter(count,
ext::shared_ptr<Parameter::Impl>(
new InterpolationParameter::Impl()),
constraint)
{ }
void reset(const Interpolation &interp) {
ext::shared_ptr<InterpolationParameter::Impl> impl =
ext::dynamic_pointer_cast<InterpolationParameter::Impl>(impl_);
if (impl != 0)
impl->reset(interp);
}
};
//! Generalized Hull-White model class.
    /*! This class implements the generalized Hull-White model defined by
        \f[
        d f(r_t) = (\theta(t) - \alpha(t) f(r_t))dt + \sigma(t) dW_t,
        \f]
        where \f$ \alpha \f$ and \f$ \sigma \f$ are piecewise-linear functions;
        \f$ f(x) = x \f$ recovers Hull-White and \f$ f(x) = \ln x \f$ recovers
        Black-Karasinski.
\ingroup shortrate
*/
class GeneralizedHullWhite : public OneFactorAffineModel,
public TermStructureConsistentModel {
public:
GeneralizedHullWhite(
const Handle<YieldTermStructure>& yieldtermStructure,
const std::vector<Date>& speedstructure,
const std::vector<Date>& volstructure,
const std::vector<Real>& speed,
const std::vector<Real>& vol,
const ext::function<Real(Real)>& f =
ext::function<Real(Real)>(),
const ext::function<Real(Real)>& fInverse =
ext::function<Real(Real)>());
template <class SpeedInterpolationTraits,class VolInterpolationTraits>
GeneralizedHullWhite(
const Handle<YieldTermStructure>& yieldtermStructure,
const std::vector<Date>& speedstructure,
const std::vector<Date>& volstructure,
const std::vector<Real>& speed,
const std::vector<Real>& vol,
const SpeedInterpolationTraits &speedtraits,
const VolInterpolationTraits &voltraits,
const ext::function<Real(Real)>& f =
ext::function<Real(Real)>(),
const ext::function<Real(Real)>& fInverse =
ext::function<Real(Real)>()) :
OneFactorAffineModel(2), TermStructureConsistentModel(yieldtermStructure),
speedstructure_(speedstructure), volstructure_(volstructure),
a_(arguments_[0]), sigma_(arguments_[1]),
f_(f), fInverse_(fInverse)
{
initialize(yieldtermStructure,speedstructure,volstructure,
speed,vol,speedtraits,voltraits,f,fInverse);
}
ext::shared_ptr<ShortRateDynamics> dynamics() const override {
QL_FAIL("no defined process for generalized Hull-White model, "
"use HWdynamics()");
}
ext::shared_ptr<Lattice> tree(const TimeGrid& grid) const override;
//Analytical calibration of HW
GeneralizedHullWhite(
const Handle<YieldTermStructure>& yieldtermStructure,
Real a = 0.1, Real sigma = 0.01);
ext::shared_ptr<ShortRateDynamics> HWdynamics() const;
//! Only valid under Hull-White model
Real discountBondOption(Option::Type type,
Real strike,
Time maturity,
Time bondMaturity) const override;
//! vector to pass to 'calibrate' to fit only volatility
std::vector<bool> fixedReversion() const;
protected:
//Analytical calibration of HW
Real a() const { return a_(0.0); }
Real sigma() const { return sigma_(0.0); }
void generateArguments() override;
Real A(Time t, Time T) const override;
Real B(Time t, Time T) const override;
Real V(Time t, Time T) const;
private:
class Dynamics;
class Helper;
class FittingParameter;// for analytic HW fitting
std::vector<Date> speedstructure_;
std::vector<Date> volstructure_;
std::vector<Time> speedperiods_;
std::vector<Time> volperiods_;
Interpolation speed_;
Interpolation vol_;
ext::function<Real (Time)> speed() const;
ext::function<Real (Time)> vol() const;
Parameter& a_;
Parameter& sigma_;
Parameter phi_;
ext::function<Real(Real)> f_;
ext::function<Real(Real)> fInverse_;
static Real identity(Real x) {
return x;
}
template <class SpeedInterpolationTraits,class VolInterpolationTraits>
void initialize(const Handle<YieldTermStructure>& yieldtermStructure,
const std::vector<Date>& speedstructure,
const std::vector<Date>& volstructure,
const std::vector<Real>& speed,
const std::vector<Real>& vol,
const SpeedInterpolationTraits &speedtraits,
const VolInterpolationTraits &voltraits,
const ext::function<Real(Real)>& f,
const ext::function<Real(Real)>& fInverse)
{
QL_REQUIRE(speedstructure.size()==speed.size(),
"mean reversion inputs inconsistent");
QL_REQUIRE(volstructure.size()==vol.size(),
"volatility inputs inconsistent");
if (!f_)
f_ = identity;
if (!fInverse_)
fInverse_ = identity;
DayCounter dc = yieldtermStructure->dayCounter();
Date ref = yieldtermStructure->referenceDate();
for (Size i=0;i<speedstructure.size();i++)
speedperiods_.push_back(dc.yearFraction(ref,speedstructure[i]));
for (Size i=0;i<volstructure.size();i++)
volperiods_.push_back(dc.yearFraction(ref,volstructure[i]));
// interpolator x points to *periods_ vector, y points to
// the internal Array in the parameter
InterpolationParameter atemp(speedperiods_.size(), NoConstraint());
a_ = atemp;
for (Size i=0; i<speedperiods_.size(); i++)
a_.setParam(i, speed[i]);
speed_ = speedtraits.interpolate(speedperiods_.begin(),
speedperiods_.end(),a_.params().begin());
speed_.enableExtrapolation();
atemp.reset(speed_);
InterpolationParameter sigmatemp(volperiods_.size(), PositiveConstraint());
sigma_ = sigmatemp;
for (Size i=0; i<volperiods_.size(); i++)
sigma_.setParam(i, vol[i]);
vol_ = voltraits.interpolate(volperiods_.begin(),
volperiods_.end(),sigma_.params().begin());
vol_.enableExtrapolation();
sigmatemp.reset(vol_);
generateArguments();
registerWith(yieldtermStructure);
}
};
//! Short-rate dynamics in the generalized Hull-White model
/*! The short-rate is here
f(r_t) = x_t + g(t)
where g is the deterministic time-dependent
parameter (which can't be determined analytically)
used for initial term-structure fitting and x_t is the state
variable following an Ornstein-Uhlenbeck process.
        In this version, the function f may also be defined as a piece-wise linear
        function and calibrated to away-from-the-money instruments.
*/
class GeneralizedHullWhite::Dynamics
: public GeneralizedHullWhite::ShortRateDynamics {
public:
Dynamics(const Parameter& fitting,
const ext::function<Real (Time)>& alpha,
const ext::function<Real (Time)>& sigma,
const ext::function<Real(Real)>& f,
const ext::function<Real(Real)>& fInverse)
: ShortRateDynamics(ext::shared_ptr<StochasticProcess1D>(
new GeneralizedOrnsteinUhlenbeckProcess(alpha, sigma))),
fitting_(fitting),
_f_(f), _fInverse_(fInverse) {}
//classical HW dynamics
Dynamics(const Parameter& fitting,
Real a,
Real sigma)
: GeneralizedHullWhite::ShortRateDynamics(
ext::shared_ptr<StochasticProcess1D>(
new OrnsteinUhlenbeckProcess(a, sigma))),
fitting_(fitting), _f_(identity()), _fInverse_(identity()) {}
Real variable(Time t, Rate r) const override { return _f_(r) - fitting_(t); }
Real shortRate(Time t, Real x) const override { return _fInverse_(x + fitting_(t)); }
private:
Parameter fitting_;
ext::function<Real(Real)> _f_;
ext::function<Real(Real)> _fInverse_;
struct identity {
Real operator()(Real x) const {return x;};
};
};
//! Analytical term-structure fitting parameter \f$ \varphi(t) \f$.
/*! \f$ \varphi(t) \f$ is analytically defined by
\f[
\varphi(t) = f(t) + \frac{1}{2}[\frac{\sigma(1-e^{-at})}{a}]^2,
\f]
where \f$ f(t) \f$ is the instantaneous forward rate at \f$ t \f$.
*/
class GeneralizedHullWhite::FittingParameter
: public TermStructureFittingParameter {
private:
class Impl : public Parameter::Impl {
public:
Impl(const Handle<YieldTermStructure>& termStructure,
Real a, Real sigma)
: termStructure_(termStructure), a_(a), sigma_(sigma) {}
Real value(const Array&, Time t) const override {
Rate forwardRate =
termStructure_->forwardRate(t, t, Continuous, NoFrequency);
Real temp = a_ < std::sqrt(QL_EPSILON) ?
sigma_*t :
sigma_*(1.0 - std::exp(-a_*t))/a_;
return (forwardRate + 0.5*temp*temp);
}
private:
Handle<YieldTermStructure> termStructure_;
Real a_, sigma_;
};
public:
FittingParameter(const Handle<YieldTermStructure>& termStructure,
Real a, Real sigma)
: TermStructureFittingParameter(ext::shared_ptr<Parameter::Impl>(
new FittingParameter::Impl(termStructure, a, sigma))) {}
};
// Analytic fitting dynamics
inline ext::shared_ptr<OneFactorModel::ShortRateDynamics>
GeneralizedHullWhite::HWdynamics() const {
return ext::shared_ptr<ShortRateDynamics>(
new Dynamics(phi_, a(), sigma()));
}
namespace detail {
template <class I1, class I2>
class LinearFlatInterpolationImpl;
}
    //! %Linear interpolation between discrete points with flat extrapolation
/*! \ingroup interpolations */
class LinearFlatInterpolation : public Interpolation {
public:
/*! \pre the \f$ x \f$ values must be sorted. */
template <class I1, class I2>
LinearFlatInterpolation(const I1& xBegin, const I1& xEnd,
const I2& yBegin) {
impl_ = ext::shared_ptr<Interpolation::Impl>(new
detail::LinearFlatInterpolationImpl<I1,I2>(xBegin, xEnd,
yBegin));
impl_->update();
}
};
//! %Linear-interpolation with flat-extrapolation factory and traits
/*! \ingroup interpolations */
class LinearFlat {
public:
template <class I1, class I2>
Interpolation interpolate(const I1& xBegin, const I1& xEnd,
const I2& yBegin) const {
return LinearFlatInterpolation(xBegin, xEnd, yBegin);
}
static const bool global = false;
static const Size requiredPoints = 1;
};
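    // Editor's note: a minimal, hypothetical usage sketch of the templated GeneralizedHullWhite
    // constructor above, using LinearFlat for both the mean-reversion and volatility term
    // structures (curve, the date vectors and the value vectors are assumed to exist in user
    // code):
    //
    //     GeneralizedHullWhite model(curve,
    //                                speedDates, volDates,
    //                                speedValues, volValues,
    //                                LinearFlat(), LinearFlat());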
namespace detail {
template <class I1, class I2>
class LinearFlatInterpolationImpl
: public Interpolation::templateImpl<I1,I2> {
public:
LinearFlatInterpolationImpl(const I1& xBegin, const I1& xEnd,
const I2& yBegin)
: Interpolation::templateImpl<I1,I2>(xBegin, xEnd, yBegin,
LinearFlat::requiredPoints),
primitiveConst_(xEnd-xBegin), s_(xEnd-xBegin) {}
void update() override {
primitiveConst_[0] = 0.0;
for (Size i=1; i<Size(this->xEnd_-this->xBegin_); ++i) {
Real dx = this->xBegin_[i]-this->xBegin_[i-1];
s_[i-1] = (this->yBegin_[i]-this->yBegin_[i-1])/dx;
primitiveConst_[i] = primitiveConst_[i-1]
+ dx*(this->yBegin_[i-1] +0.5*dx*s_[i-1]);
}
}
Real value(Real x) const override {
if (x <= this->xMin())
return this->yBegin_[0];
if (x >= this->xMax())
return *(this->yBegin_+(this->xEnd_-this->xBegin_)-1);
Size i = this->locate(x);
return this->yBegin_[i] + (x-this->xBegin_[i])*s_[i];
}
Real primitive(Real x) const override {
Size i = this->locate(x);
Real dx = x-this->xBegin_[i];
return primitiveConst_[i] +
dx*(this->yBegin_[i] + 0.5*dx*s_[i]);
}
Real derivative(Real x) const override {
if (!this->isInRange(x))
return 0;
Size i = this->locate(x);
return s_[i];
}
Real secondDerivative(Real) const override { return 0.0; }
private:
std::vector<Real> primitiveConst_, s_;
};
}
}
#endif
| 39.065657
| 93
| 0.576794
|
SoftwareIngenieur
|
33670eaa57ac44f5bef7db0cf63c2faf9970507f
| 2,406
|
cpp
|
C++
|
external/glm/test/ext/ext_vector_ulp.cpp
|
diabl0-NEMESIS/enigma-android
|
6768df01003254245de660a0b9593f54a113e49d
|
[
"MIT"
] | 83
|
2019-09-18T16:14:43.000Z
|
2022-03-22T05:56:43.000Z
|
external/glm/test/ext/ext_vector_ulp.cpp
|
diabl0-NEMESIS/enigma-android
|
6768df01003254245de660a0b9593f54a113e49d
|
[
"MIT"
] | 7
|
2019-10-18T00:44:19.000Z
|
2021-02-09T17:22:15.000Z
|
external/glm/test/ext/ext_vector_ulp.cpp
|
diabl0-NEMESIS/enigma-android
|
6768df01003254245de660a0b9593f54a113e49d
|
[
"MIT"
] | 7
|
2019-10-11T21:27:42.000Z
|
2021-01-14T16:22:49.000Z
|
#include <glm/ext/vector_ulp.hpp>
#include <glm/ext/vector_relational.hpp>
#include <glm/ext/vector_float4.hpp>
#include <glm/ext/vector_double4.hpp>
#include <glm/ext/vector_int4.hpp>
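// These tests exercise the GLM ULP helpers: glm::next_float(x) returns the next representable
// value above x (optionally stepping several ULPs), glm::prev_float(x) the next one below, and
// glm::float_distance(a, b) counts the representable steps (ULPs) between a and b per component.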
static int test_ulp_float_dist()
{
int Error = 0;
glm::vec4 const A(1.0f);
glm::vec4 const B = glm::next_float(A);
Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
glm::vec4 const C = glm::prev_float(B);
Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
glm::ivec4 const D = glm::float_distance(A, B);
Error += D == glm::ivec4(1) ? 0 : 1;
glm::ivec4 const E = glm::float_distance(A, C);
Error += E == glm::ivec4(0) ? 0 : 1;
return Error;
}
static int test_ulp_float_step()
{
int Error = 0;
glm::vec4 const A(1.0f);
for(int i = 10; i < 1000; i *= 10)
{
glm::vec4 const B = glm::next_float(A, i);
Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
glm::vec4 const C = glm::prev_float(B, i);
Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
glm::ivec4 const D = glm::float_distance(A, B);
Error += D == glm::ivec4(i) ? 0 : 1;
glm::ivec4 const E = glm::float_distance(A, C);
Error += E == glm::ivec4(0) ? 0 : 1;
}
return Error;
}
static int test_ulp_double_dist()
{
int Error = 0;
glm::dvec4 const A(1.0);
glm::dvec4 const B = glm::next_float(A);
Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
glm::dvec4 const C = glm::prev_float(B);
Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
glm::ivec4 const D(glm::float_distance(A, B));
Error += D == glm::ivec4(1) ? 0 : 1;
glm::ivec4 const E = glm::float_distance(A, C);
Error += E == glm::ivec4(0) ? 0 : 1;
return Error;
}
static int test_ulp_double_step()
{
int Error = 0;
glm::dvec4 const A(1.0);
for(int i = 10; i < 1000; i *= 10)
{
glm::dvec4 const B = glm::next_float(A, i);
Error += glm::any(glm::notEqual(A, B, 0)) ? 0 : 1;
glm::dvec4 const C = glm::prev_float(B, i);
Error += glm::all(glm::equal(A, C, 0)) ? 0 : 1;
glm::ivec4 const D(glm::float_distance(A, B));
Error += D == glm::ivec4(i) ? 0 : 1;
glm::ivec4 const E(glm::float_distance(A, C));
Error += E == glm::ivec4(0) ? 0 : 1;
}
return Error;
}
int main()
{
int Error = 0;
Error += test_ulp_float_dist();
Error += test_ulp_float_step();
Error += test_ulp_double_dist();
Error += test_ulp_double_step();
return Error;
}
| 24.06
| 53
| 0.577722
|
diabl0-NEMESIS
|
3368534221b65ce82858b4f4faad2883b8f012fb
| 45,634
|
cpp
|
C++
|
FindSecret/Classes/Native/Il2CppCompilerCalculateTypeValues_12Table.cpp
|
GodIsWord/NewFindSecret
|
4f98f316d29936380f9665d6a6d89962d9ee5478
|
[
"MIT"
] | null | null | null |
FindSecret/Classes/Native/Il2CppCompilerCalculateTypeValues_12Table.cpp
|
GodIsWord/NewFindSecret
|
4f98f316d29936380f9665d6a6d89962d9ee5478
|
[
"MIT"
] | null | null | null |
FindSecret/Classes/Native/Il2CppCompilerCalculateTypeValues_12Table.cpp
|
GodIsWord/NewFindSecret
|
4f98f316d29936380f9665d6a6d89962d9ee5478
|
[
"MIT"
] | null | null | null |
#include "il2cpp-config.h"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include <cstring>
#include <string.h>
#include <stdio.h>
#include <cmath>
#include <limits>
#include <assert.h>
#include "class-internals.h"
#include "codegen/il2cpp-codegen.h"
#include "Mono_Security_Mono_Security_X509_X509StoreManager1046782375.h"
#include "Mono_Security_Mono_Security_X509_X509Stores1373936237.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Author1122691429.h"
#include "Mono_Security_Mono_Security_X509_Extensions_BasicC2462195278.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Extend3929363080.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Genera2702294159.h"
#include "Mono_Security_Mono_Security_X509_Extensions_KeyUsag820456313.h"
#include "Mono_Security_Mono_Security_X509_Extensions_KeyUsa1795615912.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Netsca1524296876.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Netsca3317701015.h"
#include "Mono_Security_Mono_Security_X509_Extensions_Subjec1536937677.h"
#include "Mono_Security_Mono_Security_Cryptography_HMAC3689525210.h"
#include "Mono_Security_Mono_Security_Cryptography_MD5SHA1723838944.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_AlertLeve2246417555.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_AlertDesc1549755611.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Alert4059934885.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_CipherAlg1174400495.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_CipherSui3414744575.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_CipherSui1129639304.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_CipherSui3316559455.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ClientCon2797401965.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ClientRec2031137796.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ClientSes1775821398.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ClientSes2353595803.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ContentTy2602934270.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Context3971234707.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ExchangeA1320888206.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_HandshakeS756684113.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_HashAlgor2376832258.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_HttpsClie1160552561.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_RecordPro3759049701.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_RecordPro3680907657.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_RecordPro3718352467.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_RSASslSig3558097625.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_RSASslSig2709678514.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SecurityC4242483129.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SecurityP2199972650.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SecurityP1513093309.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_ServerCon3848440993.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Validatio3834298736.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SslClient3914624661.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SslCipher1981645747.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SslHandsh2107581772.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SslStream1667413407.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_SslStream3504282820.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_TlsCipher1545013223.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_TlsClient2486039503.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_TlsExcept3534743363.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_TlsServer4144396432.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_TlsStream2365453965.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake1004704908.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3696583168.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3062346172.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3519510577.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake1824902654.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake2486981163.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake_C97965998.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake_643923608.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake2716496392.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3690397592.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3860330041.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake3343859594.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake1850379324.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Handshake_699469151.h"
#include "Mono_Security_Mono_Math_Prime_PrimalityTest1539325943.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Certifica4091668218.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Certifica1842476440.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_Certifica3743405224.h"
#include "Mono_Security_Mono_Security_Protocol_Tls_PrivateKe3240194217.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E3057255361.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U2732071528.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U1929481982.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U1704471045.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U3652892010.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U1337922363.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U2499776625.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U2490092596.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U3254766644.h"
#include "Mono_Security_U3CPrivateImplementationDetailsU3E_U1630999355.h"
#include "System_Xml_U3CModuleU3E692745525.h"
#include "System_Xml_System_MonoTODOAttribute4131080581.h"
#include "System_Xml_Mono_Xml_Schema_XsdIdentitySelector574258590.h"
#include "System_Xml_Mono_Xml_Schema_XsdIdentityField1964115728.h"
#include "System_Xml_Mono_Xml_Schema_XsdIdentityPath991900844.h"
#include "System_Xml_Mono_Xml_Schema_XsdIdentityStep1480907129.h"
#include "System_Xml_Mono_Xml_Schema_XsdKeyEntryField3552275292.h"
#include "System_Xml_Mono_Xml_Schema_XsdKeyEntryFieldCollect3698183622.h"
#include "System_Xml_Mono_Xml_Schema_XsdKeyEntry693496666.h"
#include "System_Xml_Mono_Xml_Schema_XsdKeyEntryCollection3090959213.h"
#include "System_Xml_Mono_Xml_Schema_XsdKeyTable2156891743.h"
#include "System_Xml_Mono_Xml_Schema_XsdParticleStateManager726654767.h"
#include "System_Xml_Mono_Xml_Schema_XsdValidationState376578997.h"
#include "System_Xml_Mono_Xml_Schema_XsdElementValidationSta2214590119.h"
#include "System_Xml_Mono_Xml_Schema_XsdSequenceValidationSta429792968.h"
#include "System_Xml_Mono_Xml_Schema_XsdChoiceValidationStat2566230191.h"
#include "System_Xml_Mono_Xml_Schema_XsdAllValidationState2703884157.h"
#include "System_Xml_Mono_Xml_Schema_XsdAnyValidationState3421545252.h"
#include "System_Xml_Mono_Xml_Schema_XsdAppendedValidationSt3608891238.h"
#include "System_Xml_Mono_Xml_Schema_XsdEmptyValidationState1344146143.h"
#include "System_Xml_Mono_Xml_Schema_XsdInvalidValidationSta3749995458.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1200 = { sizeof (X509StoreManager_t1046782376), -1, sizeof(X509StoreManager_t1046782376_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1200[2] =
{
X509StoreManager_t1046782376_StaticFields::get_offset_of__userStore_0(),
X509StoreManager_t1046782376_StaticFields::get_offset_of__machineStore_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1201 = { sizeof (X509Stores_t1373936238), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1201[2] =
{
X509Stores_t1373936238::get_offset_of__storePath_0(),
X509Stores_t1373936238::get_offset_of__trusted_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1202 = { sizeof (AuthorityKeyIdentifierExtension_t1122691429), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1202[1] =
{
AuthorityKeyIdentifierExtension_t1122691429::get_offset_of_aki_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1203 = { sizeof (BasicConstraintsExtension_t2462195279), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1203[2] =
{
BasicConstraintsExtension_t2462195279::get_offset_of_cA_3(),
BasicConstraintsExtension_t2462195279::get_offset_of_pathLenConstraint_4(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1204 = { sizeof (ExtendedKeyUsageExtension_t3929363080), -1, sizeof(ExtendedKeyUsageExtension_t3929363080_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1204[2] =
{
ExtendedKeyUsageExtension_t3929363080::get_offset_of_keyPurpose_3(),
ExtendedKeyUsageExtension_t3929363080_StaticFields::get_offset_of_U3CU3Ef__switchU24map14_4(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1205 = { sizeof (GeneralNames_t2702294159), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1205[5] =
{
GeneralNames_t2702294159::get_offset_of_rfc822Name_0(),
GeneralNames_t2702294159::get_offset_of_dnsName_1(),
GeneralNames_t2702294159::get_offset_of_directoryNames_2(),
GeneralNames_t2702294159::get_offset_of_uris_3(),
GeneralNames_t2702294159::get_offset_of_ipAddr_4(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1206 = { sizeof (KeyUsages_t820456313)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1206[11] =
{
KeyUsages_t820456313::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1207 = { sizeof (KeyUsageExtension_t1795615912), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1207[1] =
{
KeyUsageExtension_t1795615912::get_offset_of_kubits_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1208 = { sizeof (NetscapeCertTypeExtension_t1524296876), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1208[1] =
{
NetscapeCertTypeExtension_t1524296876::get_offset_of_ctbits_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1209 = { sizeof (CertTypes_t3317701015)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1209[8] =
{
CertTypes_t3317701015::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1210 = { sizeof (SubjectAltNameExtension_t1536937677), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1210[1] =
{
SubjectAltNameExtension_t1536937677::get_offset_of__names_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1211 = { sizeof (HMAC_t3689525210), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1211[4] =
{
HMAC_t3689525210::get_offset_of_hash_5(),
HMAC_t3689525210::get_offset_of_hashing_6(),
HMAC_t3689525210::get_offset_of_innerPad_7(),
HMAC_t3689525210::get_offset_of_outerPad_8(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1212 = { sizeof (MD5SHA1_t723838944), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1212[3] =
{
MD5SHA1_t723838944::get_offset_of_md5_4(),
MD5SHA1_t723838944::get_offset_of_sha_5(),
MD5SHA1_t723838944::get_offset_of_hashing_6(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1213 = { sizeof (AlertLevel_t2246417555)+ sizeof (Il2CppObject), sizeof(uint8_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1213[3] =
{
AlertLevel_t2246417555::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1214 = { sizeof (AlertDescription_t1549755611)+ sizeof (Il2CppObject), sizeof(uint8_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1214[25] =
{
AlertDescription_t1549755611::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1215 = { sizeof (Alert_t4059934885), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1215[2] =
{
Alert_t4059934885::get_offset_of_level_0(),
Alert_t4059934885::get_offset_of_description_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1216 = { sizeof (CipherAlgorithmType_t1174400495)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1216[8] =
{
CipherAlgorithmType_t1174400495::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1217 = { sizeof (CipherSuite_t3414744575), -1, sizeof(CipherSuite_t3414744575_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1217[21] =
{
CipherSuite_t3414744575_StaticFields::get_offset_of_EmptyArray_0(),
CipherSuite_t3414744575::get_offset_of_code_1(),
CipherSuite_t3414744575::get_offset_of_name_2(),
CipherSuite_t3414744575::get_offset_of_cipherAlgorithmType_3(),
CipherSuite_t3414744575::get_offset_of_hashAlgorithmType_4(),
CipherSuite_t3414744575::get_offset_of_exchangeAlgorithmType_5(),
CipherSuite_t3414744575::get_offset_of_isExportable_6(),
CipherSuite_t3414744575::get_offset_of_cipherMode_7(),
CipherSuite_t3414744575::get_offset_of_keyMaterialSize_8(),
CipherSuite_t3414744575::get_offset_of_keyBlockSize_9(),
CipherSuite_t3414744575::get_offset_of_expandedKeyMaterialSize_10(),
CipherSuite_t3414744575::get_offset_of_effectiveKeyBits_11(),
CipherSuite_t3414744575::get_offset_of_ivSize_12(),
CipherSuite_t3414744575::get_offset_of_blockSize_13(),
CipherSuite_t3414744575::get_offset_of_context_14(),
CipherSuite_t3414744575::get_offset_of_encryptionAlgorithm_15(),
CipherSuite_t3414744575::get_offset_of_encryptionCipher_16(),
CipherSuite_t3414744575::get_offset_of_decryptionAlgorithm_17(),
CipherSuite_t3414744575::get_offset_of_decryptionCipher_18(),
CipherSuite_t3414744575::get_offset_of_clientHMAC_19(),
CipherSuite_t3414744575::get_offset_of_serverHMAC_20(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1218 = { sizeof (CipherSuiteCollection_t1129639304), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1218[2] =
{
CipherSuiteCollection_t1129639304::get_offset_of_cipherSuites_0(),
CipherSuiteCollection_t1129639304::get_offset_of_protocol_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1219 = { sizeof (CipherSuiteFactory_t3316559455), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1220 = { sizeof (ClientContext_t2797401965), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1220[2] =
{
ClientContext_t2797401965::get_offset_of_sslStream_30(),
ClientContext_t2797401965::get_offset_of_clientHelloProtocol_31(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1221 = { sizeof (ClientRecordProtocol_t2031137796), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1222 = { sizeof (ClientSessionInfo_t1775821398), -1, sizeof(ClientSessionInfo_t1775821398_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1222[6] =
{
ClientSessionInfo_t1775821398_StaticFields::get_offset_of_ValidityInterval_0(),
ClientSessionInfo_t1775821398::get_offset_of_disposed_1(),
ClientSessionInfo_t1775821398::get_offset_of_validuntil_2(),
ClientSessionInfo_t1775821398::get_offset_of_host_3(),
ClientSessionInfo_t1775821398::get_offset_of_sid_4(),
ClientSessionInfo_t1775821398::get_offset_of_masterSecret_5(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1223 = { sizeof (ClientSessionCache_t2353595803), -1, sizeof(ClientSessionCache_t2353595803_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1223[2] =
{
ClientSessionCache_t2353595803_StaticFields::get_offset_of_cache_0(),
ClientSessionCache_t2353595803_StaticFields::get_offset_of_locker_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1224 = { sizeof (ContentType_t2602934270)+ sizeof (Il2CppObject), sizeof(uint8_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1224[5] =
{
ContentType_t2602934270::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1225 = { sizeof (Context_t3971234707), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1225[30] =
{
Context_t3971234707::get_offset_of_securityProtocol_0(),
Context_t3971234707::get_offset_of_sessionId_1(),
Context_t3971234707::get_offset_of_compressionMethod_2(),
Context_t3971234707::get_offset_of_serverSettings_3(),
Context_t3971234707::get_offset_of_clientSettings_4(),
Context_t3971234707::get_offset_of_current_5(),
Context_t3971234707::get_offset_of_negotiating_6(),
Context_t3971234707::get_offset_of_read_7(),
Context_t3971234707::get_offset_of_write_8(),
Context_t3971234707::get_offset_of_supportedCiphers_9(),
Context_t3971234707::get_offset_of_lastHandshakeMsg_10(),
Context_t3971234707::get_offset_of_handshakeState_11(),
Context_t3971234707::get_offset_of_abbreviatedHandshake_12(),
Context_t3971234707::get_offset_of_receivedConnectionEnd_13(),
Context_t3971234707::get_offset_of_sentConnectionEnd_14(),
Context_t3971234707::get_offset_of_protocolNegotiated_15(),
Context_t3971234707::get_offset_of_writeSequenceNumber_16(),
Context_t3971234707::get_offset_of_readSequenceNumber_17(),
Context_t3971234707::get_offset_of_clientRandom_18(),
Context_t3971234707::get_offset_of_serverRandom_19(),
Context_t3971234707::get_offset_of_randomCS_20(),
Context_t3971234707::get_offset_of_randomSC_21(),
Context_t3971234707::get_offset_of_masterSecret_22(),
Context_t3971234707::get_offset_of_clientWriteKey_23(),
Context_t3971234707::get_offset_of_serverWriteKey_24(),
Context_t3971234707::get_offset_of_clientWriteIV_25(),
Context_t3971234707::get_offset_of_serverWriteIV_26(),
Context_t3971234707::get_offset_of_handshakeMessages_27(),
Context_t3971234707::get_offset_of_random_28(),
Context_t3971234707::get_offset_of_recordProtocol_29(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1226 = { sizeof (ExchangeAlgorithmType_t1320888206)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1226[6] =
{
ExchangeAlgorithmType_t1320888206::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1227 = { sizeof (HandshakeState_t756684113)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1227[4] =
{
HandshakeState_t756684113::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1228 = { sizeof (HashAlgorithmType_t2376832258)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1228[4] =
{
HashAlgorithmType_t2376832258::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1229 = { sizeof (HttpsClientStream_t1160552561), -1, sizeof(HttpsClientStream_t1160552561_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1229[4] =
{
HttpsClientStream_t1160552561::get_offset_of__request_21(),
HttpsClientStream_t1160552561::get_offset_of__status_22(),
HttpsClientStream_t1160552561_StaticFields::get_offset_of_U3CU3Ef__amU24cache2_23(),
HttpsClientStream_t1160552561_StaticFields::get_offset_of_U3CU3Ef__amU24cache3_24(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1230 = { sizeof (RecordProtocol_t3759049701), -1, sizeof(RecordProtocol_t3759049701_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1230[3] =
{
RecordProtocol_t3759049701_StaticFields::get_offset_of_record_processing_0(),
RecordProtocol_t3759049701::get_offset_of_innerStream_1(),
RecordProtocol_t3759049701::get_offset_of_context_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1231 = { sizeof (ReceiveRecordAsyncResult_t3680907657), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1231[9] =
{
ReceiveRecordAsyncResult_t3680907657::get_offset_of_locker_0(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__userCallback_1(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__userState_2(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__asyncException_3(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of_handle_4(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__resultingBuffer_5(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__record_6(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of_completed_7(),
ReceiveRecordAsyncResult_t3680907657::get_offset_of__initialBuffer_8(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1232 = { sizeof (SendRecordAsyncResult_t3718352467), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1232[7] =
{
SendRecordAsyncResult_t3718352467::get_offset_of_locker_0(),
SendRecordAsyncResult_t3718352467::get_offset_of__userCallback_1(),
SendRecordAsyncResult_t3718352467::get_offset_of__userState_2(),
SendRecordAsyncResult_t3718352467::get_offset_of__asyncException_3(),
SendRecordAsyncResult_t3718352467::get_offset_of_handle_4(),
SendRecordAsyncResult_t3718352467::get_offset_of__message_5(),
SendRecordAsyncResult_t3718352467::get_offset_of_completed_6(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1233 = { sizeof (RSASslSignatureDeformatter_t3558097625), -1, sizeof(RSASslSignatureDeformatter_t3558097625_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1233[3] =
{
RSASslSignatureDeformatter_t3558097625::get_offset_of_key_0(),
RSASslSignatureDeformatter_t3558097625::get_offset_of_hash_1(),
RSASslSignatureDeformatter_t3558097625_StaticFields::get_offset_of_U3CU3Ef__switchU24map15_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1234 = { sizeof (RSASslSignatureFormatter_t2709678514), -1, sizeof(RSASslSignatureFormatter_t2709678514_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1234[3] =
{
RSASslSignatureFormatter_t2709678514::get_offset_of_key_0(),
RSASslSignatureFormatter_t2709678514::get_offset_of_hash_1(),
RSASslSignatureFormatter_t2709678514_StaticFields::get_offset_of_U3CU3Ef__switchU24map16_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1235 = { sizeof (SecurityCompressionType_t4242483129)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1235[3] =
{
SecurityCompressionType_t4242483129::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1236 = { sizeof (SecurityParameters_t2199972650), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1236[3] =
{
SecurityParameters_t2199972650::get_offset_of_cipher_0(),
SecurityParameters_t2199972650::get_offset_of_clientWriteMAC_1(),
SecurityParameters_t2199972650::get_offset_of_serverWriteMAC_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1237 = { sizeof (SecurityProtocolType_t1513093309)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1237[5] =
{
SecurityProtocolType_t1513093309::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1238 = { sizeof (ServerContext_t3848440993), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1239 = { sizeof (ValidationResult_t3834298736), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1239[3] =
{
ValidationResult_t3834298736::get_offset_of_trusted_0(),
ValidationResult_t3834298736::get_offset_of_user_denied_1(),
ValidationResult_t3834298736::get_offset_of_error_code_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1240 = { sizeof (SslClientStream_t3914624661), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1240[4] =
{
SslClientStream_t3914624661::get_offset_of_ServerCertValidation_17(),
SslClientStream_t3914624661::get_offset_of_ClientCertSelection_18(),
SslClientStream_t3914624661::get_offset_of_PrivateKeySelection_19(),
SslClientStream_t3914624661::get_offset_of_ServerCertValidation2_20(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1241 = { sizeof (SslCipherSuite_t1981645747), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1241[3] =
{
SslCipherSuite_t1981645747::get_offset_of_pad1_21(),
SslCipherSuite_t1981645747::get_offset_of_pad2_22(),
SslCipherSuite_t1981645747::get_offset_of_header_23(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1242 = { sizeof (SslHandshakeHash_t2107581772), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1242[8] =
{
SslHandshakeHash_t2107581772::get_offset_of_md5_4(),
SslHandshakeHash_t2107581772::get_offset_of_sha_5(),
SslHandshakeHash_t2107581772::get_offset_of_hashing_6(),
SslHandshakeHash_t2107581772::get_offset_of_secret_7(),
SslHandshakeHash_t2107581772::get_offset_of_innerPadMD5_8(),
SslHandshakeHash_t2107581772::get_offset_of_outerPadMD5_9(),
SslHandshakeHash_t2107581772::get_offset_of_innerPadSHA_10(),
SslHandshakeHash_t2107581772::get_offset_of_outerPadSHA_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1243 = { sizeof (SslStreamBase_t1667413407), -1, sizeof(SslStreamBase_t1667413407_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1243[15] =
{
0,
SslStreamBase_t1667413407_StaticFields::get_offset_of_record_processing_3(),
SslStreamBase_t1667413407::get_offset_of_innerStream_4(),
SslStreamBase_t1667413407::get_offset_of_inputBuffer_5(),
SslStreamBase_t1667413407::get_offset_of_context_6(),
SslStreamBase_t1667413407::get_offset_of_protocol_7(),
SslStreamBase_t1667413407::get_offset_of_ownsStream_8(),
SslStreamBase_t1667413407::get_offset_of_disposed_9(),
SslStreamBase_t1667413407::get_offset_of_checkCertRevocationStatus_10(),
SslStreamBase_t1667413407::get_offset_of_negotiate_11(),
SslStreamBase_t1667413407::get_offset_of_read_12(),
SslStreamBase_t1667413407::get_offset_of_write_13(),
SslStreamBase_t1667413407::get_offset_of_negotiationComplete_14(),
SslStreamBase_t1667413407::get_offset_of_recbuf_15(),
SslStreamBase_t1667413407::get_offset_of_recordStream_16(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1244 = { sizeof (InternalAsyncResult_t3504282820), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1244[12] =
{
InternalAsyncResult_t3504282820::get_offset_of_locker_0(),
InternalAsyncResult_t3504282820::get_offset_of__userCallback_1(),
InternalAsyncResult_t3504282820::get_offset_of__userState_2(),
InternalAsyncResult_t3504282820::get_offset_of__asyncException_3(),
InternalAsyncResult_t3504282820::get_offset_of_handle_4(),
InternalAsyncResult_t3504282820::get_offset_of_completed_5(),
InternalAsyncResult_t3504282820::get_offset_of__bytesRead_6(),
InternalAsyncResult_t3504282820::get_offset_of__fromWrite_7(),
InternalAsyncResult_t3504282820::get_offset_of__proceedAfterHandshake_8(),
InternalAsyncResult_t3504282820::get_offset_of__buffer_9(),
InternalAsyncResult_t3504282820::get_offset_of__offset_10(),
InternalAsyncResult_t3504282820::get_offset_of__count_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1245 = { sizeof (TlsCipherSuite_t1545013223), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1245[2] =
{
TlsCipherSuite_t1545013223::get_offset_of_header_21(),
TlsCipherSuite_t1545013223::get_offset_of_headerLock_22(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1246 = { sizeof (TlsClientSettings_t2486039503), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1246[4] =
{
TlsClientSettings_t2486039503::get_offset_of_targetHost_0(),
TlsClientSettings_t2486039503::get_offset_of_certificates_1(),
TlsClientSettings_t2486039503::get_offset_of_clientCertificate_2(),
TlsClientSettings_t2486039503::get_offset_of_certificateRSA_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1247 = { sizeof (TlsException_t3534743363), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1247[1] =
{
TlsException_t3534743363::get_offset_of_alert_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1248 = { sizeof (TlsServerSettings_t4144396432), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1248[8] =
{
TlsServerSettings_t4144396432::get_offset_of_certificates_0(),
TlsServerSettings_t4144396432::get_offset_of_certificateRSA_1(),
TlsServerSettings_t4144396432::get_offset_of_rsaParameters_2(),
TlsServerSettings_t4144396432::get_offset_of_signedParams_3(),
TlsServerSettings_t4144396432::get_offset_of_distinguisedNames_4(),
TlsServerSettings_t4144396432::get_offset_of_serverKeyExchange_5(),
TlsServerSettings_t4144396432::get_offset_of_certificateRequest_6(),
TlsServerSettings_t4144396432::get_offset_of_certificateTypes_7(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1249 = { sizeof (TlsStream_t2365453965), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1249[4] =
{
TlsStream_t2365453965::get_offset_of_canRead_2(),
TlsStream_t2365453965::get_offset_of_canWrite_3(),
TlsStream_t2365453965::get_offset_of_buffer_4(),
TlsStream_t2365453965::get_offset_of_temp_5(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1250 = { sizeof (ClientCertificateType_t1004704908)+ sizeof (Il2CppObject), sizeof(int32_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1250[6] =
{
ClientCertificateType_t1004704908::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1251 = { sizeof (HandshakeMessage_t3696583168), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1251[4] =
{
HandshakeMessage_t3696583168::get_offset_of_context_6(),
HandshakeMessage_t3696583168::get_offset_of_handshakeType_7(),
HandshakeMessage_t3696583168::get_offset_of_contentType_8(),
HandshakeMessage_t3696583168::get_offset_of_cache_9(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1252 = { sizeof (HandshakeType_t3062346172)+ sizeof (Il2CppObject), sizeof(uint8_t), 0, 0 };
extern const int32_t g_FieldOffsetTable1252[12] =
{
HandshakeType_t3062346172::get_offset_of_value___1() + static_cast<int32_t>(sizeof(Il2CppObject)),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1253 = { sizeof (TlsClientCertificate_t3519510577), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1253[2] =
{
TlsClientCertificate_t3519510577::get_offset_of_clientCertSelected_10(),
TlsClientCertificate_t3519510577::get_offset_of_clientCert_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1254 = { sizeof (TlsClientCertificateVerify_t1824902654), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1255 = { sizeof (TlsClientFinished_t2486981163), -1, sizeof(TlsClientFinished_t2486981163_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1255[1] =
{
TlsClientFinished_t2486981163_StaticFields::get_offset_of_Ssl3Marker_10(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1256 = { sizeof (TlsClientHello_t97965998), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1256[1] =
{
TlsClientHello_t97965998::get_offset_of_random_10(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1257 = { sizeof (TlsClientKeyExchange_t643923608), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1258 = { sizeof (TlsServerCertificate_t2716496392), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1258[1] =
{
TlsServerCertificate_t2716496392::get_offset_of_certificates_10(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1259 = { sizeof (TlsServerCertificateRequest_t3690397592), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1259[2] =
{
TlsServerCertificateRequest_t3690397592::get_offset_of_certificateTypes_10(),
TlsServerCertificateRequest_t3690397592::get_offset_of_distinguisedNames_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1260 = { sizeof (TlsServerFinished_t3860330041), -1, sizeof(TlsServerFinished_t3860330041_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1260[1] =
{
TlsServerFinished_t3860330041_StaticFields::get_offset_of_Ssl3Marker_10(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1261 = { sizeof (TlsServerHello_t3343859594), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1261[4] =
{
TlsServerHello_t3343859594::get_offset_of_compressionMethod_10(),
TlsServerHello_t3343859594::get_offset_of_random_11(),
TlsServerHello_t3343859594::get_offset_of_sessionId_12(),
TlsServerHello_t3343859594::get_offset_of_cipherSuite_13(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1262 = { sizeof (TlsServerHelloDone_t1850379324), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1263 = { sizeof (TlsServerKeyExchange_t699469151), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1263[2] =
{
TlsServerKeyExchange_t699469151::get_offset_of_rsaParams_10(),
TlsServerKeyExchange_t699469151::get_offset_of_signedParams_11(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1264 = { sizeof (PrimalityTest_t1539325944), sizeof(Il2CppMethodPointer), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1265 = { sizeof (CertificateValidationCallback_t4091668218), sizeof(Il2CppMethodPointer), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1266 = { sizeof (CertificateValidationCallback2_t1842476440), sizeof(Il2CppMethodPointer), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1267 = { sizeof (CertificateSelectionCallback_t3743405224), sizeof(Il2CppMethodPointer), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1268 = { sizeof (PrivateKeySelectionCallback_t3240194217), sizeof(Il2CppMethodPointer), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1269 = { sizeof (U3CPrivateImplementationDetailsU3E_t3057255362), -1, sizeof(U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1269[15] =
{
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D0_0(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D5_1(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D6_2(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D7_3(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D8_4(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D9_5(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D11_6(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D12_7(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D13_8(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D14_9(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D15_10(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D16_11(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D17_12(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D21_13(),
U3CPrivateImplementationDetailsU3E_t3057255362_StaticFields::get_offset_of_U24U24fieldU2D22_14(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1270 = { sizeof (U24ArrayTypeU243132_t2732071529)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU243132_t2732071529 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1271 = { sizeof (U24ArrayTypeU24256_t1929481983)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU24256_t1929481983 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1272 = { sizeof (U24ArrayTypeU2420_t1704471046)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2420_t1704471046 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1273 = { sizeof (U24ArrayTypeU2432_t3652892011)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2432_t3652892011 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1274 = { sizeof (U24ArrayTypeU2448_t1337922364)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2448_t1337922364 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1275 = { sizeof (U24ArrayTypeU2464_t499776626)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2464_t499776626 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1276 = { sizeof (U24ArrayTypeU2412_t2490092597)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2412_t2490092597 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1277 = { sizeof (U24ArrayTypeU2416_t3254766645)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU2416_t3254766645 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1278 = { sizeof (U24ArrayTypeU244_t1630999355)+ sizeof (Il2CppObject), sizeof(U24ArrayTypeU244_t1630999355 ), 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1279 = { sizeof (U3CModuleU3E_t692745529), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1280 = { sizeof (MonoTODOAttribute_t4131080583), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1281 = { sizeof (XsdIdentitySelector_t574258590), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1281[3] =
{
XsdIdentitySelector_t574258590::get_offset_of_selectorPaths_0(),
XsdIdentitySelector_t574258590::get_offset_of_fields_1(),
XsdIdentitySelector_t574258590::get_offset_of_cachedFields_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1282 = { sizeof (XsdIdentityField_t1964115728), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1282[2] =
{
XsdIdentityField_t1964115728::get_offset_of_fieldPaths_0(),
XsdIdentityField_t1964115728::get_offset_of_index_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1283 = { sizeof (XsdIdentityPath_t991900844), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1283[2] =
{
XsdIdentityPath_t991900844::get_offset_of_OrderedSteps_0(),
XsdIdentityPath_t991900844::get_offset_of_Descendants_1(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1284 = { sizeof (XsdIdentityStep_t1480907129), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1284[6] =
{
XsdIdentityStep_t1480907129::get_offset_of_IsCurrent_0(),
XsdIdentityStep_t1480907129::get_offset_of_IsAttribute_1(),
XsdIdentityStep_t1480907129::get_offset_of_IsAnyName_2(),
XsdIdentityStep_t1480907129::get_offset_of_NsName_3(),
XsdIdentityStep_t1480907129::get_offset_of_Name_4(),
XsdIdentityStep_t1480907129::get_offset_of_Namespace_5(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1285 = { sizeof (XsdKeyEntryField_t3552275292), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1285[13] =
{
XsdKeyEntryField_t3552275292::get_offset_of_entry_0(),
XsdKeyEntryField_t3552275292::get_offset_of_field_1(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldFound_2(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldLineNumber_3(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldLinePosition_4(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldHasLineInfo_5(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldType_6(),
XsdKeyEntryField_t3552275292::get_offset_of_Identity_7(),
XsdKeyEntryField_t3552275292::get_offset_of_IsXsiNil_8(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldFoundDepth_9(),
XsdKeyEntryField_t3552275292::get_offset_of_FieldFoundPath_10(),
XsdKeyEntryField_t3552275292::get_offset_of_Consuming_11(),
XsdKeyEntryField_t3552275292::get_offset_of_Consumed_12(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1286 = { sizeof (XsdKeyEntryFieldCollection_t3698183622), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1287 = { sizeof (XsdKeyEntry_t693496666), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1287[8] =
{
XsdKeyEntry_t693496666::get_offset_of_StartDepth_0(),
XsdKeyEntry_t693496666::get_offset_of_SelectorLineNumber_1(),
XsdKeyEntry_t693496666::get_offset_of_SelectorLinePosition_2(),
XsdKeyEntry_t693496666::get_offset_of_SelectorHasLineInfo_3(),
XsdKeyEntry_t693496666::get_offset_of_KeyFields_4(),
XsdKeyEntry_t693496666::get_offset_of_KeyRefFound_5(),
XsdKeyEntry_t693496666::get_offset_of_OwnerSequence_6(),
XsdKeyEntry_t693496666::get_offset_of_keyFound_7(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1288 = { sizeof (XsdKeyEntryCollection_t3090959213), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1289 = { sizeof (XsdKeyTable_t2156891743), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1289[9] =
{
XsdKeyTable_t2156891743::get_offset_of_alwaysTrue_0(),
XsdKeyTable_t2156891743::get_offset_of_selector_1(),
XsdKeyTable_t2156891743::get_offset_of_source_2(),
XsdKeyTable_t2156891743::get_offset_of_qname_3(),
XsdKeyTable_t2156891743::get_offset_of_refKeyName_4(),
XsdKeyTable_t2156891743::get_offset_of_Entries_5(),
XsdKeyTable_t2156891743::get_offset_of_FinishedEntries_6(),
XsdKeyTable_t2156891743::get_offset_of_StartDepth_7(),
XsdKeyTable_t2156891743::get_offset_of_ReferencedKey_8(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1290 = { sizeof (XsdParticleStateManager_t726654767), -1, sizeof(XsdParticleStateManager_t726654767_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1290[6] =
{
XsdParticleStateManager_t726654767::get_offset_of_table_0(),
XsdParticleStateManager_t726654767::get_offset_of_processContents_1(),
XsdParticleStateManager_t726654767::get_offset_of_CurrentElement_2(),
XsdParticleStateManager_t726654767::get_offset_of_ContextStack_3(),
XsdParticleStateManager_t726654767::get_offset_of_Context_4(),
XsdParticleStateManager_t726654767_StaticFields::get_offset_of_U3CU3Ef__switchU24map2_5(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1291 = { sizeof (XsdValidationState_t376578997), -1, sizeof(XsdValidationState_t376578997_StaticFields), 0 };
extern const int32_t g_FieldOffsetTable1291[3] =
{
XsdValidationState_t376578997_StaticFields::get_offset_of_invalid_0(),
XsdValidationState_t376578997::get_offset_of_occured_1(),
XsdValidationState_t376578997::get_offset_of_manager_2(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1292 = { sizeof (XsdElementValidationState_t2214590119), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1292[1] =
{
XsdElementValidationState_t2214590119::get_offset_of_element_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1293 = { sizeof (XsdSequenceValidationState_t429792968), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1293[4] =
{
XsdSequenceValidationState_t429792968::get_offset_of_seq_3(),
XsdSequenceValidationState_t429792968::get_offset_of_current_4(),
XsdSequenceValidationState_t429792968::get_offset_of_currentAutomata_5(),
XsdSequenceValidationState_t429792968::get_offset_of_emptiable_6(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1294 = { sizeof (XsdChoiceValidationState_t2566230191), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1294[3] =
{
XsdChoiceValidationState_t2566230191::get_offset_of_choice_3(),
XsdChoiceValidationState_t2566230191::get_offset_of_emptiable_4(),
XsdChoiceValidationState_t2566230191::get_offset_of_emptiableComputed_5(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1295 = { sizeof (XsdAllValidationState_t2703884157), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1295[2] =
{
XsdAllValidationState_t2703884157::get_offset_of_all_3(),
XsdAllValidationState_t2703884157::get_offset_of_consumed_4(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1296 = { sizeof (XsdAnyValidationState_t3421545252), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1296[1] =
{
XsdAnyValidationState_t3421545252::get_offset_of_any_3(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1297 = { sizeof (XsdAppendedValidationState_t3608891238), -1, 0, 0 };
extern const int32_t g_FieldOffsetTable1297[2] =
{
XsdAppendedValidationState_t3608891238::get_offset_of_head_3(),
XsdAppendedValidationState_t3608891238::get_offset_of_rest_4(),
};
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1298 = { sizeof (XsdEmptyValidationState_t1344146143), -1, 0, 0 };
extern const Il2CppTypeDefinitionSizes g_typeDefinitionSize1299 = { sizeof (XsdInvalidValidationState_t3749995458), -1, 0, 0 };
#ifdef __clang__
#pragma clang diagnostic pop
#endif
| 54.455847
| 202
| 0.853048
|
GodIsWord
|
336a235d27b53c1358cbb327e397ba3556f57d8c
| 1,411
|
cpp
|
C++
|
src/util/readwritefile.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
src/util/readwritefile.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
src/util/readwritefile.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
// Copyright (c) 2015-2020 The Beans Core developers
// Copyright (c) 2017 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <fs.h>
#include <limits>
#include <stdio.h>
#include <string>
#include <utility>
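// Read the contents of filename into a string. Reading stops at end-of-file
// or once more than maxsize bytes have been accumulated; returns {false, ""}
// if the file cannot be opened or a read error occurs.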
std::pair<bool,std::string> ReadBinaryFile(const fs::path &filename, size_t maxsize=std::numeric_limits<size_t>::max())
{
FILE *f = fsbridge::fopen(filename, "rb");
if (f == nullptr)
return std::make_pair(false,"");
std::string retval;
char buffer[128];
do {
const size_t n = fread(buffer, 1, sizeof(buffer), f);
// Check for reading errors so we don't return any data if we couldn't
// read the entire file (or up to maxsize)
if (ferror(f)) {
fclose(f);
return std::make_pair(false,"");
}
retval.append(buffer, buffer+n);
} while (!feof(f) && retval.size() <= maxsize);
fclose(f);
return std::make_pair(true,retval);
}
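// Write data to filename. Returns false if the file cannot be opened, the
// write is short, or fclose() reports an error (a failed fclose() can mean
// buffered data was never flushed to disk).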
bool WriteBinaryFile(const fs::path &filename, const std::string &data)
{
FILE *f = fsbridge::fopen(filename, "wb");
if (f == nullptr)
return false;
if (fwrite(data.data(), 1, data.size(), f) != data.size()) {
fclose(f);
return false;
}
if (fclose(f) != 0) {
return false;
}
return true;
}
| 29.395833
| 119
| 0.610206
|
BakedInside
|
3375a6cd410e3ef974d9abdd2c23853a62874e1b
| 26,436
|
hpp
|
C++
|
Container/Sequence/forward_list.hpp
|
chestnutme/MiniSTL
|
d8b8b5f51d13623490aceb58f3fdb623d38d8e24
|
[
"MIT"
] | 2
|
2019-02-24T09:47:04.000Z
|
2020-08-06T02:35:03.000Z
|
Container/Sequence/forward_list.hpp
|
chestnutme/MiniSTL
|
d8b8b5f51d13623490aceb58f3fdb623d38d8e24
|
[
"MIT"
] | null | null | null |
Container/Sequence/forward_list.hpp
|
chestnutme/MiniSTL
|
d8b8b5f51d13623490aceb58f3fdb623d38d8e24
|
[
"MIT"
] | null | null | null |
#pragma once
#include <initializer_list>
#include <cstdint>
#include <utility>
#include "Allocator/memory.hpp"
#include "Traits/type_traits.hpp"
#include "Iterator/iterator.hpp"
namespace MiniSTL {
struct forward_list_node_base {
forward_list_node_base* next;
};
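// Link new_node into the list directly after prev_node and return new_node.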
inline forward_list_node_base*
__make_link(forward_list_node_base* prev_node,
forward_list_node_base* new_node) {
new_node->next = prev_node->next;
prev_node->next = new_node;
return new_node;
}
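// Starting from head, return the node whose `next` pointer is `node`
// (its predecessor); returns nullptr if no such node is found.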
inline forward_list_node_base*
__previous(forward_list_node_base* head,
const forward_list_node_base* node) {
while (head && head->next != node)
head = head->next;
return head;
}
inline const forward_list_node_base*
__previous(const forward_list_node_base* head,
const forward_list_node_base* node) {
while (head && head->next != node)
head = head->next;
return head;
}
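// Detach the nodes in the range (before_first, before_last] from their list
// and re-link them so that they immediately follow pos.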
inline void
__splice_after(forward_list_node_base* pos,
forward_list_node_base* before_first,
forward_list_node_base* before_last) {
// pos == before_first : done
// pos = before_last : can not insert one after itself
if (pos != before_first && pos != before_last) {
forward_list_node_base* first = before_first->next;
forward_list_node_base* after = pos->next;
before_first->next = before_last->next;
pos->next = first;
before_last->next = after;
}
}
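// Move every node that follows head (i.e. a whole list) so that the nodes
// immediately follow pos, leaving head's list empty.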
inline void
__splice_after(forward_list_node_base* pos,
forward_list_node_base* head) {
forward_list_node_base* before_last = __previous(head, nullptr);
if (before_last != head) {
forward_list_node_base* after = pos->next;
pos->next = head->next;
head->next = nullptr;
before_last->next = after;
}
}
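// Reverse, in place, the chain of nodes starting at node; returns the new head.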
inline forward_list_node_base*
__reverse(forward_list_node_base* node) {
forward_list_node_base* result = node;
node = node->next;
result->next = nullptr;
while(node) {
forward_list_node_base* next = node->next;
node->next = result;
result = node;
node = next;
}
return result;
}
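// Count the nodes in the chain starting at node (node itself included).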
inline size_t __size(forward_list_node_base* node) {
size_t result = 0;
for ( ; node != nullptr; node = node->next)
++result;
return result;
}
template <class T>
struct forward_list_node : public forward_list_node_base {
T data;
};
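// Type-agnostic part of the iterator: it stores only the raw node pointer,
// so increment and (in)equality comparison do not depend on T.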
struct forward_list_iterator_base {
using size_type = size_t;
using difference_type = ptrdiff_t;
using iterator_category = forward_iterator_tag;
forward_list_node_base* node;
forward_list_iterator_base(forward_list_node_base* x) : node(x) {}
void incr() { node = node->next; }
bool operator==(const forward_list_iterator_base& x) const { return node == x.node; }
bool operator!=(const forward_list_iterator_base& x) const { return node != x.node; }
};
template <class T, class Ref, class Ptr>
struct forward_list_iterator : public forward_list_iterator_base {
using value_type = T;
using pointer = Ptr;
using reference = Ref;
using iterator = forward_list_iterator<T, T&, T*>;
using const_iterator = forward_list_iterator<T, const T&, const T*>;
using self = forward_list_iterator<T, Ref, Ptr>;
using node_t = forward_list_node<T>;
using node_base = forward_list_node_base;
forward_list_iterator() : forward_list_iterator_base(nullptr) {}
forward_list_iterator(node_t* x)
: forward_list_iterator_base(static_cast<node_base*>(x)) {}
forward_list_iterator(const iterator& x)
: forward_list_iterator_base(x.node) {}
reference operator*() const
{ return static_cast<node_t*>(node)->data; }
pointer operator->() const { return &(operator*()); }
self& operator++() {
incr();
return *this;
}
self operator++(int) {
self tmp = *this;
incr();
return tmp;
}
};
template <class T, class Allocator = simple_alloc<T>>
class forward_list {
public:
// types alias
using size_type = size_t;
using difference_type = ptrdiff_t;
using value_type = T;
using reference = T&;
using pointer = T*;
using const_reference = const T&;
using const_pointer = const T*;
using iterator = forward_list_iterator<T, T&, T*>;
using const_iterator = forward_list_iterator<T, const T&, const T*>;
using allocator_type = Allocator;
protected:
using node_alloc = simple_alloc<forward_list_node<T>>;
using node_t = forward_list_node<T>;
using node_base = forward_list_node_base;
// head does not need a data member
forward_list_node_base head;
node_t* get_node() { return node_alloc::allocate(1); }
void put_node(node_t* p) { node_alloc::deallocate(p); }
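// Allocate a node and copy-construct val into it; the node is returned to
// the allocator if construction throws.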
node_t* create_node(const T& val) {
node_t* p = get_node();
try {
construct(&p->data, val);
} catch(std::exception&) {
put_node(p);
throw;
}
return p;
}
node_t* create_node(T&& val) {
node_t* p = get_node();
try {
new (&p->data) T(std::move(val));
} catch(std::exception&) {
put_node(p);
throw;
}
return p;
}
public:
allocator_type get_allocator() const noexcept { return allocator_type(); }
public:
// construct/copy/destroy:
forward_list() { head.next = nullptr; }
explicit forward_list(size_type n) {
head.next = nullptr;
insert_after_fill(before_begin(), n, T());
}
forward_list(size_type n, const T& val) {
head.next = nullptr;
insert_after_fill(before_begin(), n, val);
}
template <class InputIt>
forward_list(InputIt first, InputIt last) {
head.next = nullptr;
insert_after_range(before_begin(), first, last);
}
forward_list(const forward_list& x) {
head.next = nullptr;
insert_after_range(before_begin(), x.begin(), x.end());
}
forward_list(forward_list&& x) {
head = x.head;
x.head.next = nullptr;
}
forward_list(std::initializer_list<T> ilist) {
head.next = nullptr;
insert_after_range(before_begin(), ilist.begin(), ilist.end());
}
~forward_list() {
clear();
}
forward_list& operator=(const forward_list& x);
forward_list& operator=(forward_list&& x) {
if(&x != this) {
clear();
swap(x);
}
return *this;
}
forward_list& operator=(std::initializer_list<T>);
public:
//assign
template <class InputIt>
void assign(InputIt first, InputIt last) {
assign_dispatch(first, last, integral<InputIt>());
}
void assign(size_type n, const T& val) {
fill_assign(n, val);
}
void assign(std::initializer_list<T> ilist) {
assign_dispatch(ilist.begin(), ilist.end(), false_type());
}
protected:
void fill_assign(size_type n, const T& val);
template<class Integer>
void assign_dispatch(Integer n, Integer val, true_type){
fill_assign(static_cast<size_type>(n),static_cast<T>(val));
}
template<class InputIt>
void assign_dispatch(InputIt first, InputIt last, false_type);
public:
// iterators:
iterator before_begin() noexcept {
return iterator(static_cast<node_t*>(&head));
}
const_iterator before_begin() const noexcept {
// head carries no data, so casting away its constness here is safe:
// the returned const_iterator still only exposes const access to elements
return const_iterator(static_cast<node_t*>(const_cast<node_base*>(&head)));
}
iterator begin() noexcept {
return iterator(static_cast<node_t*>(head.next));
}
const_iterator begin() const noexcept {
return const_iterator(static_cast<node_t*>(head.next));
}
iterator end() noexcept {
return iterator();
}
const_iterator end() const noexcept {
return const_iterator();
}
const_iterator cbegin() const noexcept {
return const_iterator(begin());
}
const_iterator cbefore_begin() const noexcept {
return const_iterator(before_begin());
}
const_iterator cend() const noexcept {
return const_iterator();
}
public:
// capacity:
size_type max_size() const noexcept {
return SIZE_MAX / sizeof(node_t);
}
bool empty() const noexcept {
return head.next == nullptr;
}
void resize(size_type new_sz) {
resize(new_sz, T());
}
void resize(size_type new_sz, const value_type& val);
public:
// element access:
reference front() { return *begin(); }
const_reference front() const { return *begin(); }
// modifiers:
template <class... Args>
void emplace_front(Args&&... args) {
insert_after(before_begin(), T(std::forward<Args>(args)...));
}
void push_front(const T& val) {
insert_after(before_begin(), val);
}
void push_front(T&& val) {
insert_after(before_begin(), std::move(val));
}
void pop_front() {
erase_after(before_begin());
}
template <class... Args>
iterator emplace_after(const_iterator pos, Args&&... args) {
return insert_after(pos, T(std::forward<Args>(args)...));
}
public:
iterator insert_after(const_iterator pos, const T& val) {
return iterator(static_cast<node_t*>(__make_link(
static_cast<node_base*>(pos.node), create_node(val))));
}
iterator insert_after(const_iterator pos, T&& val) {
return iterator(static_cast<node_t*>(__make_link(
static_cast<node_base*>(pos.node),
create_node(std::move(val)))));
}
iterator insert_after(const_iterator pos, size_type n, const T& val) {
return insert_after_fill(pos, n, val);
}
// return iterator pointing to the last element inserted
template <class InputIt>
iterator insert_after_range(iterator pos,
InputIt first, InputIt last) {
return insert_after_range(pos, first, last,
integral<InputIt>());
}
iterator insert_after(const_iterator pos,
std::initializer_list<T> ilist) {
return insert_after_range(pos, ilist.begin(),
ilist.end(), false_type());
}
protected:
iterator insert_after_fill(const_iterator pos,
size_type n, const value_type& val) {
if(n == 0)
return iterator(static_cast<node_t*>(pos.node));
node_base* cur = static_cast<node_base*>(pos.node);
for(size_type i = 0; i < n; ++i)
cur = __make_link(cur, create_node(val));
return iterator(static_cast<node_t*>(cur));
}
template <class Integer>
iterator insert_after_range(const_iterator pos, Integer n,
Integer val, true_type) {
return insert_after_fill(pos, static_cast<size_type>(n),
static_cast<T>(val));
}
template <class InputIt>
iterator insert_after_range(const_iterator pos, InputIt first,
InputIt last, false_type) {
if(first == last)
return iterator(static_cast<node_t*>(pos.node));
node_base* cur = static_cast<node_base*>(pos.node);
while (first != last) {
cur = __make_link(cur, create_node(*first));
++first;
}
return iterator(static_cast<node_t*>(cur));
}
public:
iterator erase_after(const_iterator pos) {
if(pos.node == nullptr || pos.node->next == nullptr)
return end();
node_base* prev = static_cast<node_base*>(pos.node);
node_base* cur = prev->next;
prev->next = cur->next;
destroy(&(static_cast<node_t*>(cur)->data));
put_node(static_cast<node_t*>(cur));
return iterator(static_cast<node_t*>(prev->next));
}
iterator erase_after(const_iterator pos, iterator last) {
node_base* prev = static_cast<node_base*>(pos.node);
node_base* cur = prev->next;
node_base* last1 = static_cast<node_base*>(last.node);
while(cur != last1) {
prev->next = cur->next;
destroy(&(static_cast<node_t*>(cur)->data));
put_node(static_cast<node_t*>(cur));
cur = prev->next;
}
return last;
}
void swap(forward_list& x) {
MiniSTL::swap(head.next, x.head.next);
}
void clear() noexcept {
erase_after(before_begin(), end());
}
public:
// forward_list operations:
void splice_after(const_iterator pos, forward_list& x) {
node_base* cur = static_cast<node_base*>(pos.node);
node_base* prev = &x.head;
if(prev->next)
__splice_after(cur, prev);
}
void splice_after(const_iterator pos, forward_list&& x) {
node_base* cur = static_cast<node_base*>(pos.node);
node_base* prev = &x.head;
if(prev->next)
__splice_after(cur, prev);
}
void splice_after(const_iterator pos, forward_list& x,
const_iterator i) {
if(i == x.before_begin() || i == end()) return;
node_base* cur = static_cast<node_base*>(pos.node);
node_base* n1 = static_cast<node_base*>(i.node);
__splice_after(cur, __previous(&x.head, n1), n1);
}
void splice_after(const_iterator pos, forward_list&& x,
const_iterator i) {
if(i == x.before_begin() || i == end()) return;
node_base* cur = static_cast<node_base*>(pos.node);
node_base* n1 = static_cast<node_base*>(i.node);
__splice_after(cur, __previous(&x.head, n1), n1);
}
// Moves the elements in the range (first, last) from other into *this
void splice_after(const_iterator pos, forward_list& x,
const_iterator first,
const_iterator last) {
node_base* cur = static_cast<node_base*>(pos.node);
node_base* n1 = static_cast<node_base*>(first.node);
node_base* n2 = static_cast<node_base*>(last.node);
// n1 == n2 || n1->next == n2 : splice nothing
if(n1 == n2 || n1->next == n2)
return;
__splice_after(cur, n1, __previous(&x.head, n2));
}
void splice_after(const_iterator pos, forward_list&& x,
const_iterator first, const_iterator last) {
node_base* cur = static_cast<node_base*>(pos.node);
node_base* n1 = static_cast<node_base*>(first.node);
node_base* n2 = static_cast<node_base*>(last.node);
// n1 == n2 || n1->next == n2 : splice nothing
if(n1 == n2 || n1->next == n2)
return;
__splice_after(cur, n1, __previous(&x.head, n2));
}
void remove(const T& val);
template <class Predicate>
void remove_if(Predicate pred);
void unique();
template <class BinaryPredicate>
void unique(BinaryPredicate binary_pred);
void merge(forward_list& x);
void merge(forward_list&& x);
template <class Compare> void
merge(forward_list& x, Compare comp);
template <class Compare> void
merge(forward_list&& x, Compare comp);
void sort();
template <class Compare>
void sort(Compare comp);
void reverse() noexcept;
};
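// Example (illustrative only):
//   MiniSTL::forward_list<int> fl{3, 1, 2};
//   fl.push_front(0);   // 0 3 1 2
//   fl.sort();          // 0 1 2 3
//   fl.remove(2);       // 0 1 3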
template <class T, class Alloc>
bool operator==(const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
using const_iterator = typename forward_list<T, Alloc>::const_iterator;
const_iterator end1 = x.end();
const_iterator end2 = y.end();
const_iterator it1 = x.begin();
const_iterator it2 = y.begin();
while(it1 != end1 && it2 != end2 && *it1 == *it2) {
it1++;
it2++;
}
return it1 == end1 && it2 == end2;
}
template <class T, class Alloc>
bool operator< (const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
return lexicographical_compare(x.begin(), x.end(),
y.begin(), y.end());
}
template <class T, class Alloc>
bool operator!=(const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
return !(x == y);
}
template <class T, class Alloc>
bool operator> (const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
return y < x;
}
template <class T, class Alloc>
bool operator>=(const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
return !(x < y);
}
template <class T, class Alloc>
bool operator<=(const forward_list<T,Alloc>& x, const forward_list<T,Alloc>& y) {
return !(y < x);
}
template <class T, class Alloc>
void swap(forward_list<T,Alloc>& x, forward_list<T,Alloc>& y) {
x.swap(y);
}
template <class T, class Alloc>
forward_list<T, Alloc>& forward_list<T, Alloc>::operator=(const forward_list& x) {
if(&x != this) {
// another implement:
// forward_list tmp(x);
// this->swap(tmp);
// prev for erase_after and insert_after
iterator prev = before_begin();
iterator first1 = begin();
const_iterator first2 = x.begin();
iterator last1 = end();
const_iterator last2 = x.end();
while(first1 != last1 && first2 != last2) {
prev = first1;
*first1++ = *first2++;
}
if(first2 == last2)
erase_after(prev, last1);
else
insert_after_range(prev, first2, last2);
}
return *this;
}
template <class T, class Alloc>
forward_list<T, Alloc>& forward_list<T, Alloc>::operator=(std::initializer_list<T> ilist) {
iterator prev = before_begin();
iterator first1 = begin();
const T* first2 = ilist.begin();
iterator last1 = end();
const T* last2 = ilist.end();
while(first1 != last1 && first2 != last2) {
prev = first1;
*first1++ = *first2++;
}
if(first2 == last2)
erase_after(prev, last1);
else
insert_after_range(prev, first2, last2);
return *this;
}
template <class T, class Alloc>
void forward_list<T, Alloc>::fill_assign(size_type n, const T& val) {
iterator prev = before_begin();
iterator cur = begin();
for(;cur != end() && n > 0;++cur, --n) {
prev = cur;
*cur = val;
}
if(n > 0)
insert_after_fill(prev, n, val);
else
erase_after(prev, end());
}
template <class T, class Alloc>
template<class InputIt>
void forward_list<T, Alloc>::assign_dispatch(InputIt first, InputIt last, false_type) {
iterator prev = before_begin();
iterator cur = begin();
for(;cur != end() && first != last;++cur, ++first) {
prev = cur;
*cur = *first;
}
if(first != last)
insert_after_range(prev, first, last);
else
erase_after(prev, end());
}
template <class T, class Alloc>
void forward_list<T, Alloc>::resize(size_type new_sz, const T& val) {
iterator prev = before_begin();
iterator cur = begin();
for(;cur != end() && new_sz > 0;++cur, --new_sz)
prev = cur;
if(new_sz == 0)
erase_after(prev, end());
else
insert_after_fill(prev, new_sz, val);
}
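// Erase every element equal to val. prev always trails cur by one node so
// that erase_after(prev) can unlink the element cur points to.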
template <class T, class Alloc>
void forward_list<T, Alloc>::remove(const T& val) {
iterator prev = before_begin();
iterator cur = begin();
iterator last = end();
while(cur != last) {
if(*cur == val) {
erase_after(prev);
cur = prev;
cur++;
} else {
prev = cur++;
}
}
}
template <class T, class Alloc>
template <class Predicate>
void forward_list<T, Alloc>::remove_if(Predicate pred) {
iterator prev = before_begin();
iterator cur = begin();
iterator last = end();
while(cur != last) {
if(pred(*cur)) {
erase_after(prev);
cur = prev;
cur++;
} else {
prev = cur++;
}
}
}
template <class T, class Alloc>
void forward_list<T, Alloc>::unique() {
iterator first = begin();
iterator last = end();
if(first == last)
return;
iterator next = first;
++next;
while(next != last) {
if(*first == *next) {
erase_after(first);
next = first;
}
else {
first = next;
}
++next;
}
}
template <class T, class Alloc>
template <class BinaryPredicate>
void forward_list<T, Alloc>::unique(BinaryPredicate binary_pred) {
iterator first = begin();
iterator last = end();
if(first == last)
return;
iterator next = first;
++next;
while(next != last) {
if(binary_pred(*first, *next)) {
erase_after(first);
next = first;
}
else {
first = next;
}
++next;
}
}
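// Merge the (already sorted) list x into *this, keeping the result sorted.
// Nodes are moved by splicing, so no element is copied and x ends up empty.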
template <class T, class Alloc>
void forward_list<T, Alloc>::merge(forward_list& x) {
const_iterator prev1 = cbefore_begin();
const_iterator first1 = cbegin();
const_iterator last1 = cend();
const_iterator prev2 = x.cbefore_begin();
const_iterator first2 = x.cbegin();
    const_iterator last2 = x.cend();
while(first1 != last1 && first2 != last2) {
if(*first2 < *first1) {
splice_after(prev1, x, first2);
++prev1;
first2 = prev2;
++first2;
} else {
prev1 = first1;
++first1;
}
}
if(first2 != last2)
splice_after(prev1, x, x.before_begin(), last2);
}
template <class T, class Alloc>
void forward_list<T, Alloc>::merge(forward_list&& x) {
const_iterator prev1 = cbefore_begin();
const_iterator first1 = cbegin();
const_iterator last1 = cend();
const_iterator prev2 = x.cbefore_begin();
const_iterator first2 = x.cbegin();
    const_iterator last2 = x.cend();
while(first1 != last1 && first2 != last2) {
if(*first2 < *first1) {
splice_after(prev1, x, first2);
++prev1;
first2 = prev2;
++first2;
} else {
prev1 = first1;
++first1;
}
}
if(first2 != last2)
splice_after(prev1, x, x.before_begin(), last2);
}
template <class T, class Alloc>
template <class Compare>
void forward_list<T, Alloc>::merge(forward_list& x, Compare comp) {
const_iterator prev1 = cbefore_begin();
const_iterator first1 = cbegin();
const_iterator last1 = cend();
const_iterator prev2 = x.cbefore_begin();
const_iterator first2 = x.cbegin();
    const_iterator last2 = x.cend();
while(first1 != last1 && first2 != last2) {
if(comp(*first2 , *first1)) {
splice_after(prev1, x, first2);
++prev1;
first2 = prev2;
++first2;
} else {
prev1 = first1;
++first1;
}
}
if(first2 != last2)
splice_after(prev1, x, x.before_begin(), last2);
}
template <class T, class Alloc>
template <class Compare>
void forward_list<T, Alloc>::merge(forward_list&& x, Compare comp) {
const_iterator prev1 = cbefore_begin();
const_iterator first1 = cbegin();
const_iterator last1 = cend();
const_iterator prev2 = x.cbefore_begin();
const_iterator first2 = x.cbegin();
    const_iterator last2 = x.cend();
while(first1 != last1 && first2 != last2) {
if(comp(*first2 , *first1)) {
splice_after(prev1, x, first2);
++prev1;
first2 = prev2;
++first2;
} else {
prev1 = first1;
++first1;
}
}
if(first2 != last2)
splice_after(prev1, x, x.before_begin(), last2);
}
// iterative mergesort, O(n log n); a usage sketch follows the two sort overloads below
template <class T, class Alloc>
void forward_list<T, Alloc>::sort() {
// empty or size = 1
if(head.next == nullptr || head.next->next == nullptr)
return;
forward_list carry;
forward_list counter[64];
int fill = 0;
while (!empty()) {
carry.splice_after(carry.before_begin(), *this, begin());
int i = 0;
while(i < fill && !counter[i].empty()) {
counter[i].merge(carry);
carry.swap(counter[i]);
i++;
}
carry.swap(counter[i]);
if (i == fill)
++fill;
}
for (int i = 1; i < fill; ++i)
counter[i].merge(counter[i-1]);
swap(counter[fill-1]);
}
template <class T, class Alloc>
template <class Compare>
void forward_list<T, Alloc>::sort(Compare comp) {
if(head.next == nullptr || head.next->next == nullptr)
return;
forward_list carry;
forward_list counter[64];
int fill = 0;
while (!empty()) {
carry.splice_after(carry.before_begin(), *this, begin());
int i = 0;
while(i < fill && !counter[i].empty()) {
counter[i].merge(carry, comp);
carry.swap(counter[i]);
i++;
}
carry.swap(counter[i]);
if (i == fill)
++fill;
}
for (int i = 1; i < fill; ++i)
counter[i].merge(counter[i-1], comp);
swap(counter[fill-1]);
}
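// Usage sketch (hypothetical helper, not part of the original MiniSTL sources):
// exercises the two sort overloads defined above. Assumes this forward_list
// provides the usual push_front, as std::forward_list does.
inline void forward_list_sort_example() {
    forward_list<int> fl;
    fl.push_front(2);
    fl.push_front(1);
    fl.push_front(3);                             // list is now 3 -> 1 -> 2
    fl.sort();                                    // mergesort above, ascending: 1 -> 2 -> 3
    fl.sort([](int a, int b) { return a > b; });  // Compare overload, descending: 3 -> 2 -> 1
}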
template <class T, class Alloc>
void forward_list<T, Alloc>::reverse() noexcept {
node_base* tmp = static_cast<node_base*>(&head)->next;
if(tmp)
__reverse(tmp);
}
// Specialization of insert_iterator so that insertions
// will be constant time instead of linear time.
template <class T, class Alloc>
class insert_iterator<forward_list<T, Alloc> > {
protected:
using Container = forward_list<T, Alloc>;
    using iterator_type = typename Container::iterator;
Container* container;
iterator_type iter;
public:
using container_type = Container;
using iterator_category = output_iterator_tag;
using value_type = void;
using difference_type = void;
using pointer = void;
using reference = void;
insert_iterator(Container& c, iterator_type i) : container(&c) {
iter = c.before_begin();
while(iter.node->next != i.node)
++iter;
}
insert_iterator<Container>&
operator=(const typename Container::value_type& val) {
iter = container->insert_after(iter, val);
return *this;
}
insert_iterator<Container>& operator*() { return *this; }
insert_iterator<Container>& operator++() { return *this; }
insert_iterator<Container>& operator++(int) { return *this; }
};
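// Usage sketch (hypothetical helper, not in the original file): with the
// specialization above, copying through an inserter costs O(1) per element,
// because operator= forwards to insert_after at a cached predecessor instead of
// walking the list before every insertion. Assumes the primary insert_iterator
// template and forward_list::push_front exist as in the standard library.
inline void forward_list_inserter_example() {
    forward_list<int> dst;
    dst.push_front(0);                                  // dst: 0
    insert_iterator<forward_list<int>> out(dst, dst.begin());
    int src[] = {1, 2, 3};
    for (int v : src)
        *out++ = v;                                     // dst becomes 1 -> 2 -> 3 -> 0
}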
} // MiniSTL
| 28.986842
| 91
| 0.594795
|
chestnutme
|
3376efb5693ae250d93c67378b963845078e2e73
| 2,407
|
cpp
|
C++
|
JanuaEngine/JanuaEngine/Query.cpp
|
gigc/Janua
|
cbcc8ad0e9501e1faef5b37a964769970aa3d236
|
[
"MIT",
"Unlicense"
] | 98
|
2015-01-13T16:23:23.000Z
|
2022-02-14T21:51:07.000Z
|
JanuaEngine/JanuaEngine/Query.cpp
|
gigc/Janua
|
cbcc8ad0e9501e1faef5b37a964769970aa3d236
|
[
"MIT",
"Unlicense"
] | 1
|
2016-06-30T22:07:54.000Z
|
2016-06-30T22:07:54.000Z
|
JanuaEngine/JanuaEngine/Query.cpp
|
gigc/Janua
|
cbcc8ad0e9501e1faef5b37a964769970aa3d236
|
[
"MIT",
"Unlicense"
] | 13
|
2015-08-26T11:19:08.000Z
|
2021-07-12T03:41:50.000Z
|
#include "StdAfx.h"
#include "Query.h"
using std::logic_error;
Query::Query(const PVSDatabase& pvsDatabase) : m_pvsDatabase(pvsDatabase)
{
}
const QueryResult Query::getPotentiallyVisibleObjectsFromRegion( const Vector3f position )
{
//TODO: Optimize this.
vector<shared_ptr<Cell>> cells;
const Vector3f voxelSize = m_pvsDatabase.getVoxelSize();
AABB sceneAABB = m_pvsDatabase.getSceneAABB();
int cellIndex = -1;
//For each existing cell.
m_pvsDatabase.getAllCells(cells);
for( unsigned int c = 0 ; c < cells.size() ; ++c)
{
Point3i minIndex = cells[c]->minPoint;
Point3i maxIndex = cells[c]->maxPoint;
AABB cellAABB( Vector3f (
minIndex.x * voxelSize.x + sceneAABB.minPoint.x,
minIndex.y * voxelSize.y + sceneAABB.minPoint.y,
minIndex.z * voxelSize.z + sceneAABB.minPoint.z ),
Vector3f(
maxIndex.x * voxelSize.x + sceneAABB.minPoint.x + voxelSize.x,
maxIndex.y * voxelSize.y + sceneAABB.minPoint.y + voxelSize.y,
maxIndex.z * voxelSize.z + sceneAABB.minPoint.z + voxelSize.z));
		//If the position is inside the cell.
if( position.x >= cellAABB.minPoint.x && position.x <= cellAABB.maxPoint.x &&
position.y >= cellAABB.minPoint.y && position.y <= cellAABB.maxPoint.y &&
position.z >= cellAABB.minPoint.z && position.z <= cellAABB.maxPoint.z )
{
cellIndex = c;
break;
}
}
vector<int> modelIds;
if( cellIndex == -1 )
{
return QueryResult(modelIds);
}
else//If position was inside a cell.
{
vector<int> cellModelIds;
const Cell& baseCell = *(cells[cellIndex]);
baseCell.getModelsIds(cellModelIds);
//Add the models of the base cell.
modelIds.insert(modelIds.end(), cellModelIds.begin(), cellModelIds.end() ) ;
//Get the other visible cells from that particular cell.
vector<shared_ptr<Cell>> pvsCells( baseCell.getVisibleCells() );
//For each cell in the PVS
for( unsigned int i = 0 ; i < pvsCells.size() ; i++)
{
cellModelIds.clear();
//Add the models to the pvs
pvsCells[i]->getModelsIds(cellModelIds);
//insert the models.
modelIds.insert(modelIds.end(), cellModelIds.begin(), cellModelIds.end() ) ;
}
}
//Sort and remove duplicates.
std::sort( modelIds.begin(), modelIds.end() );
modelIds.erase( unique( modelIds.begin(), modelIds.end() ), modelIds.end() );
//Return visible set.
return QueryResult(modelIds);
}
Query::~Query(void)
{
}
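//Sketch only (hypothetical helper, not part of the original Query.cpp): the loop above
//rebuilds each cell AABB from its voxel indices (index * voxelSize + sceneAABB.minPoint)
//and then tests containment. The inverse mapping, which the "TODO: Optimize this" hints
//could replace the linear scan over cells, is one division per axis. Plain floats are
//used here to avoid assuming any Janua API; truncation equals floor because any point
//inside the scene AABB satisfies position >= sceneMin.
static inline int worldToVoxelIndex(float position, float sceneMin, float voxelSize)
{
	//Example: position = 3.5, sceneMin = 0.0, voxelSize = 1.0 -> voxel index 3.
	return (int)((position - sceneMin) / voxelSize);
}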
| 25.606383
| 90
| 0.678438
|
gigc
|
337b5c28812214a7f1f360c6780e64aacda08478
| 534
|
hpp
|
C++
|
src/Griddly/Core/Observers/TensorObservationInterface.hpp
|
Thaigun/Griddly
|
de5972a608a2928172510a0ac81a977c48af6b1f
|
[
"MIT"
] | null | null | null |
src/Griddly/Core/Observers/TensorObservationInterface.hpp
|
Thaigun/Griddly
|
de5972a608a2928172510a0ac81a977c48af6b1f
|
[
"MIT"
] | null | null | null |
src/Griddly/Core/Observers/TensorObservationInterface.hpp
|
Thaigun/Griddly
|
de5972a608a2928172510a0ac81a977c48af6b1f
|
[
"MIT"
] | null | null | null |
#pragma once
#include <vector>
#include "ObservationInterface.hpp"
namespace griddly {
class TensorObservationInterface : public ObservationInterface<uint8_t> {
public:
virtual ~TensorObservationInterface() = default;
virtual std::vector<uint32_t> getShape() const {
return observationShape_;
}
virtual std::vector<uint32_t> getStrides() const {
return observationStrides_;
}
protected:
std::vector<uint32_t> observationShape_{};
std::vector<uint32_t> observationStrides_{};
};
} // namespace griddly
| 19.777778
| 73
| 0.745318
|
Thaigun
|
337c9338ea7d7e7057e4307cc9e32853b49e8dfe
| 2,689
|
cpp
|
C++
|
src/test/fuzz/netbase_dns_lookup.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
src/test/fuzz/netbase_dns_lookup.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
src/test/fuzz/netbase_dns_lookup.cpp
|
BakedInside/beanscore
|
daa9b2ddbfd3305881749bda7f32146738154260
|
[
"MIT"
] | null | null | null |
// Copyright (c) 2021 The Beans Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <netaddress.h>
#include <netbase.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <cstdint>
#include <string>
#include <vector>
FUZZ_TARGET(netbase_dns_lookup)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
const std::string name = fuzzed_data_provider.ConsumeRandomLengthString(512);
const unsigned int max_results = fuzzed_data_provider.ConsumeIntegral<unsigned int>();
const bool allow_lookup = fuzzed_data_provider.ConsumeBool();
const uint16_t default_port = fuzzed_data_provider.ConsumeIntegral<uint16_t>();
auto fuzzed_dns_lookup_function = [&](const std::string&, bool) {
std::vector<CNetAddr> resolved_addresses;
while (fuzzed_data_provider.ConsumeBool()) {
resolved_addresses.push_back(ConsumeNetAddr(fuzzed_data_provider));
}
return resolved_addresses;
};
{
std::vector<CNetAddr> resolved_addresses;
if (LookupHost(name, resolved_addresses, max_results, allow_lookup, fuzzed_dns_lookup_function)) {
for (const CNetAddr& resolved_address : resolved_addresses) {
assert(!resolved_address.IsInternal());
}
}
assert(resolved_addresses.size() <= max_results || max_results == 0);
}
{
CNetAddr resolved_address;
if (LookupHost(name, resolved_address, allow_lookup, fuzzed_dns_lookup_function)) {
assert(!resolved_address.IsInternal());
}
}
{
std::vector<CService> resolved_services;
if (Lookup(name, resolved_services, default_port, allow_lookup, max_results, fuzzed_dns_lookup_function)) {
for (const CNetAddr& resolved_service : resolved_services) {
assert(!resolved_service.IsInternal());
}
}
assert(resolved_services.size() <= max_results || max_results == 0);
}
{
CService resolved_service;
if (Lookup(name, resolved_service, default_port, allow_lookup, fuzzed_dns_lookup_function)) {
assert(!resolved_service.IsInternal());
}
}
{
CService resolved_service = LookupNumeric(name, default_port, fuzzed_dns_lookup_function);
assert(!resolved_service.IsInternal());
}
{
CSubNet resolved_subnet;
if (LookupSubNet(name, resolved_subnet, fuzzed_dns_lookup_function)) {
assert(resolved_subnet.IsValid());
}
}
}
| 37.347222
| 115
| 0.679807
|
BakedInside
|
337e31563e947fcc2bd6fadea5323da548cff11d
| 862
|
cpp
|
C++
|
src/veil/ringct/temprecipient.cpp
|
PeterL73/veil
|
2825d735275cd592b1fd5207b0dfdca2d4e3e78c
|
[
"MIT"
] | 124
|
2018-12-25T00:01:18.000Z
|
2021-12-26T19:38:43.000Z
|
src/veil/ringct/temprecipient.cpp
|
PeterL73/veil
|
2825d735275cd592b1fd5207b0dfdca2d4e3e78c
|
[
"MIT"
] | 702
|
2018-12-16T18:07:18.000Z
|
2022-03-18T16:52:14.000Z
|
src/veil/ringct/temprecipient.cpp
|
PeterL73/veil
|
2825d735275cd592b1fd5207b0dfdca2d4e3e78c
|
[
"MIT"
] | 151
|
2018-12-13T07:33:34.000Z
|
2022-01-29T11:35:23.000Z
|
// Copyright (c) 2017-2019 The Particl Core developers
// Copyright (c) 2018-2019 Veil developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <veil/ringct/temprecipient.h>
bool CTempRecipient::ApplySubFee(CAmount nFee, size_t nSubtractFeeFromAmount, bool &fFirst)
{
if (nType != OUTPUT_DATA) {
nAmount = nAmountSelected;
if (fSubtractFeeFromAmount && !fExemptFeeSub) {
nAmount -= nFee / nSubtractFeeFromAmount; // Subtract fee equally from each selected recipient
if (fFirst) { // first receiver pays the remainder not divisible by output count
fFirst = false;
nAmount -= nFee % nSubtractFeeFromAmount;
}
return true;
}
}
return false;
}
| 35.916667
| 106
| 0.663573
|
PeterL73
|
338126597488eab1b0b7c1f734d799768a18b796
| 6,939
|
cpp
|
C++
|
src/Open3D/Visualization/Visualizer/ViewTrajectory.cpp
|
Forest75/Open3D
|
61b90a6e06a2c209ad2b9f1c57fbd5f21e879dae
|
[
"MIT"
] | 113
|
2018-11-12T03:32:52.000Z
|
2022-03-29T13:58:54.000Z
|
src/Open3D/Visualization/Visualizer/ViewTrajectory.cpp
|
Forest75/Open3D
|
61b90a6e06a2c209ad2b9f1c57fbd5f21e879dae
|
[
"MIT"
] | 3
|
2018-10-19T12:09:57.000Z
|
2020-04-22T11:55:54.000Z
|
src/Open3D/Visualization/Visualizer/ViewTrajectory.cpp
|
Forest75/Open3D
|
61b90a6e06a2c209ad2b9f1c57fbd5f21e879dae
|
[
"MIT"
] | 27
|
2018-10-16T20:01:18.000Z
|
2021-07-26T08:02:20.000Z
|
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include "Open3D/Visualization/Visualizer/ViewTrajectory.h"
#include <json/json.h>
#include <Eigen/Dense>
#include "Open3D/Utility/Console.h"
namespace open3d {
namespace visualization {
const int ViewTrajectory::INTERVAL_MAX = 59;
const int ViewTrajectory::INTERVAL_MIN = 0;
const int ViewTrajectory::INTERVAL_STEP = 1;
const int ViewTrajectory::INTERVAL_DEFAULT = 29;
void ViewTrajectory::ComputeInterpolationCoefficients() {
if (view_status_.empty()) {
return;
}
// num_of_status is used frequently, give it an alias
int n = int(view_status_.size());
coeff_.resize(n);
    // Consider ViewStatus as a point in a 17-dimensional space; a reduced 1-D
    // sketch of this tangent solve follows this function.
for (int i = 0; i < n; i++) {
coeff_[i].setZero();
coeff_[i].block<17, 1>(0, 0) = view_status_[i].ConvertToVector17d();
}
// Handle degenerate cases first
if (n == 1) {
return;
} else if (n == 2) {
coeff_[0].block<17, 1>(0, 1) =
coeff_[1].block<17, 1>(0, 0) - coeff_[0].block<17, 1>(0, 0);
coeff_[1].block<17, 1>(0, 1) =
coeff_[0].block<17, 1>(0, 0) - coeff_[1].block<17, 1>(0, 0);
return;
}
Eigen::MatrixXd A(n, n);
Eigen::VectorXd b(n);
// Set matrix A first
A.setZero();
// Set first and last line
if (is_loop_) {
A(0, 0) = 4.0;
A(0, 1) = 1.0;
A(0, n - 1) = 1.0;
A(n - 1, 0) = 1.0;
A(n - 1, n - 2) = 1.0;
A(n - 1, n - 1) = 4.0;
} else {
A(0, 0) = 2.0;
A(0, 1) = 1.0;
A(n - 1, n - 2) = 1.0;
A(n - 1, n - 1) = 2.0;
}
// Set middle part
for (int i = 1; i < n - 1; i++) {
A(i, i) = 4.0;
A(i, i - 1) = 1.0;
A(i, i + 1) = 1.0;
}
auto llt_solver = A.llt();
for (int k = 0; k < 17; k++) {
// Now we work for the k-th coefficient
b.setZero();
// Set first and last line
if (is_loop_) {
b(0) = 3.0 * (coeff_[1](k, 0) - coeff_[n - 1](k, 0));
b(n - 1) = 3.0 * (coeff_[0](k, 0) - coeff_[n - 2](k, 0));
} else {
b(0) = 3.0 * (coeff_[1](k, 0) - coeff_[0](k, 0));
b(n - 1) = 3.0 * (coeff_[n - 1](k, 0) - coeff_[n - 2](k, 0));
}
// Set middle part
for (int i = 1; i < n - 1; i++) {
b(i) = 3.0 * (coeff_[i + 1](k, 0) - coeff_[i - 1](k, 0));
}
// Solve the linear system
Eigen::VectorXd x = llt_solver.solve(b);
for (int i = 0; i < n; i++) {
int i1 = (i + 1) % n;
coeff_[i](k, 1) = x(i);
coeff_[i](k, 2) = 3.0 * (coeff_[i1](k, 0) - coeff_[i](k, 0)) -
2.0 * x(i) - x(i1);
coeff_[i](k, 3) =
2.0 * (coeff_[i](k, 0) - coeff_[i1](k, 0)) + x(i) + x(i1);
}
}
}
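// Minimal illustrative sketch (hypothetical function, not part of the Open3D API):
// the same tangent system as above, reduced to one dimension and the non-loop case,
// so the structure of A, b and the cubic coefficients is easier to see. For knot
// values y_0..y_{n-1} it returns, per segment i, coefficients (c0, c1, c2, c3) of
// c0 + c1*t + c2*t^2 + c3*t^3 with t in [0, 1). Assumes <vector> is available via
// the headers already included.
static std::vector<Eigen::Vector4d> ExampleCubicSplineCoefficients1D(
        const Eigen::VectorXd &y) {
    const int n = int(y.size());
    std::vector<Eigen::Vector4d> coeff(n, Eigen::Vector4d::Zero());
    if (n < 3) {
        return coeff;  // degenerate cases are handled separately above
    }
    Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n);
    Eigen::VectorXd b(n);
    A(0, 0) = 2.0;
    A(0, 1) = 1.0;
    A(n - 1, n - 2) = 1.0;
    A(n - 1, n - 1) = 2.0;
    b(0) = 3.0 * (y(1) - y(0));
    b(n - 1) = 3.0 * (y(n - 1) - y(n - 2));
    for (int i = 1; i < n - 1; i++) {
        A(i, i) = 4.0;
        A(i, i - 1) = 1.0;
        A(i, i + 1) = 1.0;
        b(i) = 3.0 * (y(i + 1) - y(i - 1));
    }
    Eigen::VectorXd x = A.llt().solve(b);  // tangents at the knots
    for (int i = 0; i + 1 < n; i++) {
        coeff[i](0) = y(i);
        coeff[i](1) = x(i);
        coeff[i](2) = 3.0 * (y(i + 1) - y(i)) - 2.0 * x(i) - x(i + 1);
        coeff[i](3) = 2.0 * (y(i) - y(i + 1)) + x(i) + x(i + 1);
    }
    return coeff;
}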
std::tuple<bool, ViewParameters> ViewTrajectory::GetInterpolatedFrame(
size_t k) {
ViewParameters status;
if (view_status_.empty() || k >= NumOfFrames()) {
return std::make_tuple(false, status);
}
size_t segment_index = k / (interval_ + 1);
double segment_fraction =
double(k - segment_index * (interval_ + 1)) / double(interval_ + 1);
Eigen::Vector4d s(1.0, segment_fraction,
segment_fraction * segment_fraction,
segment_fraction * segment_fraction * segment_fraction);
ViewParameters::Vector17d status_in_vector = coeff_[segment_index] * s;
status.ConvertFromVector17d(status_in_vector);
return std::make_tuple(true, status);
}
bool ViewTrajectory::ConvertToJsonValue(Json::Value &value) const {
Json::Value trajectory_array;
for (const auto &status : view_status_) {
Json::Value status_object;
if (status.ConvertToJsonValue(status_object) == false) {
return false;
}
trajectory_array.append(status_object);
}
value["class_name"] = "ViewTrajectory";
value["version_major"] = 1;
value["version_minor"] = 0;
value["is_loop"] = is_loop_;
value["interval"] = interval_;
value["trajectory"] = trajectory_array;
return true;
}
bool ViewTrajectory::ConvertFromJsonValue(const Json::Value &value) {
if (value.isObject() == false) {
utility::LogWarning(
"ViewTrajectory read JSON failed: unsupported json format.");
return false;
}
if (value.get("class_name", "").asString() != "ViewTrajectory" ||
value.get("version_major", 1).asInt() != 1 ||
value.get("version_minor", 0).asInt() != 0) {
utility::LogWarning(
"ViewTrajectory read JSON failed: unsupported json format.");
return false;
}
is_loop_ = value.get("is_loop", false).asBool();
interval_ = value.get("interval", 29).asInt();
const Json::Value &trajectory_array = value["trajectory"];
if (trajectory_array.size() == 0) {
utility::LogWarning(
"ViewTrajectory read JSON failed: empty trajectory.");
return false;
}
view_status_.resize(trajectory_array.size());
for (int i = 0; i < (int)trajectory_array.size(); i++) {
const Json::Value &status_object = trajectory_array[i];
ViewParameters status;
if (status.ConvertFromJsonValue(status_object) == false) {
return false;
}
view_status_[i] = status;
}
return true;
}
} // namespace visualization
} // namespace open3d
| 34.695
| 80
| 0.55397
|
Forest75
|
338553e158bd719f404d587c0a38468e4fa66775
| 6,517
|
cpp
|
C++
|
src/apps/spout/main.cpp
|
sgct/sgct
|
0a4596ff2ea90c9f0dfbddd7b14f7ecee783baf1
|
[
"libpng-2.0"
] | 7
|
2020-02-11T15:03:48.000Z
|
2022-01-08T04:04:58.000Z
|
src/apps/spout/main.cpp
|
sgct/sgct
|
0a4596ff2ea90c9f0dfbddd7b14f7ecee783baf1
|
[
"libpng-2.0"
] | 28
|
2020-01-24T11:24:43.000Z
|
2022-03-03T15:48:10.000Z
|
src/apps/spout/main.cpp
|
sgct/sgct
|
0a4596ff2ea90c9f0dfbddd7b14f7ecee783baf1
|
[
"libpng-2.0"
] | 5
|
2020-02-19T08:26:25.000Z
|
2022-01-08T04:05:10.000Z
|
/*****************************************************************************************
* SGCT *
* Simple Graphics Cluster Toolkit *
* *
* Copyright (c) 2012-2021 *
* For conditions of distribution and use, see copyright notice in LICENSE.md *
****************************************************************************************/
#include <sgct/sgct.h>
#include <sgct/opengl.h>
#include <sgct/utils/box.h>
#include <fmt/format.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <SpoutLibrary.h>
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
namespace {
std::unique_ptr<sgct::utils::Box> box;
GLint matrixLoc = -1;
GLint flipLoc = -1;
GLuint texture = 0;
SPOUTHANDLE receiver = nullptr;
char senderName[256];
unsigned int width;
unsigned int height;
bool initialized = false;
// variables to share across cluster
double currentTime = 0.0;
constexpr const char* vertexShader = R"(
#version 330 core
layout(location = 0) in vec2 texCoords;
layout(location = 1) in vec3 normals;
layout(location = 2) in vec3 vertPositions;
uniform mat4 mvp;
uniform int flip;
out vec2 uv;
void main() {
// Output position of the vertex, in clip space : MVP * position
gl_Position = mvp * vec4(vertPositions, 1.0);
uv.x = texCoords.x;
if (flip == 0) {
uv.y = texCoords.y;
}
else {
uv.y = 1.0 - texCoords.y;
}
})";
constexpr const char* fragmentShader = R"(
#version 330 core
uniform sampler2D tex;
in vec2 uv;
out vec4 color;
void main() { color = texture(tex, uv); }
)";
} // namespace
using namespace sgct;
bool bindSpout() {
const bool creationSuccess = receiver->CreateReceiver(senderName, width, height);
if (!initialized && creationSuccess) {
Log::Info(fmt::format(
"Spout: Initing {}x{} texture from '{}'", width, height, senderName
));
initialized = true;
}
if (initialized) {
        const bool receiveSuccess = receiver->ReceiveTexture(senderName, width, height);
        if (receiveSuccess) {
return receiver->BindSharedTexture();
}
else {
Log::Info("Spout disconnected");
// reset if disconnected
initialized = false;
senderName[0] = '\0';
receiver->ReleaseReceiver();
}
}
return false;
}
void draw(RenderData data) {
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
constexpr const double Speed = 0.44;
//create scene transform (animation)
glm::mat4 scene = glm::translate(glm::mat4(1.f), glm::vec3(0.f, 0.f, -3.f));
scene = glm::rotate(
scene,
static_cast<float>(currentTime * Speed),
glm::vec3(0.f, -1.f, 0.f)
);
scene = glm::rotate(
scene,
static_cast<float>(currentTime * (Speed / 2.0)),
glm::vec3(1.f, 0.f, 0.f)
);
const glm::mat4 mvp = glm::make_mat4(data.modelViewProjectionMatrix.values) * scene;
glActiveTexture(GL_TEXTURE0);
// spout init
bool spoutStatus = false;
// check if spout supported (DX11 interop)
if (glfwExtensionSupported("WGL_NV_DX_interop2")) {
spoutStatus = bindSpout();
}
const ShaderProgram& prog = ShaderManager::instance().shaderProgram("xform");
prog.bind();
// DirectX textures are flipped around the Y axis compared to OpenGL
if (!spoutStatus) {
glUniform1i(flipLoc, 0);
glBindTexture(GL_TEXTURE_2D, texture);
}
else {
glUniform1i(flipLoc, 1);
}
glUniformMatrix4fv(matrixLoc, 1, GL_FALSE, glm::value_ptr(mvp));
box->draw();
prog.unbind();
if (spoutStatus) {
receiver->UnBindSharedTexture();
}
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
}
void preSync() {
if (Engine::instance().isMaster()) {
currentTime = Engine::getTime();
}
}
void initOGL(GLFWwindow*) {
// setup spout
senderName[0] = '\0';
receiver = GetSpout();
// set background
Engine::instance().setClearColor(vec4{ 0.3f, 0.3f, 0.3f, 0.f });
//texture = TextureManager::instance().loadTexture("box.png", true);
box = std::make_unique<utils::Box>(2.f, utils::Box::TextureMappingMode::Regular);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
ShaderManager::instance().addShaderProgram("xform", vertexShader, fragmentShader);
const ShaderProgram& prog = ShaderManager::instance().shaderProgram("xform");
prog.bind();
matrixLoc = glGetUniformLocation(prog.id(), "mvp");
glUniform1i(glGetUniformLocation(prog.id(), "tex"), 0);
flipLoc = glGetUniformLocation(prog.id(), "flip");
glUniform1i(flipLoc, 0);
prog.unbind();
}
std::vector<std::byte> encode() {
std::vector<std::byte> data;
serializeObject(data, currentTime);
return data;
}
void decode(const std::vector<std::byte>& data, unsigned int pos) {
deserializeObject(data, pos, currentTime);
}
void cleanup() {
box = nullptr;
if (receiver) {
receiver->ReleaseReceiver();
receiver->Release();
}
}
void keyboard(Key key, Modifier, Action action, int) {
if (key == Key::Esc && action == Action::Press) {
Engine::instance().terminate();
}
}
int main(int argc, char* argv[]) {
std::vector<std::string> arg(argv + 1, argv + argc);
Configuration config = parseArguments(arg);
config::Cluster cluster = loadCluster(config.configFilename);
if (!cluster.success) {
return -1;
}
Engine::Callbacks callbacks;
callbacks.initOpenGL = initOGL;
callbacks.preSync = preSync;
callbacks.encode = encode;
callbacks.decode = decode;
callbacks.draw = draw;
callbacks.cleanup = cleanup;
callbacks.keyboard = keyboard;
try {
Engine::create(cluster, callbacks, config);
}
catch (const std::runtime_error & e) {
Log::Error(e.what());
Engine::destroy();
return EXIT_FAILURE;
}
Engine::instance().render();
Engine::destroy();
exit(EXIT_SUCCESS);
}
| 26.278226
| 90
| 0.582937
|
sgct
|
338687471381a0b4808315b73141ebda5a4dd174
| 2,246
|
cc
|
C++
|
src/asset/Music.cc
|
shiromino/shiromino
|
10e9bc650417ea05d5990836c64709af3f82ec5e
|
[
"CC-BY-4.0"
] | 23
|
2020-07-12T22:49:10.000Z
|
2022-03-15T17:58:22.000Z
|
src/asset/Music.cc
|
shiromino/shiromino
|
10e9bc650417ea05d5990836c64709af3f82ec5e
|
[
"CC-BY-4.0"
] | 64
|
2020-07-12T22:27:53.000Z
|
2022-01-02T23:10:24.000Z
|
src/asset/Music.cc
|
shiromino/shiromino
|
10e9bc650417ea05d5990836c64709af3f82ec5e
|
[
"CC-BY-4.0"
] | 8
|
2020-08-30T04:16:17.000Z
|
2021-06-28T17:12:06.000Z
|
#include "asset/Music.h"
#include <cassert>
#include <iostream>
#include <algorithm>
namespace Shiro {
MusicAssetLoader::MusicAssetLoader(const std::filesystem::path &assetDirectory) :
assetDirectory(assetDirectory) {}
std::unique_ptr<Asset> MusicAssetLoader::create(const std::filesystem::path &location) const {
return std::unique_ptr<Shiro::Asset>(new MusicAsset(location));
}
bool MusicAssetLoader::load(Asset &asset) const {
assert(asset.getType() == AssetType::music);
MusicAsset &musicAsset = static_cast<MusicAsset&>(asset);
musicAsset.data = Mix_LoadMUS((assetDirectory / asset.location).concat(".ogg").string().c_str());
if (musicAsset.data) {
musicAsset.volume = 100.0f;
return true;
}
musicAsset.data = Mix_LoadMUS((assetDirectory / asset.location).concat(".wav").string().c_str());
if (musicAsset.data) {
musicAsset.volume = 100.0f;
return true;
}
std::cerr << "Failed loading music \"" << musicAsset.location.string() << "\"" << std::endl;
musicAsset.volume = 0.0f;
return false;
}
void MusicAssetLoader::unload(Asset &asset) const {
assert(asset.getType() == AssetType::music);
MusicAsset &musicAsset = static_cast<MusicAsset&>(asset);
if (musicAsset.loaded()) {
Mix_FreeMusic(musicAsset.data);
musicAsset.data = nullptr;
}
}
AssetType MusicAssetLoader::getType() const {
return AssetType::music;
}
MusicAsset::MusicAsset(const std::filesystem::path &location) :
Asset(location),
volume(0.0f),
data(nullptr) {}
MusicAsset::~MusicAsset() {}
bool MusicAsset::play(const Settings &settings) const {
if (!loaded()) {
return false;
}
Mix_VolumeMusic(std::clamp<int>(MIX_MAX_VOLUME * (volume / 100.0f) * (settings.musicVolume / 100.0f) * (settings.masterVolume / 100.0f), 0, MIX_MAX_VOLUME));
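        // Worked example of the product above (comment added for clarity, not in
        // the original): with asset volume = 100, settings.musicVolume = 50 and
        // settings.masterVolume = 50, the argument is
        // MIX_MAX_VOLUME * 1.0 * 0.5 * 0.5 = 128 * 0.25 = 32, which is already
        // inside [0, MIX_MAX_VOLUME], so the clamp leaves it unchanged.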
Mix_PlayMusic(data, -1);
return true;
}
bool MusicAsset::loaded() const {
return data != nullptr;
}
AssetType MusicAsset::getType() const {
return AssetType::music;
}
}
| 38.724138
| 165
| 0.615761
|
shiromino
|
3387b193b9ce49610b0d19a83fa3d652af92c459
| 10,446
|
cpp
|
C++
|
modules/ts/src/ocl_test.cpp
|
lastlegion/opencv
|
4b31b9cb8cbc673ca8db95fdec8a3e707d04fef2
|
[
"BSD-3-Clause"
] | 1
|
2019-04-19T22:56:53.000Z
|
2019-04-19T22:56:53.000Z
|
modules/ts/src/ocl_test.cpp
|
lastlegion/opencv
|
4b31b9cb8cbc673ca8db95fdec8a3e707d04fef2
|
[
"BSD-3-Clause"
] | null | null | null |
modules/ts/src/ocl_test.cpp
|
lastlegion/opencv
|
4b31b9cb8cbc673ca8db95fdec8a3e707d04fef2
|
[
"BSD-3-Clause"
] | null | null | null |
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/ts/ocl_test.hpp"
#ifdef HAVE_OPENCL
namespace cvtest {
namespace ocl {
using namespace cv;
int test_loop_times = 1; // TODO Read from command line / environment
#define DUMP_PROPERTY_XML(propertyName, propertyValue) \
do { \
std::stringstream ssName, ssValue;\
ssName << propertyName;\
ssValue << (propertyValue); \
::testing::Test::RecordProperty(ssName.str(), ssValue.str()); \
} while (false)
#define DUMP_MESSAGE_STDOUT(msg) \
do { \
std::cout << msg << std::endl; \
} while (false)
static std::string bytesToStringRepr(size_t value)
{
size_t b = value % 1024;
value /= 1024;
size_t kb = value % 1024;
value /= 1024;
size_t mb = value % 1024;
value /= 1024;
size_t gb = value;
std::ostringstream stream;
if (gb > 0)
stream << gb << " GB ";
if (mb > 0)
stream << mb << " MB ";
if (kb > 0)
stream << kb << " kB ";
if (b > 0)
stream << b << " B";
return stream.str();
}
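// Worked example (comment added for clarity, not in the original source): for
// value = 2684354560 bytes the successive mod/div steps give b = 0, kb = 0,
// mb = 512 and gb = 2, so the function returns "2 GB 512 MB " (zero-valued parts
// are skipped and each printed part carries a trailing space).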
void dumpOpenCLDevice()
{
using namespace cv::ocl;
try
{
std::vector<PlatformInfo> platforms;
cv::ocl::getPlatfomsInfo(platforms);
if (platforms.size() > 0)
{
DUMP_MESSAGE_STDOUT("OpenCL Platforms: ");
for (size_t i = 0; i < platforms.size(); i++)
{
const PlatformInfo* platform = &platforms[i];
DUMP_MESSAGE_STDOUT(" " << platform->name().c_str());
Device current_device;
for (int j = 0; j < platform->deviceNumber(); j++)
{
platform->getDevice(current_device, j);
const char* deviceTypeStr = current_device.type() == Device::TYPE_CPU
? ("CPU") : (current_device.type() == Device::TYPE_GPU ? current_device.hostUnifiedMemory() ? "iGPU" : "dGPU" : "unknown");
DUMP_MESSAGE_STDOUT( " " << deviceTypeStr << ": " << current_device.name().c_str() << " (" << current_device.version().c_str() << ")");
DUMP_PROPERTY_XML( cv::format("cv_ocl_platform_%d_device_%d", (int)i, (int)j ),
cv::format("(Platform=%s)(Type=%s)(Name=%s)(Version=%s)",
platform->name().c_str(), deviceTypeStr, current_device.name().c_str(), current_device.version().c_str()) );
}
}
}
else
{
DUMP_MESSAGE_STDOUT("OpenCL is not available");
DUMP_PROPERTY_XML("cv_ocl", "not available");
return;
}
const Device& device = Device::getDefault();
DUMP_MESSAGE_STDOUT("Current OpenCL device: ");
#if 0
DUMP_MESSAGE_STDOUT(" Platform = "<< device.getPlatform().name());
DUMP_PROPERTY_XML("cv_ocl_current_platformName", device.getPlatform().name());
#endif
const char* deviceTypeStr = device.type() == Device::TYPE_CPU
? ("CPU") : (device.type() == Device::TYPE_GPU ? device.hostUnifiedMemory() ? "iGPU" : "dGPU" : "unknown");
DUMP_MESSAGE_STDOUT(" Type = "<< deviceTypeStr);
DUMP_PROPERTY_XML("cv_ocl_current_deviceType", deviceTypeStr);
DUMP_MESSAGE_STDOUT(" Name = "<< device.name());
DUMP_PROPERTY_XML("cv_ocl_current_deviceName", device.name());
DUMP_MESSAGE_STDOUT(" Version = " << device.version());
DUMP_PROPERTY_XML("cv_ocl_current_deviceVersion", device.version());
DUMP_MESSAGE_STDOUT(" Compute units = "<< device.maxComputeUnits());
DUMP_PROPERTY_XML("cv_ocl_current_maxComputeUnits", device.maxComputeUnits());
DUMP_MESSAGE_STDOUT(" Max work group size = "<< device.maxWorkGroupSize());
DUMP_PROPERTY_XML("cv_ocl_current_maxWorkGroupSize", device.maxWorkGroupSize());
std::string localMemorySizeStr = bytesToStringRepr(device.localMemSize());
DUMP_MESSAGE_STDOUT(" Local memory size = " << localMemorySizeStr);
DUMP_PROPERTY_XML("cv_ocl_current_localMemSize", device.localMemSize());
std::string maxMemAllocSizeStr = bytesToStringRepr(device.maxMemAllocSize());
DUMP_MESSAGE_STDOUT(" Max memory allocation size = "<< maxMemAllocSizeStr);
DUMP_PROPERTY_XML("cv_ocl_current_maxMemAllocSize", device.maxMemAllocSize());
const char* doubleSupportStr = device.doubleFPConfig() > 0 ? "Yes" : "No";
DUMP_MESSAGE_STDOUT(" Double support = "<< doubleSupportStr);
DUMP_PROPERTY_XML("cv_ocl_current_haveDoubleSupport", device.doubleFPConfig() > 0);
const char* isUnifiedMemoryStr = device.hostUnifiedMemory() ? "Yes" : "No";
DUMP_MESSAGE_STDOUT(" Host unified memory = "<< isUnifiedMemoryStr);
DUMP_PROPERTY_XML("cv_ocl_current_hostUnifiedMemory", device.hostUnifiedMemory());
}
catch (...)
{
DUMP_MESSAGE_STDOUT("Exception. Can't dump OpenCL info");
DUMP_MESSAGE_STDOUT("OpenCL device not available");
DUMP_PROPERTY_XML("cv_ocl", "not available");
}
}
#undef DUMP_MESSAGE_STDOUT
#undef DUMP_PROPERTY_XML
Mat TestUtils::readImage(const String &fileName, int flags)
{
return cv::imread(cvtest::TS::ptr()->get_data_path() + fileName, flags);
}
Mat TestUtils::readImageType(const String &fname, int type)
{
Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cv::cvtColor(src, temp, cv::COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type));
return src;
}
double TestUtils::checkNorm(const Mat &m)
{
return norm(m, NORM_INF);
}
double TestUtils::checkNorm(const Mat &m1, const Mat &m2)
{
return norm(m1, m2, NORM_INF);
}
double TestUtils::checkSimilarity(const Mat &m1, const Mat &m2)
{
Mat diff;
matchTemplate(m1, m2, diff, CV_TM_CCORR_NORMED);
return std::abs(diff.at<float>(0, 0) - 1.f);
}
double TestUtils::checkRectSimilarity(Size sz, std::vector<Rect>& ob1, std::vector<Rect>& ob2)
{
double final_test_result = 0.0;
size_t sz1 = ob1.size();
size_t sz2 = ob2.size();
if(sz1 != sz2)
{
return sz1 > sz2 ? (double)(sz1 - sz2) : (double)(sz2 - sz1);
}
else
{
if(sz1==0 && sz2==0)
return 0;
cv::Mat cpu_result(sz, CV_8UC1);
cpu_result.setTo(0);
for(vector<Rect>::const_iterator r = ob1.begin(); r != ob1.end(); r++)
{
cv::Mat cpu_result_roi(cpu_result, *r);
cpu_result_roi.setTo(1);
cpu_result.copyTo(cpu_result);
}
int cpu_area = cv::countNonZero(cpu_result > 0);
cv::Mat gpu_result(sz, CV_8UC1);
gpu_result.setTo(0);
for(vector<Rect>::const_iterator r2 = ob2.begin(); r2 != ob2.end(); r2++)
{
cv::Mat gpu_result_roi(gpu_result, *r2);
gpu_result_roi.setTo(1);
gpu_result.copyTo(gpu_result);
}
cv::Mat result_;
multiply(cpu_result, gpu_result, result_);
int result = cv::countNonZero(result_ > 0);
if(cpu_area!=0 && result!=0)
final_test_result = 1.0 - (double)result/(double)cpu_area;
else if(cpu_area==0 && result!=0)
final_test_result = -1;
}
return final_test_result;
}
void TestUtils::showDiff(const Mat& src, const Mat& gold, const Mat& actual, double eps, bool alwaysShow)
{
Mat diff, diff_thresh;
absdiff(gold, actual, diff);
diff.convertTo(diff, CV_32F);
threshold(diff, diff_thresh, eps, 255.0, cv::THRESH_BINARY);
if (alwaysShow || cv::countNonZero(diff_thresh.reshape(1)) > 0)
{
#if 0
std::cout << "Source: " << std::endl << src << std::endl;
std::cout << "Expected: " << std::endl << gold << std::endl;
std::cout << "Actual: " << std::endl << actual << std::endl;
#endif
namedWindow("src", WINDOW_NORMAL);
namedWindow("gold", WINDOW_NORMAL);
namedWindow("actual", WINDOW_NORMAL);
namedWindow("diff", WINDOW_NORMAL);
imshow("src", src);
imshow("gold", gold);
imshow("actual", actual);
imshow("diff", diff);
cv::waitKey();
}
}
}} // namespace cvtest::ocl
#endif // HAVE_OPENCL
| 35.530612
| 162
| 0.621673
|
lastlegion
|
338cbae03ee370b28ab7c2c09f825cc4220289f7
| 4,445
|
cpp
|
C++
|
e-Paper/src/SPIHandler.cpp
|
PDA-UR/Dumb-e-Paper
|
99aecce5fbcb64d32b7e47809df393e0e2e7fab4
|
[
"MIT"
] | 2
|
2019-01-30T13:48:14.000Z
|
2021-10-30T16:11:03.000Z
|
e-Paper/src/SPIHandler.cpp
|
PDA-UR/Dumb-e-Paper
|
99aecce5fbcb64d32b7e47809df393e0e2e7fab4
|
[
"MIT"
] | null | null | null |
e-Paper/src/SPIHandler.cpp
|
PDA-UR/Dumb-e-Paper
|
99aecce5fbcb64d32b7e47809df393e0e2e7fab4
|
[
"MIT"
] | 2
|
2018-02-14T12:45:59.000Z
|
2021-12-17T20:57:02.000Z
|
#include "SPIHandler.hpp"
long SPIHandler::spiTime = 0;
long SPIHandler::lastSpiTime = 0;
void SPIHandler::printSpiTime()
{
Serial.print("SPI time: ");
Serial.println(spiTime);
}
void SPIHandler::init()
{
// initialize SPI:
SPI.begin();
//SPI.setClockDivider (SPI_CLOCK_DIV8); //slow the clock down (recommended: 3mhz, max: 12mhz)
SPI.setFrequency(4000000L); // 500000
SPI.setDataMode(SPI_MODE3); // CPOL = 1, CPH = 1
SPI.setBitOrder(MSBFIRST);
//does not work with my IDE
//SPI.beginTransaction (SPISettings (1000000, MSBFIRST, SPI_MODE0));
}
void SPIHandler::spiWrite(byte *data, char commandLength, byte *result, char resultLength)
{
lastSpiTime = micros();
    // yield() (similar to delay(0)) keeps the CPU from locking up and triggering WDT resets
yield();
//uint8_t* out = (uint8_t*) malloc(commandLength + resultLength);
// wait until controller is ready
while (digitalRead(PIN_BUSY) == LOW)
{
//yield();
delayMicroseconds(5); //100
}
// send command to controller
digitalWrite(PIN_CS, LOW);
//yield();
delayMicroseconds(10);
/*for(int i = 0; i < commandLength; i++)
{
SPI.transfer(data[i]);
}*/
SPI.writeBytes(data, commandLength);
/*for(int i = 0; i < commandLength + resultLength; i++)
{
Serial.println(out[i], HEX);
}
free(out);*/
//yield();
delayMicroseconds(5);
digitalWrite(PIN_CS, HIGH);
// wait until controller is ready
while (digitalRead(PIN_BUSY) == LOW)
{
delayMicroseconds(5); //100
}
digitalWrite(PIN_CS, LOW);
delayMicroseconds(10); //10
// read answer from controller and save it to result pointer
for (int i = 0; i < resultLength; i++)
{
result[i] = SPI.transfer(0x00);
}
delayMicroseconds(5); //10
digitalWrite(PIN_CS, HIGH);
spiTime += (micros() - lastSpiTime);
}
void SPIHandler::start()
{
//start the device
Serial.println("initializing...");
digitalWrite(PIN_EN, HIGH);
delay(100);
Serial.println("set en to low...");
digitalWrite(PIN_EN, LOW);
Serial.println("wait for BUSY rising edge...");
while (digitalRead(PIN_BUSY) == LOW)
{
delay(1);
}
Serial.println("set CS to high...");
digitalWrite(PIN_CS, HIGH);
Serial.println("wait for one second..."); //to give us time to open serial monitor
delay(1000);
Serial.println("starting...");
}
uint16_t SPIHandler::uploadImageData(byte slotNumber, byte packetSize, byte *data)
{
uint16_t result = 0;
byte params[4] = {0x20, 0x01, slotNumber, packetSize};
byte *input;
char resultLength = 2;
byte buffer[resultLength];
//unsigned long lastTimeMalloc = 0;
//unsigned long lastTimeMemcpy = 0;
//unsigned long lastTimeSpiWrite = 0;
//lastTimeMalloc = millis();
input = (byte *)malloc((sizeof params + packetSize) * sizeof(byte));
//Serial.print("time malloc: "); Serial.println(millis() - lastTimeMalloc);
//lastTimeMemcpy = millis();
memcpy(input, params, sizeof(params) * sizeof(byte));
memcpy(input + sizeof(params), data, packetSize * sizeof(byte));
//Serial.print("time cpy: "); Serial.println(millis() - lastTimeMemcpy);
//lastTimeSpiWrite = millis();
spiWrite(input, sizeof params + packetSize, buffer, resultLength);
//Serial.print("time spi: "); Serial.println(millis() - lastTimeSpiWrite);
// convert result code to uint16
for (int i = 0; i < resultLength; i++)
{
result <<= 8;
result |= buffer[i];
}
free(input);
return result;
}
uint16_t SPIHandler::imageEraseFrameBuffer(byte slotNumber)
{
    uint16_t result = 0;
byte input[3] = {0x20, 0x0E, slotNumber};
char resultLength = 2;
byte buffer[resultLength];
spiWrite(input, sizeof(input), buffer, resultLength);
// convert result code to uint16
for (int i = 0; i < resultLength; i++)
{
result <<= 8;
result |= buffer[i];
}
return result;
}
uint16_t SPIHandler::displayUpdate(byte updateMode)
{
    uint16_t result = 0;
byte input[4] = {updateMode, 0x01, 0x00, 0x00};
char resultLength = 2;
byte buffer[resultLength];
spiWrite(input, sizeof(input), buffer, resultLength);
    // convert result code to uint16 (big-endian; a factored-out sketch follows this function)
for (int i = 0; i < resultLength; i++)
{
result <<= 8;
result |= buffer[i];
}
return result;
}
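// Sketch only (hypothetical helper, not part of the original SPIHandler): the three
// functions above all turn the controller's reply bytes into a status word the same
// way, shifting left by 8 bits and OR-ing in the next byte, i.e. reading the reply
// as big-endian. Factored out, the pattern looks like this:
static uint16_t combineReplyBigEndian(const byte *buffer, int length)
{
    uint16_t result = 0;
    for (int i = 0; i < length; i++)
    {
        result <<= 8;        // make room for the next byte
        result |= buffer[i]; // e.g. {0x90, 0x00} -> 0x9000
    }
    return result;
}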
| 24.832402
| 98
| 0.624522
|
PDA-UR
|
338ce5de31bfffb5a2b83d6ce3020cd515549cd1
| 2,928
|
cc
|
C++
|
orly/data/matrix.cc
|
orlyatomics/orly
|
d413f999f51a8e553832dab4e3baa7ca68928840
|
[
"Apache-2.0"
] | 69
|
2015-01-06T05:12:57.000Z
|
2021-11-06T20:34:10.000Z
|
orly/data/matrix.cc
|
waderly/orly
|
9d7660ea9d07591f8cc6b1b92d8e6c3b8b78eeee
|
[
"Apache-2.0"
] | 5
|
2015-07-09T02:21:50.000Z
|
2021-08-13T11:10:26.000Z
|
orly/data/matrix.cc
|
waderly/orly
|
9d7660ea9d07591f8cc6b1b92d8e6c3b8b78eeee
|
[
"Apache-2.0"
] | 15
|
2015-01-23T13:34:05.000Z
|
2020-06-15T16:46:50.000Z
|
/* <orly/data/matrix.cc>
Generates a core-vector file with matrix data
Copyright 2010-2014 OrlyAtomics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fcntl.h>
#include <unistd.h>
#include <io/binary_output_only_stream.h>
#include <io/device.h>
#include <orly/atom/core_vector_builder.h>
using namespace std;
using namespace Base;
using namespace Orly;
/*
(0) {"name":"Neo"}
(1) {"name":"Morpheus"}
(2) {"name":"Trinity"}
(3) {"name":"Cypher"}
(4) {"name":"Agent Smith"}
(5) {"name":"The Architect"}
(0)-[:KNOWS]->(1)
(0)-[:LOVES]->(2)
(1)-[:KNOWS]->(2)
(1)-[:KNOWS]->(3)
(3)-[:KNOWS]->(4)
(4)-[:CODED_BY]->(5)
*/
static const TUuid NodeIndexId{TUuid::Twister};
static const TUuid EdgeIndexId{TUuid::Twister};
void Node(Atom::TCoreVectorBuilder &builder, int64_t id, const string &name) {
builder.Push(TUuid(TUuid::Twister));
builder.Push(1L);
builder.Push(1L);
builder.Push(NodeIndexId);
builder.Push(make_tuple(id));
builder.Push(name);
}
void MakeEdge(Atom::TCoreVectorBuilder &builder,
const std::string &kind,
int64_t from,
int64_t to) {
builder.Push(TUuid(TUuid::Twister));
builder.Push(1L);
builder.Push(1L);
builder.Push(EdgeIndexId);
builder.Push(make_tuple(from, kind, to));
builder.Push(true);
}
void Knows(Atom::TCoreVectorBuilder &builder, int64_t from, int64_t to) {
MakeEdge(builder, "Knows", from, to);
}
void Loves(Atom::TCoreVectorBuilder &builder, int64_t from, int64_t to) {
MakeEdge(builder, "Loves", from, to);
}
void CodedBy(Atom::TCoreVectorBuilder &builder, int64_t from, int64_t to) {
MakeEdge(builder, "CodedBy", from, to);
}
int main(int /*argc*/, char */*argv*/[]) {
const int64_t num_trans = 12L;
Atom::TCoreVectorBuilder builder;
builder.Push(num_trans); // num transactions
builder.Push(num_trans); // dummy meta data
Node(builder, 0, "Neo");
Node(builder, 1, "Morpheus");
Node(builder, 2, "Trinity");
Node(builder, 3, "Cypher");
Node(builder, 4, "Agent Smith");
Node(builder, 5, "The Architect");
Knows(builder, 0, 1);
Loves(builder, 0, 2);
Knows(builder, 1, 2);
Knows(builder, 1, 3);
Knows(builder, 3, 4);
CodedBy(builder, 4, 5);
assert(builder.GetCores().size() == 2UL + num_trans * 6UL);
Io::TBinaryOutputOnlyStream strm(make_shared<Io::TDevice>(open("matrix.bin", O_WRONLY | O_CREAT, 0777)));
builder.Write(strm);
return EXIT_SUCCESS;
}
| 28.427184
| 107
| 0.68306
|
orlyatomics
|
338dca44d419f5221660f02c955b519a3c009213
| 3,676
|
hpp
|
C++
|
shared/src/shared/scene/component_data.hpp
|
dumheter/wind_simulation
|
adf731847cb6145a85792a0ebceacc725a3acf9e
|
[
"MIT"
] | 1
|
2021-04-26T11:24:02.000Z
|
2021-04-26T11:24:02.000Z
|
shared/src/shared/scene/component_data.hpp
|
dumheter/wind_simulation
|
adf731847cb6145a85792a0ebceacc725a3acf9e
|
[
"MIT"
] | 1
|
2020-06-09T08:53:07.000Z
|
2020-06-16T13:37:15.000Z
|
shared/src/shared/scene/component_data.hpp
|
dumheter/wind_simulation
|
adf731847cb6145a85792a0ebceacc725a3acf9e
|
[
"MIT"
] | null | null | null |
// MIT License
//
// Copyright (c) 2020 Filip Björklund, Christoffer Gustafsson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#pragma once
// ========================================================================== //
// Headers
// ========================================================================== //
#include <variant>
#include "shared/math/math.hpp"
#include "shared/scene/types.hpp"
#include "shared/types.hpp"
#include "shared/wind/base_functions.hpp"
// ========================================================================== //
// ComponentData Declaration
// ========================================================================== //
namespace alflib {
class RawMemoryReader;
class RawMemoryWriter;
} // namespace alflib
namespace wind {
/// Union-like class with data about different components.
class ComponentData {
public:
/// Underlying enum type of the enum "ComponentType"
using TagType = std::underlying_type_t<ComponentType>;
struct RigidbodyData {};
struct WindData {
std::vector<BaseFn> functions;
u8 volumeType = 0;
Vec3F pos;
Vec3F scale;
};
struct RenderableData {
String pathTexture;
};
struct RotorData {
Quat rot;
};
struct ColliderData {
f32 restitution = 0.0f;
f32 mass = 0.0f;
};
struct WindAffectableData {};
public:
template <typename T> bool isType() const {
return std::holds_alternative<T>(m_data);
}
/// Returns the Rigidbody data
const RigidbodyData &rigidbodyData() const;
/// Returns the WindSource data
const WindData &windSourceData() const;
const RenderableData &renderableData() const;
const RotorData &rotorData() const;
const ColliderData &colliderData() const;
const WindAffectableData &windAffectableData() const;
/// Serializes to bytes
bool ToBytes(alflib::RawMemoryWriter &mw) const;
/// Deserializes from bytes
static ComponentData FromBytes(alflib::RawMemoryReader &mr);
/// Creates a component data representing rigidbody data
static ComponentData asRigidbody();
/// Creates a component data representing wind source data
static ComponentData asWind(const std::vector<BaseFn> &functions,
u8 volumeType, Vec3F pos, Vec3F scale);
static ComponentData asRenderable(const String &pathTexture);
static ComponentData asRotor(const Quat &rot);
static ComponentData asCollider(f32 restitution, f32 mass);
static ComponentData asWindAffectable();
private:
/// Variant
std::variant<RigidbodyData, WindData, RenderableData, RotorData, ColliderData,
WindAffectableData>
m_data;
};
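/// Illustrative usage sketch (hypothetical function, not part of the original
/// header): how the variant-backed ComponentData is meant to be built and queried.
/// Assumes Vec3F is constructible from three floats and that the factory functions
/// declared above are defined in the accompanying implementation file.
inline void exampleComponentDataUsage() {
  ComponentData wind = ComponentData::asWind(std::vector<BaseFn>{},
                                             /* volumeType */ 0, Vec3F(0, 0, 0),
                                             Vec3F(1, 1, 1));
  if (wind.isType<ComponentData::WindData>()) {
    // Safe: the variant currently holds WindData, so this accessor is valid.
    const ComponentData::WindData &data = wind.windSourceData();
    (void)data.volumeType;
  }
}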
} // namespace wind
| 29.174603
| 80
| 0.668662
|
dumheter
|
338ddeb1324987492a95bc227ed31b88f7b54563
| 709
|
hpp
|
C++
|
querier/BlockSeriesSet.hpp
|
Jimx-/tsdb-fork
|
f92cfa0a998c03b3a2cb4c8e46990de8b47aae15
|
[
"Apache-2.0"
] | 1
|
2020-06-04T06:56:40.000Z
|
2020-06-04T06:56:40.000Z
|
querier/BlockSeriesSet.hpp
|
Jimx-/tsdb-fork
|
f92cfa0a998c03b3a2cb4c8e46990de8b47aae15
|
[
"Apache-2.0"
] | null | null | null |
querier/BlockSeriesSet.hpp
|
Jimx-/tsdb-fork
|
f92cfa0a998c03b3a2cb4c8e46990de8b47aae15
|
[
"Apache-2.0"
] | 1
|
2020-06-04T03:35:58.000Z
|
2020-06-04T03:35:58.000Z
|
#ifndef BLOCKSERIESSET_H
#define BLOCKSERIESSET_H
#include "querier/ChunkSeriesSetInterface.hpp"
#include "querier/SeriesInterface.hpp"
#include "querier/SeriesSetInterface.hpp"
namespace tsdb{
namespace querier{
class BlockSeriesSet: public SeriesSetInterface{
private:
std::shared_ptr<ChunkSeriesSetInterface> cs;
mutable std::shared_ptr<SeriesInterface> cur;
int64_t min_time;
int64_t max_time;
mutable bool err_;
public:
BlockSeriesSet(const std::shared_ptr<ChunkSeriesSetInterface> & cs, int64_t min_time, int64_t max_time);
bool next() const;
std::shared_ptr<SeriesInterface> at();
bool error() const;
};
}}
#endif
| 22.870968
| 112
| 0.713681
|
Jimx-
|
338e609cba03482e791980e3222b6d78880b9209
| 434
|
cpp
|
C++
|
software_package/development/test/test_no_deps/test_states.cpp
|
scottzach1/Project-Beans
|
0c7a257409464d5e44ea7367e439ae0e12fd41f1
|
[
"MIT"
] | 5
|
2020-10-21T23:35:18.000Z
|
2021-02-02T19:44:46.000Z
|
software_package/development/test/test_states/test_states.cpp
|
scottzach1/Project-Beans
|
0c7a257409464d5e44ea7367e439ae0e12fd41f1
|
[
"MIT"
] | null | null | null |
software_package/development/test/test_states/test_states.cpp
|
scottzach1/Project-Beans
|
0c7a257409464d5e44ea7367e439ae0e12fd41f1
|
[
"MIT"
] | 1
|
2020-10-26T05:13:21.000Z
|
2020-10-26T05:13:21.000Z
|
#include "state_machine.h"
#include "states.h"
#include "unity.h"
void setUp(void) {
// set stuff up here
}
void tearDown(void) {
// clean stuff up here
}
void test_init_state() {
StateMachine sm;
if (PreLaunchState* initialStateInstance =
dynamic_cast<PreLaunchState*>(sm.currentState)) {
TEST_PASS();
}
TEST_FAIL();
}
int main() {
UNITY_BEGIN();
RUN_TEST(test_init_state);
return UNITY_END();
}
| 15.5
| 59
| 0.668203
|
scottzach1
|
338ecdfdd427bba421031330983577d638d79406
| 1,706
|
cpp
|
C++
|
testinc/testinc.cpp
|
gongluck/cghttp
|
19c41972c7a08d910b31b92cff4d4f26bc951197
|
[
"MIT"
] | null | null | null |
testinc/testinc.cpp
|
gongluck/cghttp
|
19c41972c7a08d910b31b92cff4d4f26bc951197
|
[
"MIT"
] | null | null | null |
testinc/testinc.cpp
|
gongluck/cghttp
|
19c41972c7a08d910b31b92cff4d4f26bc951197
|
[
"MIT"
] | null | null | null |
/*
* @Author: gongluck
* @Date: 2020-06-18 11:08:13
* @Last Modified by: gongluck
* @Last Modified time: 2020-06-18 17:30:42
*/
#include <stdio.h>
#include <time.h>
#include <thread>
#include <mutex>
std::mutex g_mutex;
#include "../cghttp.h"
int TESTTIMES = 100;
void test_get()
{
char* body = NULL;
size_t bodylen = 0;
int ret = Get("http://www.gongluck.icu/web", &body, &bodylen);
//printf("%d\n%s\n%zd\n", ret, body, bodylen);
static int t = 0;
g_mutex.lock();
printf("%d\n", ++t);
g_mutex.unlock();
//std::this_thread::sleep_for(std::chrono::microseconds(10));
Release(&body);
}
void test_post()
{
char* keys[] = {"name", "password"};
char* values[] = {"gongluck", "testtest"};
char* body = NULL;
size_t bodylen = 0;
int ret = Post("http://www.gongluck.icu/api/regist", keys, values, 2, &body, &bodylen);
//printf("%d\n%s\n%zd\n", ret, body, bodylen);
Release(&body);
}
int main()
{
std::thread* ths = new std::thread[TESTTIMES];
clock_t t1 = clock();
for (int i = 0; i < TESTTIMES; ++i)
{
std::thread th(test_get);
ths[i].swap(th);
}
for (int i = 0; i < TESTTIMES; ++i)
{
if (ths[i].joinable())
{
ths[i].join();
}
}
clock_t t2 = clock();
printf("%fms\n", difftime(t2, t1));
t1 = clock();
for (int i = 0; i < TESTTIMES; ++i)
{
std::thread th(test_post);
ths[i].swap(th);
}
for (int i = 0; i < TESTTIMES; ++i)
{
if (ths[i].joinable())
{
ths[i].join();
}
}
t2 = clock();
printf("%fms\n", difftime(t2, t1));
getchar();
return 0;
}
| 20.554217
| 91
| 0.519343
|
gongluck
|
338f0b7fdf58b11240ee6d5064d66cd8ba10a7b4
| 2,180
|
cpp
|
C++
|
SDK/ARKSurvivalEvolved_DroppedItemTorch_functions.cpp
|
2bite/ARK-SDK
|
c38ca9925309516b2093ad8c3a70ed9489e1d573
|
[
"MIT"
] | 10
|
2020-02-17T19:08:46.000Z
|
2021-07-31T11:07:19.000Z
|
SDK/ARKSurvivalEvolved_DroppedItemTorch_functions.cpp
|
2bite/ARK-SDK
|
c38ca9925309516b2093ad8c3a70ed9489e1d573
|
[
"MIT"
] | 9
|
2020-02-17T18:15:41.000Z
|
2021-06-06T19:17:34.000Z
|
SDK/ARKSurvivalEvolved_DroppedItemTorch_functions.cpp
|
2bite/ARK-SDK
|
c38ca9925309516b2093ad8c3a70ed9489e1d573
|
[
"MIT"
] | 3
|
2020-07-22T17:42:07.000Z
|
2021-06-19T17:16:13.000Z
|
// ARKSurvivalEvolved (329.9) SDK
#ifdef _MSC_VER
#pragma pack(push, 0x8)
#endif
#include "ARKSurvivalEvolved_DroppedItemTorch_parameters.hpp"
namespace sdk
{
//---------------------------------------------------------------------------
//Functions
//---------------------------------------------------------------------------
// Function DroppedItemTorch.DroppedItemTorch_C.ReceiveBeginPlay
// ()
void ADroppedItemTorch_C::ReceiveBeginPlay()
{
static auto fn = UObject::FindObject<UFunction>("Function DroppedItemTorch.DroppedItemTorch_C.ReceiveBeginPlay");
ADroppedItemTorch_C_ReceiveBeginPlay_Params params;
auto flags = fn->FunctionFlags;
	UObject::ProcessEvent(fn, &params);
fn->FunctionFlags = flags;
}
// Function DroppedItemTorch.DroppedItemTorch_C.UserConstructionScript
// ()
void ADroppedItemTorch_C::UserConstructionScript()
{
static auto fn = UObject::FindObject<UFunction>("Function DroppedItemTorch.DroppedItemTorch_C.UserConstructionScript");
ADroppedItemTorch_C_UserConstructionScript_Params params;
auto flags = fn->FunctionFlags;
	UObject::ProcessEvent(fn, &params);
fn->FunctionFlags = flags;
}
// Function DroppedItemTorch.DroppedItemTorch_C.CheckFire
// ()
void ADroppedItemTorch_C::CheckFire()
{
static auto fn = UObject::FindObject<UFunction>("Function DroppedItemTorch.DroppedItemTorch_C.CheckFire");
ADroppedItemTorch_C_CheckFire_Params params;
auto flags = fn->FunctionFlags;
	UObject::ProcessEvent(fn, &params);
fn->FunctionFlags = flags;
}
// Function DroppedItemTorch.DroppedItemTorch_C.ExecuteUbergraph_DroppedItemTorch
// ()
// Parameters:
// int EntryPoint (Parm, ZeroConstructor, IsPlainOldData)
void ADroppedItemTorch_C::ExecuteUbergraph_DroppedItemTorch(int EntryPoint)
{
static auto fn = UObject::FindObject<UFunction>("Function DroppedItemTorch.DroppedItemTorch_C.ExecuteUbergraph_DroppedItemTorch");
ADroppedItemTorch_C_ExecuteUbergraph_DroppedItemTorch_Params params;
params.EntryPoint = EntryPoint;
auto flags = fn->FunctionFlags;
	UObject::ProcessEvent(fn, &params);
fn->FunctionFlags = flags;
}
}
#ifdef _MSC_VER
#pragma pack(pop)
#endif
| 23.956044
| 131
| 0.724312
|
2bite
|
339360375a15d21d28fafa150117ffb04cacb89d
| 1,980
|
cpp
|
C++
|
dataset/test/modification/1471_all/8/transformation_1.cpp
|
Karina5005/Plagiarism
|
ce11f72ba21a754ca84a27e5f26a31a19d6cb6fb
|
[
"MIT"
] | 3
|
2022-02-15T00:29:39.000Z
|
2022-03-15T08:36:44.000Z
|
dataset/test/modification/1471_all/8/transformation_1.cpp
|
Kira5005-code/Plagiarism
|
ce11f72ba21a754ca84a27e5f26a31a19d6cb6fb
|
[
"MIT"
] | null | null | null |
dataset/test/modification/1471_all/8/transformation_1.cpp
|
Kira5005-code/Plagiarism
|
ce11f72ba21a754ca84a27e5f26a31a19d6cb6fb
|
[
"MIT"
] | null | null | null |
#include <iomanip>
#include <iostream>
#include<bits/stdc++.h>
#include <unordered_map>
#include<unordered_set>
using namespace std;
#define _USE_MATH_DEFINES
# define M_PI 3.14159265358979323846
#define ll long long
#define ull unsigned long long
#define ld long double
#define vbe(v) ((v).begin()), ((v).end())
#define sz(v) ((int)((v).size()))
#define prec(x) << fixed<< setprecision(x)
#define clr(v, d) memset(v, d, sizeof(v))
#define rep(i, v) for(int i=0;i<sz(v);++i)
#define lp(i, n) for(int i=0;i<(int)(n);++i)
#define lpi(i, j, n) for(int i=(j);i<(int)(n);++i)
#define lpd(i, j, n) for(int i=(j);i>=(int)(n);--i)
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define FASTIO ios_base::sync_with_stdio(false); cin.tie(NULL); cin.tie(0);
#define INFLL 1e18
#define INF 1e9
#define MOD 1000000007
#define MOD1 998244353
#define MAXN 200005
ll GCD(ll a, ll b) { return (a) ? GCD(b % a, a) : b; }
ll LCM(ll a, ll b) { return a * b / GCD(a, b); }
ll fastpow(ll b, ll p) {
/* 'if' begin */
if (!p) /* 'if' inside */
return 1;
ll ret = fastpow(b, p >> 1);
ret *= ret;
/* 'if' begin */
if (p & 1) /* 'if' inside */
ret *= b;
return ret;
}
void p(int cd_g) ;
int main() ;
void qap_qyc(int s) {
long long t_eb, zz_xna;
cin >> t_eb >> zz_xna;
long long n_xi = 0;
long long a_kx = 0;
{
int t = 0;
if (5 > 3) cout << "new code";for ( ; t < (int)(t_eb); )
/* 'for' inside */
{
long long r_ah;
cin >> r_ah;
n_xi += r_ah;
a_kx += (r_ah + zz_xna - 1) / zz_xna;
++t;
}}
cout << (n_xi + zz_xna - 1) / zz_xna << " " << a_kx;
}
int main() {
cin.tie(__null);
cin.tie(0);
;
int i = 1;
cin >> i;
{
int e_y = 0;
if (5 > 3) cout << "new code";for ( ; e_y < (int)(i); )
/* 'for' inside */
{
qap_qyc(e_y + 1);
cout << "\n";
++e_y;
}}
}
| 21.758242
| 76
| 0.506061
|
Karina5005
|
3395248f24298b9b46414a6c941ac43168e2b829
| 406
|
cpp
|
C++
|
20octombrie/Problema7.cpp
|
NegreanV/MyAlgorithms
|
7dd16d677d537d34280c3858ccf4cbea4b3ddf26
|
[
"Apache-2.0"
] | 1
|
2015-10-24T10:02:06.000Z
|
2015-10-24T10:02:06.000Z
|
20octombrie/Problema7.cpp
|
NegreanV/MyAlgorithms
|
7dd16d677d537d34280c3858ccf4cbea4b3ddf26
|
[
"Apache-2.0"
] | null | null | null |
20octombrie/Problema7.cpp
|
NegreanV/MyAlgorithms
|
7dd16d677d537d34280c3858ccf4cbea4b3ddf26
|
[
"Apache-2.0"
] | null | null | null |
/* A natural number n with at most 2 digits is read.
 * Print on the screen a figure made of the character * as in the adjacent example for n=5. */
#include <iostream>
using namespace std;
int main()
{
int n;
cin >> n;
for (int i = 1; i <= n; i++)
{
for (int j = 1; j <= n * 2 - i; j++)
{
cout << " ";
}
for (int j = 1; j <= i * 2 - 1; j++)
{
cout << "*";
}
cout << endl;
}
}
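// Illustrative note: the example figure referenced in the problem statement is not
// reproduced here, but tracing the two loops shows that row i prints (2*n - i) spaces
// followed by (2*i - 1) asterisks, i.e. a pyramid of '*' centred on column 2*n.
// For n = 5 the output is:
//          *
//         ***
//        *****
//       *******
//      *********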
| 15.615385
| 92
| 0.512315
|
NegreanV
|
339585efbfcb1d93d63b89f170734beaaee6265c
| 79
|
cpp
|
C++
|
gui/personal_file_gui_qt/controller.cpp
|
puzzzzzzle/-personal_file_protector
|
d38eb7e8ea3c957a0bc09c51e3431e1aa453d039
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
gui/personal_file_gui_qt/controller.cpp
|
puzzzzzzle/-personal_file_protector
|
d38eb7e8ea3c957a0bc09c51e3431e1aa453d039
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
gui/personal_file_gui_qt/controller.cpp
|
puzzzzzzle/-personal_file_protector
|
d38eb7e8ea3c957a0bc09c51e3431e1aa453d039
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#include "controller.h"
AbstractListModel *dataList = new AbstractListModel();
| 26.333333
| 54
| 0.797468
|
puzzzzzzle
|
339613466662194a82b854af5ef85dc1ac7a386c
| 1,824
|
cpp
|
C++
|
src/Native/libcryptonight/xmrig/common/cpu/BasicCpuInfo_arm.cpp
|
lurchinms/miningcore-2
|
1ab7d1ce1b3dd3c28e35348e0b701fd5b5e264b1
|
[
"MIT"
] | 28
|
2018-05-24T06:35:38.000Z
|
2021-11-29T17:18:32.000Z
|
src/Native/libcryptonight/xmrig/common/cpu/BasicCpuInfo_arm.cpp
|
lurchinms/miningcore-2
|
1ab7d1ce1b3dd3c28e35348e0b701fd5b5e264b1
|
[
"MIT"
] | 19
|
2019-03-30T23:23:02.000Z
|
2021-12-19T09:12:00.000Z
|
src/Native/libcryptonight/xmrig/common/cpu/BasicCpuInfo_arm.cpp
|
lurchinms/miningcore-2
|
1ab7d1ce1b3dd3c28e35348e0b701fd5b5e264b1
|
[
"MIT"
] | 37
|
2021-03-19T02:56:14.000Z
|
2022-03-19T16:42:51.000Z
|
/* XMRig
* Copyright 2010 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2012-2014 pooler <pooler@litecoinpool.org>
* Copyright 2014 Lucas Jones <https://github.com/lucasjones>
* Copyright 2014-2016 Wolf9466 <https://github.com/OhGodAPet>
* Copyright 2016 Jay D Dee <jayddee246@gmail.com>
* Copyright 2017-2019 XMR-Stak <https://github.com/fireice-uk>, <https://github.com/psychocrypt>
* Copyright 2018-2019 SChernykh <https://github.com/SChernykh>
* Copyright 2016-2019 XMRig <support@xmrig.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <thread>
#if __ARM_FEATURE_CRYPTO
# include <sys/auxv.h>
# include <asm/hwcap.h>
#endif
#include "common/cpu/BasicCpuInfo.h"
xmrig::BasicCpuInfo::BasicCpuInfo() :
m_aes(false),
m_avx2(false),
m_brand(),
m_threads(std::thread::hardware_concurrency())
{
# ifdef XMRIG_ARMv8
memcpy(m_brand, "ARMv8", 5);
# else
memcpy(m_brand, "ARMv7", 5);
# endif
# if __ARM_FEATURE_CRYPTO
m_aes = getauxval(AT_HWCAP) & HWCAP_AES;
# endif
}
size_t xmrig::BasicCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) const
{
return threads();
}
| 30.915254
| 100
| 0.702303
|
lurchinms
|
339645d29f49ab8ac3dfa26919427c6534e1b24b
| 540
|
cpp
|
C++
|
src/UOJ_1038 - (1658937) Accepted.cpp
|
dreamtocode/URI
|
1f402853e8ae43f3761fc3099e7694bff721d5bc
|
[
"MIT"
] | 1
|
2015-04-26T03:55:07.000Z
|
2015-04-26T03:55:07.000Z
|
src/UOJ_1038 - (1658937) Accepted.cpp
|
dreamtocode/URI
|
1f402853e8ae43f3761fc3099e7694bff721d5bc
|
[
"MIT"
] | null | null | null |
src/UOJ_1038 - (1658937) Accepted.cpp
|
dreamtocode/URI
|
1f402853e8ae43f3761fc3099e7694bff721d5bc
|
[
"MIT"
] | null | null | null |
#include <iostream>
#include <iomanip>
using namespace std;
int main() {
double a, b, c=4, d=4.5, e=5, f=2, l=1.5, m;
cin>>a>>b;
cout<<fixed<<setprecision(2);
if(a==1) {
m=b*c;
cout<<"Total: R$ "<<m<<"\n";
}
if(a==2) {
m=b*d;
cout<<"Total: R$ "<<m<<"\n";
}
if(a==3) {
m=b*e;
cout<<"Total: R$ "<<m<<"\n";
}
if(a==4) {
m=b*f;
cout<<"Total: R$ "<<m<<"\n";
}
if(a==5) {
m=b*l;
cout<<"Total: R$ "<<m<<"\n";
}
return 0;
}
| 18
| 48
| 0.375926
|
dreamtocode
|
339cf550950327bed0f6466ca1bfb5b263ec8194
| 916
|
hpp
|
C++
|
include/caffe/layers/func/noise_label_layer.hpp
|
JEF1056/MetaLearning-Neural-Style
|
94ac33cb6a62c4de8ff2aeac3572afd61f1bda5d
|
[
"MIT"
] | 126
|
2017-09-14T01:53:15.000Z
|
2021-03-24T08:57:41.000Z
|
include/caffe/layers/func/noise_label_layer.hpp
|
hli1221/styletransfer
|
5101f2c024638d3e111644c64398b3290fdeaec6
|
[
"BSD-2-Clause"
] | 17
|
2017-09-14T09:11:50.000Z
|
2019-11-27T08:56:52.000Z
|
include/caffe/layers/func/noise_label_layer.hpp
|
hli1221/styletransfer
|
5101f2c024638d3e111644c64398b3290fdeaec6
|
[
"BSD-2-Clause"
] | 34
|
2017-09-14T09:14:21.000Z
|
2020-12-16T09:49:40.000Z
|
#ifndef CAFFE_NoiseLabel_LAYER_HPP_
#define CAFFE_NoiseLabel_LAYER_HPP_
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
class NoiseLabelLayer : public Layer {
public:
explicit NoiseLabelLayer(const LayerParameter& param): Layer(param) {}
virtual inline const char* type() const { return "NoiseLabel"; }
virtual void Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top);
virtual void Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom);
virtual void SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top);
virtual void LayerSetUp(const vector<Blob*>& bottom, const vector<Blob*>& top);
virtual void Reshape(const vector<Blob*>& bottom, const vector<Blob*>& top);
protected:
int gpu_id_;
int rand_label_;
};
} // namespace caffe
#endif  // CAFFE_NoiseLabel_LAYER_HPP_
| 27.757576
| 85
| 0.742358
|
JEF1056
|
33a595320de14e89176a2d493257f78c829dddc7
| 2,217
|
hpp
|
C++
|
utils/UI.hpp
|
mad-penguins/Antarctica
|
2edb237283b652a280c0fb58cdb15e9290fca562
|
[
"MIT"
] | null | null | null |
utils/UI.hpp
|
mad-penguins/Antarctica
|
2edb237283b652a280c0fb58cdb15e9290fca562
|
[
"MIT"
] | 4
|
2019-04-19T19:29:57.000Z
|
2019-05-31T20:14:45.000Z
|
utils/UI.hpp
|
mad-penguins/Antarctica
|
2edb237283b652a280c0fb58cdb15e9290fca562
|
[
"MIT"
] | 5
|
2019-04-27T10:00:07.000Z
|
2019-06-07T14:32:27.000Z
|
/*!
* \file
* \author Nikita Mironov <nickfrom22nd@gmail.com>
* \brief Some GUI utilities
*
* \section LICENSE
*
* Copyright (c) 2019 Penguins of Madagascar
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ANTARCTICA_UI_HPP
#define ANTARCTICA_UI_HPP
#include <QtCore/QAbstractItemModel>
/*!
* \namespace Utils
 * \brief Namespace which contains utility classes with static methods
*/
namespace Utils {
/*!
* \class Utils::UI
* \brief Class with GUI utilities
*/
class UI {
public:
/*!
     * \brief Get the selected item in an entities tree and cast it to a specific item type
     * \tparam ItemType Specific type of tree item to cast to
     * \tparam ModelType Specific type of the model
     * \param model The model object
     * \param index Current index obtained from Qt's standard methods
     * \return Pointer to the item cast to ItemType, or nullptr if the cast fails
*/
template<class ItemType, class ModelType>
inline static ItemType *getCurrentItem(QAbstractItemModel *model, QModelIndex index) {
return dynamic_cast<ItemType *>(dynamic_cast<ModelType *>(model)->getItem(index));
}
};
}
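// Usage sketch (illustrative only; FileTreeModel and FileItem are hypothetical names
// standing in for a concrete model type that exposes getItem() and a concrete item
// type stored in it):
//
//   void onItemClicked(QAbstractItemModel *model, const QModelIndex &index) {
//       auto *file = Utils::UI::getCurrentItem<FileItem, FileTreeModel>(model, index);
//       // 'file' is nullptr when the stored item is not actually a FileItem. Note that
//       // 'model' must really point to a FileTreeModel, because the inner dynamic_cast
//       // result is dereferenced without a null check.
//   }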
#endif //ANTARCTICA_UI_HPP
| 35.758065
| 94
| 0.709517
|
mad-penguins
|
33a981d9a552d0dbe862e7262d6265a5937ce047
| 13,993
|
cpp
|
C++
|
src/CodeGen_PTX_Dev.cpp
|
pstanczyk/Halide
|
19360a7462e713aef3287652b4cf6bee3f3a1cea
|
[
"MIT"
] | 1
|
2019-08-16T21:18:48.000Z
|
2019-08-16T21:18:48.000Z
|
src/CodeGen_PTX_Dev.cpp
|
pstanczyk/Halide
|
19360a7462e713aef3287652b4cf6bee3f3a1cea
|
[
"MIT"
] | null | null | null |
src/CodeGen_PTX_Dev.cpp
|
pstanczyk/Halide
|
19360a7462e713aef3287652b4cf6bee3f3a1cea
|
[
"MIT"
] | null | null | null |
#include "CodeGen_PTX_Dev.h"
#include "IROperator.h"
#include "IRPrinter.h"
#include "Debug.h"
#include "Target.h"
#include "LLVM_Headers.h"
// This is declared in NVPTX.h, which is not exported. Ugly, but seems better than
// hardcoding a path to the .h file.
#if WITH_PTX
namespace llvm { ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping); }
#endif
namespace Halide {
namespace Internal {
using std::vector;
using std::string;
using namespace llvm;
CodeGen_PTX_Dev::CodeGen_PTX_Dev() : CodeGen() {
#if !(WITH_PTX)
assert(false && "ptx not enabled for this build of Halide.");
#endif
assert(llvm_NVPTX_enabled && "llvm build not configured with nvptx target enabled.");
}
void CodeGen_PTX_Dev::add_kernel(Stmt stmt, std::string name, const std::vector<Argument> &args) {
debug(2) << "In CodeGen_PTX_Dev::add_kernel\n";
// Now deduce the types of the arguments to our function
vector<llvm::Type *> arg_types(args.size());
for (size_t i = 0; i < args.size(); i++) {
if (args[i].is_buffer) {
arg_types[i] = llvm_type_of(UInt(8))->getPointerTo();
} else {
arg_types[i] = llvm_type_of(args[i].type);
}
}
// Make our function
function_name = name;
FunctionType *func_t = FunctionType::get(void_t, arg_types, false);
function = llvm::Function::Create(func_t, llvm::Function::ExternalLinkage, name, module);
// Mark the buffer args as no alias
for (size_t i = 0; i < args.size(); i++) {
if (args[i].is_buffer) {
function->setDoesNotAlias(i+1);
}
}
// Make the initial basic block
entry_block = BasicBlock::Create(*context, "entry", function);
builder->SetInsertPoint(entry_block);
// Put the arguments in the symbol table
vector<string> arg_sym_names;
{
size_t i = 0;
for (llvm::Function::arg_iterator iter = function->arg_begin();
iter != function->arg_end();
iter++) {
string arg_sym_name = args[i].name;
if (args[i].is_buffer) {
// HACK: codegen expects a load from foo to use base
// address 'foo.host', so we store the device pointer
// as foo.host in this scope.
arg_sym_name += ".host";
}
sym_push(arg_sym_name, iter);
iter->setName(arg_sym_name);
arg_sym_names.push_back(arg_sym_name);
i++;
}
}
// We won't end the entry block yet, because we'll want to add
// some allocas to it later if there are local allocations. Start
// a new block to put all the code.
BasicBlock *body_block = BasicBlock::Create(*context, "body", function);
builder->SetInsertPoint(body_block);
debug(1) << "Generating llvm bitcode for kernel...\n";
// Ok, we have a module, function, context, and a builder
// pointing at a brand new basic block. We're good to go.
stmt.accept(this);
// Now we need to end the function
builder->CreateRetVoid();
// Make the entry block point to the body block
builder->SetInsertPoint(entry_block);
builder->CreateBr(body_block);
// Add the nvvm annotation that it is a kernel function.
MDNode *mdNode = MDNode::get(*context, vec<Value *>(function,
MDString::get(*context, "kernel"),
ConstantInt::get(i32, 1)));
module->getOrInsertNamedMetadata("nvvm.annotations")->addOperand(mdNode);
// Now verify the function is ok
verifyFunction(*function);
// Finally, verify the module is ok
verifyModule(*module);
debug(2) << "Done generating llvm bitcode for PTX\n";
// Clear the symbol table
for (size_t i = 0; i < arg_sym_names.size(); i++) {
sym_pop(arg_sym_names[i]);
}
}
void CodeGen_PTX_Dev::init_module() {
CodeGen::init_module();
#if WITH_PTX
module = get_initial_module_for_ptx_device(context);
#endif
owns_module = true;
}
string CodeGen_PTX_Dev::simt_intrinsic(const string &name) {
if (ends_with(name, ".threadidx")) {
return "llvm.nvvm.read.ptx.sreg.tid.x";
} else if (ends_with(name, ".threadidy")) {
return "llvm.nvvm.read.ptx.sreg.tid.y";
} else if (ends_with(name, ".threadidz")) {
return "llvm.nvvm.read.ptx.sreg.tid.z";
} else if (ends_with(name, ".threadidw")) {
return "llvm.nvvm.read.ptx.sreg.tid.w";
} else if (ends_with(name, ".blockidx")) {
return "llvm.nvvm.read.ptx.sreg.ctaid.x";
} else if (ends_with(name, ".blockidy")) {
return "llvm.nvvm.read.ptx.sreg.ctaid.y";
} else if (ends_with(name, ".blockidz")) {
return "llvm.nvvm.read.ptx.sreg.ctaid.z";
} else if (ends_with(name, ".blockidw")) {
return "llvm.nvvm.read.ptx.sreg.ctaid.w";
}
assert(false && "simt_intrinsic called on bad variable name");
return "";
}
void CodeGen_PTX_Dev::visit(const For *loop) {
if (is_gpu_var(loop->name)) {
debug(2) << "Dropping loop " << loop->name << " (" << loop->min << ", " << loop->extent << ")\n";
assert(loop->for_type == For::Parallel && "kernel loop must be parallel");
Expr simt_idx = Call::make(Int(32), simt_intrinsic(loop->name), std::vector<Expr>(), Call::Extern);
Expr loop_var = loop->min + simt_idx;
Expr cond = simt_idx < loop->extent;
debug(3) << "for -> if (" << cond << ")\n";
BasicBlock *loop_bb = BasicBlock::Create(*context, loop->name + "_loop", function);
BasicBlock *after_bb = BasicBlock::Create(*context, loop->name + "_after_loop", function);
builder->CreateCondBr(codegen(cond), loop_bb, after_bb);
builder->SetInsertPoint(loop_bb);
sym_push(loop->name, codegen(loop_var));
codegen(loop->body);
sym_pop(loop->name);
builder->CreateBr(after_bb);
builder->SetInsertPoint(after_bb);
} else {
CodeGen::visit(loop);
}
}
void CodeGen_PTX_Dev::visit(const Pipeline *n) {
n->produce.accept(this);
// Grab the syncthreads intrinsic, or declare it if it doesn't exist yet
llvm::Function *syncthreads = module->getFunction("llvm.nvvm.barrier0");
if (!syncthreads) {
FunctionType *func_t = FunctionType::get(llvm::Type::getVoidTy(*context), vector<llvm::Type *>(), false);
syncthreads = llvm::Function::Create(func_t, llvm::Function::ExternalLinkage, "llvm.nvvm.barrier0", module);
syncthreads->setCallingConv(CallingConv::C);
debug(2) << "Declaring syncthreads intrinsic\n";
}
if (n->update.defined()) {
// If we're producing into shared or global memory we need a
// syncthreads before continuing.
builder->CreateCall(syncthreads, std::vector<Value *>());
n->update.accept(this);
}
builder->CreateCall(syncthreads, std::vector<Value *>());
n->consume.accept(this);
}
void CodeGen_PTX_Dev::visit(const Allocate *alloc) {
debug(1) << "Allocate " << alloc->name << " on device\n";
llvm::Type *llvm_type = llvm_type_of(alloc->type);
string allocation_name = alloc->name + ".host";
debug(3) << "Pushing allocation called " << allocation_name << " onto the symbol table\n";
// If this is a shared allocation, there should already be a
// pointer into shared memory in the symbol table.
Value *ptr;
Value *offset = sym_get(alloc->name + ".shared_mem", false);
if (offset) {
// Bit-cast it to a shared memory pointer (address-space 3 is shared memory)
ptr = builder->CreateIntToPtr(offset, PointerType::get(llvm_type, 3));
} else {
// Otherwise jump back to the entry and generate an
// alloca. Note that by jumping back we're rendering any
// expression we carry back meaningless, so we had better only
// be dealing with constants here.
const IntImm *size = alloc->size.as<IntImm>();
assert(size && "Only fixed-size allocations are supported on the gpu. Try storing into shared memory instead.");
BasicBlock *here = builder->GetInsertBlock();
builder->SetInsertPoint(entry_block);
ptr = builder->CreateAlloca(llvm_type_of(alloc->type), ConstantInt::get(i32, size->value));
builder->SetInsertPoint(here);
}
sym_push(allocation_name, ptr);
codegen(alloc->body);
}
void CodeGen_PTX_Dev::visit(const Free *f) {
sym_pop(f->name + ".host");
}
string CodeGen_PTX_Dev::march() const {
return "nvptx64";
}
string CodeGen_PTX_Dev::mcpu() const {
return "sm_20";
}
string CodeGen_PTX_Dev::mattrs() const {
return "";
}
bool CodeGen_PTX_Dev::use_soft_float_abi() const {
return false;
}
vector<char> CodeGen_PTX_Dev::compile_to_src() {
#if WITH_PTX
debug(2) << "In CodeGen_PTX_Dev::compile_to_src";
optimize_module();
// DISABLED - hooked in here to force PrintBeforeAll option - seems to be the only way?
/*char* argv[] = { "llc", "-print-before-all" };*/
/*int argc = sizeof(argv)/sizeof(char*);*/
/*cl::ParseCommandLineOptions(argc, argv, "Halide PTX internal compiler\n");*/
// Generic llvm optimizations on the module.
optimize_module();
// Set up TargetTriple
module->setTargetTriple(Triple::normalize(march()+"--"));
Triple TheTriple(module->getTargetTriple());
// Allocate target machine
const std::string MArch = march();
const std::string MCPU = mcpu();
const llvm::Target* TheTarget = 0;
std::string errStr;
TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), errStr);
assert(TheTarget);
TargetOptions Options;
Options.LessPreciseFPMADOption = true;
Options.PrintMachineCode = false;
Options.NoFramePointerElim = false;
//Options.NoExcessFPPrecision = false;
Options.AllowFPOpFusion = FPOpFusion::Fast;
Options.UnsafeFPMath = true;
Options.NoInfsFPMath = false;
Options.NoNaNsFPMath = false;
Options.HonorSignDependentRoundingFPMathOption = false;
Options.UseSoftFloat = false;
/* if (FloatABIForCalls != FloatABI::Default) */
/* Options.FloatABIType = FloatABIForCalls; */
Options.NoZerosInBSS = false;
#if LLVM_VERSION < 33
Options.JITExceptionHandling = false;
#endif
Options.JITEmitDebugInfo = false;
Options.JITEmitDebugInfoToDisk = false;
Options.GuaranteedTailCallOpt = false;
Options.StackAlignmentOverride = 0;
// Options.DisableJumpTables = false;
Options.TrapFuncName = "";
Options.EnableSegmentedStacks = false;
CodeGenOpt::Level OLvl = CodeGenOpt::Aggressive;
const std::string FeaturesStr = "";
std::auto_ptr<TargetMachine>
target(TheTarget->createTargetMachine(TheTriple.getTriple(),
MCPU, FeaturesStr, Options,
llvm::Reloc::Default,
llvm::CodeModel::Default,
OLvl));
assert(target.get() && "Could not allocate target machine!");
TargetMachine &Target = *target.get();
// Set up passes
PassManager PM;
TargetLibraryInfo *TLI = new TargetLibraryInfo(TheTriple);
PM.add(TLI);
if (target.get()) {
#if LLVM_VERSION < 33
PM.add(new TargetTransformInfo(target->getScalarTargetTransformInfo(),
target->getVectorTargetTransformInfo()));
#else
target->addAnalysisPasses(PM);
#endif
}
// Add the target data from the target machine, if it exists, or the module.
if (const DataLayout *TD = Target.getDataLayout())
PM.add(new DataLayout(*TD));
else
PM.add(new DataLayout(module));
// NVidia's libdevice library uses a __nvvm_reflect to choose
// how to handle denormalized numbers. (The pass replaces calls
// to __nvvm_reflect with a constant via a map lookup. The inliner
// pass then resolves these situations to fast code, often a single
// instruction per decision point.)
//
// The default is (more) IEEE like handling. FTZ mode flushes them
// to zero. (This may only apply to single-precision.)
//
// The libdevice documentation covers other options for math accuracy
// such as replacing division with multiply by the reciprocal and
// use of fused-multiply-add, but they do not seem to be controlled
    // by this __nvvm_reflect mechanism and may be flags to earlier compiler
// passes.
#define kDefaultDenorms 0
#define kFTZDenorms 1
StringMap<int> reflect_mapping;
reflect_mapping[StringRef("__CUDA_FTZ")] = kFTZDenorms;
PM.add(createNVVMReflectPass(reflect_mapping));
// Inlining functions is essential to PTX
PM.add(createAlwaysInlinerPass());
// Override default to generate verbose assembly.
Target.setAsmVerbosityDefault(true);
// Output string stream
std::string outstr;
raw_string_ostream outs(outstr);
formatted_raw_ostream ostream(outs);
// Ask the target to add backend passes as necessary.
bool fail = Target.addPassesToEmitFile(PM, ostream,
TargetMachine::CGFT_AssemblyFile,
true);
if (fail) {
debug(0) << "Failed to set up passes to emit PTX source\n";
assert(false);
}
PM.run(*module);
ostream.flush();
if (debug::debug_level >= 2) {
module->dump();
}
debug(2) << "Done with CodeGen_PTX_Dev::compile_to_src";
string str = outs.str();
vector<char> buffer(str.begin(), str.end());
buffer.push_back(0);
return buffer;
#else // WITH_PTX
return vector<char>();
#endif
}
string CodeGen_PTX_Dev::get_current_kernel_name() {
return function->getName();
}
void CodeGen_PTX_Dev::dump() {
module->dump();
}
}}
| 33.396181
| 120
| 0.631602
|
pstanczyk
|
33ac04b5394584f969b608712ab5d590012a9ba9
| 41,070
|
cpp
|
C++
|
src/thirdparty/cryptopp860/rijndael.cpp
|
cstom4994/SourceEngineRebuild
|
edfd7f8ce8af13e9d23586318350319a2e193c08
|
[
"MIT"
] | 6
|
2022-01-23T09:40:33.000Z
|
2022-03-20T20:53:25.000Z
|
src/thirdparty/cryptopp860/rijndael.cpp
|
cstom4994/SourceEngineRebuild
|
edfd7f8ce8af13e9d23586318350319a2e193c08
|
[
"MIT"
] | null | null | null |
src/thirdparty/cryptopp860/rijndael.cpp
|
cstom4994/SourceEngineRebuild
|
edfd7f8ce8af13e9d23586318350319a2e193c08
|
[
"MIT"
] | 1
|
2022-02-06T21:05:23.000Z
|
2022-02-06T21:05:23.000Z
|
// rijndael.cpp - modified by Chris Morgan <cmorgan@wpi.edu>
//                and Wei Dai from Paulo Barreto's Rijndael implementation
// The original code and all modifications are in the public domain.
// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM rijndael.cpp" to generate MASM code
/*
July 2018: Added support for ARMv7 AES instructions via Cryptogams ASM.
See the head notes in aes_armv4.S for copyright and license.
*/
/*
September 2017: Added support for Power8 AES instructions via compiler intrinsics.
*/
/*
July 2017: Added support for ARMv8 AES instructions via compiler intrinsics.
*/
/*
July 2010: Added support for AES-NI instructions via compiler intrinsics.
*/
/*
Feb 2009: The x86/x64 assembly code was rewritten by Wei Dai to do counter mode
caching, which was invented by Hongjun Wu and popularized by Daniel J. Bernstein
and Peter Schwabe in their paper "New AES software speed records". The round
function was also modified to include a trick similar to one in Brian Gladman's
x86 assembly code, doing an 8-bit register move to minimize the number of
register spills. Also switched to compressed tables and copying round keys to
the stack.
The C++ implementation uses compressed tables if
CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS is defined.
It is defined on x86 platforms by default but no others.
*/
/*
July 2006: Defense against timing attacks was added by Wei Dai.
The code now uses smaller tables in the first and last rounds,
and preloads them into L1 cache before usage (by loading at least
one element in each cache line).
We try to delay subsequent accesses to each table (used in the first
and last rounds) until all of the table has been preloaded. Hopefully
the compiler isn't smart enough to optimize that code away.
After preloading the table, we also try not to access any memory location
other than the table and the stack, in order to prevent table entries from
being unloaded from L1 cache, until that round is finished.
(Some popular CPUs have 2-way associative caches.)
*/
// This is the original introductory comment:
/**
* version 3.0 (December 2000)
*
* Optimised ANSI C code for the Rijndael cipher (now AES)
*
* author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
* author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
* author Paulo Barreto <paulo.barreto@terra.com.br>
*
* This code is hereby placed in the public domain.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pch.h"
#include "config.h"
#ifndef CRYPTOPP_IMPORTS
#ifndef CRYPTOPP_GENERATE_X64_MASM
#include "rijndael.h"
#include "misc.h"
#include "cpu.h"
// VS2017 and global optimization bug. TODO, figure out when
// we can re-enable full optimizations for VS2017. Also see
// https://github.com/weidai11/cryptopp/issues/649
#if (_MSC_VER >= 1910)
# ifndef CRYPTOPP_DEBUG
# pragma optimize("", off)
# pragma optimize("ts", on)
# endif
#endif
NAMESPACE_BEGIN(CryptoPP)
// Hack for http://github.com/weidai11/cryptopp/issues/42 and http://github.com/weidai11/cryptopp/issues/132
#if (CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE))
# define CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS 1
#endif
// Clang intrinsic casts
#define M128I_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128I_CAST(x) ((const __m128i *)(const void *)(x))
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
# if (CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
namespace rdtable {CRYPTOPP_ALIGN_DATA(16) word64 Te[256+2];}
using namespace rdtable;
# else
static word64 Te[256];
# endif
static word64 Td[256];
#else // Not CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS
# if defined(CRYPTOPP_X64_MASM_AVAILABLE)
// Unused; avoids linker error on Microsoft X64 non-AESNI platforms
namespace rdtable {CRYPTOPP_ALIGN_DATA(16) word64 Te[256+2];}
# endif
CRYPTOPP_ALIGN_DATA(16) static word32 Te[256*4];
CRYPTOPP_ALIGN_DATA(16) static word32 Td[256*4];
#endif // CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS
static volatile bool s_TeFilled = false, s_TdFilled = false;
ANONYMOUS_NAMESPACE_BEGIN
#if CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X86
// Determine whether the range between begin and end overlaps
// with the same 4k block offsets as the Te table. Logically,
// the code is trying to create the condition:
//
// Two separate memory pages:
//
// +-----+ +-----+
// |XXXXX| |YYYYY|
// |XXXXX| |YYYYY|
// | | | |
// | | | |
// +-----+ +-----+
// Te Table Locals
//
// Have a logical cache view of (X and Y may be inverted):
//
// +-----+
// |XXXXX|
// |XXXXX|
// |YYYYY|
// |YYYYY|
// +-----+
//
static inline bool AliasedWithTable(const byte *begin, const byte *end)
{
ptrdiff_t s0 = uintptr_t(begin)%4096, s1 = uintptr_t(end)%4096;
ptrdiff_t t0 = uintptr_t(Te)%4096, t1 = (uintptr_t(Te)+sizeof(Te))%4096;
if (t1 > t0)
return (s0 >= t0 && s0 < t1) || (s1 > t0 && s1 <= t1);
else
return (s0 < t1 || s1 <= t1) || (s0 >= t0 || s1 > t0);
}
struct Locals
{
word32 subkeys[4*12], workspace[8];
const byte *inBlocks, *inXorBlocks, *outXorBlocks;
byte *outBlocks;
size_t inIncrement, inXorIncrement, outXorIncrement, outIncrement;
size_t regSpill, lengthAndCounterFlag, keysBegin;
};
const size_t s_aliasPageSize = 4096;
const size_t s_aliasBlockSize = 256;
const size_t s_sizeToAllocate = s_aliasPageSize + s_aliasBlockSize + sizeof(Locals);
#endif // CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X86
ANONYMOUS_NAMESPACE_END
// ************************* Portable Code ************************************
#define QUARTER_ROUND(L, T, t, a, b, c, d) \
a ^= L(T, 3, byte(t)); t >>= 8;\
b ^= L(T, 2, byte(t)); t >>= 8;\
c ^= L(T, 1, byte(t)); t >>= 8;\
d ^= L(T, 0, t);
#define QUARTER_ROUND_LE(t, a, b, c, d) \
tempBlock[a] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
tempBlock[b] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
tempBlock[c] = ((byte *)(Te+byte(t)))[1]; t >>= 8;\
tempBlock[d] = ((byte *)(Te+t))[1];
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define QUARTER_ROUND_LD(t, a, b, c, d) \
tempBlock[a] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
tempBlock[b] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
tempBlock[c] = ((byte *)(Td+byte(t)))[GetNativeByteOrder()*7]; t >>= 8;\
tempBlock[d] = ((byte *)(Td+t))[GetNativeByteOrder()*7];
#else
#define QUARTER_ROUND_LD(t, a, b, c, d) \
tempBlock[a] = Sd[byte(t)]; t >>= 8;\
tempBlock[b] = Sd[byte(t)]; t >>= 8;\
tempBlock[c] = Sd[byte(t)]; t >>= 8;\
tempBlock[d] = Sd[t];
#endif
#define QUARTER_ROUND_E(t, a, b, c, d) QUARTER_ROUND(TL_M, Te, t, a, b, c, d)
#define QUARTER_ROUND_D(t, a, b, c, d) QUARTER_ROUND(TL_M, Td, t, a, b, c, d)
#if (CRYPTOPP_LITTLE_ENDIAN)
#define QUARTER_ROUND_FE(t, a, b, c, d) QUARTER_ROUND(TL_F, Te, t, d, c, b, a)
#define QUARTER_ROUND_FD(t, a, b, c, d) QUARTER_ROUND(TL_F, Td, t, d, c, b, a)
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define TL_F(T, i, x) (*(word32 *)(void *)((byte *)T + x*8 + (6-i)%4+1))
#define TL_M(T, i, x) (*(word32 *)(void *)((byte *)T + x*8 + (i+3)%4+1))
#else
#define TL_F(T, i, x) rotrFixed(T[x], (3-i)*8)
#define TL_M(T, i, x) T[i*256 + x]
#endif
#else
#define QUARTER_ROUND_FE(t, a, b, c, d) QUARTER_ROUND(TL_F, Te, t, a, b, c, d)
#define QUARTER_ROUND_FD(t, a, b, c, d) QUARTER_ROUND(TL_F, Td, t, a, b, c, d)
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
#define TL_F(T, i, x) (*(word32 *)(void *)((byte *)T + x*8 + (4-i)%4))
#define TL_M TL_F
#else
#define TL_F(T, i, x) rotrFixed(T[x], i*8)
#define TL_M(T, i, x) T[i*256 + x]
#endif
#endif
#define f2(x) ((x<<1)^(((x>>7)&1)*0x11b))
#define f4(x) ((x<<2)^(((x>>6)&1)*0x11b)^(((x>>6)&2)*0x11b))
#define f8(x) ((x<<3)^(((x>>5)&1)*0x11b)^(((x>>5)&2)*0x11b)^(((x>>5)&4)*0x11b))
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
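// Illustrative note: f2, f4, f8, f3, f9, fb, fd and fe compute multiplication by
// {02},{04},{08},{03},{09},{0b},{0d},{0e} in GF(2^8) modulo the AES polynomial
// x^8+x^4+x^3+x+1 (0x11b). A quick worked example with the FIPS-197 test byte 0x57:
//   f2(0x57) = 0xae                   ({57}*{02})
//   f4(0x57) = 0x47                   ({57}*{04}: 0x15c reduced by 0x11b)
//   f8(0x57) = 0x8e                   ({57}*{08}: 0x2b8 reduced by 2*0x11b)
//   f3(0x57) = 0xae ^ 0x57 = 0xf9     ({57}*{03})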
unsigned int Rijndael::Base::OptimalDataAlignment() const
{
#if (CRYPTOPP_AESNI_AVAILABLE)
if (HasAESNI())
return 16; // load __m128i
#endif
#if (CRYPTOPP_ARM_AES_AVAILABLE)
if (HasAES())
return 4; // load uint32x4_t
#endif
#if (CRYPTOGAMS_ARM_AES)
// Must use 1 here for Cryptogams AES. Also see
// https://github.com/weidai11/cryptopp/issues/683
if (HasARMv7())
return 1;
#endif
#if (CRYPTOPP_POWER8_AES_AVAILABLE)
if (HasAES())
return 16; // load uint32x4_p
#endif
return BlockTransformation::OptimalDataAlignment();
}
void Rijndael::Base::FillEncTable()
{
for (int i=0; i<256; i++)
{
byte x = Se[i];
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
word32 y = word32(x)<<8 | word32(x)<<16 | word32(f2(x))<<24;
Te[i] = word64(y | f3(x))<<32 | y;
#else
word32 y = f3(x) | word32(x)<<8 | word32(x)<<16 | word32(f2(x))<<24;
for (int j=0; j<4; j++)
{
Te[i+j*256] = y;
y = rotrConstant<8>(y);
}
#endif
}
#if (CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
Te[256] = Te[257] = 0;
#endif
s_TeFilled = true;
}
void Rijndael::Base::FillDecTable()
{
for (int i=0; i<256; i++)
{
byte x = Sd[i];
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
word32 y = word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
Td[i] = word64(y | fb(x))<<32 | y | x;
#else
word32 y = fb(x) | word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
for (int j=0; j<4; j++)
{
Td[i+j*256] = y;
y = rotrConstant<8>(y);
}
#endif
}
s_TdFilled = true;
}
#if (CRYPTOPP_AESNI_AVAILABLE)
extern void Rijndael_UncheckedSetKey_SSE4_AESNI(const byte *userKey, size_t keyLen, word32* rk);
extern void Rijndael_UncheckedSetKeyRev_AESNI(word32 *key, unsigned int rounds);
extern size_t Rijndael_Enc_AdvancedProcessBlocks_AESNI(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
extern size_t Rijndael_Dec_AdvancedProcessBlocks_AESNI(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
#endif
#if (CRYPTOPP_ARM_AES_AVAILABLE)
extern size_t Rijndael_Enc_AdvancedProcessBlocks_ARMV8(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
extern size_t Rijndael_Dec_AdvancedProcessBlocks_ARMV8(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
#endif
#if (CRYPTOGAMS_ARM_AES)
extern "C" int cryptogams_AES_set_encrypt_key(const unsigned char *userKey, const int bitLen, word32 *rkey);
extern "C" int cryptogams_AES_set_decrypt_key(const unsigned char *userKey, const int bitLen, word32 *rkey);
extern "C" void cryptogams_AES_encrypt_block(const unsigned char *in, unsigned char *out, const word32 *rkey);
extern "C" void cryptogams_AES_decrypt_block(const unsigned char *in, unsigned char *out, const word32 *rkey);
#endif
#if (CRYPTOPP_POWER8_AES_AVAILABLE)
extern void Rijndael_UncheckedSetKey_POWER8(const byte* userKey, size_t keyLen,
word32* rk, const byte* Se);
extern size_t Rijndael_Enc_AdvancedProcessBlocks128_6x1_ALTIVEC(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
extern size_t Rijndael_Dec_AdvancedProcessBlocks128_6x1_ALTIVEC(const word32 *subkeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
#endif
#if (CRYPTOGAMS_ARM_AES)
int CRYPTOGAMS_set_encrypt_key(const byte *userKey, const int bitLen, word32 *rkey)
{
return cryptogams_AES_set_encrypt_key(userKey, bitLen, rkey);
}
int CRYPTOGAMS_set_decrypt_key(const byte *userKey, const int bitLen, word32 *rkey)
{
return cryptogams_AES_set_decrypt_key(userKey, bitLen, rkey);
}
void CRYPTOGAMS_encrypt(const byte *inBlock, const byte *xorBlock, byte *outBlock, const word32 *rkey)
{
cryptogams_AES_encrypt_block(inBlock, outBlock, rkey);
if (xorBlock)
xorbuf (outBlock, xorBlock, 16);
}
void CRYPTOGAMS_decrypt(const byte *inBlock, const byte *xorBlock, byte *outBlock, const word32 *rkey)
{
cryptogams_AES_decrypt_block(inBlock, outBlock, rkey);
if (xorBlock)
xorbuf (outBlock, xorBlock, 16);
}
#endif
std::string Rijndael::Base::AlgorithmProvider() const
{
#if (CRYPTOPP_AESNI_AVAILABLE)
if (HasAESNI())
return "AESNI";
#endif
#if CRYPTOPP_SSE2_ASM_AVAILABLE && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
if (HasSSE2())
return "SSE2";
#endif
#if (CRYPTOPP_ARM_AES_AVAILABLE)
if (HasAES())
return "ARMv8";
#endif
#if (CRYPTOGAMS_ARM_AES)
if (HasARMv7())
return "ARMv7";
#endif
#if (CRYPTOPP_POWER8_AES_AVAILABLE)
if (HasAES())
return "Power8";
#endif
return "C++";
}
void Rijndael::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLen, const NameValuePairs &)
{
AssertValidKeyLength(keyLen);
#if (CRYPTOGAMS_ARM_AES)
if (HasARMv7())
{
m_rounds = keyLen/4 + 6;
m_key.New(4*(14+1)+4);
if (IsForwardTransformation())
CRYPTOGAMS_set_encrypt_key(userKey, keyLen*8, m_key.begin());
else
CRYPTOGAMS_set_decrypt_key(userKey, keyLen*8, m_key.begin());
return;
}
#endif
#if CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X86
m_aliasBlock.New(s_sizeToAllocate);
// The alias block is only used on IA-32 when unaligned data access is in effect.
// Setting the low water mark to 0 avoids zeroization when m_aliasBlock is unused.
m_aliasBlock.SetMark(0);
#endif
m_rounds = keyLen/4 + 6;
m_key.New(4*(m_rounds+1));
word32 *rk = m_key;
#if (CRYPTOPP_AESNI_AVAILABLE && CRYPTOPP_SSE41_AVAILABLE && (!defined(_MSC_VER) || _MSC_VER >= 1600 || CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32))
// MSVC 2008 SP1 generates bad code for _mm_extract_epi32() when compiling for X64
if (HasAESNI() && HasSSE41())
{
// TODO: Add non-SSE4.1 variant for low-end Atoms. The low-end
// Atoms have SSE2-SSSE3 and AES-NI, but not SSE4.1 or SSE4.2.
Rijndael_UncheckedSetKey_SSE4_AESNI(userKey, keyLen, rk);
if (!IsForwardTransformation())
Rijndael_UncheckedSetKeyRev_AESNI(m_key, m_rounds);
return;
}
#endif
#if CRYPTOPP_POWER8_AES_AVAILABLE
if (HasAES())
{
        // We still need rcon and Se to fall back to C/C++ for AES-192 and AES-256.
        // The IBM docs on AES suck. Intel's docs on AESNI put IBM to shame.
Rijndael_UncheckedSetKey_POWER8(userKey, keyLen, rk, Se);
return;
}
#endif
GetUserKey(BIG_ENDIAN_ORDER, rk, keyLen/4, userKey, keyLen);
const word32 *rc = rcon;
word32 temp;
while (true)
{
temp = rk[keyLen/4-1];
word32 x = (word32(Se[GETBYTE(temp, 2)]) << 24) ^ (word32(Se[GETBYTE(temp, 1)]) << 16) ^
(word32(Se[GETBYTE(temp, 0)]) << 8) ^ Se[GETBYTE(temp, 3)];
rk[keyLen/4] = rk[0] ^ x ^ *(rc++);
rk[keyLen/4+1] = rk[1] ^ rk[keyLen/4];
rk[keyLen/4+2] = rk[2] ^ rk[keyLen/4+1];
rk[keyLen/4+3] = rk[3] ^ rk[keyLen/4+2];
if (rk + keyLen/4 + 4 == m_key.end())
break;
if (keyLen == 24)
{
rk[10] = rk[ 4] ^ rk[ 9];
rk[11] = rk[ 5] ^ rk[10];
}
else if (keyLen == 32)
{
temp = rk[11];
rk[12] = rk[ 4] ^ (word32(Se[GETBYTE(temp, 3)]) << 24) ^ (word32(Se[GETBYTE(temp, 2)]) << 16) ^ (word32(Se[GETBYTE(temp, 1)]) << 8) ^ Se[GETBYTE(temp, 0)];
rk[13] = rk[ 5] ^ rk[12];
rk[14] = rk[ 6] ^ rk[13];
rk[15] = rk[ 7] ^ rk[14];
}
rk += keyLen/4;
}
rk = m_key;
if (IsForwardTransformation())
{
if (!s_TeFilled)
FillEncTable();
ConditionalByteReverse(BIG_ENDIAN_ORDER, rk, rk, 16);
ConditionalByteReverse(BIG_ENDIAN_ORDER, rk + m_rounds*4, rk + m_rounds*4, 16);
}
else
{
if (!s_TdFilled)
FillDecTable();
#define InverseMixColumn(x) \
TL_M(Td, 0, Se[GETBYTE(x, 3)]) ^ TL_M(Td, 1, Se[GETBYTE(x, 2)]) ^ \
TL_M(Td, 2, Se[GETBYTE(x, 1)]) ^ TL_M(Td, 3, Se[GETBYTE(x, 0)])
unsigned int i, j;
for (i = 4, j = 4*m_rounds-4; i < j; i += 4, j -= 4)
{
temp = InverseMixColumn(rk[i ]); rk[i ] = InverseMixColumn(rk[j ]); rk[j ] = temp;
temp = InverseMixColumn(rk[i + 1]); rk[i + 1] = InverseMixColumn(rk[j + 1]); rk[j + 1] = temp;
temp = InverseMixColumn(rk[i + 2]); rk[i + 2] = InverseMixColumn(rk[j + 2]); rk[j + 2] = temp;
temp = InverseMixColumn(rk[i + 3]); rk[i + 3] = InverseMixColumn(rk[j + 3]); rk[j + 3] = temp;
}
rk[i+0] = InverseMixColumn(rk[i+0]);
rk[i+1] = InverseMixColumn(rk[i+1]);
rk[i+2] = InverseMixColumn(rk[i+2]);
rk[i+3] = InverseMixColumn(rk[i+3]);
temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[0]); rk[0] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+0]); rk[4*m_rounds+0] = temp;
temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[1]); rk[1] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+1]); rk[4*m_rounds+1] = temp;
temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[2]); rk[2] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+2]); rk[4*m_rounds+2] = temp;
temp = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[3]); rk[3] = ConditionalByteReverse(BIG_ENDIAN_ORDER, rk[4*m_rounds+3]); rk[4*m_rounds+3] = temp;
}
#if CRYPTOPP_AESNI_AVAILABLE
if (HasAESNI())
ConditionalByteReverse(BIG_ENDIAN_ORDER, rk+4, rk+4, (m_rounds-1)*16);
#endif
#if CRYPTOPP_ARM_AES_AVAILABLE
if (HasAES())
ConditionalByteReverse(BIG_ENDIAN_ORDER, rk+4, rk+4, (m_rounds-1)*16);
#endif
}
void Rijndael::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
{
#if CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE) || CRYPTOPP_AESNI_AVAILABLE
# if (CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
if (HasSSE2())
# else
if (HasAESNI())
# endif
{
(void)Rijndael::Enc::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
#if (CRYPTOPP_ARM_AES_AVAILABLE)
if (HasAES())
{
(void)Rijndael::Enc::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
#if (CRYPTOGAMS_ARM_AES)
if (HasARMv7())
{
CRYPTOGAMS_encrypt(inBlock, xorBlock, outBlock, m_key.begin());
return;
}
#endif
#if (CRYPTOPP_POWER8_AES_AVAILABLE)
if (HasAES())
{
(void)Rijndael::Enc::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
typedef BlockGetAndPut<word32, NativeByteOrder> Block;
word32 s0, s1, s2, s3, t0, t1, t2, t3;
Block::Get(inBlock)(s0)(s1)(s2)(s3);
const word32 *rk = m_key;
s0 ^= rk[0];
s1 ^= rk[1];
s2 ^= rk[2];
s3 ^= rk[3];
t0 = rk[4];
t1 = rk[5];
t2 = rk[6];
t3 = rk[7];
rk += 8;
// timing attack countermeasure. see comments at top for more details.
// also see http://github.com/weidai11/cryptopp/issues/146
const int cacheLineSize = GetCacheLineSize();
unsigned int i;
volatile word32 _u = 0;
word32 u = _u;
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
for (i=0; i<2048; i+=cacheLineSize)
#else
for (i=0; i<1024; i+=cacheLineSize)
#endif
u &= *(const word32 *)(const void *)(((const byte *)Te)+i);
u &= Te[255];
s0 |= u; s1 |= u; s2 |= u; s3 |= u;
QUARTER_ROUND_FE(s3, t0, t1, t2, t3)
QUARTER_ROUND_FE(s2, t3, t0, t1, t2)
QUARTER_ROUND_FE(s1, t2, t3, t0, t1)
QUARTER_ROUND_FE(s0, t1, t2, t3, t0)
// Nr - 2 full rounds:
unsigned int r = m_rounds/2 - 1;
do
{
s0 = rk[0]; s1 = rk[1]; s2 = rk[2]; s3 = rk[3];
QUARTER_ROUND_E(t3, s0, s1, s2, s3)
QUARTER_ROUND_E(t2, s3, s0, s1, s2)
QUARTER_ROUND_E(t1, s2, s3, s0, s1)
QUARTER_ROUND_E(t0, s1, s2, s3, s0)
t0 = rk[4]; t1 = rk[5]; t2 = rk[6]; t3 = rk[7];
QUARTER_ROUND_E(s3, t0, t1, t2, t3)
QUARTER_ROUND_E(s2, t3, t0, t1, t2)
QUARTER_ROUND_E(s1, t2, t3, t0, t1)
QUARTER_ROUND_E(s0, t1, t2, t3, t0)
rk += 8;
} while (--r);
word32 tbw[4];
byte *const tempBlock = (byte *)tbw;
QUARTER_ROUND_LE(t2, 15, 2, 5, 8)
QUARTER_ROUND_LE(t1, 11, 14, 1, 4)
QUARTER_ROUND_LE(t0, 7, 10, 13, 0)
QUARTER_ROUND_LE(t3, 3, 6, 9, 12)
Block::Put(xorBlock, outBlock)(tbw[0]^rk[0])(tbw[1]^rk[1])(tbw[2]^rk[2])(tbw[3]^rk[3]);
}
void Rijndael::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
{
#if CRYPTOPP_AESNI_AVAILABLE
if (HasAESNI())
{
(void)Rijndael::Dec::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
#if (CRYPTOPP_ARM_AES_AVAILABLE)
if (HasAES())
{
(void)Rijndael::Dec::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
#if (CRYPTOGAMS_ARM_AES)
if (HasARMv7())
{
CRYPTOGAMS_decrypt(inBlock, xorBlock, outBlock, m_key.begin());
return;
}
#endif
#if (CRYPTOPP_POWER8_AES_AVAILABLE)
if (HasAES())
{
(void)Rijndael::Dec::AdvancedProcessBlocks(inBlock, xorBlock, outBlock, 16, 0);
return;
}
#endif
typedef BlockGetAndPut<word32, NativeByteOrder> Block;
word32 s0, s1, s2, s3, t0, t1, t2, t3;
Block::Get(inBlock)(s0)(s1)(s2)(s3);
const word32 *rk = m_key;
s0 ^= rk[0];
s1 ^= rk[1];
s2 ^= rk[2];
s3 ^= rk[3];
t0 = rk[4];
t1 = rk[5];
t2 = rk[6];
t3 = rk[7];
rk += 8;
// timing attack countermeasure. see comments at top for more details.
// also see http://github.com/weidai11/cryptopp/issues/146
const int cacheLineSize = GetCacheLineSize();
unsigned int i;
volatile word32 _u = 0;
word32 u = _u;
#if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
for (i=0; i<2048; i+=cacheLineSize)
#else
for (i=0; i<1024; i+=cacheLineSize)
#endif
u &= *(const word32 *)(const void *)(((const byte *)Td)+i);
u &= Td[255];
s0 |= u; s1 |= u; s2 |= u; s3 |= u;
QUARTER_ROUND_FD(s3, t2, t1, t0, t3)
QUARTER_ROUND_FD(s2, t1, t0, t3, t2)
QUARTER_ROUND_FD(s1, t0, t3, t2, t1)
QUARTER_ROUND_FD(s0, t3, t2, t1, t0)
// Nr - 2 full rounds:
unsigned int r = m_rounds/2 - 1;
do
{
s0 = rk[0]; s1 = rk[1]; s2 = rk[2]; s3 = rk[3];
QUARTER_ROUND_D(t3, s2, s1, s0, s3)
QUARTER_ROUND_D(t2, s1, s0, s3, s2)
QUARTER_ROUND_D(t1, s0, s3, s2, s1)
QUARTER_ROUND_D(t0, s3, s2, s1, s0)
t0 = rk[4]; t1 = rk[5]; t2 = rk[6]; t3 = rk[7];
QUARTER_ROUND_D(s3, t2, t1, t0, t3)
QUARTER_ROUND_D(s2, t1, t0, t3, t2)
QUARTER_ROUND_D(s1, t0, t3, t2, t1)
QUARTER_ROUND_D(s0, t3, t2, t1, t0)
rk += 8;
} while (--r);
#if !(defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS))
// timing attack countermeasure. see comments at top for more details
// If CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS is defined,
// QUARTER_ROUND_LD will use Td, which is already preloaded.
u = _u;
for (i=0; i<256; i+=cacheLineSize)
u &= *(const word32 *)(const void *)(Sd+i);
u &= *(const word32 *)(const void *)(Sd+252);
t0 |= u; t1 |= u; t2 |= u; t3 |= u;
#endif
word32 tbw[4];
byte *const tempBlock = (byte *)tbw;
QUARTER_ROUND_LD(t2, 7, 2, 13, 8)
QUARTER_ROUND_LD(t1, 3, 14, 9, 4)
QUARTER_ROUND_LD(t0, 15, 10, 5, 0)
QUARTER_ROUND_LD(t3, 11, 6, 1, 12)
Block::Put(xorBlock, outBlock)(tbw[0]^rk[0])(tbw[1]^rk[1])(tbw[2]^rk[2])(tbw[3]^rk[3]);
}
// ************************* Assembly Code ************************************
#if CRYPTOPP_MSC_VERSION
# pragma warning(disable: 4731) // frame pointer register 'ebp' modified by inline assembly code
#endif
#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM
#if CRYPTOPP_SSE2_ASM_AVAILABLE && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
CRYPTOPP_NAKED void CRYPTOPP_FASTCALL Rijndael_Enc_AdvancedProcessBlocks_SSE2(void *locals, const word32 *k)
{
CRYPTOPP_UNUSED(locals); CRYPTOPP_UNUSED(k);
#if CRYPTOPP_BOOL_X86
#define L_REG esp
#define L_INDEX(i) (L_REG+768+i)
#define L_INXORBLOCKS L_INBLOCKS+4
#define L_OUTXORBLOCKS L_INBLOCKS+8
#define L_OUTBLOCKS L_INBLOCKS+12
#define L_INCREMENTS L_INDEX(16*15)
#define L_SP L_INDEX(16*16)
#define L_LENGTH L_INDEX(16*16+4)
#define L_KEYS_BEGIN L_INDEX(16*16+8)
#define MOVD movd
#define MM(i) mm##i
#define MXOR(a,b,c) \
AS2( movzx esi, b)\
AS2( movd mm7, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
AS2( pxor MM(a), mm7)\
#define MMOV(a,b,c) \
AS2( movzx esi, b)\
AS2( movd MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
#else
#define L_REG r8
#define L_INDEX(i) (L_REG+i)
#define L_INXORBLOCKS L_INBLOCKS+8
#define L_OUTXORBLOCKS L_INBLOCKS+16
#define L_OUTBLOCKS L_INBLOCKS+24
#define L_INCREMENTS L_INDEX(16*16)
#define L_LENGTH L_INDEX(16*18+8)
#define L_KEYS_BEGIN L_INDEX(16*19)
#define MOVD mov
#define MM_0 r9d
#define MM_1 r12d
#ifdef __GNUC__
#define MM_2 r11d
#else
#define MM_2 r10d
#endif
#define MM(i) MM_##i
#define MXOR(a,b,c) \
AS2( movzx esi, b)\
AS2( xor MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
#define MMOV(a,b,c) \
AS2( movzx esi, b)\
AS2( mov MM(a), DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
#endif
#define L_SUBKEYS L_INDEX(0)
#define L_SAVED_X L_SUBKEYS
#define L_KEY12 L_INDEX(16*12)
#define L_LASTROUND L_INDEX(16*13)
#define L_INBLOCKS L_INDEX(16*14)
#define MAP0TO4(i) (ASM_MOD(i+3,4)+1)
#define XOR(a,b,c) \
AS2( movzx esi, b)\
AS2( xor a, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
#define MOV(a,b,c) \
AS2( movzx esi, b)\
AS2( mov a, DWORD PTR [AS_REG_7+8*WORD_REG(si)+MAP0TO4(c)])\
#ifdef CRYPTOPP_GENERATE_X64_MASM
ALIGN 8
Rijndael_Enc_AdvancedProcessBlocks PROC FRAME
rex_push_reg rsi
push_reg rdi
push_reg rbx
push_reg r12
.endprolog
mov L_REG, rcx
mov AS_REG_7, ?Te@rdtable@CryptoPP@@3PA_KA
mov edi, DWORD PTR [?g_cacheLineSize@CryptoPP@@3IA]
#elif defined(__GNUC__)
__asm__ __volatile__
(
INTEL_NOPREFIX
#if CRYPTOPP_BOOL_X64
AS2( mov L_REG, rcx)
#endif
AS_PUSH_IF86(bx)
AS_PUSH_IF86(bp)
AS2( mov AS_REG_7, WORD_REG(si))
#else
AS_PUSH_IF86(si)
AS_PUSH_IF86(di)
AS_PUSH_IF86(bx)
AS_PUSH_IF86(bp)
AS2( lea AS_REG_7, [Te])
AS2( mov edi, [g_cacheLineSize])
#endif
#if CRYPTOPP_BOOL_X86
AS2( mov [ecx+16*12+16*4], esp) // save esp to L_SP
AS2( lea esp, [ecx-768])
#endif
// copy subkeys to stack
AS2( mov WORD_REG(si), [L_KEYS_BEGIN])
AS2( mov WORD_REG(ax), 16)
AS2( and WORD_REG(ax), WORD_REG(si))
AS2( movdqa xmm3, XMMWORD_PTR [WORD_REG(dx)+16+WORD_REG(ax)]) // subkey 1 (non-counter) or 2 (counter)
AS2( movdqa [L_KEY12], xmm3)
AS2( lea WORD_REG(ax), [WORD_REG(dx)+WORD_REG(ax)+2*16])
AS2( sub WORD_REG(ax), WORD_REG(si))
ASL(0)
AS2( movdqa xmm0, [WORD_REG(ax)+WORD_REG(si)])
AS2( movdqa XMMWORD_PTR [L_SUBKEYS+WORD_REG(si)], xmm0)
AS2( add WORD_REG(si), 16)
AS2( cmp WORD_REG(si), 16*12)
ATT_NOPREFIX
ASJ( jl, 0, b)
INTEL_NOPREFIX
// read subkeys 0, 1 and last
AS2( movdqa xmm4, [WORD_REG(ax)+WORD_REG(si)]) // last subkey
AS2( movdqa xmm1, [WORD_REG(dx)]) // subkey 0
AS2( MOVD MM(1), [WORD_REG(dx)+4*4]) // 0,1,2,3
AS2( mov ebx, [WORD_REG(dx)+5*4]) // 4,5,6,7
AS2( mov ecx, [WORD_REG(dx)+6*4]) // 8,9,10,11
AS2( mov edx, [WORD_REG(dx)+7*4]) // 12,13,14,15
// load table into cache
AS2( xor WORD_REG(ax), WORD_REG(ax))
ASL(9)
AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
AS2( add WORD_REG(ax), WORD_REG(di))
AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
AS2( add WORD_REG(ax), WORD_REG(di))
AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
AS2( add WORD_REG(ax), WORD_REG(di))
AS2( mov esi, [AS_REG_7+WORD_REG(ax)])
AS2( add WORD_REG(ax), WORD_REG(di))
AS2( cmp WORD_REG(ax), 2048)
ATT_NOPREFIX
ASJ( jl, 9, b)
INTEL_NOPREFIX
AS1( lfence)
AS2( test DWORD PTR [L_LENGTH], 1)
ATT_NOPREFIX
ASJ( jz, 8, f)
INTEL_NOPREFIX
// counter mode one-time setup
AS2( mov WORD_REG(si), [L_INBLOCKS])
AS2( movdqu xmm2, [WORD_REG(si)]) // counter
AS2( pxor xmm2, xmm1)
AS2( psrldq xmm1, 14)
AS2( movd eax, xmm1)
AS2( mov al, BYTE PTR [WORD_REG(si)+15])
AS2( MOVD MM(2), eax)
#if CRYPTOPP_BOOL_X86
AS2( mov eax, 1)
AS2( movd mm3, eax)
#endif
// partial first round, in: xmm2(15,14,13,12;11,10,9,8;7,6,5,4;3,2,1,0), out: mm1, ebx, ecx, edx
AS2( movd eax, xmm2)
AS2( psrldq xmm2, 4)
AS2( movd edi, xmm2)
AS2( psrldq xmm2, 4)
MXOR( 1, al, 0) // 0
XOR( edx, ah, 1) // 1
AS2( shr eax, 16)
XOR( ecx, al, 2) // 2
XOR( ebx, ah, 3) // 3
AS2( mov eax, edi)
AS2( movd edi, xmm2)
AS2( psrldq xmm2, 4)
XOR( ebx, al, 0) // 4
MXOR( 1, ah, 1) // 5
AS2( shr eax, 16)
XOR( edx, al, 2) // 6
XOR( ecx, ah, 3) // 7
AS2( mov eax, edi)
AS2( movd edi, xmm2)
XOR( ecx, al, 0) // 8
XOR( ebx, ah, 1) // 9
AS2( shr eax, 16)
MXOR( 1, al, 2) // 10
XOR( edx, ah, 3) // 11
AS2( mov eax, edi)
XOR( edx, al, 0) // 12
XOR( ecx, ah, 1) // 13
AS2( shr eax, 16)
XOR( ebx, al, 2) // 14
AS2( psrldq xmm2, 3)
// partial second round, in: ebx(4,5,6,7), ecx(8,9,10,11), edx(12,13,14,15), out: eax, ebx, edi, mm0
AS2( mov eax, [L_KEY12+0*4])
AS2( mov edi, [L_KEY12+2*4])
AS2( MOVD MM(0), [L_KEY12+3*4])
MXOR( 0, cl, 3) /* 11 */
XOR( edi, bl, 3) /* 7 */
MXOR( 0, bh, 2) /* 6 */
AS2( shr ebx, 16) /* 4,5 */
XOR( eax, bl, 1) /* 5 */
MOV( ebx, bh, 0) /* 4 */
AS2( xor ebx, [L_KEY12+1*4])
XOR( eax, ch, 2) /* 10 */
AS2( shr ecx, 16) /* 8,9 */
XOR( eax, dl, 3) /* 15 */
XOR( ebx, dh, 2) /* 14 */
AS2( shr edx, 16) /* 12,13 */
XOR( edi, ch, 0) /* 8 */
XOR( ebx, cl, 1) /* 9 */
XOR( edi, dl, 1) /* 13 */
MXOR( 0, dh, 0) /* 12 */
AS2( movd ecx, xmm2)
AS2( MOVD edx, MM(1))
AS2( MOVD [L_SAVED_X+3*4], MM(0))
AS2( mov [L_SAVED_X+0*4], eax)
AS2( mov [L_SAVED_X+1*4], ebx)
AS2( mov [L_SAVED_X+2*4], edi)
ATT_NOPREFIX
ASJ( jmp, 5, f)
INTEL_NOPREFIX
ASL(3)
// non-counter mode per-block setup
AS2( MOVD MM(1), [L_KEY12+0*4]) // 0,1,2,3
AS2( mov ebx, [L_KEY12+1*4]) // 4,5,6,7
AS2( mov ecx, [L_KEY12+2*4]) // 8,9,10,11
AS2( mov edx, [L_KEY12+3*4]) // 12,13,14,15
ASL(8)
AS2( mov WORD_REG(ax), [L_INBLOCKS])
AS2( movdqu xmm2, [WORD_REG(ax)])
AS2( mov WORD_REG(si), [L_INXORBLOCKS])
AS2( movdqu xmm5, [WORD_REG(si)])
AS2( pxor xmm2, xmm1)
AS2( pxor xmm2, xmm5)
// first round, in: xmm2(15,14,13,12;11,10,9,8;7,6,5,4;3,2,1,0), out: eax, ebx, ecx, edx
AS2( movd eax, xmm2)
AS2( psrldq xmm2, 4)
AS2( movd edi, xmm2)
AS2( psrldq xmm2, 4)
MXOR( 1, al, 0) // 0
XOR( edx, ah, 1) // 1
AS2( shr eax, 16)
XOR( ecx, al, 2) // 2
XOR( ebx, ah, 3) // 3
AS2( mov eax, edi)
AS2( movd edi, xmm2)
AS2( psrldq xmm2, 4)
XOR( ebx, al, 0) // 4
MXOR( 1, ah, 1) // 5
AS2( shr eax, 16)
XOR( edx, al, 2) // 6
XOR( ecx, ah, 3) // 7
AS2( mov eax, edi)
AS2( movd edi, xmm2)
XOR( ecx, al, 0) // 8
XOR( ebx, ah, 1) // 9
AS2( shr eax, 16)
MXOR( 1, al, 2) // 10
XOR( edx, ah, 3) // 11
AS2( mov eax, edi)
XOR( edx, al, 0) // 12
XOR( ecx, ah, 1) // 13
AS2( shr eax, 16)
XOR( ebx, al, 2) // 14
MXOR( 1, ah, 3) // 15
AS2( MOVD eax, MM(1))
AS2( add L_REG, [L_KEYS_BEGIN])
AS2( add L_REG, 4*16)
ATT_NOPREFIX
ASJ( jmp, 2, f)
INTEL_NOPREFIX
ASL(1)
// counter-mode per-block setup
AS2( MOVD ecx, MM(2))
AS2( MOVD edx, MM(1))
AS2( mov eax, [L_SAVED_X+0*4])
AS2( mov ebx, [L_SAVED_X+1*4])
AS2( xor cl, ch)
AS2( and WORD_REG(cx), 255)
ASL(5)
#if CRYPTOPP_BOOL_X86
AS2( paddb MM(2), mm3)
#else
AS2( add MM(2), 1)
#endif
// remaining part of second round, in: edx(previous round),esi(keyed counter byte) eax,ebx,[L_SAVED_X+2*4],[L_SAVED_X+3*4], out: eax,ebx,ecx,edx
AS2( xor edx, DWORD PTR [AS_REG_7+WORD_REG(cx)*8+3])
XOR( ebx, dl, 3)
MOV( ecx, dh, 2)
AS2( shr edx, 16)
AS2( xor ecx, [L_SAVED_X+2*4])
XOR( eax, dh, 0)
MOV( edx, dl, 1)
AS2( xor edx, [L_SAVED_X+3*4])
AS2( add L_REG, [L_KEYS_BEGIN])
AS2( add L_REG, 3*16)
ATT_NOPREFIX
ASJ( jmp, 4, f)
INTEL_NOPREFIX
// in: eax(0,1,2,3), ebx(4,5,6,7), ecx(8,9,10,11), edx(12,13,14,15)
// out: eax, ebx, edi, mm0
#define ROUND() \
MXOR( 0, cl, 3) /* 11 */\
AS2( mov cl, al) /* 8,9,10,3 */\
XOR( edi, ah, 2) /* 2 */\
AS2( shr eax, 16) /* 0,1 */\
XOR( edi, bl, 3) /* 7 */\
MXOR( 0, bh, 2) /* 6 */\
AS2( shr ebx, 16) /* 4,5 */\
MXOR( 0, al, 1) /* 1 */\
MOV( eax, ah, 0) /* 0 */\
XOR( eax, bl, 1) /* 5 */\
MOV( ebx, bh, 0) /* 4 */\
XOR( eax, ch, 2) /* 10 */\
XOR( ebx, cl, 3) /* 3 */\
AS2( shr ecx, 16) /* 8,9 */\
XOR( eax, dl, 3) /* 15 */\
XOR( ebx, dh, 2) /* 14 */\
AS2( shr edx, 16) /* 12,13 */\
XOR( edi, ch, 0) /* 8 */\
XOR( ebx, cl, 1) /* 9 */\
XOR( edi, dl, 1) /* 13 */\
MXOR( 0, dh, 0) /* 12 */\
ASL(2) // 2-round loop
AS2( MOVD MM(0), [L_SUBKEYS-4*16+3*4])
AS2( mov edi, [L_SUBKEYS-4*16+2*4])
ROUND()
AS2( mov ecx, edi)
AS2( xor eax, [L_SUBKEYS-4*16+0*4])
AS2( xor ebx, [L_SUBKEYS-4*16+1*4])
AS2( MOVD edx, MM(0))
ASL(4)
AS2( MOVD MM(0), [L_SUBKEYS-4*16+7*4])
AS2( mov edi, [L_SUBKEYS-4*16+6*4])
ROUND()
AS2( mov ecx, edi)
AS2( xor eax, [L_SUBKEYS-4*16+4*4])
AS2( xor ebx, [L_SUBKEYS-4*16+5*4])
AS2( MOVD edx, MM(0))
AS2( add L_REG, 32)
AS2( test L_REG, 255)
ATT_NOPREFIX
ASJ( jnz, 2, b)
INTEL_NOPREFIX
AS2( sub L_REG, 16*16)
#define LAST(a, b, c) \
AS2( movzx esi, a )\
AS2( movzx edi, BYTE PTR [AS_REG_7+WORD_REG(si)*8+1] )\
AS2( movzx esi, b )\
AS2( xor edi, DWORD PTR [AS_REG_7+WORD_REG(si)*8+0] )\
AS2( mov WORD PTR [L_LASTROUND+c], di )\
// last round
LAST(ch, dl, 2)
LAST(dh, al, 6)
AS2( shr edx, 16)
LAST(ah, bl, 10)
AS2( shr eax, 16)
LAST(bh, cl, 14)
AS2( shr ebx, 16)
LAST(dh, al, 12)
AS2( shr ecx, 16)
LAST(ah, bl, 0)
LAST(bh, cl, 4)
LAST(ch, dl, 8)
AS2( mov WORD_REG(ax), [L_OUTXORBLOCKS])
AS2( mov WORD_REG(bx), [L_OUTBLOCKS])
AS2( mov WORD_REG(cx), [L_LENGTH])
AS2( sub WORD_REG(cx), 16)
AS2( movdqu xmm2, [WORD_REG(ax)])
AS2( pxor xmm2, xmm4)
#if CRYPTOPP_BOOL_X86
AS2( movdqa xmm0, [L_INCREMENTS])
AS2( paddd xmm0, [L_INBLOCKS])
AS2( movdqa [L_INBLOCKS], xmm0)
#else
AS2( movdqa xmm0, [L_INCREMENTS+16])
AS2( paddq xmm0, [L_INBLOCKS+16])
AS2( movdqa [L_INBLOCKS+16], xmm0)
#endif
AS2( pxor xmm2, [L_LASTROUND])
AS2( movdqu [WORD_REG(bx)], xmm2)
ATT_NOPREFIX
ASJ( jle, 7, f)
INTEL_NOPREFIX
AS2( mov [L_LENGTH], WORD_REG(cx))
AS2( test WORD_REG(cx), 1)
ATT_NOPREFIX
ASJ( jnz, 1, b)
INTEL_NOPREFIX
#if CRYPTOPP_BOOL_X64
AS2( movdqa xmm0, [L_INCREMENTS])
AS2( paddq xmm0, [L_INBLOCKS])
AS2( movdqa [L_INBLOCKS], xmm0)
#endif
ATT_NOPREFIX
ASJ( jmp, 3, b)
INTEL_NOPREFIX
ASL(7)
// erase keys on stack
AS2( xorps xmm0, xmm0)
AS2( lea WORD_REG(ax), [L_SUBKEYS+7*16])
AS2( movaps [WORD_REG(ax)-7*16], xmm0)
AS2( movaps [WORD_REG(ax)-6*16], xmm0)
AS2( movaps [WORD_REG(ax)-5*16], xmm0)
AS2( movaps [WORD_REG(ax)-4*16], xmm0)
AS2( movaps [WORD_REG(ax)-3*16], xmm0)
AS2( movaps [WORD_REG(ax)-2*16], xmm0)
AS2( movaps [WORD_REG(ax)-1*16], xmm0)
AS2( movaps [WORD_REG(ax)+0*16], xmm0)
AS2( movaps [WORD_REG(ax)+1*16], xmm0)
AS2( movaps [WORD_REG(ax)+2*16], xmm0)
AS2( movaps [WORD_REG(ax)+3*16], xmm0)
AS2( movaps [WORD_REG(ax)+4*16], xmm0)
AS2( movaps [WORD_REG(ax)+5*16], xmm0)
AS2( movaps [WORD_REG(ax)+6*16], xmm0)
#if CRYPTOPP_BOOL_X86
AS2( mov esp, [L_SP])
AS1( emms)
#endif
AS_POP_IF86(bp)
AS_POP_IF86(bx)
#if defined(_MSC_VER) && CRYPTOPP_BOOL_X86
AS_POP_IF86(di)
AS_POP_IF86(si)
AS1(ret)
#endif
#ifdef CRYPTOPP_GENERATE_X64_MASM
pop r12
pop rbx
pop rdi
pop rsi
ret
Rijndael_Enc_AdvancedProcessBlocks ENDP
#endif
#ifdef __GNUC__
ATT_PREFIX
:
: "c" (locals), "d" (k), "S" (Te), "D" (g_cacheLineSize)
: "memory", "cc", "%eax"
#if CRYPTOPP_BOOL_X64
, "%rbx", "%r8", "%r9", "%r10", "%r11", "%r12"
#endif
);
#endif
}
#endif
#ifndef CRYPTOPP_GENERATE_X64_MASM
#ifdef CRYPTOPP_X64_MASM_AVAILABLE
extern "C" {
void Rijndael_Enc_AdvancedProcessBlocks_SSE2(void *locals, const word32 *k);
}
#endif
#if CRYPTOPP_RIJNDAEL_ADVANCED_PROCESS_BLOCKS
size_t Rijndael::Enc::AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags) const
{
#if CRYPTOPP_AESNI_AVAILABLE
if (HasAESNI())
return Rijndael_Enc_AdvancedProcessBlocks_AESNI(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
#if CRYPTOPP_ARM_AES_AVAILABLE
if (HasAES())
return Rijndael_Enc_AdvancedProcessBlocks_ARMV8(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
#if CRYPTOPP_POWER8_AES_AVAILABLE
if (HasAES())
return Rijndael_Enc_AdvancedProcessBlocks128_6x1_ALTIVEC(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
#if (CRYPTOPP_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM)
if (HasSSE2())
{
if (length < BLOCKSIZE)
return length;
static const byte *zeros = (const byte*)(Te+256);
m_aliasBlock.SetMark(m_aliasBlock.size());
byte *space = NULLPTR, *originalSpace = const_cast<byte*>(m_aliasBlock.data());
// round up to nearest 256 byte boundary
space = originalSpace + (s_aliasBlockSize - (uintptr_t)originalSpace % s_aliasBlockSize) % s_aliasBlockSize;
while (AliasedWithTable(space, space + sizeof(Locals)))
{
space += 256;
CRYPTOPP_ASSERT(space < (originalSpace + s_aliasPageSize));
}
size_t increment = BLOCKSIZE;
if (flags & BT_ReverseDirection)
{
CRYPTOPP_ASSERT(length % BLOCKSIZE == 0);
inBlocks += length - BLOCKSIZE;
xorBlocks += length - BLOCKSIZE;
outBlocks += length - BLOCKSIZE;
increment = 0-increment;
}
Locals &locals = *(Locals *)(void *)space;
locals.inBlocks = inBlocks;
locals.inXorBlocks = (flags & BT_XorInput) && xorBlocks ? xorBlocks : zeros;
locals.outXorBlocks = (flags & BT_XorInput) || !xorBlocks ? zeros : xorBlocks;
locals.outBlocks = outBlocks;
locals.inIncrement = (flags & BT_DontIncrementInOutPointers) ? 0 : increment;
locals.inXorIncrement = (flags & BT_XorInput) && xorBlocks ? increment : 0;
locals.outXorIncrement = (flags & BT_XorInput) || !xorBlocks ? 0 : increment;
locals.outIncrement = (flags & BT_DontIncrementInOutPointers) ? 0 : increment;
locals.lengthAndCounterFlag = length - (length%16) - bool(flags & BT_InBlockIsCounter);
int keysToCopy = m_rounds - (flags & BT_InBlockIsCounter ? 3 : 2);
locals.keysBegin = (12-keysToCopy)*16;
Rijndael_Enc_AdvancedProcessBlocks_SSE2(&locals, m_key);
return length % BLOCKSIZE;
}
#endif
return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
}
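// A minimal standalone sketch of the 256-byte round-up used above when placing the
// Locals block inside the alias buffer; AlignUpExample and kExampleBoundary are
// illustrative names for this sketch, not part of the library.
static inline byte* AlignUpExample(byte* p)
{
	const uintptr_t kExampleBoundary = 256;
	// Advance p by however many bytes are needed to reach the next multiple of the
	// boundary; a pointer that is already aligned gets an adjustment of zero.
	const uintptr_t adjustment = (kExampleBoundary - (uintptr_t)p % kExampleBoundary) % kExampleBoundary;
	return p + adjustment;
}
// For example, a pointer whose low byte is 0x10 is moved forward by 0xF0 bytes,
// while a pointer whose low byte is 0x00 keeps its value.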
size_t Rijndael::Dec::AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags) const
{
#if CRYPTOPP_AESNI_AVAILABLE
if (HasAESNI())
return Rijndael_Dec_AdvancedProcessBlocks_AESNI(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
#if CRYPTOPP_ARM_AES_AVAILABLE
if (HasAES())
return Rijndael_Dec_AdvancedProcessBlocks_ARMV8(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
#if CRYPTOPP_POWER8_AES_AVAILABLE
if (HasAES())
return Rijndael_Dec_AdvancedProcessBlocks128_6x1_ALTIVEC(m_key, m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
#endif
return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_RIJNDAEL_ADVANCED_PROCESS_BLOCKS
NAMESPACE_END
#endif
#endif
avg_line_length: 30.764045 | max_line_length: 162 | alphanum_fraction: 0.648113 | author_id: cstom4994
hexsha: 33acfa33f7b3dff96051567ebb9205afd317288c | size: 1,851 | ext: cpp | lang: C++
max_stars: repo_path tests/Bluzelle/TWCoinTypeTests.cpp | repo_name Raden-Hor/wallet-core | head_hexsha 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | licenses ["MIT"] | count 1,306 | events 2019-08-08T13:25:24.000Z to 2022-03-31T23:32:28.000Z
max_issues: repo_path tests/Bluzelle/TWCoinTypeTests.cpp | repo_name Raden-Hor/wallet-core | head_hexsha 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | licenses ["MIT"] | count 1,179 | events 2019-08-08T07:06:10.000Z to 2022-03-31T12:33:47.000Z
max_forks: repo_path tests/Bluzelle/TWCoinTypeTests.cpp | repo_name Raden-Hor/wallet-core | head_hexsha 3e64de57ab70e2ce8ecd78e43cdaf290bf334821 | licenses ["MIT"] | count 811 | events 2019-08-08T13:27:44.000Z to 2022-03-31T21:22:53.000Z
content:
// Copyright © 2017-2020 Trust Wallet.
//
// This file is part of Trust. The full Trust copyright notice, including
// terms governing use, modification, and redistribution, is contained in the
// file LICENSE at the root of the source code distribution tree.
//
// This is a GENERATED FILE, changes made here MAY BE LOST.
// Generated one-time (codegen/bin/cointests)
//
#include "../interface/TWTestUtilities.h"
#include <TrustWalletCore/TWCoinTypeConfiguration.h>
#include <gtest/gtest.h>
TEST(TWCoinTypeBluzelle, TWCoinType) {
auto symbol = WRAPS(TWCoinTypeConfigurationGetSymbol(TWCoinTypeBluzelle));
auto txId = WRAPS(TWStringCreateWithUTF8Bytes("AC026E0EC6E33A77D5EA6B9CEF9810699BC2AD8C5582E007E7857457C6D3B819"));
auto txUrl = WRAPS(TWCoinTypeConfigurationGetTransactionURL(TWCoinTypeBluzelle, txId.get()));
auto accId = WRAPS(TWStringCreateWithUTF8Bytes("bluzelle1q9cryfal7u3jvnq6er5ufety20xtzw6ycx2te9"));
auto accUrl = WRAPS(TWCoinTypeConfigurationGetAccountURL(TWCoinTypeBluzelle, accId.get()));
auto id = WRAPS(TWCoinTypeConfigurationGetID(TWCoinTypeBluzelle));
auto name = WRAPS(TWCoinTypeConfigurationGetName(TWCoinTypeBluzelle));
ASSERT_EQ(TWCoinTypeConfigurationGetDecimals(TWCoinTypeBluzelle), 6);
ASSERT_EQ(TWBlockchainCosmos, TWCoinTypeBlockchain(TWCoinTypeBluzelle));
ASSERT_EQ(0x0, TWCoinTypeP2shPrefix(TWCoinTypeBluzelle));
ASSERT_EQ(0x0, TWCoinTypeStaticPrefix(TWCoinTypeBluzelle));
assertStringsEqual(symbol, "BLZ");
assertStringsEqual(txUrl, "https://bigdipper.net.bluzelle.com/transactions/AC026E0EC6E33A77D5EA6B9CEF9810699BC2AD8C5582E007E7857457C6D3B819");
assertStringsEqual(accUrl, "https://bigdipper.net.bluzelle.com/account/bluzelle1q9cryfal7u3jvnq6er5ufety20xtzw6ycx2te9");
assertStringsEqual(id, "bluzelle");
assertStringsEqual(name, "Bluzelle");
}
avg_line_length: 52.885714 | max_line_length: 146 | alphanum_fraction: 0.801189 | author_id: Raden-Hor
hexsha: 33b134b32b03ee9ceaf7999429f5bf23d6dff181 | size: 4,246 | ext: cpp | lang: C++
max_stars: repo_path src/subcommand/subcommand.cpp | repo_name lnceballosz/vg | head_hexsha 82d8ba2f38299525c0b0a6b19dcb785d2c439cfa | licenses ["MIT"] | count null | events null to null
max_issues: repo_path src/subcommand/subcommand.cpp | repo_name lnceballosz/vg | head_hexsha 82d8ba2f38299525c0b0a6b19dcb785d2c439cfa | licenses ["MIT"] | count null | events null to null
max_forks: repo_path src/subcommand/subcommand.cpp | repo_name lnceballosz/vg | head_hexsha 82d8ba2f38299525c0b0a6b19dcb785d2c439cfa | licenses ["MIT"] | count null | events null to null
content:
// SPDX-FileCopyrightText: 2014 Erik Garrison
//
// SPDX-License-Identifier: MIT
// subcommand.cpp: subcommand registry system implementation
#include "subcommand.hpp"
#include <algorithm>
#include <utility>
#include <vector>
#include <limits>
namespace vg {
namespace subcommand {
std::ostream& operator<<(std::ostream& out, const CommandCategory& category) {
switch(category) {
case PIPELINE:
out << "main mapping and calling pipeline";
break;
case TOOLKIT:
out << "useful graph tools";
break;
case WIDGET:
out << "specialized graph tools";
break;
case DEVELOPMENT:
out << "developer commands";
break;
case DEPRECATED:
// we don't show these
break;
}
return out;
}
Subcommand::Subcommand(std::string name, std::string description,
CommandCategory category, int priority,
std::function<int(int, char**)> main_function) : name(name),
category(category), priority(priority), description(description),
main_function(main_function) {
// Add this subcommand to the registry
Subcommand::get_registry()[name] = this;
}
Subcommand::Subcommand(std::string name, std::string description,
CommandCategory category,
std::function<int(int, char**)> main_function) : Subcommand(name,
description, category, std::numeric_limits<int>::max(), main_function) {
// Nothing to do!
}
Subcommand::Subcommand(std::string name, std::string description,
std::function<int(int, char**)> main_function) : Subcommand(name, description, WIDGET, main_function) {
// Nothing to do!
}
const std::string& Subcommand::get_name() const {
return name;
}
const std::string& Subcommand::get_description() const {
return description;
}
const CommandCategory& Subcommand::get_category() const {
return category;
}
const int& Subcommand::get_priority() const {
return priority;
}
const int Subcommand::operator()(int argc, char** argv) const {
return main_function(argc, argv);
}
const Subcommand* Subcommand::get(int argc, char** argv) {
if(argc < 2) {
// We don't have a subcommand name
return nullptr;
}
if(Subcommand::get_registry().count(argv[1])) {
// We have a matching subcommand pointer, so return it.
return Subcommand::get_registry()[argv[1]];
} else {
// No matching subcommand was found
return nullptr;
}
}
void Subcommand::for_each(const std::function<void(const Subcommand&)>& lambda) {
for(auto& kv : Subcommand::get_registry()) {
// For every subcommand, call the callback
lambda(*kv.second);
}
}
void Subcommand::for_each(CommandCategory category, const std::function<void(const Subcommand&)>& lambda) {
if (category == PIPELINE) {
// Pipeline commands get a special priority order
// We will store them with their priorities and sort them.
// Easier than writing a custom comparator.
std::vector<std::pair<int, const Subcommand*>> by_priority;
for_each([&](const Subcommand& command) {
// Loop over all the subcommands
if (command.category == category) {
// And add the ones we care about by priority
by_priority.push_back(std::make_pair(command.priority, &command));
}
});
std::sort(by_priority.begin(), by_priority.end());
for (auto& kv : by_priority) {
// Now in order of decreasing priority
// Run the lambda
lambda(*kv.second);
}
} else {
// All other categories just list in alphabetical order
for_each([&](const Subcommand& command) {
// Loop over all the subcommands
if (command.category == category) {
// And subset to the ones we want
lambda(command);
}
});
}
}
std::map<std::string, Subcommand*>& Subcommand::get_registry() {
// We keep a static local, which gets initialized when we get called.
static std::map<std::string, Subcommand*> registry;
// Return a reference to it
return registry;
}
}
}
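// A minimal usage sketch of the registry above (the "hello" command and
// example_main() are illustrative assumptions for this sketch, not part of vg):
// defining a static Subcommand registers it at load time, and a driver dispatches
// on argv[1] via Subcommand::get().
#include <iostream>

static vg::subcommand::Subcommand hello_example("hello", "print a greeting",
    [](int argc, char** argv) -> int {
        (void)argc;
        std::cout << "hello from " << argv[0] << std::endl;
        return 0;
    });

static int example_main(int argc, char** argv) {
    // Look up the subcommand named by argv[1]; nullptr means nothing was
    // registered under that name (or no name was given).
    const vg::subcommand::Subcommand* sub = vg::subcommand::Subcommand::get(argc, argv);
    if (sub == nullptr) {
        std::cerr << "unknown subcommand" << std::endl;
        return 1;
    }
    // Invoke the registered main function.
    return (*sub)(argc, argv);
}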
avg_line_length: 28.306667 | max_line_length: 107 | alphanum_fraction: 0.626472 | author_id: lnceballosz
hexsha: 33b2b3da8a7235edfe503706d8c1a38edbab4596 | size: 584 | ext: hpp | lang: C++
max_stars: repo_path src/semantic/semantic-analysis.hpp | repo_name noct-lang/noct-bootstrap | head_hexsha 6fd5ef91feda665dc3f1d8f5dca6403512ac44be | licenses ["0BSD"] | count 1 | events 2019-07-01T02:02:40.000Z to 2019-07-01T02:02:40.000Z
max_issues: repo_path src/semantic/semantic-analysis.hpp | repo_name noct-lang/noct-bootstrap | head_hexsha 6fd5ef91feda665dc3f1d8f5dca6403512ac44be | licenses ["0BSD"] | count null | events null to null
max_forks: repo_path src/semantic/semantic-analysis.hpp | repo_name noct-lang/noct-bootstrap | head_hexsha 6fd5ef91feda665dc3f1d8f5dca6403512ac44be | licenses ["0BSD"] | count null | events null to null
content:
#pragma once
#include "common/defs.hpp"
#include "semantic-pass.hpp"
namespace Noctis
{
struct AstTree;
struct Context;
FWDECL_CLASS_SPTR(QualName);
class AstSemanticAnalysis
{
public:
AstSemanticAnalysis();
void Run(AstTree& tree);
private:
template<typename T>
void RunPass();
void Import(QualNameSPtr modQualName);
AstTree* m_pTree;
};
class ITrSemanticAnalysis
{
public:
ITrSemanticAnalysis();
void Run(ITrModule& mod);
private:
template<typename T, typename... Args>
void RunPass(const Args&... args);
ITrModule* m_pMod;
};
}
avg_line_length: 13.581395 | max_line_length: 40 | alphanum_fraction: 0.702055 | author_id: noct-lang
hexsha: 33b4580991344b83b1dc51dccfd718b03bb94626 | size: 1,378 | ext: cpp | lang: C++
max_stars: repo_path example/1d_x_external.cpp | repo_name pabristow/svg_plot | head_hexsha 59e06b752acc252498e0ddff560b01fb951cb909 | licenses ["BSL-1.0"] | count 24 | events 2016-03-09T03:23:06.000Z to 2021-01-12T14:02:07.000Z
max_issues: repo_path example/1d_x_external.cpp | repo_name pabristow/svg_plot | head_hexsha 59e06b752acc252498e0ddff560b01fb951cb909 | licenses ["BSL-1.0"] | count 11 | events 2018-03-05T14:39:48.000Z to 2021-08-22T09:00:33.000Z
max_forks: repo_path example/1d_x_external.cpp | repo_name pabristow/svg_plot | head_hexsha 59e06b752acc252498e0ddff560b01fb951cb909 | licenses ["BSL-1.0"] | count 10 | events 2016-11-04T14:36:04.000Z to 2020-07-17T08:12:03.000Z
content:
/*! \file 1d_x_external.cpp
    \brief Simple 1D plot from two containers of type vector<double>.
*/
// Copyright Jacob Voytko 2007
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/svg_plot/svg_1d_plot.hpp>
#include <vector>
using std::vector;
using namespace boost::svg;
int main()
{
vector<double> dan_times;
vector<double> elaine_times;
dan_times.push_back(3.1);
dan_times.push_back(4.2);
elaine_times.push_back(2.1);
elaine_times.push_back(7.8);
svg_1d_plot my_plot;
// Adding generic settings.
my_plot.background_border_color(black)
.legend_on(true)
.plot_window_on(true)
.title("Race Times")
.x_range(-1, 11);
// Adding grid information.
my_plot.x_major_grid_on(true)
.x_minor_grid_on(true);
// Styling grid.
my_plot.x_major_grid_color(black)
.x_minor_grid_color(lightgray);
// External style
my_plot.x_ticks_on_window_or_axis(-1); // on bottom, not on axis.
// Write to plot.
my_plot.plot(dan_times, "Dan").stroke_color(blue);
my_plot.plot(elaine_times, "Elaine").stroke_color(orange);
// Write to file.
my_plot.write("./1d_x_external.svg");
return 0;
} // int main()
avg_line_length: 23.355932 | max_line_length: 78 | alphanum_fraction: 0.686502 | author_id: pabristow
hexsha: 33b4f2e1d32f11ea69a1d584860b533d13e8d455 | size: 731 | ext: cpp | lang: C++
max_stars: repo_path Equinox/ModuleMeshManager.cpp | repo_name jowie94/EquinoxEngine | head_hexsha 45f5efaa00e35a264bf3537ec3bdfe8b221325bf | licenses ["MIT"] | count null | events null to null
max_issues: repo_path Equinox/ModuleMeshManager.cpp | repo_name jowie94/EquinoxEngine | head_hexsha 45f5efaa00e35a264bf3537ec3bdfe8b221325bf | licenses ["MIT"] | count null | events null to null
max_forks: repo_path Equinox/ModuleMeshManager.cpp | repo_name jowie94/EquinoxEngine | head_hexsha 45f5efaa00e35a264bf3537ec3bdfe8b221325bf | licenses ["MIT"] | count 1 | events 2018-10-16T20:12:59.000Z to 2018-10-16T20:12:59.000Z
content:
#include "ModuleMeshManager.h"
#include "MeshComponent.h"
#include <cassert>
ModuleMeshManager::ModuleMeshManager()
{
}
ModuleMeshManager::~ModuleMeshManager()
{
}
bool ModuleMeshManager::CleanUp()
{
LOG("Cleaning meshes and MeshManager");
LOG("%i meshes were loaded", _meshContainer.size());
for (auto meshPair : _meshContainer)
{
RELEASE(meshPair.second);
}
_meshContainer.clear();
return true;
}
Mesh* ModuleMeshManager::CreateMesh()
{
Mesh* mesh = new Mesh;
mesh->id = _lastId++;
_meshContainer[mesh->id] = mesh;
return mesh;
}
Mesh* ModuleMeshManager::GetMeshById(int id) const
{
const auto it = _meshContainer.find(id);
if (it == _meshContainer.end())
{
return nullptr;
}
return it->second;
}
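// A small usage sketch for the manager above; ExampleMeshManagerUsage is an
// illustrative helper for this sketch, not part of the engine, and relies only on
// the interface defined in this file (<cassert> is already included above).
static void ExampleMeshManagerUsage()
{
	ModuleMeshManager meshManager;
	Mesh* mesh = meshManager.CreateMesh();             // allocates a mesh and registers it under a fresh id
	assert(meshManager.GetMeshById(mesh->id) == mesh); // lookup by id returns the same pointer
	assert(meshManager.GetMeshById(-1) == nullptr);    // unknown ids yield nullptr
	meshManager.CleanUp();                             // releases every registered mesh
}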
avg_line_length: 14.918367 | max_line_length: 53 | alphanum_fraction: 0.708618 | author_id: jowie94
hexsha: 33b95356c6639be3fe57bde7bebcf1b91ff5835d | size: 595 | ext: hpp | lang: C++
max_stars: repo_path ColombiaSupremo/Source/DirectMLHelperFunction.hpp | repo_name Yoon0423/DirectML-Inference-Module | head_hexsha 3a8bc7ab06c07091345df96f1bc6727bacec9533 | licenses ["MIT"] | count null | events null to null
max_issues: repo_path ColombiaSupremo/Source/DirectMLHelperFunction.hpp | repo_name Yoon0423/DirectML-Inference-Module | head_hexsha 3a8bc7ab06c07091345df96f1bc6727bacec9533 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path ColombiaSupremo/Source/DirectMLHelperFunction.hpp | repo_name Yoon0423/DirectML-Inference-Module | head_hexsha 3a8bc7ab06c07091345df96f1bc6727bacec9533 | licenses ["MIT"] | count null | events null to null
content:
// Copyright (c) 2020 Yoonsung Kim. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include <winrt/base.h>
#include "Tensor.hpp"
#include "d3dx12.h"
namespace colombia_supremo::dml_helper {
winrt::com_ptr<IDMLDevice> CreateDevice(winrt::com_ptr<ID3D12Device> d3D12Device);
winrt::com_ptr<IDMLCommandRecorder>
CreateCommandRecorder(winrt::com_ptr<IDMLDevice> device);
uint64_t CalculateBufferTensorSize(const TensorShape &tensorShape);
TensorShape CalculateStrides(const TensorShape &tensorShape);
} // namespace colombia_supremo::dml_helper
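// A generic, standalone sketch of what helpers like the two declared above
// conventionally compute for a packed (row-major) buffer tensor; this is an
// assumption about their intent rather than this project's implementation, and
// ExampleShape / Example* are illustrative names only.
#include <vector>

using ExampleShape = std::vector<uint32_t>;

// Packed strides: the last dimension varies fastest.
inline ExampleShape ExamplePackedStrides(const ExampleShape &shape) {
  ExampleShape strides(shape.size());
  uint32_t stride = 1;
  for (std::size_t i = shape.size(); i-- > 0;) {
    strides[i] = stride;
    stride *= shape[i];
  }
  return strides;
}

// Minimum buffer size: element count times element size, rounded up to a 4-byte
// multiple, which is what DirectML's sample buffer-tensor size helper does.
inline uint64_t ExampleBufferTensorSize(const ExampleShape &shape,
                                        uint64_t elementSizeInBytes) {
  uint64_t elementCount = 1;
  for (uint32_t d : shape) {
    elementCount *= d;
  }
  const uint64_t sizeInBytes = elementCount * elementSizeInBytes;
  return (sizeInBytes + 3) & ~uint64_t(3);
}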
avg_line_length: 24.791667 | max_line_length: 82 | alphanum_fraction: 0.793277 | author_id: Yoon0423
hexsha: 33ba4f8f81ebf8e7a2723f45f707c69031ba9933 | size: 5,102 | ext: cpp | lang: C++
max_stars: repo_path src/ngraph/pass/constant_folding_unsqueeze.cpp | repo_name GBuella/ngraph | head_hexsha ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | licenses ["Apache-2.0"] | count null | events null to null
max_issues: repo_path src/ngraph/pass/constant_folding_unsqueeze.cpp | repo_name GBuella/ngraph | head_hexsha ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | licenses ["Apache-2.0"] | count null | events null to null
max_forks: repo_path src/ngraph/pass/constant_folding_unsqueeze.cpp | repo_name GBuella/ngraph | head_hexsha ec935c0bafc5ba8c6940d9c9dcf45ddeac487513 | licenses ["Apache-2.0"] | count null | events null to null
content:
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "constant_folding.hpp"
#include "ngraph/op/fused/unsqueeze.hpp"
using namespace std;
using namespace ngraph;
template <class T>
shared_ptr<op::Constant> fold_constant_unsqueeze(shared_ptr<op::Constant> constant,
shared_ptr<op::Unsqueeze> unsqueeze)
{
const Shape& out_shape = unsqueeze->get_shape();
return make_shared<op::Constant>(
constant->get_element_type(), out_shape, constant->get_data_ptr());
}
void pass::ConstantFolding::construct_constant_unsqueeze()
{
auto constant_data_label = make_shared<pattern::op::Label>(
element::f32, Shape{2, 4}, pattern::has_class<op::Constant>());
Shape axes_shape{1};
vector<int64_t> values_axes{1};
auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes);
auto unsqueeze = make_shared<op::Unsqueeze>(constant_data_label, constant_axes);
auto constant_unsqueeze_callback = [&, constant_data_label](pattern::Matcher& m) {
NGRAPH_DEBUG << "In callback for constant_unsqueeze_callback against node = "
<< m.get_match_root()->get_name();
auto pattern_map = m.get_pattern_map();
auto constant_match = static_pointer_cast<op::Constant>(pattern_map[constant_data_label]);
auto unsqueeze_match = static_pointer_cast<op::Unsqueeze>(m.get_match_root());
NGRAPH_CHECK(revalidate_and_ensure_static(unsqueeze_match));
std::shared_ptr<Node> replacement;
auto type = constant_match->get_element_type();
switch (type)
{
case element::Type_t::undefined:
NGRAPH_CHECK(false,
"Encountered 'undefined' element type in constant_unsqueeze_callback");
break;
case element::Type_t::dynamic:
NGRAPH_CHECK(false,
"Encountered 'dynamic' element type in constant_unsqueeze_callback");
break;
case element::Type_t::u1:
NGRAPH_CHECK(false, "Encountered 'u1' element type in constant_unsqueeze_callback");
break;
case element::Type_t::boolean:
replacement = fold_constant_unsqueeze<char>(constant_match, unsqueeze_match);
break;
case element::Type_t::bf16:
replacement = fold_constant_unsqueeze<bfloat16>(constant_match, unsqueeze_match);
break;
case element::Type_t::f16:
replacement = fold_constant_unsqueeze<float16>(constant_match, unsqueeze_match);
break;
case element::Type_t::f32:
replacement = fold_constant_unsqueeze<float>(constant_match, unsqueeze_match);
break;
case element::Type_t::f64:
replacement = fold_constant_unsqueeze<double>(constant_match, unsqueeze_match);
break;
case element::Type_t::i8:
replacement = fold_constant_unsqueeze<int8_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::i16:
replacement = fold_constant_unsqueeze<int16_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::i32:
replacement = fold_constant_unsqueeze<int32_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::i64:
replacement = fold_constant_unsqueeze<int64_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::u8:
replacement = fold_constant_unsqueeze<uint8_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::u16:
replacement = fold_constant_unsqueeze<uint16_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::u32:
replacement = fold_constant_unsqueeze<uint32_t>(constant_match, unsqueeze_match);
break;
case element::Type_t::u64:
replacement = fold_constant_unsqueeze<uint64_t>(constant_match, unsqueeze_match);
break;
}
replace_node(m.get_match_root(), replacement);
return true;
};
auto unsqueeze_matcher =
make_shared<pattern::Matcher>(unsqueeze, "ConstantFolding.ConstantUnsqueeze");
this->add_matcher(
unsqueeze_matcher, constant_unsqueeze_callback, PassProperty::CHANGE_DYNAMIC_STATE);
}
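// The fold above can reuse the constant's raw buffer because an unsqueeze only
// inserts size-1 axes: the element count and the memory layout are unchanged.
// A minimal standalone sketch of the shape computation (example_unsqueeze_shape is
// an illustrative helper for this sketch, not part of nGraph; axes are assumed
// sorted and to refer to positions in the output shape):
static std::vector<size_t> example_unsqueeze_shape(const std::vector<size_t>& in_shape,
                                                   const std::vector<int64_t>& axes)
{
    std::vector<size_t> out_shape = in_shape;
    for (int64_t axis : axes)
    {
        // Insert a dimension of extent 1 at the requested output position.
        out_shape.insert(out_shape.begin() + axis, 1);
    }
    return out_shape;
}
// Example: example_unsqueeze_shape({2, 4}, {1}) yields {2, 1, 4}; all 8 elements
// keep exactly the same order in memory, so only the shape metadata changes.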
avg_line_length: 43.606838 | max_line_length: 98 | alphanum_fraction: 0.648961 | author_id: GBuella
hexsha: 33ba990277ba23eea0305975eaabc6ae248204ed | size: 12,136 | ext: hpp | lang: C++
max_stars: repo_path vendor/sll/include/sll/bsplines_non_uniform.hpp | repo_name gyselax/gyselalibxx | head_hexsha 5f9b4b1e20050f87e2a9f05d510bedf0f9a15b34 | licenses ["MIT"] | count 3 | events 2022-02-28T08:47:07.000Z to 2022-03-01T10:29:08.000Z
max_issues: repo_path vendor/sll/include/sll/bsplines_non_uniform.hpp | repo_name gyselax/gyselalibxx | head_hexsha 5f9b4b1e20050f87e2a9f05d510bedf0f9a15b34 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path vendor/sll/include/sll/bsplines_non_uniform.hpp | repo_name gyselax/gyselalibxx | head_hexsha 5f9b4b1e20050f87e2a9f05d510bedf0f9a15b34 | licenses ["MIT"] | count null | events null to null
content:
#pragma once
#include <array>
#include <cassert>
#include <memory>
#include <vector>
#include <ddc/ddc.hpp>
#include "sll/bspline.hpp"
#include "sll/view.hpp"
/// NonUniformDiscretization specialization of BSplines
template <class Tag, std::size_t D>
class NonUniformBSplines
{
static_assert(D > 0, "Parameter `D` must be positive");
private:
using mesh_type = NonUniformDiscretization<Tag>;
using domain_type = DiscreteDomain<mesh_type>;
public:
using rdim_type = BSpline<Tag>;
using tag_type = Tag;
using rcoord_type = Coordinate<NonUniformBSplines>;
using mcoord_type = DiscreteCoordinate<NonUniformBSplines>;
public:
static constexpr std::size_t rank()
{
return 1;
}
static constexpr std::size_t degree() noexcept
{
return D;
}
static constexpr bool is_periodic() noexcept
{
return Tag::PERIODIC;
}
static constexpr bool is_radial() noexcept
{
return false;
}
static constexpr bool is_uniform() noexcept
{
return false;
}
private:
std::vector<Coordinate<Tag>> m_knots;
public:
NonUniformBSplines() = default;
/// @brief Construct a `NonUniformBSplines` using a brace-list, i.e. `NonUniformBSplines bsplines({0., 1.})`
explicit NonUniformBSplines(std::initializer_list<Coordinate<Tag>> breaks)
: NonUniformBSplines(breaks.begin(), breaks.end())
{
}
/// @brief Construct a `NonUniformBSplines` using a C++20 "common range".
template <class InputRange>
inline constexpr NonUniformBSplines(InputRange&& breaks)
: NonUniformBSplines(breaks.begin(), breaks.end())
{
}
/// @brief Construct a `NonUniformBSplines` using a pair of iterators.
template <class RandomIt>
inline constexpr NonUniformBSplines(RandomIt breaks_begin, RandomIt breaks_end);
NonUniformBSplines(NonUniformBSplines const& x) = default;
NonUniformBSplines(NonUniformBSplines&& x) = default;
~NonUniformBSplines() = default;
NonUniformBSplines& operator=(NonUniformBSplines const& x) = default;
NonUniformBSplines& operator=(NonUniformBSplines&& x) = default;
void eval_basis(DSpan1D values, int& jmin, double x) const;
void eval_deriv(DSpan1D derivs, int& jmin, double x) const;
void eval_basis_and_n_derivs(DSpan2D derivs, int& jmin, double x, std::size_t n) const;
DSpan1D integrals(DSpan1D int_vals) const;
double get_knot(int break_idx) const noexcept
{
// TODO: assert break_idx >= 1 - degree
// TODO: assert break_idx <= npoints + degree
return m_knots[break_idx + degree()];
}
Coordinate<Tag> rmin() const noexcept
{
return Coordinate<Tag>(get_knot(0));
}
Coordinate<Tag> rmax() const noexcept
{
return Coordinate<Tag>(get_knot(ncells()));
}
double length() const noexcept
{
return rmax() - rmin();
}
std::size_t size() const noexcept
{
return degree() + ncells();
}
std::size_t npoints() const noexcept
{
return m_knots.size() - 2 * degree();
}
std::size_t nbasis() const noexcept
{
return ncells() + !is_periodic() * degree();
}
std::size_t ncells() const noexcept
{
return npoints() - 1;
}
private:
int find_cell(double x) const;
double& get_knot(int break_idx)
{
// TODO: assert break_idx >= 1 - degree
// TODO: assert break_idx <= npoints + degree
return m_knots[break_idx + degree()];
}
};
template <class Tag, std::size_t D>
template <class RandomIt>
inline constexpr NonUniformBSplines<Tag, D>::NonUniformBSplines(
RandomIt const break_begin,
RandomIt const break_end)
: m_knots((break_end - break_begin) + 2 * degree())
{
assert(m_knots.size() > 2 * degree() + 1);
// Fill the provided knots
int ii = 0;
for (RandomIt it = break_begin; it < break_end; ++it) {
get_knot(ii) = *it;
++ii;
}
assert(rmin() < rmax());
// Fill out the extra knots
if constexpr (is_periodic()) {
double const period = rmax() - rmin();
for (std::size_t i = 1; i < degree() + 1; ++i) {
get_knot(-i) = get_knot(ncells() - i) - period;
get_knot(ncells() + i) = get_knot(i) + period;
}
} else // open
{
for (std::size_t i = 1; i < degree() + 1; ++i) {
get_knot(-i) = rmin();
get_knot(npoints() - 1 + i) = rmax();
}
}
}
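// Worked example of the knot layout built above (illustrative numbers, not part of
// the library's documentation): with degree D = 2 and break points {0, 1, 2, 3},
// the constructor stores npoints() = 4, ncells() = 3 and, in the non-periodic
// case, nbasis() = ncells() + degree() = 5; the break points are padded with
// degree() repeated boundary knots on each side, so get_knot(-2 .. 5) reads
// 0, 0, 0, 1, 2, 3, 3, 3.  In the periodic case the padding knots are instead
// shifted copies of the interior break points, offset by the period
// rmax() - rmin() = 3, giving -2, -1, 0, 1, 2, 3, 4, 5.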
template <class Tag, std::size_t D>
void NonUniformBSplines<Tag, D>::eval_basis(DSpan1D const values, int& jmin, double const x) const
{
std::array<double, degree()> left;
std::array<double, degree()> right;
assert(x >= rmin());
assert(x <= rmax());
assert(values.extent(0) == degree() + 1);
// 1. Compute cell index 'icell'
int const icell = find_cell(x);
assert(icell >= 0);
assert(icell <= int(ncells() - 1));
assert(get_knot(icell) <= x);
assert(get_knot(icell + 1) >= x);
// 2. Compute index range of B-splines with support over cell 'icell'
jmin = icell;
// 3. Compute values of aforementioned B-splines
double temp;
values(0) = 1.0;
for (std::size_t j = 0; j < degree(); ++j) {
left[j] = x - get_knot(icell - j);
right[j] = get_knot(icell + j + 1) - x;
double saved = 0.0;
for (std::size_t r = 0; r < j + 1; ++r) {
temp = values(r) / (right[r] + left[j - r]);
values(r) = saved + right[r] * temp;
saved = left[j - r] * temp;
}
values(j + 1) = saved;
}
}
template <class Tag, std::size_t D>
void NonUniformBSplines<Tag, D>::eval_deriv(DSpan1D const derivs, int& jmin, double const x) const
{
std::array<double, degree()> left;
std::array<double, degree()> right;
assert(x >= rmin());
assert(x <= rmax());
assert(derivs.extent(0) == degree() + 1);
// 1. Compute cell index 'icell'
int const icell = find_cell(x);
assert(icell >= 0);
assert(icell <= int(ncells() - 1));
assert(get_knot(icell) <= x);
assert(get_knot(icell + 1) >= x);
// 2. Compute index range of B-splines with support over cell 'icell'
jmin = icell;
// 3. Compute values of aforementioned B-splines
/*
* Compute nonzero basis functions and knot differences
 * for splines up to degree (degree - 1), which are needed to compute the derivative
* First part of Algorithm A3.2 of NURBS book
*/
double saved, temp;
derivs(0) = 1.0;
for (std::size_t j = 0; j < degree() - 1; ++j) {
left[j] = x - get_knot(icell - j);
right[j] = get_knot(icell + j + 1) - x;
saved = 0.0;
for (std::size_t r = 0; r < j + 1; ++r) {
temp = derivs(r) / (right[r] + left[j - r]);
derivs(r) = saved + right[r] * temp;
saved = left[j - r] * temp;
}
derivs(j + 1) = saved;
}
/*
* Compute derivatives at x using values stored in bsdx and formula
* for spline derivative based on difference of splines of degree degree-1
*/
saved = degree() * derivs(0) / (get_knot(icell + 1) - get_knot(icell + 1 - degree()));
derivs(0) = -saved;
for (std::size_t j = 1; j < degree(); ++j) {
temp = saved;
saved = degree() * derivs(j)
/ (get_knot(icell + j + 1) - get_knot(icell + j + 1 - degree()));
derivs(j) = temp - saved;
}
derivs(degree()) = saved;
}
template <class Tag, std::size_t D>
void NonUniformBSplines<Tag, D>::eval_basis_and_n_derivs(
DSpan2D const derivs,
int& jmin,
double const x,
std::size_t const n) const
{
std::array<double, degree()> left;
std::array<double, degree()> right;
std::array<double, 2 * (degree() + 1)> a_ptr;
std::experimental::mdspan<double, std::experimental::extents<degree() + 1, 2>> const a(
a_ptr.data());
std::array<double, (degree() + 1) * (degree() + 1)> ndu_ptr;
std::experimental::mdspan<double, std::experimental::extents<degree() + 1, degree() + 1>> const
ndu(ndu_ptr.data());
assert(x >= rmin());
assert(x <= rmax());
assert(n >= 0);
assert(n <= degree());
assert(derivs.extent(0) == 1 + degree());
assert(derivs.extent(1) == 1 + n);
// 1. Compute cell index 'icell' and x_offset
int const icell = find_cell(x);
assert(icell >= 0);
assert(icell <= int(ncells() - 1));
assert(get_knot(icell) <= x);
assert(get_knot(icell + 1) >= x);
// 2. Compute index range of B-splines with support over cell 'icell'
jmin = icell;
// 3. Compute nonzero basis functions and knot differences for splines
// up to degree (degree-1) which are needed to compute derivative
// Algorithm A2.3 of NURBS book
//
// 21.08.2017: save inverse of knot differences to avoid unnecessary
// divisions
// [Yaman Güçlü, Edoardo Zoni]
double saved, temp;
ndu(0, 0) = 1.0;
for (std::size_t j = 0; j < degree(); ++j) {
left[j] = x - get_knot(icell - j);
right[j] = get_knot(icell + j + 1) - x;
saved = 0.0;
for (std::size_t r = 0; r < j + 1; ++r) {
// compute inverse of knot differences and save them into lower
// triangular part of ndu
ndu(r, j + 1) = 1.0 / (right[r] + left[j - r]);
// compute basis functions and save them into upper triangular part
// of ndu
temp = ndu(j, r) * ndu(r, j + 1);
ndu(j + 1, r) = saved + right[r] * temp;
saved = left[j - r] * temp;
}
ndu(j + 1, j + 1) = saved;
}
// Save 0-th derivative
for (std::size_t j = 0; j < degree() + 1; ++j) {
derivs(j, 0) = ndu(degree(), j);
}
for (int r = 0; r < int(degree() + 1); ++r) {
int s1 = 0;
int s2 = 1;
a(0, 0) = 1.0;
for (int k = 1; k < int(n + 1); ++k) {
double d = 0.0;
int const rk = r - k;
int const pk = degree() - k;
if (r >= k) {
a(0, s2) = a(0, s1) * ndu(rk, pk + 1);
d = a(0, s2) * ndu(pk, rk);
}
int const j1 = rk > -1 ? 1 : (-rk);
int const j2 = (r - 1) <= pk ? k : (degree() - r + 1);
for (int j = j1; j < j2; ++j) {
a(j, s2) = (a(j, s1) - a(j - 1, s1)) * ndu(rk + j, pk + 1);
d += a(j, s2) * ndu(pk, rk + j);
}
if (r <= pk) {
a(k, s2) = -a(k - 1, s1) * ndu(r, pk + 1);
d += a(k, s2) * ndu(pk, r);
}
derivs(r, k) = d;
std::swap(s1, s2);
}
}
int r = degree();
for (int k = 1; k < int(n + 1); ++k) {
for (std::size_t i = 0; i < derivs.extent(0); i++) {
derivs(i, k) *= r;
}
r *= degree() - k;
}
}
template <class Tag, std::size_t D>
int NonUniformBSplines<Tag, D>::find_cell(double const x) const
{
if (x > rmax())
return -1;
if (x < rmin())
return -1;
if (x == rmin())
return 0;
if (x == rmax())
return ncells() - 1;
// Binary search
int low = 0, high = ncells();
int icell = (low + high) / 2;
while (x < get_knot(icell) || x >= get_knot(icell + 1)) {
if (x < get_knot(icell)) {
high = icell;
} else {
low = icell;
}
icell = (low + high) / 2;
}
return icell;
}
template <class Tag, std::size_t D>
DSpan1D NonUniformBSplines<Tag, D>::integrals(DSpan1D const int_vals) const
{
assert(int_vals.extent(0) == nbasis() + degree() * is_periodic());
double const inv_deg = 1.0 / (degree() + 1);
for (std::size_t i = 0; i < nbasis(); ++i) {
int_vals(i) = (get_knot(i + 1) - get_knot(i - degree())) * inv_deg;
}
if constexpr (is_periodic()) {
for (std::size_t i = 0; i < degree(); ++i) {
int_vals(nbasis() + i) = 0;
}
}
return int_vals;
}
avg_line_length: 28.35514 | max_line_length: 112 | alphanum_fraction: 0.549028 | author_id: gyselax
hexsha: 33bd88a01ccb419f5963ff13a73413ff1a71fc30 | size: 4,587 | ext: cpp | lang: C++
max_stars: repo_path tests/variable_ndarray.cpp | repo_name kahoooo/cnumpy | head_hexsha 67993216fcb434a78bdb52b09c035ef483a9c0dd | licenses ["MIT"] | count 1 | events 2021-10-05T14:12:32.000Z to 2021-10-05T14:12:32.000Z
max_issues: repo_path tests/variable_ndarray.cpp | repo_name kahoooo/cnumpy | head_hexsha 67993216fcb434a78bdb52b09c035ef483a9c0dd | licenses ["MIT"] | count null | events null to null
max_forks: repo_path tests/variable_ndarray.cpp | repo_name kahoooo/cnumpy | head_hexsha 67993216fcb434a78bdb52b09c035ef483a9c0dd | licenses ["MIT"] | count null | events null to null
content:
#include <cassert>
#include "cnumpy/ndarray.hpp"
using namespace std;
using namespace cnumpy;
int main() {
// default constructor, one element expected since ndim == 0, destruct immediately
{
ndarray<int> arr;
assert(arr.size() == 1);
assert(&arr() == arr.data());
}
// constructor with explicit shape array, 120 elements expected
{
vector<size_t> shape = {2, 3, 4, 5};
ndarray<int> arr(shape);
assert(arr.size() == 120);
assert(arr.ndim() == 4);
}
// constructor with implicit shape array, 120 elements expected
{
ndarray<int> arr(2, 3, 4, 5);
assert(arr.size() == 120);
// indexing
int cnt = 0;
assert((&arr[{}] == &arr.data()[cnt]));
for (size_t i = 0; i < 2; i++) {
assert((&arr[{i}] == &arr.data()[cnt]));
for (size_t j = 0; j < 3; j++) {
assert((&arr[{i, j}] == &arr.data()[cnt]));
for (size_t k = 0; k < 4; k++) {
assert((&arr[{i, j, k}] == &arr.data()[cnt]));
for (size_t l = 0; l < 5; l++) {
assert((&arr[{i, j, k, l}] == &arr.data()[cnt]));
cnt++;
}
}
}
}
// indexing with variadic arguments
cnt = 0;
assert(&arr() == &arr.data()[cnt]);
for (size_t i = 0; i < 2; i++) {
assert(&arr(i) == &arr.data()[cnt]);
for (size_t j = 0; j < 3; j++) {
assert(&arr(i, j) == &arr.data()[cnt]);
for (size_t k = 0; k < 4; k++) {
assert(&arr(i, j, k) == &arr.data()[cnt]);
for (size_t l = 0; l < 5; l++) {
assert(&arr(i, j, k, l) == &arr.data()[cnt]);
cnt++;
}
}
}
}
}
// copy constructor
{
ndarray<int> arr1(2, 3, 4, 5);
int cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
arr1(i, j, k, l) = cnt++;
}
}
}
}
ndarray<int> arr2(arr1);
cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
assert(arr1(i, j, k, l) == cnt);
arr1[{i, j, k, l}] = 0;
assert(arr2(i, j, k, l) == cnt);
cnt++;
}
}
}
}
}
// move constructor
{
ndarray<int> arr(ndarray<int>(2, 3, 4, 5));
int cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
arr(i, j, k, l) = cnt++;
}
}
}
}
}
// copy-assignment operator
{
ndarray<int> arr1(2, 3, 4, 5);
int cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
arr1(i, j, k, l) = cnt++;
}
}
}
}
ndarray<int> arr2 = arr1;
cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
assert(arr1(i, j, k, l) == cnt);
arr1[{i, j, k, l}] = 0;
assert(arr2(i, j, k, l) == cnt);
cnt++;
}
}
}
}
}
// move-assignment operator
{
ndarray<int> arr = ndarray<int>(2, 3, 4, 5);
int cnt = 0;
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 3; j++) {
for (size_t k = 0; k < 4; k++) {
for (size_t l = 0; l < 5; l++) {
arr(i, j, k, l) = cnt++;
}
}
}
}
}
return 0;
}
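// The pointer assertions above imply a row-major (C-style) layout; a minimal
// standalone sketch of the flat-index formula they exercise for shape {2, 3, 4, 5}
// (flat_index_example is an illustrative helper for this sketch, not part of cnumpy):
static size_t flat_index_example(size_t i, size_t j, size_t k, size_t l) {
    // Strides for shape {2, 3, 4, 5}: the last dimension varies fastest.
    return ((i * 3 + j) * 4 + k) * 5 + l;
}
// Incrementing l, then k, then j, then i visits elements in exactly the order in
// which `cnt` is incremented in the loops above.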
avg_line_length: 29.593548 | max_line_length: 86 | alphanum_fraction: 0.323523 | author_id: kahoooo
hexsha: 33be33b805d034ffd94f2afaab6aa3c4ef7fa74e | size: 515 | ext: hpp | lang: C++
max_stars: repo_path include/pyplot_cpp/Plot.hpp | repo_name CalebCintary/pyplot_cpp | head_hexsha de33c3e921229e5efd72a7fc4fdb17212edc9aa9 | licenses ["MIT"] | count 1 | events 2022-03-29T10:52:21.000Z to 2022-03-29T10:52:21.000Z
max_issues: repo_path include/pyplot_cpp/Plot.hpp | repo_name CalebCintary/pyplot_cpp | head_hexsha de33c3e921229e5efd72a7fc4fdb17212edc9aa9 | licenses ["MIT"] | count 1 | events 2022-03-20T12:17:11.000Z to 2022-03-20T17:50:12.000Z
max_forks: repo_path include/pyplot_cpp/Plot.hpp | repo_name CalebCintary/pyplot_cpp | head_hexsha de33c3e921229e5efd72a7fc4fdb17212edc9aa9 | licenses ["MIT"] | count 1 | events 2022-03-29T19:40:59.000Z to 2022-03-29T19:40:59.000Z
content:
//
// Created by calebcintary on 3/15/22.
//
#ifndef PYPLOT_CPP_PLOT_HPP
#define PYPLOT_CPP_PLOT_HPP
#include "BasePlot.hpp"
namespace pyplot_cpp {
class Plot : public BasePlot {
public:
Plot();
/**
* Creates simple plots with data
* @param _x
* @param _y
*/
Plot(std::vector<double> _x, std::vector<double> _y);
Plot(Plot const & plot);
void dynamicScript_Configuration() override;
};
}
#endif //PYPLOT_CPP_PLOT_HPP
avg_line_length: 16.09375 | max_line_length: 61 | alphanum_fraction: 0.594175 | author_id: CalebCintary
hexsha: 33c13e69c343603384c6b590ad1a90f17c8d12e9 | size: 191,589 | ext: cc | lang: C++
max_stars: repo_path deps/rocksdb/rocksdb/db/db_test2.cc | repo_name buu700/rocksdb-tmp | head_hexsha 3e9e3d8c84c4eb0c3fd7d67e139ad746ab8520d7 | licenses ["MIT"] | count null | events null to null
max_issues: repo_path deps/rocksdb/rocksdb/db/db_test2.cc | repo_name buu700/rocksdb-tmp | head_hexsha 3e9e3d8c84c4eb0c3fd7d67e139ad746ab8520d7 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path deps/rocksdb/rocksdb/db/db_test2.cc | repo_name buu700/rocksdb-tmp | head_hexsha 3e9e3d8c84c4eb0c3fd7d67e139ad746ab8520d7 | licenses ["MIT"] | count null | events null to null
content:
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <atomic>
#include <cstdlib>
#include <functional>
#include "db/db_test_util.h"
#include "db/read_callback.h"
#include "options/options_helper.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/persistent_cache.h"
#include "rocksdb/wal_filter.h"
#include "util/random.h"
#include "utilities/fault_injection_env.h"
namespace ROCKSDB_NAMESPACE {
class DBTest2 : public DBTestBase {
public:
DBTest2() : DBTestBase("/db_test2", /*env_do_fsync=*/true) {}
};
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, OpenForReadOnly) {
DB* db_ptr = nullptr;
std::string dbname = test::PerThreadDBPath("db_readonly");
Options options = CurrentOptions();
options.create_if_missing = true;
// OpenForReadOnly should fail but will create <dbname> in the file system
ASSERT_NOK(DB::OpenForReadOnly(options, dbname, &db_ptr));
// Since <dbname> is created, we should be able to delete the dir
// We first get the list files under <dbname>
// There should not be any subdirectories -- this is not checked here
std::vector<std::string> files;
ASSERT_OK(env_->GetChildren(dbname, &files));
for (auto& f : files) {
ASSERT_OK(env_->DeleteFile(dbname + "/" + f));
}
// <dbname> should be empty now and we should be able to delete it
ASSERT_OK(env_->DeleteDir(dbname));
options.create_if_missing = false;
// OpenForReadOnly should fail since <dbname> was successfully deleted
ASSERT_NOK(DB::OpenForReadOnly(options, dbname, &db_ptr));
// With create_if_missing false, there should not be a dir in the file system
ASSERT_NOK(env_->FileExists(dbname));
}
TEST_F(DBTest2, OpenForReadOnlyWithColumnFamilies) {
DB* db_ptr = nullptr;
std::string dbname = test::PerThreadDBPath("db_readonly");
Options options = CurrentOptions();
options.create_if_missing = true;
ColumnFamilyOptions cf_options(options);
std::vector<ColumnFamilyDescriptor> column_families;
column_families.push_back(
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
column_families.push_back(ColumnFamilyDescriptor("goku", cf_options));
std::vector<ColumnFamilyHandle*> handles;
// OpenForReadOnly should fail but will create <dbname> in the file system
ASSERT_NOK(
DB::OpenForReadOnly(options, dbname, column_families, &handles, &db_ptr));
// Since <dbname> is created, we should be able to delete the dir
// We first get the list files under <dbname>
// There should not be any subdirectories -- this is not checked here
std::vector<std::string> files;
ASSERT_OK(env_->GetChildren(dbname, &files));
for (auto& f : files) {
ASSERT_OK(env_->DeleteFile(dbname + "/" + f));
}
// <dbname> should be empty now and we should be able to delete it
ASSERT_OK(env_->DeleteDir(dbname));
options.create_if_missing = false;
// OpenForReadOnly should fail since <dbname> was successfully deleted
ASSERT_NOK(
DB::OpenForReadOnly(options, dbname, column_families, &handles, &db_ptr));
// With create_if_missing false, there should not be a dir in the file system
ASSERT_NOK(env_->FileExists(dbname));
}
class TestReadOnlyWithCompressedCache
: public DBTestBase,
public testing::WithParamInterface<std::tuple<int, bool>> {
public:
TestReadOnlyWithCompressedCache()
: DBTestBase("/test_readonly_with_compressed_cache",
/*env_do_fsync=*/true) {
max_open_files_ = std::get<0>(GetParam());
use_mmap_ = std::get<1>(GetParam());
}
int max_open_files_;
bool use_mmap_;
};
TEST_P(TestReadOnlyWithCompressedCache, ReadOnlyWithCompressedCache) {
if (use_mmap_ && !IsMemoryMappedAccessSupported()) {
ROCKSDB_GTEST_SKIP("Test requires MMAP support");
return;
}
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Put("foo2", "barbarbarbarbarbarbarbar"));
ASSERT_OK(Flush());
DB* db_ptr = nullptr;
Options options = CurrentOptions();
options.allow_mmap_reads = use_mmap_;
options.max_open_files = max_open_files_;
options.compression = kSnappyCompression;
BlockBasedTableOptions table_options;
table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
table_options.no_block_cache = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.statistics = CreateDBStatistics();
ASSERT_OK(DB::OpenForReadOnly(options, dbname_, &db_ptr));
std::string v;
ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("bar", v);
ASSERT_EQ(0, options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
ASSERT_OK(db_ptr->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("bar", v);
if (Snappy_Supported()) {
if (use_mmap_) {
ASSERT_EQ(0,
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
} else {
ASSERT_EQ(1,
options.statistics->getTickerCount(BLOCK_CACHE_COMPRESSED_HIT));
}
}
delete db_ptr;
}
INSTANTIATE_TEST_CASE_P(TestReadOnlyWithCompressedCache,
TestReadOnlyWithCompressedCache,
::testing::Combine(::testing::Values(-1, 100),
::testing::Bool()));
class PartitionedIndexTestListener : public EventListener {
public:
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
ASSERT_GT(info.table_properties.index_partitions, 1);
ASSERT_EQ(info.table_properties.index_key_is_user_key, 0);
}
};
TEST_F(DBTest2, PartitionedIndexUserToInternalKey) {
BlockBasedTableOptions table_options;
Options options = CurrentOptions();
table_options.index_type = BlockBasedTableOptions::kTwoLevelIndexSearch;
PartitionedIndexTestListener* listener = new PartitionedIndexTestListener();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.listeners.emplace_back(listener);
std::vector<const Snapshot*> snapshots;
Reopen(options);
Random rnd(301);
for (int i = 0; i < 3000; i++) {
int j = i % 30;
std::string value = rnd.RandomString(10500);
ASSERT_OK(Put("keykey_" + std::to_string(j), value));
snapshots.push_back(db_->GetSnapshot());
}
ASSERT_OK(Flush());
dbfull()->TEST_WaitForFlushMemTable();
for (auto s : snapshots) {
db_->ReleaseSnapshot(s);
}
}
#endif // ROCKSDB_LITE
class PrefixFullBloomWithReverseComparator
: public DBTestBase,
public ::testing::WithParamInterface<bool> {
public:
PrefixFullBloomWithReverseComparator()
: DBTestBase("/prefix_bloom_reverse", /*env_do_fsync=*/true) {}
void SetUp() override { if_cache_filter_ = GetParam(); }
bool if_cache_filter_;
};
TEST_P(PrefixFullBloomWithReverseComparator,
PrefixFullBloomWithReverseComparator) {
Options options = last_options_;
options.comparator = ReverseBytewiseComparator();
options.prefix_extractor.reset(NewCappedPrefixTransform(3));
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions bbto;
if (if_cache_filter_) {
bbto.no_block_cache = false;
bbto.cache_index_and_filter_blocks = true;
bbto.block_cache = NewLRUCache(1);
}
bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
bbto.whole_key_filtering = false;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyAndReopen(options);
ASSERT_OK(dbfull()->Put(WriteOptions(), "bar123", "foo"));
ASSERT_OK(dbfull()->Put(WriteOptions(), "bar234", "foo2"));
ASSERT_OK(dbfull()->Put(WriteOptions(), "foo123", "foo3"));
dbfull()->Flush(FlushOptions());
if (bbto.block_cache) {
bbto.block_cache->EraseUnRefEntries();
}
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
iter->Seek("bar345");
ASSERT_OK(iter->status());
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("bar234", iter->key().ToString());
ASSERT_EQ("foo2", iter->value().ToString());
iter->Next();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("bar123", iter->key().ToString());
ASSERT_EQ("foo", iter->value().ToString());
iter->Seek("foo234");
ASSERT_OK(iter->status());
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("foo123", iter->key().ToString());
ASSERT_EQ("foo3", iter->value().ToString());
iter->Seek("bar");
ASSERT_OK(iter->status());
ASSERT_TRUE(!iter->Valid());
}
INSTANTIATE_TEST_CASE_P(PrefixFullBloomWithReverseComparator,
PrefixFullBloomWithReverseComparator, testing::Bool());
TEST_F(DBTest2, IteratorPropertyVersionNumber) {
Put("", "");
Iterator* iter1 = db_->NewIterator(ReadOptions());
std::string prop_value;
ASSERT_OK(
iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
uint64_t version_number1 =
static_cast<uint64_t>(std::atoi(prop_value.c_str()));
Put("", "");
Flush();
Iterator* iter2 = db_->NewIterator(ReadOptions());
ASSERT_OK(
iter2->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
uint64_t version_number2 =
static_cast<uint64_t>(std::atoi(prop_value.c_str()));
ASSERT_GT(version_number2, version_number1);
Put("", "");
Iterator* iter3 = db_->NewIterator(ReadOptions());
ASSERT_OK(
iter3->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
uint64_t version_number3 =
static_cast<uint64_t>(std::atoi(prop_value.c_str()));
ASSERT_EQ(version_number2, version_number3);
iter1->SeekToFirst();
ASSERT_OK(
iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
uint64_t version_number1_new =
static_cast<uint64_t>(std::atoi(prop_value.c_str()));
ASSERT_EQ(version_number1, version_number1_new);
delete iter1;
delete iter2;
delete iter3;
}
TEST_F(DBTest2, CacheIndexAndFilterWithDBRestart) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(20));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, options);
Put(1, "a", "begin");
Put(1, "z", "end");
ASSERT_OK(Flush(1));
TryReopenWithColumnFamilies({"default", "pikachu"}, options);
std::string value;
value = Get(1, "a");
}
TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.max_successive_merges = 3;
options.merge_operator = MergeOperators::CreatePutOperator();
options.disable_auto_compactions = true;
DestroyAndReopen(options);
Put("poi", "Finch");
db_->Merge(WriteOptions(), "poi", "Reese");
db_->Merge(WriteOptions(), "poi", "Shaw");
db_->Merge(WriteOptions(), "poi", "Root");
options.max_successive_merges = 2;
Reopen(options);
}
#ifndef ROCKSDB_LITE
class DBTestSharedWriteBufferAcrossCFs
: public DBTestBase,
public testing::WithParamInterface<std::tuple<bool, bool>> {
public:
DBTestSharedWriteBufferAcrossCFs()
: DBTestBase("/db_test_shared_write_buffer", /*env_do_fsync=*/true) {}
void SetUp() override {
use_old_interface_ = std::get<0>(GetParam());
cost_cache_ = std::get<1>(GetParam());
}
bool use_old_interface_;
bool cost_cache_;
};
TEST_P(DBTestSharedWriteBufferAcrossCFs, SharedWriteBufferAcrossCFs) {
Options options = CurrentOptions();
options.arena_block_size = 4096;
  // Avoid nondeterministic values from malloc_usable_size();
  // force arena block size to 1
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Arena::Arena:0", [&](void* arg) {
size_t* block_size = static_cast<size_t*>(arg);
*block_size = 1;
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Arena::AllocateNewBlock:0", [&](void* arg) {
std::pair<size_t*, size_t*>* pair =
static_cast<std::pair<size_t*, size_t*>*>(arg);
*std::get<0>(*pair) = *std::get<1>(*pair);
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
// The total soft write buffer size is about 105000
std::shared_ptr<Cache> cache = NewLRUCache(4 * 1024 * 1024, 2);
ASSERT_LT(cache->GetUsage(), 256 * 1024);
if (use_old_interface_) {
options.db_write_buffer_size = 120000; // this is the real limit
} else if (!cost_cache_) {
options.write_buffer_manager.reset(new WriteBufferManager(114285));
} else {
options.write_buffer_manager.reset(new WriteBufferManager(114285, cache));
}
options.write_buffer_size = 500000; // this is never hit
CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
WriteOptions wo;
wo.disableWAL = true;
std::function<void()> wait_flush = [&]() {
dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
};
// Create some data and flush "default" and "nikitich" so that they
// are newer CFs created.
ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
Flush(3);
ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
Flush(0);
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(1));
ASSERT_OK(Put(3, Key(1), DummyString(30000), wo));
if (cost_cache_) {
ASSERT_GE(cache->GetUsage(), 256 * 1024);
ASSERT_LE(cache->GetUsage(), 2 * 256 * 1024);
}
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(60000), wo));
if (cost_cache_) {
ASSERT_GE(cache->GetUsage(), 256 * 1024);
ASSERT_LE(cache->GetUsage(), 2 * 256 * 1024);
}
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
// No flush should trigger
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(1));
}
// Trigger a flush. Flushing "nikitich".
ASSERT_OK(Put(3, Key(2), DummyString(30000), wo));
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(2));
}
// Without hitting the threshold, no flush should trigger.
ASSERT_OK(Put(2, Key(1), DummyString(30000), wo));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(2));
}
// Hit the write buffer limit again. "default"
// will have been flushed.
ASSERT_OK(Put(2, Key(2), DummyString(10000), wo));
wait_flush();
ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(2));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(2));
}
  // Trigger another flush. This time "dobrynia". "pikachu" should not
  // be flushed, although it was never flushed.
ASSERT_OK(Put(1, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(80000), wo));
wait_flush();
ASSERT_OK(Put(1, Key(1), DummyString(1), wo));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(2));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
static_cast<uint64_t>(2));
}
if (cost_cache_) {
ASSERT_GE(cache->GetUsage(), 256 * 1024);
Close();
options.write_buffer_manager.reset();
last_options_.write_buffer_manager.reset();
ASSERT_LT(cache->GetUsage(), 256 * 1024);
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
INSTANTIATE_TEST_CASE_P(DBTestSharedWriteBufferAcrossCFs,
DBTestSharedWriteBufferAcrossCFs,
::testing::Values(std::make_tuple(true, false),
std::make_tuple(false, false),
std::make_tuple(false, true)));
TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) {
std::string dbname2 = test::PerThreadDBPath("db_shared_wb_db2");
Options options = CurrentOptions();
options.arena_block_size = 4096;
  // Avoid nondeterministic values from malloc_usable_size();
  // force arena block size to 1
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Arena::Arena:0", [&](void* arg) {
size_t* block_size = static_cast<size_t*>(arg);
*block_size = 1;
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Arena::AllocateNewBlock:0", [&](void* arg) {
std::pair<size_t*, size_t*>* pair =
static_cast<std::pair<size_t*, size_t*>*>(arg);
*std::get<0>(*pair) = *std::get<1>(*pair);
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
options.write_buffer_size = 500000; // this is never hit
// Use a write buffer total size so that the soft limit is about
// 105000.
options.write_buffer_manager.reset(new WriteBufferManager(120000));
CreateAndReopenWithCF({"cf1", "cf2"}, options);
ASSERT_OK(DestroyDB(dbname2, options));
DB* db2 = nullptr;
ASSERT_OK(DB::Open(options, dbname2, &db2));
WriteOptions wo;
wo.disableWAL = true;
std::function<void()> wait_flush = [&]() {
dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
};
// Trigger a flush on cf2
ASSERT_OK(Put(2, Key(1), DummyString(70000), wo));
wait_flush();
ASSERT_OK(Put(0, Key(1), DummyString(20000), wo));
wait_flush();
// Insert to DB2
ASSERT_OK(db2->Put(wo, Key(2), DummyString(20000)));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
wait_flush();
static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default") +
GetNumberOfSstFilesForColumnFamily(db_, "cf1") +
GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
static_cast<uint64_t>(0));
}
  // Trigger a flush of another CF in DB1
ASSERT_OK(db2->Put(wo, Key(2), DummyString(70000)));
wait_flush();
ASSERT_OK(Put(2, Key(1), DummyString(1), wo));
wait_flush();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
static_cast<uint64_t>(0));
}
// Triggering flush in DB2.
ASSERT_OK(db2->Put(wo, Key(3), DummyString(40000)));
wait_flush();
ASSERT_OK(db2->Put(wo, Key(1), DummyString(1)));
wait_flush();
static_cast<DBImpl*>(db2)->TEST_WaitForFlushMemTable();
{
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"),
static_cast<uint64_t>(0));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"),
static_cast<uint64_t>(1));
ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"),
static_cast<uint64_t>(1));
}
delete db2;
ASSERT_OK(DestroyDB(dbname2, options));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, TestWriteBufferNoLimitWithCache) {
Options options = CurrentOptions();
options.arena_block_size = 4096;
std::shared_ptr<Cache> cache =
NewLRUCache(LRUCacheOptions(10000000, 1, false, 0.0));
options.write_buffer_size = 50000; // this is never hit
// Use a write buffer total size so that the soft limit is about
// 105000.
options.write_buffer_manager.reset(new WriteBufferManager(0, cache));
Reopen(options);
ASSERT_OK(Put("foo", "bar"));
// One dummy entry is 256KB.
ASSERT_GT(cache->GetUsage(), 128000);
}
namespace {
void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
const std::vector<Slice>& keys_must_not_exist) {
// Ensure that expected keys exist
std::vector<std::string> values;
if (keys_must_exist.size() > 0) {
std::vector<Status> status_list =
db->MultiGet(ReadOptions(), keys_must_exist, &values);
for (size_t i = 0; i < keys_must_exist.size(); i++) {
ASSERT_OK(status_list[i]);
}
}
// Ensure that given keys don't exist
if (keys_must_not_exist.size() > 0) {
std::vector<Status> status_list =
db->MultiGet(ReadOptions(), keys_must_not_exist, &values);
for (size_t i = 0; i < keys_must_not_exist.size(); i++) {
ASSERT_TRUE(status_list[i].IsNotFound());
}
}
}
} // namespace
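// Note (added explanation): DB::MultiGet returns one Status per key, in the
// same order as the input keys, so the helper above can check "found" (OK)
// versus "absent" (IsNotFound) per key without stopping at the first miss.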
TEST_F(DBTest2, WalFilterTest) {
class TestWalFilter : public WalFilter {
private:
// Processing option that is requested to be applied at the given index
WalFilter::WalProcessingOption wal_processing_option_;
// Index at which to apply wal_processing_option_
    // At other indexes, the default WalProcessingOption::kContinueProcessing
    // is returned.
size_t apply_option_at_record_index_;
// Current record index, incremented with each record encountered.
size_t current_record_index_;
public:
TestWalFilter(WalFilter::WalProcessingOption wal_processing_option,
size_t apply_option_for_record_index)
: wal_processing_option_(wal_processing_option),
apply_option_at_record_index_(apply_option_for_record_index),
current_record_index_(0) {}
WalProcessingOption LogRecord(const WriteBatch& /*batch*/,
WriteBatch* /*new_batch*/,
bool* /*batch_changed*/) const override {
WalFilter::WalProcessingOption option_to_return;
if (current_record_index_ == apply_option_at_record_index_) {
option_to_return = wal_processing_option_;
}
else {
option_to_return = WalProcessingOption::kContinueProcessing;
}
      // The filter is passed as a const object so that RocksDB does not modify
      // it; however, we modify it for our own bookkeeping here and hence cast
      // the constness away.
(const_cast<TestWalFilter*>(this)->current_record_index_)++;
return option_to_return;
}
const char* Name() const override { return "TestWalFilter"; }
};
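  // Note (added sketch): a WalFilter is hooked up by pointing
  // DBOptions::wal_filter at a filter instance before (re)opening the DB,
  // e.g. roughly:
  //
  //   TestWalFilter filter(WalFilter::WalProcessingOption::kStopReplay,
  //                        /*apply_option_for_record_index=*/1);
  //   Options opts = OptionsForLogIterTest();
  //   opts.wal_filter = &filter;  // consulted once per WAL record on recovery
  //   ReopenWithColumnFamilies({"default", "pikachu"}, opts);
  //
  // The filter object must outlive the recovery that uses it, which is why the
  // tests below keep it on the stack for the whole test body.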
// Create 3 batches with two keys each
std::vector<std::vector<std::string>> batch_keys(3);
batch_keys[0].push_back("key1");
batch_keys[0].push_back("key2");
batch_keys[1].push_back("key3");
batch_keys[1].push_back("key4");
batch_keys[2].push_back("key5");
batch_keys[2].push_back("key6");
// Test with all WAL processing options
for (int option = 0;
option < static_cast<int>(
WalFilter::WalProcessingOption::kWalProcessingOptionMax);
option++) {
Options options = OptionsForLogIterTest();
DestroyAndReopen(options);
CreateAndReopenWithCF({ "pikachu" }, options);
// Write given keys in given batches
for (size_t i = 0; i < batch_keys.size(); i++) {
WriteBatch batch;
for (size_t j = 0; j < batch_keys[i].size(); j++) {
batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
}
dbfull()->Write(WriteOptions(), &batch);
}
WalFilter::WalProcessingOption wal_processing_option =
static_cast<WalFilter::WalProcessingOption>(option);
    // Create a test filter that applies wal_processing_option at the record
    // with index apply_option_for_record_index (1, i.e. the second record)
size_t apply_option_for_record_index = 1;
TestWalFilter test_wal_filter(wal_processing_option,
apply_option_for_record_index);
// Reopen database with option to use WAL filter
options = OptionsForLogIterTest();
options.wal_filter = &test_wal_filter;
Status status =
TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
if (wal_processing_option ==
WalFilter::WalProcessingOption::kCorruptedRecord) {
assert(!status.ok());
      // In case of corruption we can turn off paranoid_checks to reopen the
      // database
options.paranoid_checks = false;
ReopenWithColumnFamilies({ "default", "pikachu" }, options);
}
else {
assert(status.ok());
}
// Compute which keys we expect to be found
// and which we expect not to be found after recovery.
std::vector<Slice> keys_must_exist;
std::vector<Slice> keys_must_not_exist;
switch (wal_processing_option) {
case WalFilter::WalProcessingOption::kCorruptedRecord:
case WalFilter::WalProcessingOption::kContinueProcessing: {
fprintf(stderr, "Testing with complete WAL processing\n");
// we expect all records to be processed
for (size_t i = 0; i < batch_keys.size(); i++) {
for (size_t j = 0; j < batch_keys[i].size(); j++) {
keys_must_exist.push_back(Slice(batch_keys[i][j]));
}
}
break;
}
case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: {
fprintf(stderr,
"Testing with ignoring record %" ROCKSDB_PRIszt " only\n",
apply_option_for_record_index);
      // We expect the record with apply_option_for_record_index not to be
      // found.
for (size_t i = 0; i < batch_keys.size(); i++) {
for (size_t j = 0; j < batch_keys[i].size(); j++) {
if (i == apply_option_for_record_index) {
keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
}
else {
keys_must_exist.push_back(Slice(batch_keys[i][j]));
}
}
}
break;
}
case WalFilter::WalProcessingOption::kStopReplay: {
fprintf(stderr,
"Testing with stopping replay from record %" ROCKSDB_PRIszt
"\n",
apply_option_for_record_index);
      // We expect records from apply_option_for_record_index onward not to be
      // found.
for (size_t i = 0; i < batch_keys.size(); i++) {
for (size_t j = 0; j < batch_keys[i].size(); j++) {
if (i >= apply_option_for_record_index) {
keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
}
else {
keys_must_exist.push_back(Slice(batch_keys[i][j]));
}
}
}
break;
}
default:
assert(false); // unhandled case
}
bool checked_after_reopen = false;
while (true) {
      // Ensure that expected keys exist
      // and unexpected keys don't exist after recovery
ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
if (checked_after_reopen) {
break;
}
      // reopen database again to make sure previous log(s) are not used
      // (even if they were skipped)
      // reopen database with option to use WAL filter
options = OptionsForLogIterTest();
ReopenWithColumnFamilies({ "default", "pikachu" }, options);
checked_after_reopen = true;
}
}
}
TEST_F(DBTest2, WalFilterTestWithChangeBatch) {
class ChangeBatchHandler : public WriteBatch::Handler {
private:
// Batch to insert keys in
WriteBatch* new_write_batch_;
// Number of keys to add in the new batch
size_t num_keys_to_add_in_new_batch_;
// Number of keys added to new batch
size_t num_keys_added_;
public:
ChangeBatchHandler(WriteBatch* new_write_batch,
size_t num_keys_to_add_in_new_batch)
: new_write_batch_(new_write_batch),
num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
num_keys_added_(0) {}
void Put(const Slice& key, const Slice& value) override {
if (num_keys_added_ < num_keys_to_add_in_new_batch_) {
new_write_batch_->Put(key, value);
++num_keys_added_;
}
}
};
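  // Note (added explanation): WriteBatch::Iterate(handler) walks every
  // operation in a batch and dispatches to the handler's virtual methods
  // (Put/Delete/Merge or their *CF variants). ChangeBatchHandler above only
  // overrides Put, so only the first num_keys_to_add_in_new_batch_ Put
  // operations of each original batch are copied into the rebuilt batch.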
class TestWalFilterWithChangeBatch : public WalFilter {
private:
// Index at which to start changing records
size_t change_records_from_index_;
// Number of keys to add in the new batch
size_t num_keys_to_add_in_new_batch_;
// Current record index, incremented with each record encountered.
size_t current_record_index_;
public:
TestWalFilterWithChangeBatch(size_t change_records_from_index,
size_t num_keys_to_add_in_new_batch)
: change_records_from_index_(change_records_from_index),
num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
current_record_index_(0) {}
WalProcessingOption LogRecord(const WriteBatch& batch,
WriteBatch* new_batch,
bool* batch_changed) const override {
if (current_record_index_ >= change_records_from_index_) {
ChangeBatchHandler handler(new_batch, num_keys_to_add_in_new_batch_);
batch.Iterate(&handler);
*batch_changed = true;
}
      // The filter is passed as a const object so that RocksDB does not modify
      // it; however, we modify it for our own bookkeeping here and hence cast
      // the constness away.
(const_cast<TestWalFilterWithChangeBatch*>(this)
->current_record_index_)++;
return WalProcessingOption::kContinueProcessing;
}
const char* Name() const override { return "TestWalFilterWithChangeBatch"; }
};
std::vector<std::vector<std::string>> batch_keys(3);
batch_keys[0].push_back("key1");
batch_keys[0].push_back("key2");
batch_keys[1].push_back("key3");
batch_keys[1].push_back("key4");
batch_keys[2].push_back("key5");
batch_keys[2].push_back("key6");
Options options = OptionsForLogIterTest();
DestroyAndReopen(options);
CreateAndReopenWithCF({ "pikachu" }, options);
// Write given keys in given batches
for (size_t i = 0; i < batch_keys.size(); i++) {
WriteBatch batch;
for (size_t j = 0; j < batch_keys[i].size(); j++) {
batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
}
dbfull()->Write(WriteOptions(), &batch);
}
  // Create a test filter that starts changing batch contents at record index
  // change_records_from_index
size_t change_records_from_index = 1;
size_t num_keys_to_add_in_new_batch = 1;
TestWalFilterWithChangeBatch test_wal_filter_with_change_batch(
change_records_from_index, num_keys_to_add_in_new_batch);
// Reopen database with option to use WAL filter
options = OptionsForLogIterTest();
options.wal_filter = &test_wal_filter_with_change_batch;
ReopenWithColumnFamilies({ "default", "pikachu" }, options);
  // Ensure that all keys exist before change_records_from_index_,
  // and after that index only a single key exists per batch,
  // as our filter adds only a single key for each batch
std::vector<Slice> keys_must_exist;
std::vector<Slice> keys_must_not_exist;
for (size_t i = 0; i < batch_keys.size(); i++) {
for (size_t j = 0; j < batch_keys[i].size(); j++) {
if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) {
keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
}
else {
keys_must_exist.push_back(Slice(batch_keys[i][j]));
}
}
}
bool checked_after_reopen = false;
while (true) {
    // Ensure that expected keys exist
    // and unexpected keys don't exist after recovery
ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
if (checked_after_reopen) {
break;
}
    // reopen database again to make sure previous log(s) are not used
    // (even if they were skipped)
    // reopen database with option to use WAL filter
options = OptionsForLogIterTest();
ReopenWithColumnFamilies({ "default", "pikachu" }, options);
checked_after_reopen = true;
}
}
TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) {
class TestWalFilterWithChangeBatchAddExtraKeys : public WalFilter {
public:
WalProcessingOption LogRecord(const WriteBatch& batch, WriteBatch* new_batch,
bool* batch_changed) const override {
*new_batch = batch;
new_batch->Put("key_extra", "value_extra");
*batch_changed = true;
return WalProcessingOption::kContinueProcessing;
}
const char* Name() const override {
return "WalFilterTestWithChangeBatchExtraKeys";
}
};
std::vector<std::vector<std::string>> batch_keys(3);
batch_keys[0].push_back("key1");
batch_keys[0].push_back("key2");
batch_keys[1].push_back("key3");
batch_keys[1].push_back("key4");
batch_keys[2].push_back("key5");
batch_keys[2].push_back("key6");
Options options = OptionsForLogIterTest();
DestroyAndReopen(options);
CreateAndReopenWithCF({ "pikachu" }, options);
// Write given keys in given batches
for (size_t i = 0; i < batch_keys.size(); i++) {
WriteBatch batch;
for (size_t j = 0; j < batch_keys[i].size(); j++) {
batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
}
dbfull()->Write(WriteOptions(), &batch);
}
// Create a test filter that would add extra keys
TestWalFilterWithChangeBatchAddExtraKeys test_wal_filter_extra_keys;
// Reopen database with option to use WAL filter
options = OptionsForLogIterTest();
options.wal_filter = &test_wal_filter_extra_keys;
Status status = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
ASSERT_TRUE(status.IsNotSupported());
// Reopen without filter, now reopen should succeed - previous
// attempt to open must not have altered the db.
options = OptionsForLogIterTest();
ReopenWithColumnFamilies({ "default", "pikachu" }, options);
std::vector<Slice> keys_must_exist;
std::vector<Slice> keys_must_not_exist; // empty vector
for (size_t i = 0; i < batch_keys.size(); i++) {
for (size_t j = 0; j < batch_keys[i].size(); j++) {
keys_must_exist.push_back(Slice(batch_keys[i][j]));
}
}
ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
}
TEST_F(DBTest2, WalFilterTestWithColumnFamilies) {
class TestWalFilterWithColumnFamilies : public WalFilter {
private:
// column_family_id -> log_number map (provided to WALFilter)
std::map<uint32_t, uint64_t> cf_log_number_map_;
// column_family_name -> column_family_id map (provided to WALFilter)
std::map<std::string, uint32_t> cf_name_id_map_;
// column_family_name -> keys_found_in_wal map
// We store keys that are applicable to the column_family
// during recovery (i.e. aren't already flushed to SST file(s))
// for verification against the keys we expect.
std::map<uint32_t, std::vector<std::string>> cf_wal_keys_;
public:
void ColumnFamilyLogNumberMap(
const std::map<uint32_t, uint64_t>& cf_lognumber_map,
const std::map<std::string, uint32_t>& cf_name_id_map) override {
cf_log_number_map_ = cf_lognumber_map;
cf_name_id_map_ = cf_name_id_map;
}
WalProcessingOption LogRecordFound(unsigned long long log_number,
const std::string& /*log_file_name*/,
const WriteBatch& batch,
WriteBatch* /*new_batch*/,
bool* /*batch_changed*/) override {
class LogRecordBatchHandler : public WriteBatch::Handler {
private:
const std::map<uint32_t, uint64_t> & cf_log_number_map_;
std::map<uint32_t, std::vector<std::string>> & cf_wal_keys_;
unsigned long long log_number_;
public:
LogRecordBatchHandler(unsigned long long current_log_number,
const std::map<uint32_t, uint64_t> & cf_log_number_map,
std::map<uint32_t, std::vector<std::string>> & cf_wal_keys) :
cf_log_number_map_(cf_log_number_map),
cf_wal_keys_(cf_wal_keys),
log_number_(current_log_number){}
Status PutCF(uint32_t column_family_id, const Slice& key,
const Slice& /*value*/) override {
auto it = cf_log_number_map_.find(column_family_id);
assert(it != cf_log_number_map_.end());
unsigned long long log_number_for_cf = it->second;
// If the current record is applicable for column_family_id
// (i.e. isn't flushed to SST file(s) for column_family_id)
// add it to the cf_wal_keys_ map for verification.
if (log_number_ >= log_number_for_cf) {
cf_wal_keys_[column_family_id].push_back(std::string(key.data(),
key.size()));
}
return Status::OK();
}
} handler(log_number, cf_log_number_map_, cf_wal_keys_);
batch.Iterate(&handler);
return WalProcessingOption::kContinueProcessing;
}
const char* Name() const override {
return "WalFilterTestWithColumnFamilies";
}
const std::map<uint32_t, std::vector<std::string>>& GetColumnFamilyKeys() {
return cf_wal_keys_;
}
const std::map<std::string, uint32_t> & GetColumnFamilyNameIdMap() {
return cf_name_id_map_;
}
};
std::vector<std::vector<std::string>> batch_keys_pre_flush(3);
batch_keys_pre_flush[0].push_back("key1");
batch_keys_pre_flush[0].push_back("key2");
batch_keys_pre_flush[1].push_back("key3");
batch_keys_pre_flush[1].push_back("key4");
batch_keys_pre_flush[2].push_back("key5");
batch_keys_pre_flush[2].push_back("key6");
Options options = OptionsForLogIterTest();
DestroyAndReopen(options);
CreateAndReopenWithCF({ "pikachu" }, options);
// Write given keys in given batches
for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) {
WriteBatch batch;
for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) {
batch.Put(handles_[0], batch_keys_pre_flush[i][j], DummyString(1024));
batch.Put(handles_[1], batch_keys_pre_flush[i][j], DummyString(1024));
}
dbfull()->Write(WriteOptions(), &batch);
}
  // Flush default column-family
db_->Flush(FlushOptions(), handles_[0]);
// Do some more writes
std::vector<std::vector<std::string>> batch_keys_post_flush(3);
batch_keys_post_flush[0].push_back("key7");
batch_keys_post_flush[0].push_back("key8");
batch_keys_post_flush[1].push_back("key9");
batch_keys_post_flush[1].push_back("key10");
batch_keys_post_flush[2].push_back("key11");
batch_keys_post_flush[2].push_back("key12");
// Write given keys in given batches
for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
WriteBatch batch;
for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
batch.Put(handles_[0], batch_keys_post_flush[i][j], DummyString(1024));
batch.Put(handles_[1], batch_keys_post_flush[i][j], DummyString(1024));
}
dbfull()->Write(WriteOptions(), &batch);
}
  // On recovery we should only find the second batch applicable to the default
  // CF, but both batches applicable to the pikachu CF
  // Create a test filter that records the WAL keys seen per column family
TestWalFilterWithColumnFamilies test_wal_filter_column_families;
// Reopen database with option to use WAL filter
options = OptionsForLogIterTest();
options.wal_filter = &test_wal_filter_column_families;
Status status =
TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
ASSERT_TRUE(status.ok());
// verify that handles_[0] only has post_flush keys
// while handles_[1] has pre and post flush keys
auto cf_wal_keys = test_wal_filter_column_families.GetColumnFamilyKeys();
auto name_id_map = test_wal_filter_column_families.GetColumnFamilyNameIdMap();
size_t index = 0;
auto keys_cf = cf_wal_keys[name_id_map[kDefaultColumnFamilyName]];
  // default column-family, only post_flush keys are expected
for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
Slice key_from_the_log(keys_cf[index++]);
Slice batch_key(batch_keys_post_flush[i][j]);
ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
}
}
ASSERT_TRUE(index == keys_cf.size());
index = 0;
keys_cf = cf_wal_keys[name_id_map["pikachu"]];
  // pikachu column-family, all keys are expected
for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) {
for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) {
Slice key_from_the_log(keys_cf[index++]);
Slice batch_key(batch_keys_pre_flush[i][j]);
ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
}
}
for (size_t i = 0; i < batch_keys_post_flush.size(); i++) {
for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) {
Slice key_from_the_log(keys_cf[index++]);
Slice batch_key(batch_keys_post_flush[i][j]);
ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0);
}
}
ASSERT_TRUE(index == keys_cf.size());
}
TEST_F(DBTest2, PresetCompressionDict) {
// Verifies that compression ratio improves when dictionary is enabled, and
// improves even further when the dictionary is trained by ZSTD.
const size_t kBlockSizeBytes = 4 << 10;
const size_t kL0FileBytes = 128 << 10;
const size_t kApproxPerBlockOverheadBytes = 50;
const int kNumL0Files = 5;
Options options;
// Make sure to use any custom env that the test is configured with.
options.env = CurrentOptions().env;
options.allow_concurrent_memtable_write = false;
options.arena_block_size = kBlockSizeBytes;
options.create_if_missing = true;
options.disable_auto_compactions = true;
options.level0_file_num_compaction_trigger = kNumL0Files;
options.memtable_factory.reset(
new SpecialSkipListFactory(kL0FileBytes / kBlockSizeBytes));
options.num_levels = 2;
options.target_file_size_base = kL0FileBytes;
options.target_file_size_multiplier = 2;
options.write_buffer_size = kL0FileBytes;
BlockBasedTableOptions table_options;
table_options.block_size = kBlockSizeBytes;
std::vector<CompressionType> compression_types;
if (Zlib_Supported()) {
compression_types.push_back(kZlibCompression);
}
#if LZ4_VERSION_NUMBER >= 10400 // r124+
compression_types.push_back(kLZ4Compression);
compression_types.push_back(kLZ4HCCompression);
#endif // LZ4_VERSION_NUMBER >= 10400
if (ZSTD_Supported()) {
compression_types.push_back(kZSTD);
}
enum DictionaryTypes : int {
kWithoutDict,
kWithDict,
kWithZSTDTrainedDict,
kDictEnd,
};
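  // Note (added explanation): compression_opts.max_dict_bytes > 0 enables
  // preset-dictionary compression (the dictionary is sampled from the same
  // flush/compaction output), while zstd_max_train_bytes > 0 additionally runs
  // ZSTD's dictionary trainer over that many sample bytes. The loop below
  // covers: no dictionary, untrained dictionary, and (ZSTD only) a trained
  // dictionary.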
for (auto compression_type : compression_types) {
options.compression = compression_type;
size_t bytes_without_dict = 0;
size_t bytes_with_dict = 0;
size_t bytes_with_zstd_trained_dict = 0;
for (int i = kWithoutDict; i < kDictEnd; i++) {
// First iteration: compress without preset dictionary
// Second iteration: compress with preset dictionary
// Third iteration (zstd only): compress with zstd-trained dictionary
//
// To make sure the compression dictionary has the intended effect, we
// verify the compressed size is smaller in successive iterations. Also in
// the non-first iterations, verify the data we get out is the same data
// we put in.
switch (i) {
case kWithoutDict:
options.compression_opts.max_dict_bytes = 0;
options.compression_opts.zstd_max_train_bytes = 0;
break;
case kWithDict:
options.compression_opts.max_dict_bytes = kBlockSizeBytes;
options.compression_opts.zstd_max_train_bytes = 0;
break;
case kWithZSTDTrainedDict:
if (compression_type != kZSTD) {
continue;
}
options.compression_opts.max_dict_bytes = kBlockSizeBytes;
options.compression_opts.zstd_max_train_bytes = kL0FileBytes;
break;
default:
assert(false);
}
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
std::string seq_datas[10];
for (int j = 0; j < 10; ++j) {
seq_datas[j] =
rnd.RandomString(kBlockSizeBytes - kApproxPerBlockOverheadBytes);
}
ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
for (int j = 0; j < kNumL0Files; ++j) {
for (size_t k = 0; k < kL0FileBytes / kBlockSizeBytes + 1; ++k) {
auto key_num = j * (kL0FileBytes / kBlockSizeBytes) + k;
ASSERT_OK(Put(1, Key(static_cast<int>(key_num)),
seq_datas[(key_num / 10) % 10]));
}
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
ASSERT_EQ(j + 1, NumTableFilesAtLevel(0, 1));
}
dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
true /* disallow_trivial_move */);
ASSERT_EQ(0, NumTableFilesAtLevel(0, 1));
ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
// Get the live sst files size
size_t total_sst_bytes = TotalSize(1);
if (i == kWithoutDict) {
bytes_without_dict = total_sst_bytes;
} else if (i == kWithDict) {
bytes_with_dict = total_sst_bytes;
} else if (i == kWithZSTDTrainedDict) {
bytes_with_zstd_trained_dict = total_sst_bytes;
}
for (size_t j = 0; j < kNumL0Files * (kL0FileBytes / kBlockSizeBytes);
j++) {
ASSERT_EQ(seq_datas[(j / 10) % 10], Get(1, Key(static_cast<int>(j))));
}
if (i == kWithDict) {
ASSERT_GT(bytes_without_dict, bytes_with_dict);
} else if (i == kWithZSTDTrainedDict) {
// In zstd compression, it is sometimes possible that using a trained
// dictionary does not get as good a compression ratio as without
// training.
// But using a dictionary (with or without training) should always get
// better compression ratio than not using one.
ASSERT_TRUE(bytes_with_dict > bytes_with_zstd_trained_dict ||
bytes_without_dict > bytes_with_zstd_trained_dict);
}
DestroyAndReopen(options);
}
}
}
TEST_F(DBTest2, PresetCompressionDictLocality) {
if (!ZSTD_Supported()) {
return;
}
// Verifies that compression dictionary is generated from local data. The
// verification simply checks all output SSTs have different compression
// dictionaries. We do not verify effectiveness as that'd likely be flaky in
// the future.
  const int kNumEntriesPerFile = 1 << 10;  // 1K entries
const int kNumBytesPerEntry = 1 << 10; // 1KB
const int kNumFiles = 4;
Options options = CurrentOptions();
options.compression = kZSTD;
options.compression_opts.max_dict_bytes = 1 << 14; // 16KB
options.compression_opts.zstd_max_train_bytes = 1 << 18; // 256KB
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.target_file_size_base = kNumEntriesPerFile * kNumBytesPerEntry;
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
Reopen(options);
Random rnd(301);
for (int i = 0; i < kNumFiles; ++i) {
for (int j = 0; j < kNumEntriesPerFile; ++j) {
ASSERT_OK(Put(Key(i * kNumEntriesPerFile + j),
rnd.RandomString(kNumBytesPerEntry)));
}
ASSERT_OK(Flush());
MoveFilesToLevel(1);
ASSERT_EQ(NumTableFilesAtLevel(1), i + 1);
}
// Store all the dictionaries generated during a full compaction.
std::vector<std::string> compression_dicts;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"BlockBasedTableBuilder::WriteCompressionDictBlock:RawDict",
[&](void* arg) {
compression_dicts.emplace_back(static_cast<Slice*>(arg)->ToString());
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
CompactRangeOptions compact_range_opts;
compact_range_opts.bottommost_level_compaction =
BottommostLevelCompaction::kForceOptimized;
ASSERT_OK(db_->CompactRange(compact_range_opts, nullptr, nullptr));
// Dictionary compression should not be so good as to compress four totally
// random files into one. If it does then there's probably something wrong
// with the test.
ASSERT_GT(NumTableFilesAtLevel(1), 1);
// Furthermore, there should be one compression dictionary generated per file.
// And they should all be different from each other.
ASSERT_EQ(NumTableFilesAtLevel(1),
static_cast<int>(compression_dicts.size()));
for (size_t i = 1; i < compression_dicts.size(); ++i) {
std::string& a = compression_dicts[i - 1];
std::string& b = compression_dicts[i];
size_t alen = a.size();
size_t blen = b.size();
ASSERT_TRUE(alen != blen || memcmp(a.data(), b.data(), alen) != 0);
}
}
class PresetCompressionDictTest
: public DBTestBase,
public testing::WithParamInterface<std::tuple<CompressionType, bool>> {
public:
PresetCompressionDictTest()
: DBTestBase("/db_test2", false /* env_do_fsync */),
compression_type_(std::get<0>(GetParam())),
bottommost_(std::get<1>(GetParam())) {}
protected:
const CompressionType compression_type_;
const bool bottommost_;
};
INSTANTIATE_TEST_CASE_P(
DBTest2, PresetCompressionDictTest,
::testing::Combine(::testing::ValuesIn(GetSupportedDictCompressions()),
::testing::Bool()));
TEST_P(PresetCompressionDictTest, Flush) {
// Verifies that dictionary is generated and written during flush only when
// `ColumnFamilyOptions::compression` enables dictionary.
const size_t kValueLen = 256;
const size_t kKeysPerFile = 1 << 10;
const size_t kDictLen = 4 << 10;
Options options = CurrentOptions();
if (bottommost_) {
options.bottommost_compression = compression_type_;
options.bottommost_compression_opts.enabled = true;
options.bottommost_compression_opts.max_dict_bytes = kDictLen;
} else {
options.compression = compression_type_;
options.compression_opts.max_dict_bytes = kDictLen;
}
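  // Note (added explanation): bottommost_compression_opts take effect only
  // when their `enabled` flag is set, which is why it is set explicitly above;
  // the regular compression_opts need no such flag.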
options.memtable_factory.reset(new SpecialSkipListFactory(kKeysPerFile));
options.statistics = CreateDBStatistics();
BlockBasedTableOptions bbto;
bbto.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
Reopen(options);
uint64_t prev_compression_dict_misses =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
Random rnd(301);
for (size_t i = 0; i <= kKeysPerFile; ++i) {
ASSERT_OK(Put(Key(static_cast<int>(i)), rnd.RandomString(kValueLen)));
}
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
// If there's a compression dictionary, it should have been loaded when the
// flush finished, incurring a cache miss.
uint64_t expected_compression_dict_misses;
if (bottommost_) {
expected_compression_dict_misses = prev_compression_dict_misses;
} else {
expected_compression_dict_misses = prev_compression_dict_misses + 1;
}
ASSERT_EQ(expected_compression_dict_misses,
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
}
TEST_P(PresetCompressionDictTest, CompactNonBottommost) {
// Verifies that dictionary is generated and written during compaction to
// non-bottommost level only when `ColumnFamilyOptions::compression` enables
// dictionary.
const size_t kValueLen = 256;
const size_t kKeysPerFile = 1 << 10;
const size_t kDictLen = 4 << 10;
Options options = CurrentOptions();
if (bottommost_) {
options.bottommost_compression = compression_type_;
options.bottommost_compression_opts.enabled = true;
options.bottommost_compression_opts.max_dict_bytes = kDictLen;
} else {
options.compression = compression_type_;
options.compression_opts.max_dict_bytes = kDictLen;
}
options.disable_auto_compactions = true;
options.statistics = CreateDBStatistics();
BlockBasedTableOptions bbto;
bbto.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
Reopen(options);
Random rnd(301);
for (size_t j = 0; j <= kKeysPerFile; ++j) {
ASSERT_OK(Put(Key(static_cast<int>(j)), rnd.RandomString(kValueLen)));
}
ASSERT_OK(Flush());
MoveFilesToLevel(2);
for (int i = 0; i < 2; ++i) {
for (size_t j = 0; j <= kKeysPerFile; ++j) {
ASSERT_OK(Put(Key(static_cast<int>(j)), rnd.RandomString(kValueLen)));
}
ASSERT_OK(Flush());
}
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
uint64_t prev_compression_dict_misses =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
// This L0->L1 compaction merges the two L0 files into L1. The produced L1
// file is not bottommost due to the existing L2 file covering the same key-
// range.
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
// If there's a compression dictionary, it should have been loaded when the
// compaction finished, incurring a cache miss.
uint64_t expected_compression_dict_misses;
if (bottommost_) {
expected_compression_dict_misses = prev_compression_dict_misses;
} else {
expected_compression_dict_misses = prev_compression_dict_misses + 1;
}
ASSERT_EQ(expected_compression_dict_misses,
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
}
TEST_P(PresetCompressionDictTest, CompactBottommost) {
  // Verifies that dictionary is generated and written during compaction to the
  // bottommost level only when either `ColumnFamilyOptions::compression` or
  // `ColumnFamilyOptions::bottommost_compression` enables dictionary.
const size_t kValueLen = 256;
const size_t kKeysPerFile = 1 << 10;
const size_t kDictLen = 4 << 10;
Options options = CurrentOptions();
if (bottommost_) {
options.bottommost_compression = compression_type_;
options.bottommost_compression_opts.enabled = true;
options.bottommost_compression_opts.max_dict_bytes = kDictLen;
} else {
options.compression = compression_type_;
options.compression_opts.max_dict_bytes = kDictLen;
}
options.disable_auto_compactions = true;
options.statistics = CreateDBStatistics();
BlockBasedTableOptions bbto;
bbto.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
Reopen(options);
Random rnd(301);
for (int i = 0; i < 2; ++i) {
for (size_t j = 0; j <= kKeysPerFile; ++j) {
ASSERT_OK(Put(Key(static_cast<int>(j)), rnd.RandomString(kValueLen)));
}
ASSERT_OK(Flush());
}
#ifndef ROCKSDB_LITE
ASSERT_EQ("2", FilesPerLevel(0));
#endif // ROCKSDB_LITE
uint64_t prev_compression_dict_misses =
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
CompactRangeOptions cro;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel(0));
#endif // ROCKSDB_LITE
// If there's a compression dictionary, it should have been loaded when the
// compaction finished, incurring a cache miss.
ASSERT_EQ(prev_compression_dict_misses + 1,
TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS));
}
class CompactionCompressionListener : public EventListener {
public:
explicit CompactionCompressionListener(Options* db_options)
: db_options_(db_options) {}
void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override {
// Figure out last level with files
int bottommost_level = 0;
for (int level = 0; level < db->NumberLevels(); level++) {
std::string files_at_level;
ASSERT_TRUE(
db->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
&files_at_level));
if (files_at_level != "0") {
bottommost_level = level;
}
}
if (db_options_->bottommost_compression != kDisableCompressionOption &&
ci.output_level == bottommost_level) {
ASSERT_EQ(ci.compression, db_options_->bottommost_compression);
} else if (db_options_->compression_per_level.size() != 0) {
ASSERT_EQ(ci.compression,
db_options_->compression_per_level[ci.output_level]);
} else {
ASSERT_EQ(ci.compression, db_options_->compression);
}
max_level_checked = std::max(max_level_checked, ci.output_level);
}
int max_level_checked = 0;
const Options* db_options_;
};
enum CompressionFailureType {
kTestCompressionFail,
kTestDecompressionFail,
kTestDecompressionCorruption
};
class CompressionFailuresTest
: public DBTest2,
public testing::WithParamInterface<std::tuple<
CompressionFailureType, CompressionType, uint32_t, uint32_t>> {
public:
CompressionFailuresTest() {
std::tie(compression_failure_type_, compression_type_,
compression_max_dict_bytes_, compression_parallel_threads_) =
GetParam();
}
CompressionFailureType compression_failure_type_ = kTestCompressionFail;
CompressionType compression_type_ = kNoCompression;
uint32_t compression_max_dict_bytes_ = 0;
uint32_t compression_parallel_threads_ = 0;
};
INSTANTIATE_TEST_CASE_P(
DBTest2, CompressionFailuresTest,
::testing::Combine(::testing::Values(kTestCompressionFail,
kTestDecompressionFail,
kTestDecompressionCorruption),
::testing::ValuesIn(GetSupportedCompressions()),
::testing::Values(0, 10), ::testing::Values(1, 4)));
TEST_P(CompressionFailuresTest, CompressionFailures) {
if (compression_type_ == kNoCompression) {
return;
}
Options options = CurrentOptions();
options.level0_file_num_compaction_trigger = 2;
options.max_bytes_for_level_base = 1024;
options.max_bytes_for_level_multiplier = 2;
options.num_levels = 7;
options.max_background_compactions = 1;
options.target_file_size_base = 512;
BlockBasedTableOptions table_options;
table_options.block_size = 512;
table_options.verify_compression = true;
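  // Note (added explanation): verify_compression makes the table builder
  // decompress each block it just compressed and compare the result with the
  // original bytes, which is how the failures injected via the sync points
  // below are detected instead of producing silently corrupt output.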
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.compression = compression_type_;
options.compression_opts.parallel_threads = compression_parallel_threads_;
options.compression_opts.max_dict_bytes = compression_max_dict_bytes_;
options.bottommost_compression_opts.parallel_threads =
compression_parallel_threads_;
options.bottommost_compression_opts.max_dict_bytes =
compression_max_dict_bytes_;
if (compression_failure_type_ == kTestCompressionFail) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompressData:TamperWithReturnValue", [](void* arg) {
bool* ret = static_cast<bool*>(arg);
*ret = false;
});
} else if (compression_failure_type_ == kTestDecompressionFail) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"UncompressBlockContentsForCompressionType:TamperWithReturnValue",
[](void* arg) {
Status* ret = static_cast<Status*>(arg);
ASSERT_OK(*ret);
*ret = Status::Corruption("kTestDecompressionFail");
});
} else if (compression_failure_type_ == kTestDecompressionCorruption) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"UncompressBlockContentsForCompressionType:"
"TamperWithDecompressionOutput",
[](void* arg) {
BlockContents* contents = static_cast<BlockContents*>(arg);
// Ensure uncompressed data != original data
const size_t len = contents->data.size() + 1;
std::unique_ptr<char[]> fake_data(new char[len]());
*contents = BlockContents(std::move(fake_data), len);
});
}
std::map<std::string, std::string> key_value_written;
const int kKeySize = 5;
const int kValUnitSize = 16;
const int kValSize = 256;
Random rnd(405);
Status s = Status::OK();
DestroyAndReopen(options);
// Write 10 random files
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 5; j++) {
std::string key = rnd.RandomString(kKeySize);
// Ensure good compression ratio
std::string valueUnit = rnd.RandomString(kValUnitSize);
std::string value;
for (int k = 0; k < kValSize; k += kValUnitSize) {
value += valueUnit;
}
s = Put(key, value);
if (compression_failure_type_ == kTestCompressionFail) {
key_value_written[key] = value;
ASSERT_OK(s);
}
}
s = Flush();
if (compression_failure_type_ == kTestCompressionFail) {
ASSERT_OK(s);
}
s = dbfull()->TEST_WaitForCompact();
if (compression_failure_type_ == kTestCompressionFail) {
ASSERT_OK(s);
}
if (i == 4) {
      // Make compression fail in the middle of table building
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
}
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
if (compression_failure_type_ == kTestCompressionFail) {
    // Failed compression falls back to kNoCompression; check content
    // consistency
std::unique_ptr<Iterator> db_iter(db_->NewIterator(ReadOptions()));
for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
std::string key = db_iter->key().ToString();
std::string value = db_iter->value().ToString();
ASSERT_NE(key_value_written.find(key), key_value_written.end());
ASSERT_EQ(key_value_written[key], value);
key_value_written.erase(key);
}
ASSERT_EQ(0, key_value_written.size());
} else if (compression_failure_type_ == kTestDecompressionFail) {
ASSERT_EQ(std::string(s.getState()),
"Could not decompress: kTestDecompressionFail");
} else if (compression_failure_type_ == kTestDecompressionCorruption) {
ASSERT_EQ(std::string(s.getState()),
"Decompressed block did not match raw block");
}
}
TEST_F(DBTest2, CompressionOptions) {
if (!Zlib_Supported() || !Snappy_Supported()) {
return;
}
Options options = CurrentOptions();
options.level0_file_num_compaction_trigger = 2;
options.max_bytes_for_level_base = 100;
options.max_bytes_for_level_multiplier = 2;
options.num_levels = 7;
options.max_background_compactions = 1;
CompactionCompressionListener* listener =
new CompactionCompressionListener(&options);
options.listeners.emplace_back(listener);
const int kKeySize = 5;
const int kValSize = 20;
Random rnd(301);
std::vector<uint32_t> compression_parallel_threads = {1, 4};
std::map<std::string, std::string> key_value_written;
for (int iter = 0; iter <= 2; iter++) {
listener->max_level_checked = 0;
if (iter == 0) {
// Use different compression algorithms for different levels but
// always use Zlib for bottommost level
options.compression_per_level = {kNoCompression, kNoCompression,
kNoCompression, kSnappyCompression,
kSnappyCompression, kSnappyCompression,
kZlibCompression};
options.compression = kNoCompression;
options.bottommost_compression = kZlibCompression;
} else if (iter == 1) {
      // Use Snappy everywhere except the bottommost level, which uses Zlib
options.compression_per_level = {};
options.compression = kSnappyCompression;
options.bottommost_compression = kZlibCompression;
} else if (iter == 2) {
// Use Snappy everywhere
options.compression_per_level = {};
options.compression = kSnappyCompression;
options.bottommost_compression = kDisableCompressionOption;
}
for (auto num_threads : compression_parallel_threads) {
options.compression_opts.parallel_threads = num_threads;
options.bottommost_compression_opts.parallel_threads = num_threads;
DestroyAndReopen(options);
// Write 10 random files
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 5; j++) {
std::string key = rnd.RandomString(kKeySize);
std::string value = rnd.RandomString(kValSize);
key_value_written[key] = value;
ASSERT_OK(Put(key, value));
}
ASSERT_OK(Flush());
dbfull()->TEST_WaitForCompact();
}
// Make sure that we wrote enough to check all 7 levels
ASSERT_EQ(listener->max_level_checked, 6);
// Make sure database content is the same as key_value_written
std::unique_ptr<Iterator> db_iter(db_->NewIterator(ReadOptions()));
for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) {
std::string key = db_iter->key().ToString();
std::string value = db_iter->value().ToString();
ASSERT_NE(key_value_written.find(key), key_value_written.end());
ASSERT_EQ(key_value_written[key], value);
key_value_written.erase(key);
}
ASSERT_EQ(0, key_value_written.size());
}
}
}
class CompactionStallTestListener : public EventListener {
public:
CompactionStallTestListener() : compacting_files_cnt_(0), compacted_files_cnt_(0) {}
void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& ci) override {
ASSERT_EQ(ci.cf_name, "default");
ASSERT_EQ(ci.base_input_level, 0);
ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum);
compacting_files_cnt_ += ci.input_files.size();
}
void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& ci) override {
ASSERT_EQ(ci.cf_name, "default");
ASSERT_EQ(ci.base_input_level, 0);
ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum);
compacted_files_cnt_ += ci.input_files.size();
}
std::atomic<size_t> compacting_files_cnt_;
std::atomic<size_t> compacted_files_cnt_;
};
TEST_F(DBTest2, CompactionStall) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:0"},
{"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:1"},
{"DBTest2::CompactionStall:2",
"DBImpl::NotifyOnCompactionBegin::UnlockMutex"},
{"DBTest2::CompactionStall:3",
"DBImpl::NotifyOnCompactionCompleted::UnlockMutex"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Options options = CurrentOptions();
options.level0_file_num_compaction_trigger = 4;
options.max_background_compactions = 40;
CompactionStallTestListener* listener = new CompactionStallTestListener();
options.listeners.emplace_back(listener);
DestroyAndReopen(options);
// make sure all background compaction jobs can be scheduled
auto stop_token =
dbfull()->TEST_write_controler().GetCompactionPressureToken();
Random rnd(301);
// 4 Files in L0
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
}
ASSERT_OK(Flush());
}
// Wait for compaction to be triggered
TEST_SYNC_POINT("DBTest2::CompactionStall:0");
// Clear "DBImpl::BGWorkCompaction" SYNC_POINT since we want to hold it again
// at DBTest2::CompactionStall::1
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
// Another 6 L0 files to trigger compaction again
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
}
ASSERT_OK(Flush());
}
// Wait for another compaction to be triggered
TEST_SYNC_POINT("DBTest2::CompactionStall:1");
// Hold NotifyOnCompactionBegin in the unlock mutex section
TEST_SYNC_POINT("DBTest2::CompactionStall:2");
// Hold NotifyOnCompactionCompleted in the unlock mutex section
TEST_SYNC_POINT("DBTest2::CompactionStall:3");
dbfull()->TEST_WaitForCompact();
ASSERT_LT(NumTableFilesAtLevel(0),
options.level0_file_num_compaction_trigger);
ASSERT_GT(listener->compacted_files_cnt_.load(),
10 - options.level0_file_num_compaction_trigger);
ASSERT_EQ(listener->compacting_files_cnt_.load(), listener->compacted_files_cnt_.load());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, FirstSnapshotTest) {
Options options;
options.write_buffer_size = 100000; // Small write buffer
options = CurrentOptions(options);
CreateAndReopenWithCF({"pikachu"}, options);
  // This snapshot will have sequence number 0, which is the expected behaviour.
const Snapshot* s1 = db_->GetSnapshot();
Put(1, "k1", std::string(100000, 'x')); // Fill memtable
Put(1, "k2", std::string(100000, 'y')); // Trigger flush
db_->ReleaseSnapshot(s1);
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, DuplicateSnapshot) {
Options options;
options = CurrentOptions(options);
std::vector<const Snapshot*> snapshots;
DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
SequenceNumber oldest_ww_snap, first_ww_snap;
Put("k", "v"); // inc seq
snapshots.push_back(db_->GetSnapshot());
snapshots.push_back(db_->GetSnapshot());
Put("k", "v"); // inc seq
snapshots.push_back(db_->GetSnapshot());
snapshots.push_back(dbi->GetSnapshotForWriteConflictBoundary());
first_ww_snap = snapshots.back()->GetSequenceNumber();
Put("k", "v"); // inc seq
snapshots.push_back(dbi->GetSnapshotForWriteConflictBoundary());
snapshots.push_back(db_->GetSnapshot());
Put("k", "v"); // inc seq
snapshots.push_back(db_->GetSnapshot());
{
InstrumentedMutexLock l(dbi->mutex());
auto seqs = dbi->snapshots().GetAll(&oldest_ww_snap);
ASSERT_EQ(seqs.size(), 4); // duplicates are not counted
ASSERT_EQ(oldest_ww_snap, first_ww_snap);
}
for (auto s : snapshots) {
db_->ReleaseSnapshot(s);
}
}
#endif // ROCKSDB_LITE
class PinL0IndexAndFilterBlocksTest
: public DBTestBase,
public testing::WithParamInterface<std::tuple<bool, bool>> {
public:
PinL0IndexAndFilterBlocksTest()
: DBTestBase("/db_pin_l0_index_bloom_test", /*env_do_fsync=*/true) {}
void SetUp() override {
infinite_max_files_ = std::get<0>(GetParam());
disallow_preload_ = std::get<1>(GetParam());
}
void CreateTwoLevels(Options* options, bool close_afterwards) {
if (infinite_max_files_) {
options->max_open_files = -1;
}
options->create_if_missing = true;
options->statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
table_options.pin_l0_filter_and_index_blocks_in_cache = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(20));
options->table_factory.reset(NewBlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, *options);
Put(1, "a", "begin");
Put(1, "z", "end");
ASSERT_OK(Flush(1));
// move this table to L1
dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
// reset block cache
table_options.block_cache = NewLRUCache(64 * 1024);
options->table_factory.reset(NewBlockBasedTableFactory(table_options));
TryReopenWithColumnFamilies({"default", "pikachu"}, *options);
// create new table at L0
Put(1, "a2", "begin2");
Put(1, "z2", "end2");
ASSERT_OK(Flush(1));
if (close_afterwards) {
Close(); // This ensures that there is no ref to block cache entries
}
table_options.block_cache->EraseUnRefEntries();
}
bool infinite_max_files_;
bool disallow_preload_;
};
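// Note (added explanation): with cache_index_and_filter_blocks = true, index
// and filter blocks live in the block cache like data blocks;
// pin_l0_filter_and_index_blocks_in_cache additionally pins those blocks for
// L0 files so they cannot be evicted while the table reader is open. The tests
// below compare the BLOCK_CACHE_{INDEX,FILTER}_{MISS,HIT} tickers to verify
// that pinned blocks are not re-fetched.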
TEST_P(PinL0IndexAndFilterBlocksTest,
IndexAndFilterBlocksOfNewTableAddedToCacheWithPinning) {
Options options = CurrentOptions();
if (infinite_max_files_) {
options.max_open_files = -1;
}
options.create_if_missing = true;
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
table_options.pin_l0_filter_and_index_blocks_in_cache = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(20));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
CreateAndReopenWithCF({"pikachu"}, options);
ASSERT_OK(Put(1, "key", "val"));
// Create a new table.
ASSERT_OK(Flush(1));
// index/filter blocks added to block cache right after table creation.
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
// only index/filter were added
ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_ADD));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
std::string value;
// Miss and hit count should remain the same, they're all pinned.
db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
// Miss and hit count should remain the same, they're all pinned.
value = Get(1, "key");
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
TEST_P(PinL0IndexAndFilterBlocksTest,
MultiLevelIndexAndFilterBlocksCachedWithPinning) {
Options options = CurrentOptions();
PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options, false);
// get base cache values
uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
std::string value;
// this should be read from L0
// so cache values don't change
value = Get(1, "a2");
ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
// this should be read from L1
// the file is opened, prefetching results in a cache filter miss
// the block is loaded and added to the cache,
// then the get results in a cache hit for L1
  // When we have infinite max_files, there is still a cache miss because we
  // have reset the block cache
value = Get(1, "a");
ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
}
TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
Options options = CurrentOptions();
  // This ensures that the db does not hold references to anything in the block
  // cache, so EraseUnRefEntries can clear them up.
bool close_afterwards = true;
PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options, close_afterwards);
// Get base cache values
uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
if (disallow_preload_) {
// Now we have two files. We narrow the max open files to allow 3 entries
// so that preloading SST files won't happen.
options.max_open_files = 13;
    // RocksDB sanitizes max_open_files to at least 20. Modify it back.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg) {
int* max_open_files = static_cast<int*>(arg);
*max_open_files = 13;
});
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
// Reopen database. If max_open_files is set as -1, table readers will be
// preloaded. This will trigger a BlockBasedTable::Open() and prefetch
// L0 index and filter. Level 1's prefetching is disabled in DB::Open()
TryReopenWithColumnFamilies({"default", "pikachu"}, options);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
if (!disallow_preload_) {
    // After reopen, cache misses are increased by one because we read (and
    // only read) filter and index on L0
ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
} else {
// If max_open_files is not -1, we do not preload table readers, so there is
// no change.
ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
std::string value;
// this should be read from L0
value = Get(1, "a2");
// If max_open_files is -1, we have pinned index and filter in Rep, so there
// will not be changes in index and filter misses or hits. If max_open_files
// is not -1, Get() will open a TableReader and prefetch index and filter.
ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
// this should be read from L1
value = Get(1, "a");
if (!disallow_preload_) {
    // In the infinite max_open_files case, there's a cache miss when executing
    // Get() because index and filter were not prefetched before.
ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
} else {
// In this case, cache miss will be increased by one in
// BlockBasedTable::Open() because this is not in DB::Open() code path so we
// will prefetch L1's index and filter. Cache hit will also be increased by
// one because Get() will read index and filter from the block cache
// prefetched in previous Open() call.
ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
// Force a full compaction to one single file. There will be a block
// cache read for both of index and filter. If prefetch doesn't explicitly
// happen, it will happen when verifying the file.
Compact(1, "a", "zzzzz");
dbfull()->TEST_WaitForCompact();
if (!disallow_preload_) {
ASSERT_EQ(fm + 3, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 3, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih + 3, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
} else {
ASSERT_EQ(fm + 3, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 3, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih + 4, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
  // Bloom filter and index hits will happen when a Get() happens.
value = Get(1, "a");
if (!disallow_preload_) {
ASSERT_EQ(fm + 3, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 3, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih + 4, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
} else {
ASSERT_EQ(fm + 3, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
ASSERT_EQ(fh + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
ASSERT_EQ(im + 3, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
ASSERT_EQ(ih + 5, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
}
INSTANTIATE_TEST_CASE_P(PinL0IndexAndFilterBlocksTest,
PinL0IndexAndFilterBlocksTest,
::testing::Values(std::make_tuple(true, false),
std::make_tuple(false, false),
std::make_tuple(false, true)));
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, MaxCompactionBytesTest) {
Options options = CurrentOptions();
options.memtable_factory.reset(
new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
options.compaction_style = kCompactionStyleLevel;
options.write_buffer_size = 200 << 10;
options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 4;
options.num_levels = 4;
options.compression = kNoCompression;
options.max_bytes_for_level_base = 450 << 10;
options.target_file_size_base = 100 << 10;
// Infinite for full compaction.
options.max_compaction_bytes = options.target_file_size_base * 100;
Reopen(options);
Random rnd(301);
for (int num = 0; num < 8; num++) {
GenerateNewRandomFile(&rnd);
}
CompactRangeOptions cro;
cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
ASSERT_EQ("0,0,8", FilesPerLevel(0));
  // When compacting from Ln -> Ln+1, cut a file if it overlaps with
  // more than three files in Ln+1.
options.max_compaction_bytes = options.target_file_size_base * 3;
Reopen(options);
GenerateNewRandomFile(&rnd);
// Add three more small files that overlap with the previous file
for (int i = 0; i < 3; i++) {
Put("a", "z");
ASSERT_OK(Flush());
}
dbfull()->TEST_WaitForCompact();
  // Output files to L1 are cut into three pieces, according to
  // options.max_compaction_bytes
ASSERT_EQ("0,3,8", FilesPerLevel(0));
}
static void UniqueIdCallback(void* arg) {
int* result = reinterpret_cast<int*>(arg);
if (*result == -1) {
*result = 0;
}
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearTrace();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
}
class MockPersistentCache : public PersistentCache {
public:
explicit MockPersistentCache(const bool is_compressed, const size_t max_size)
: is_compressed_(is_compressed), max_size_(max_size) {
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
}
~MockPersistentCache() override {}
PersistentCache::StatsType Stats() override {
return PersistentCache::StatsType();
}
uint64_t NewId() override {
return last_id_.fetch_add(1, std::memory_order_relaxed);
}
Status Insert(const Slice& page_key, const char* data,
const size_t size) override {
MutexLock _(&lock_);
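    // Crude eviction (good enough for tests): when over budget, drop the
    // first entry in key order rather than tracking any LRU order.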
if (size_ > max_size_) {
size_ -= data_.begin()->second.size();
data_.erase(data_.begin());
}
data_.insert(std::make_pair(page_key.ToString(), std::string(data, size)));
size_ += size;
return Status::OK();
}
Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
size_t* size) override {
MutexLock _(&lock_);
auto it = data_.find(page_key.ToString());
if (it == data_.end()) {
return Status::NotFound();
}
assert(page_key.ToString() == it->first);
data->reset(new char[it->second.size()]);
memcpy(data->get(), it->second.c_str(), it->second.size());
*size = it->second.size();
return Status::OK();
}
bool IsCompressed() override { return is_compressed_; }
std::string GetPrintableOptions() const override {
return "MockPersistentCache";
}
port::Mutex lock_;
std::map<std::string, std::string> data_;
const bool is_compressed_ = true;
size_t size_ = 0;
const size_t max_size_ = 10 * 1024; // 10KiB
std::atomic<uint64_t> last_id_{1};
};
#ifdef OS_LINUX
// Make sure that the CPU time perf context counters use Env::NowCPUNanos()
// rather than Env::CPUNanos().
TEST_F(DBTest2, TestPerfContextGetCpuTime) {
  // Force the table cache to resize so the table handle is not preloaded,
  // letting us measure find_table_nanos during Get().
dbfull()->TEST_table_cache()->SetCapacity(0);
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Flush());
env_->now_cpu_count_.store(0);
env_->SetMockSleep();
// NOTE: Presumed unnecessary and removed: resetting mock time in env
// CPU timing is not enabled with kEnableTimeExceptForMutex
SetPerfLevel(PerfLevel::kEnableTimeExceptForMutex);
ASSERT_EQ("bar", Get("foo"));
ASSERT_EQ(0, get_perf_context()->get_cpu_nanos);
ASSERT_EQ(0, env_->now_cpu_count_.load());
constexpr uint64_t kDummyAddonSeconds = uint64_t{1000000};
constexpr uint64_t kDummyAddonNanos = 1000000000U * kDummyAddonSeconds;
// Add time to NowNanos() reading.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"TableCache::FindTable:0",
[&](void* /*arg*/) { env_->MockSleepForSeconds(kDummyAddonSeconds); });
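  // The callback above only inflates the mocked wall clock inside
  // TableCache::FindTable. If get_cpu_nanos were derived from wall-clock
  // NowNanos(), it would also exceed kDummyAddonNanos; the assertions below
  // check that it stays below while find_table_nanos does pick up the addon.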
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
SetPerfLevel(PerfLevel::kEnableTimeAndCPUTimeExceptForMutex);
ASSERT_EQ("bar", Get("foo"));
ASSERT_GT(env_->now_cpu_count_.load(), 2);
ASSERT_LT(get_perf_context()->get_cpu_nanos, kDummyAddonNanos);
ASSERT_GT(get_perf_context()->find_table_nanos, kDummyAddonNanos);
SetPerfLevel(PerfLevel::kDisable);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, TestPerfContextIterCpuTime) {
DestroyAndReopen(CurrentOptions());
  // Force the table cache to resize so the table handle is not preloaded,
  // letting us measure find_table_nanos during iteration.
dbfull()->TEST_table_cache()->SetCapacity(0);
const size_t kNumEntries = 10;
for (size_t i = 0; i < kNumEntries; ++i) {
ASSERT_OK(Put("k" + ToString(i), "v" + ToString(i)));
}
ASSERT_OK(Flush());
for (size_t i = 0; i < kNumEntries; ++i) {
ASSERT_EQ("v" + ToString(i), Get("k" + ToString(i)));
}
std::string last_key = "k" + ToString(kNumEntries - 1);
std::string last_value = "v" + ToString(kNumEntries - 1);
env_->now_cpu_count_.store(0);
env_->SetMockSleep();
// NOTE: Presumed unnecessary and removed: resetting mock time in env
// CPU timing is not enabled with kEnableTimeExceptForMutex
SetPerfLevel(PerfLevel::kEnableTimeExceptForMutex);
Iterator* iter = db_->NewIterator(ReadOptions());
iter->Seek("k0");
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
iter->SeekForPrev(last_key);
ASSERT_TRUE(iter->Valid());
iter->SeekToLast();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ(last_value, iter->value().ToString());
iter->SeekToFirst();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
ASSERT_EQ(0, get_perf_context()->iter_seek_cpu_nanos);
iter->Next();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v1", iter->value().ToString());
ASSERT_EQ(0, get_perf_context()->iter_next_cpu_nanos);
iter->Prev();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
ASSERT_EQ(0, get_perf_context()->iter_prev_cpu_nanos);
ASSERT_EQ(0, env_->now_cpu_count_.load());
delete iter;
constexpr uint64_t kDummyAddonSeconds = uint64_t{1000000};
constexpr uint64_t kDummyAddonNanos = 1000000000U * kDummyAddonSeconds;
// Add time to NowNanos() reading.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"TableCache::FindTable:0",
[&](void* /*arg*/) { env_->MockSleepForSeconds(kDummyAddonSeconds); });
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
SetPerfLevel(PerfLevel::kEnableTimeAndCPUTimeExceptForMutex);
iter = db_->NewIterator(ReadOptions());
iter->Seek("k0");
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
iter->SeekForPrev(last_key);
ASSERT_TRUE(iter->Valid());
iter->SeekToLast();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ(last_value, iter->value().ToString());
iter->SeekToFirst();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
ASSERT_GT(get_perf_context()->iter_seek_cpu_nanos, 0);
ASSERT_LT(get_perf_context()->iter_seek_cpu_nanos, kDummyAddonNanos);
iter->Next();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v1", iter->value().ToString());
ASSERT_GT(get_perf_context()->iter_next_cpu_nanos, 0);
ASSERT_LT(get_perf_context()->iter_next_cpu_nanos, kDummyAddonNanos);
iter->Prev();
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("v0", iter->value().ToString());
ASSERT_GT(get_perf_context()->iter_prev_cpu_nanos, 0);
ASSERT_LT(get_perf_context()->iter_prev_cpu_nanos, kDummyAddonNanos);
ASSERT_GE(env_->now_cpu_count_.load(), 12);
ASSERT_GT(get_perf_context()->find_table_nanos, kDummyAddonNanos);
SetPerfLevel(PerfLevel::kDisable);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
delete iter;
}
#endif // OS_LINUX
#if !defined OS_SOLARIS
TEST_F(DBTest2, PersistentCache) {
int num_iter = 80;
Options options;
options.write_buffer_size = 64 * 1024; // small write buffer
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options = CurrentOptions(options);
auto bsizes = {/*no block cache*/ 0, /*1M*/ 1 * 1024 * 1024};
auto types = {/*compressed*/ 1, /*uncompressed*/ 0};
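  // Four configurations in total: {no block cache, 1MB block cache} x
  // {compressed, uncompressed} persistent cache.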
for (auto bsize : bsizes) {
for (auto type : types) {
BlockBasedTableOptions table_options;
table_options.persistent_cache.reset(
new MockPersistentCache(type, 10 * 1024));
table_options.no_block_cache = true;
table_options.block_cache = bsize ? NewLRUCache(bsize) : nullptr;
table_options.block_cache_compressed = nullptr;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
DestroyAndReopen(options);
CreateAndReopenWithCF({"pikachu"}, options);
// default column family doesn't have block cache
Options no_block_cache_opts;
no_block_cache_opts.statistics = options.statistics;
no_block_cache_opts = CurrentOptions(no_block_cache_opts);
BlockBasedTableOptions table_options_no_bc;
table_options_no_bc.no_block_cache = true;
no_block_cache_opts.table_factory.reset(
NewBlockBasedTableFactory(table_options_no_bc));
ReopenWithColumnFamilies(
{"default", "pikachu"},
std::vector<Options>({no_block_cache_opts, options}));
Random rnd(301);
      // Write 80 values of ~1KB each (each value is reused for 4 consecutive
      // keys to keep the compression ratio high)
ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
std::vector<std::string> values;
std::string str;
for (int i = 0; i < num_iter; i++) {
if (i % 4 == 0) { // high compression ratio
str = rnd.RandomString(1000);
}
values.push_back(str);
ASSERT_OK(Put(1, Key(i), values[i]));
}
// flush all data from memtable so that reads are from block cache
ASSERT_OK(Flush(1));
for (int i = 0; i < num_iter; i++) {
ASSERT_EQ(Get(1, Key(i)), values[i]);
}
auto hit = options.statistics->getTickerCount(PERSISTENT_CACHE_HIT);
auto miss = options.statistics->getTickerCount(PERSISTENT_CACHE_MISS);
ASSERT_GT(hit, 0);
ASSERT_GT(miss, 0);
}
}
}
#endif // !defined OS_SOLARIS
namespace {
void CountSyncPoint() {
TEST_SYNC_POINT_CALLBACK("DBTest2::MarkedPoint", nullptr /* arg */);
}
} // namespace
TEST_F(DBTest2, SyncPointMarker) {
std::atomic<int> sync_point_called(0);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"DBTest2::MarkedPoint",
[&](void* /*arg*/) { sync_point_called.fetch_add(1); });
  // The first dependency enforces that Marker can be loaded before MarkedPoint.
  // The second checks that thread 1's MarkedPoint is disabled here.
// Execution order:
// | Thread 1 | Thread 2 |
// | | Marker |
// | MarkedPoint | |
// | Thread1First | |
// | | MarkedPoint |
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
{{"DBTest2::SyncPointMarker:Thread1First", "DBTest2::MarkedPoint"}},
{{"DBTest2::SyncPointMarker:Marker", "DBTest2::MarkedPoint"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
std::function<void()> func1 = [&]() {
CountSyncPoint();
TEST_SYNC_POINT("DBTest2::SyncPointMarker:Thread1First");
};
std::function<void()> func2 = [&]() {
TEST_SYNC_POINT("DBTest2::SyncPointMarker:Marker");
CountSyncPoint();
};
auto thread1 = port::Thread(func1);
auto thread2 = port::Thread(func2);
thread1.join();
thread2.join();
// Callback is only executed once
ASSERT_EQ(sync_point_called.load(), 1);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
#endif
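// Roughly mirrors a block-based table entry with delta encoding disabled:
// varint shared-key length (always 0 here), varint non-shared key length,
// varint value length, then the key and value bytes.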
size_t GetEncodedEntrySize(size_t key_size, size_t value_size) {
std::string buffer;
PutVarint32(&buffer, static_cast<uint32_t>(0));
PutVarint32(&buffer, static_cast<uint32_t>(key_size));
PutVarint32(&buffer, static_cast<uint32_t>(value_size));
return buffer.size() + key_size + value_size;
}
TEST_F(DBTest2, ReadAmpBitmap) {
Options options = CurrentOptions();
BlockBasedTableOptions bbto;
uint32_t bytes_per_bit[2] = {1, 16};
for (size_t k = 0; k < 2; k++) {
// Disable delta encoding to make it easier to calculate read amplification
bbto.use_delta_encoding = false;
// Huge block cache to make it easier to calculate read amplification
bbto.block_cache = NewLRUCache(1024 * 1024 * 1024);
bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
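    // Sketch of the mechanism: each data block keeps a bitmap where one bit
    // covers read_amp_bytes_per_bit bytes; bytes under bits that were actually
    // touched feed READ_AMP_ESTIMATE_USEFUL_BYTES, while whole blocks loaded
    // feed READ_AMP_TOTAL_READ_BYTES.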
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
DestroyAndReopen(options);
const size_t kNumEntries = 10000;
Random rnd(301);
for (size_t i = 0; i < kNumEntries; i++) {
ASSERT_OK(Put(Key(static_cast<int>(i)), rnd.RandomString(100)));
}
ASSERT_OK(Flush());
Close();
Reopen(options);
// Read keys/values randomly and verify that reported read amp error
// is less than 2%
uint64_t total_useful_bytes = 0;
std::set<int> read_keys;
std::string value;
for (size_t i = 0; i < kNumEntries * 5; i++) {
int key_idx = rnd.Next() % kNumEntries;
std::string key = Key(key_idx);
ASSERT_OK(db_->Get(ReadOptions(), key, &value));
if (read_keys.find(key_idx) == read_keys.end()) {
auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
total_useful_bytes +=
GetEncodedEntrySize(internal_key.size(), value.size());
read_keys.insert(key_idx);
}
double expected_read_amp =
static_cast<double>(total_useful_bytes) /
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
double read_amp =
static_cast<double>(options.statistics->getTickerCount(
READ_AMP_ESTIMATE_USEFUL_BYTES)) /
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
double error_pct = fabs(expected_read_amp - read_amp) * 100;
// Error between reported read amp and real read amp should be less than
// 2%
EXPECT_LE(error_pct, 2);
}
    // Make sure we read everything in the DB (which is smaller than our cache)
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_EQ(iter->value().ToString(), Get(iter->key().ToString()));
}
delete iter;
    // Read amp is on average 100% since we read everything we loaded into memory
if (k == 0) {
ASSERT_EQ(
options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES),
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES));
} else {
ASSERT_NEAR(
options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES) *
1.0f /
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES),
1, .01);
}
}
}
#ifndef OS_SOLARIS // GetUniqueIdFromFile is not implemented
TEST_F(DBTest2, ReadAmpBitmapLiveInCacheAfterDBClose) {
{
const int kIdBufLen = 100;
char id_buf[kIdBufLen];
Status s = Status::NotSupported();
#ifndef OS_WIN
    // You can't open a directory on Windows using a random access file
std::unique_ptr<RandomAccessFile> file;
s = env_->NewRandomAccessFile(dbname_, &file, EnvOptions());
if (s.ok()) {
if (file->GetUniqueId(id_buf, kIdBufLen) == 0) {
        // The fs holding the db directory doesn't support getting a unique
        // file id; this means the test would fail because lru_cache would load
        // the blocks again regardless of whether they are already in the cache
return;
}
}
#endif
if (!s.ok()) {
std::unique_ptr<Directory> dir;
ASSERT_OK(env_->NewDirectory(dbname_, &dir));
if (dir->GetUniqueId(id_buf, kIdBufLen) == 0) {
        // The fs holding the db directory doesn't support getting a unique
        // file id; this means the test would fail because lru_cache would load
        // the blocks again regardless of whether they are already in the cache
return;
}
}
}
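  // Rationale (roughly): block cache keys are derived from the file's unique
  // id, so cached blocks can only be re-used across a Close()/Reopen() when
  // the filesystem reports stable unique ids; otherwise we bailed out above.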
uint32_t bytes_per_bit[2] = {1, 16};
for (size_t k = 0; k < 2; k++) {
std::shared_ptr<Cache> lru_cache = NewLRUCache(1024 * 1024 * 1024);
std::shared_ptr<Statistics> stats = ROCKSDB_NAMESPACE::CreateDBStatistics();
Options options = CurrentOptions();
BlockBasedTableOptions bbto;
// Disable delta encoding to make it easier to calculate read amplification
bbto.use_delta_encoding = false;
// Huge block cache to make it easier to calculate read amplification
bbto.block_cache = lru_cache;
bbto.read_amp_bytes_per_bit = bytes_per_bit[k];
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
options.statistics = stats;
DestroyAndReopen(options);
const int kNumEntries = 10000;
Random rnd(301);
for (int i = 0; i < kNumEntries; i++) {
ASSERT_OK(Put(Key(i), rnd.RandomString(100)));
}
ASSERT_OK(Flush());
Close();
Reopen(options);
uint64_t total_useful_bytes = 0;
std::set<int> read_keys;
std::string value;
// Iter1: Read half the DB, Read even keys
// Key(0), Key(2), Key(4), Key(6), Key(8), ...
for (int i = 0; i < kNumEntries; i += 2) {
std::string key = Key(i);
ASSERT_OK(db_->Get(ReadOptions(), key, &value));
if (read_keys.find(i) == read_keys.end()) {
auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
total_useful_bytes +=
GetEncodedEntrySize(internal_key.size(), value.size());
read_keys.insert(i);
}
}
size_t total_useful_bytes_iter1 =
options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
size_t total_loaded_bytes_iter1 =
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
Close();
std::shared_ptr<Statistics> new_statistics =
ROCKSDB_NAMESPACE::CreateDBStatistics();
    // Destroy the old statistics object that the blocks in lru_cache point to
options.statistics.reset();
// Use the statistics object that we just created
options.statistics = new_statistics;
Reopen(options);
// Iter2: Read half the DB, Read odd keys
// Key(1), Key(3), Key(5), Key(7), Key(9), ...
for (int i = 1; i < kNumEntries; i += 2) {
std::string key = Key(i);
ASSERT_OK(db_->Get(ReadOptions(), key, &value));
if (read_keys.find(i) == read_keys.end()) {
auto internal_key = InternalKey(key, 0, ValueType::kTypeValue);
total_useful_bytes +=
GetEncodedEntrySize(internal_key.size(), value.size());
read_keys.insert(i);
}
}
size_t total_useful_bytes_iter2 =
options.statistics->getTickerCount(READ_AMP_ESTIMATE_USEFUL_BYTES);
size_t total_loaded_bytes_iter2 =
options.statistics->getTickerCount(READ_AMP_TOTAL_READ_BYTES);
    // Read amp is on average 100% since we read everything we loaded into memory
if (k == 0) {
ASSERT_EQ(total_useful_bytes_iter1 + total_useful_bytes_iter2,
total_loaded_bytes_iter1 + total_loaded_bytes_iter2);
} else {
ASSERT_NEAR((total_useful_bytes_iter1 + total_useful_bytes_iter2) * 1.0f /
(total_loaded_bytes_iter1 + total_loaded_bytes_iter2),
1, .01);
}
}
}
#endif // !OS_SOLARIS
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, AutomaticCompactionOverlapManualCompaction) {
Options options = CurrentOptions();
options.num_levels = 3;
options.IncreaseParallelism(20);
DestroyAndReopen(options);
ASSERT_OK(Put(Key(0), "a"));
ASSERT_OK(Put(Key(5), "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(10), "a"));
ASSERT_OK(Put(Key(15), "a"));
ASSERT_OK(Flush());
CompactRangeOptions cro;
cro.change_level = true;
cro.target_level = 2;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
auto get_stat = [](std::string level_str, LevelStatType type,
std::map<std::string, std::string> props) {
auto prop_str =
"compaction." + level_str + "." +
InternalStats::compaction_level_stats.at(type).property_name.c_str();
auto prop_item = props.find(prop_str);
return prop_item == props.end() ? 0 : std::stod(prop_item->second);
};
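  // get_stat() looks up keys of the form "compaction.<level>.<stat name>" in
  // the map returned for "rocksdb.cfstats", with stat names taken from
  // InternalStats::compaction_level_stats.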
// Trivial move 2 files to L2
ASSERT_EQ("0,0,2", FilesPerLevel());
  // Also test that the stats GetMapProperty API reports the same result
{
std::map<std::string, std::string> prop;
ASSERT_TRUE(dbfull()->GetMapProperty("rocksdb.cfstats", &prop));
ASSERT_EQ(0, get_stat("L0", LevelStatType::NUM_FILES, prop));
ASSERT_EQ(0, get_stat("L1", LevelStatType::NUM_FILES, prop));
ASSERT_EQ(2, get_stat("L2", LevelStatType::NUM_FILES, prop));
ASSERT_EQ(2, get_stat("Sum", LevelStatType::NUM_FILES, prop));
}
  // While the compaction is running, we will create 2 new files that
  // can fit in L2; these 2 files would be moved to L2, overlap with
  // the running compaction, and break the LSM consistency.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():Start", [&](void* /*arg*/) {
ASSERT_OK(
dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
{"max_bytes_for_level_base", "1"}}));
ASSERT_OK(Put(Key(6), "a"));
ASSERT_OK(Put(Key(7), "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(8), "a"));
ASSERT_OK(Put(Key(9), "a"));
ASSERT_OK(Flush());
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
// Run a manual compaction that will compact the 2 files in L2
// into 1 file in L2
cro.exclusive_manual_compaction = false;
cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  // Test that the stats GetMapProperty API reports 1 file in L2
{
std::map<std::string, std::string> prop;
ASSERT_TRUE(dbfull()->GetMapProperty("rocksdb.cfstats", &prop));
ASSERT_EQ(1, get_stat("L2", LevelStatType::NUM_FILES, prop));
}
}
TEST_F(DBTest2, ManualCompactionOverlapManualCompaction) {
Options options = CurrentOptions();
options.num_levels = 2;
options.IncreaseParallelism(20);
options.disable_auto_compactions = true;
DestroyAndReopen(options);
ASSERT_OK(Put(Key(0), "a"));
ASSERT_OK(Put(Key(5), "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(10), "a"));
ASSERT_OK(Put(Key(15), "a"));
ASSERT_OK(Flush());
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
// Trivial move 2 files to L1
ASSERT_EQ("0,2", FilesPerLevel());
std::function<void()> bg_manual_compact = [&]() {
std::string k1 = Key(6);
std::string k2 = Key(9);
Slice k1s(k1);
Slice k2s(k2);
CompactRangeOptions cro;
cro.exclusive_manual_compaction = false;
ASSERT_OK(db_->CompactRange(cro, &k1s, &k2s));
};
ROCKSDB_NAMESPACE::port::Thread bg_thread;
  // While the compaction is running, we will create 2 new files that
  // can fit in L1; these 2 files would be moved to L1, overlap with
  // the running compaction, and break the LSM consistency.
std::atomic<bool> flag(false);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():Start", [&](void* /*arg*/) {
if (flag.exchange(true)) {
// We want to make sure to call this callback only once
return;
}
ASSERT_OK(Put(Key(6), "a"));
ASSERT_OK(Put(Key(7), "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(8), "a"));
ASSERT_OK(Put(Key(9), "a"));
ASSERT_OK(Flush());
// Start a non-exclusive manual compaction in a bg thread
bg_thread = port::Thread(bg_manual_compact);
        // This manual compaction conflicts with the other manual compaction,
        // so it should wait until the first compaction finishes
env_->SleepForMicroseconds(1000000);
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
// Run a manual compaction that will compact the 2 files in L1
// into 1 file in L1
CompactRangeOptions cro;
cro.exclusive_manual_compaction = false;
cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
bg_thread.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, PausingManualCompaction1) {
Options options = CurrentOptions();
options.disable_auto_compactions = true;
options.num_levels = 7;
DestroyAndReopen(options);
Random rnd(301);
// Generate a file containing 10 keys.
for (int i = 0; i < 10; i++) {
ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
  // Generate another file containing the same keys
for (int i = 0; i < 10; i++) {
ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
int manual_compactions_paused = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():PausingManualCompaction:1", [&](void* arg) {
auto paused = static_cast<std::atomic<int>*>(arg);
ASSERT_EQ(0, paused->load(std::memory_order_acquire));
paused->fetch_add(1, std::memory_order_release);
manual_compactions_paused += 1;
});
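  // The sync point hands the callback the compaction's pause counter; bumping
  // it here acts like DisableManualCompaction() kicking in mid-run, so the
  // manual compactions below give up without rewriting any files.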
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
std::vector<std::string> files_before_compact, files_after_compact;
// Remember file name before compaction is triggered
std::vector<LiveFileMetaData> files_meta;
dbfull()->GetLiveFilesMetaData(&files_meta);
for (auto file : files_meta) {
files_before_compact.push_back(file.name);
}
// OK, now trigger a manual compaction
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
// Wait for compactions to get scheduled and stopped
dbfull()->TEST_WaitForCompact(true);
// Get file names after compaction is stopped
files_meta.clear();
dbfull()->GetLiveFilesMetaData(&files_meta);
for (auto file : files_meta) {
files_after_compact.push_back(file.name);
}
// Like nothing happened
ASSERT_EQ(files_before_compact, files_after_compact);
ASSERT_EQ(manual_compactions_paused, 1);
manual_compactions_paused = 0;
  // Now make sure CompactFiles also does not run
dbfull()->CompactFiles(ROCKSDB_NAMESPACE::CompactionOptions(),
files_before_compact, 0);
// Wait for manual compaction to get scheduled and finish
dbfull()->TEST_WaitForCompact(true);
files_meta.clear();
files_after_compact.clear();
dbfull()->GetLiveFilesMetaData(&files_meta);
for (auto file : files_meta) {
files_after_compact.push_back(file.name);
}
ASSERT_EQ(files_before_compact, files_after_compact);
// CompactFiles returns at entry point
ASSERT_EQ(manual_compactions_paused, 0);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
// PausingManualCompaction does not affect auto compaction
TEST_F(DBTest2, PausingManualCompaction2) {
Options options = CurrentOptions();
options.level0_file_num_compaction_trigger = 2;
options.disable_auto_compactions = false;
DestroyAndReopen(options);
dbfull()->DisableManualCompaction();
Random rnd(301);
for (int i = 0; i < 2; i++) {
    // Generate a file containing 100 keys.
for (int j = 0; j < 100; j++) {
ASSERT_OK(Put(Key(j), rnd.RandomString(50)));
}
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->TEST_WaitForCompact(true));
std::vector<LiveFileMetaData> files_meta;
dbfull()->GetLiveFilesMetaData(&files_meta);
ASSERT_EQ(files_meta.size(), 1);
}
TEST_F(DBTest2, PausingManualCompaction3) {
CompactRangeOptions compact_options;
Options options = CurrentOptions();
options.disable_auto_compactions = true;
options.num_levels = 7;
Random rnd(301);
auto generate_files = [&]() {
for (int i = 0; i < options.num_levels; i++) {
for (int j = 0; j < options.num_levels - i + 1; j++) {
for (int k = 0; k < 1000; k++) {
ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50)));
}
Flush();
}
for (int l = 1; l < options.num_levels - i; l++) {
MoveFilesToLevel(l);
}
}
};
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
int run_manual_compactions = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():PausingManualCompaction:1",
[&](void* /*arg*/) { run_manual_compactions++; });
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
dbfull()->DisableManualCompaction();
dbfull()->CompactRange(compact_options, nullptr, nullptr);
dbfull()->TEST_WaitForCompact(true);
  // As manual compaction is disabled, we do not even reach the sync point
ASSERT_EQ(run_manual_compactions, 0);
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
"CompactionJob::Run():PausingManualCompaction:1");
dbfull()->EnableManualCompaction();
dbfull()->CompactRange(compact_options, nullptr, nullptr);
dbfull()->TEST_WaitForCompact(true);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, PausingManualCompaction4) {
CompactRangeOptions compact_options;
Options options = CurrentOptions();
options.disable_auto_compactions = true;
options.num_levels = 7;
Random rnd(301);
auto generate_files = [&]() {
for (int i = 0; i < options.num_levels; i++) {
for (int j = 0; j < options.num_levels - i + 1; j++) {
for (int k = 0; k < 1000; k++) {
ASSERT_OK(Put(Key(k + j * 1000), rnd.RandomString(50)));
}
Flush();
}
for (int l = 1; l < options.num_levels - i; l++) {
MoveFilesToLevel(l);
}
}
};
DestroyAndReopen(options);
generate_files();
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
int run_manual_compactions = 0;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run():PausingManualCompaction:2", [&](void* arg) {
auto paused = static_cast<std::atomic<int>*>(arg);
ASSERT_EQ(0, paused->load(std::memory_order_acquire));
paused->fetch_add(1, std::memory_order_release);
run_manual_compactions++;
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
dbfull()->CompactRange(compact_options, nullptr, nullptr);
dbfull()->TEST_WaitForCompact(true);
ASSERT_EQ(run_manual_compactions, 1);
#ifndef ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearCallBack(
"CompactionJob::Run():PausingManualCompaction:2");
dbfull()->EnableManualCompaction();
dbfull()->CompactRange(compact_options, nullptr, nullptr);
dbfull()->TEST_WaitForCompact(true);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, OptimizeForPointLookup) {
Options options = CurrentOptions();
Close();
options.OptimizeForPointLookup(2);
ASSERT_OK(DB::Open(options, dbname_, &db_));
ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo"));
Flush();
ASSERT_EQ("v1", Get("foo"));
}
TEST_F(DBTest2, OptimizeForSmallDB) {
Options options = CurrentOptions();
Close();
options.OptimizeForSmallDb();
// Find the cache object
ASSERT_TRUE(options.table_factory->IsInstanceOf(
TableFactory::kBlockBasedTableName()));
auto table_options =
options.table_factory->GetOptions<BlockBasedTableOptions>();
ASSERT_TRUE(table_options != nullptr);
std::shared_ptr<Cache> cache = table_options->block_cache;
ASSERT_EQ(0, cache->GetUsage());
ASSERT_OK(DB::Open(options, dbname_, &db_));
ASSERT_OK(Put("foo", "v1"));
  // Memtable memory is charged to the block cache
ASSERT_NE(0, cache->GetUsage());
ASSERT_EQ("v1", Get("foo"));
Flush();
size_t prev_size = cache->GetUsage();
  // Remember the block cache size so that we can check that it grows after
  // Get().
  // Use a pinnable slice so that it pins the block and the block is not
  // evicted by the time we check the size.
PinnableSlice value;
ASSERT_OK(db_->Get(ReadOptions(), db_->DefaultColumnFamily(), "foo", &value));
ASSERT_GT(cache->GetUsage(), prev_size);
value.Reset();
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, IterRaceFlush1) {
ASSERT_OK(Put("foo", "v1"));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::NewIterator:1", "DBTest2::IterRaceFlush:1"},
{"DBTest2::IterRaceFlush:2", "DBImpl::NewIterator:2"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ROCKSDB_NAMESPACE::port::Thread t1([&] {
TEST_SYNC_POINT("DBTest2::IterRaceFlush:1");
ASSERT_OK(Put("foo", "v2"));
Flush();
TEST_SYNC_POINT("DBTest2::IterRaceFlush:2");
});
// iterator is created after the first Put(), so it should see either
// "v1" or "v2".
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->Seek("foo");
ASSERT_TRUE(it->Valid());
ASSERT_EQ("foo", it->key().ToString());
}
t1.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, IterRaceFlush2) {
ASSERT_OK(Put("foo", "v1"));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::NewIterator:3", "DBTest2::IterRaceFlush2:1"},
{"DBTest2::IterRaceFlush2:2", "DBImpl::NewIterator:4"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ROCKSDB_NAMESPACE::port::Thread t1([&] {
TEST_SYNC_POINT("DBTest2::IterRaceFlush2:1");
ASSERT_OK(Put("foo", "v2"));
Flush();
TEST_SYNC_POINT("DBTest2::IterRaceFlush2:2");
});
// iterator is created after the first Put(), so it should see either
// "v1" or "v2".
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->Seek("foo");
ASSERT_TRUE(it->Valid());
ASSERT_EQ("foo", it->key().ToString());
}
t1.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, IterRefreshRaceFlush) {
ASSERT_OK(Put("foo", "v1"));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"ArenaWrappedDBIter::Refresh:1", "DBTest2::IterRefreshRaceFlush:1"},
{"DBTest2::IterRefreshRaceFlush:2", "ArenaWrappedDBIter::Refresh:2"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ROCKSDB_NAMESPACE::port::Thread t1([&] {
TEST_SYNC_POINT("DBTest2::IterRefreshRaceFlush:1");
ASSERT_OK(Put("foo", "v2"));
Flush();
TEST_SYNC_POINT("DBTest2::IterRefreshRaceFlush:2");
});
// iterator is created after the first Put(), so it should see either
// "v1" or "v2".
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->Refresh();
it->Seek("foo");
ASSERT_TRUE(it->Valid());
ASSERT_EQ("foo", it->key().ToString());
}
t1.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, GetRaceFlush1) {
ASSERT_OK(Put("foo", "v1"));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::GetImpl:1", "DBTest2::GetRaceFlush:1"},
{"DBTest2::GetRaceFlush:2", "DBImpl::GetImpl:2"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ROCKSDB_NAMESPACE::port::Thread t1([&] {
TEST_SYNC_POINT("DBTest2::GetRaceFlush:1");
ASSERT_OK(Put("foo", "v2"));
Flush();
TEST_SYNC_POINT("DBTest2::GetRaceFlush:2");
});
// Get() is issued after the first Put(), so it should see either
// "v1" or "v2".
ASSERT_NE("NOT_FOUND", Get("foo"));
t1.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, GetRaceFlush2) {
ASSERT_OK(Put("foo", "v1"));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::GetImpl:3", "DBTest2::GetRaceFlush:1"},
{"DBTest2::GetRaceFlush:2", "DBImpl::GetImpl:4"}});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
port::Thread t1([&] {
TEST_SYNC_POINT("DBTest2::GetRaceFlush:1");
ASSERT_OK(Put("foo", "v2"));
Flush();
TEST_SYNC_POINT("DBTest2::GetRaceFlush:2");
});
// Get() is issued after the first Put(), so it should see either
// "v1" or "v2".
ASSERT_NE("NOT_FOUND", Get("foo"));
t1.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, DirectIO) {
if (!IsDirectIOSupported()) {
return;
}
Options options = CurrentOptions();
options.use_direct_reads = options.use_direct_io_for_flush_and_compaction =
true;
options.allow_mmap_reads = options.allow_mmap_writes = false;
DestroyAndReopen(options);
ASSERT_OK(Put(Key(0), "a"));
ASSERT_OK(Put(Key(5), "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(10), "a"));
ASSERT_OK(Put(Key(15), "a"));
ASSERT_OK(Flush());
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
Reopen(options);
}
TEST_F(DBTest2, MemtableOnlyIterator) {
Options options = CurrentOptions();
CreateAndReopenWithCF({"pikachu"}, options);
ASSERT_OK(Put(1, "foo", "first"));
ASSERT_OK(Put(1, "bar", "second"));
ReadOptions ropt;
ropt.read_tier = kMemtableTier;
std::string value;
Iterator* it = nullptr;
// Before flushing
// point lookups
ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
ASSERT_EQ("first", value);
ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
ASSERT_EQ("second", value);
// Memtable-only iterator (read_tier=kMemtableTier); data not flushed yet.
it = db_->NewIterator(ropt, handles_[1]);
int count = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
ASSERT_TRUE(it->Valid());
count++;
}
ASSERT_TRUE(!it->Valid());
ASSERT_EQ(2, count);
delete it;
Flush(1);
// After flushing
// point lookups
ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
ASSERT_EQ("first", value);
ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
ASSERT_EQ("second", value);
  // Nothing should be returned by the memtable-only iterator after flushing.
it = db_->NewIterator(ropt, handles_[1]);
count = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
ASSERT_TRUE(it->Valid());
count++;
}
ASSERT_TRUE(!it->Valid());
ASSERT_EQ(0, count);
delete it;
// Add a key to memtable
ASSERT_OK(Put(1, "foobar", "third"));
it = db_->NewIterator(ropt, handles_[1]);
count = 0;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ("foobar", it->key().ToString());
ASSERT_EQ("third", it->value().ToString());
count++;
}
ASSERT_TRUE(!it->Valid());
ASSERT_EQ(1, count);
delete it;
}
TEST_F(DBTest2, LowPriWrite) {
Options options = CurrentOptions();
  // Compaction pressure should trigger once 6 L0 files have piled up
options.level0_file_num_compaction_trigger = 4;
options.level0_slowdown_writes_trigger = 12;
options.level0_stop_writes_trigger = 30;
options.delayed_write_rate = 8 * 1024 * 1024;
Reopen(options);
std::atomic<int> rate_limit_count(0);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"GenericRateLimiter::Request:1", [&](void* arg) {
rate_limit_count.fetch_add(1);
int64_t* rate_bytes_per_sec = static_cast<int64_t*>(arg);
ASSERT_EQ(1024 * 1024, *rate_bytes_per_sec);
});
// Block compaction
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
{"DBTest.LowPriWrite:0", "DBImpl::BGWorkCompaction"},
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
WriteOptions wo;
for (int i = 0; i < 6; i++) {
wo.low_pri = false;
Put("", "", wo);
wo.low_pri = true;
Put("", "", wo);
Flush();
}
ASSERT_EQ(0, rate_limit_count.load());
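  // With 6 L0 files flushed and the compaction blocked by the sync point
  // dependency, the write path sees pressure: the next low_pri write goes
  // through the rate limiter (callback above), normal-priority writes do not,
  // and once the compaction is unblocked the throttling stops again.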
wo.low_pri = true;
Put("", "", wo);
ASSERT_EQ(1, rate_limit_count.load());
wo.low_pri = false;
Put("", "", wo);
ASSERT_EQ(1, rate_limit_count.load());
TEST_SYNC_POINT("DBTest.LowPriWrite:0");
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
dbfull()->TEST_WaitForCompact();
wo.low_pri = true;
Put("", "", wo);
ASSERT_EQ(1, rate_limit_count.load());
wo.low_pri = false;
Put("", "", wo);
ASSERT_EQ(1, rate_limit_count.load());
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, RateLimitedCompactionReads) {
// compaction input has 512KB data
const int kNumKeysPerFile = 128;
const int kBytesPerKey = 1024;
const int kNumL0Files = 4;
for (auto use_direct_io : {false, true}) {
if (use_direct_io && !IsDirectIOSupported()) {
continue;
}
Options options = CurrentOptions();
options.compression = kNoCompression;
options.level0_file_num_compaction_trigger = kNumL0Files;
options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
options.new_table_reader_for_compaction_inputs = true;
    // Reading the compaction input takes roughly one second, split into
    // 100 x 10ms intervals. Each interval permits 5.12KB, which is smaller
    // than the block size, so this test exercises the code for chunking reads.
options.rate_limiter.reset(NewGenericRateLimiter(
static_cast<int64_t>(kNumL0Files * kNumKeysPerFile *
kBytesPerKey) /* rate_bytes_per_sec */,
10 * 1000 /* refill_period_us */, 10 /* fairness */,
RateLimiter::Mode::kReadsOnly));
options.use_direct_reads = options.use_direct_io_for_flush_and_compaction =
use_direct_io;
BlockBasedTableOptions bbto;
bbto.block_size = 16384;
bbto.no_block_cache = true;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyAndReopen(options);
for (int i = 0; i < kNumL0Files; ++i) {
for (int j = 0; j <= kNumKeysPerFile; ++j) {
ASSERT_OK(Put(Key(j), DummyString(kBytesPerKey)));
}
dbfull()->TEST_WaitForFlushMemTable();
ASSERT_EQ(i + 1, NumTableFilesAtLevel(0));
}
dbfull()->TEST_WaitForCompact();
ASSERT_EQ(0, NumTableFilesAtLevel(0));
ASSERT_EQ(0, options.rate_limiter->GetTotalBytesThrough(Env::IO_HIGH));
// should be slightly above 512KB due to non-data blocks read. Arbitrarily
// chose 1MB as the upper bound on the total bytes read.
size_t rate_limited_bytes =
options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW);
// Include the explicit prefetch of the footer in direct I/O case.
size_t direct_io_extra = use_direct_io ? 512 * 1024 : 0;
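    // i.e. total rate-limited compaction reads must land between the raw data
    // size (~512KB) and twice that, plus the footer prefetch in the direct
    // I/O case.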
ASSERT_GE(
rate_limited_bytes,
static_cast<size_t>(kNumKeysPerFile * kBytesPerKey * kNumL0Files));
ASSERT_LT(
rate_limited_bytes,
static_cast<size_t>(2 * kNumKeysPerFile * kBytesPerKey * kNumL0Files +
direct_io_extra));
Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_EQ(iter->value().ToString(), DummyString(kBytesPerKey));
}
delete iter;
    // Bytes read by the user iterator shouldn't count against the rate limit.
ASSERT_EQ(rate_limited_bytes,
static_cast<size_t>(
options.rate_limiter->GetTotalBytesThrough(Env::IO_LOW)));
}
}
#endif // ROCKSDB_LITE
// Make sure the DB can be reopened with a reduced number of levels, given no
// file is on a level higher than the new num_levels.
TEST_F(DBTest2, ReduceLevel) {
Options options;
options.env = env_;
options.disable_auto_compactions = true;
options.num_levels = 7;
Reopen(options);
Put("foo", "bar");
Flush();
MoveFilesToLevel(6);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
CompactRangeOptions compact_options;
compact_options.change_level = true;
compact_options.target_level = 1;
dbfull()->CompactRange(compact_options, nullptr, nullptr);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
options.num_levels = 3;
Reopen(options);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,1", FilesPerLevel());
#endif // !ROCKSDB_LITE
}
// Test that ReadCallback is actually used in both the memtable and SST files
TEST_F(DBTest2, ReadCallbackTest) {
Options options;
options.disable_auto_compactions = true;
options.num_levels = 7;
options.env = env_;
Reopen(options);
std::vector<const Snapshot*> snapshots;
// Try to create a db with multiple layers and a memtable
const std::string key = "foo";
const std::string value = "bar";
  // This test assumes that the seq starts at 1 and is increased by 1 after each
// write batch of size 1. If that behavior changes, the test needs to be
// updated as well.
// TODO(myabandeh): update this test to use the seq number that is returned by
// the DB instead of assuming what seq the DB used.
int i = 1;
for (; i < 10; i++) {
Put(key, value + std::to_string(i));
// Take a snapshot to avoid the value being removed during compaction
auto snapshot = dbfull()->GetSnapshot();
snapshots.push_back(snapshot);
}
Flush();
for (; i < 20; i++) {
Put(key, value + std::to_string(i));
// Take a snapshot to avoid the value being removed during compaction
auto snapshot = dbfull()->GetSnapshot();
snapshots.push_back(snapshot);
}
Flush();
MoveFilesToLevel(6);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
for (; i < 30; i++) {
Put(key, value + std::to_string(i));
auto snapshot = dbfull()->GetSnapshot();
snapshots.push_back(snapshot);
}
Flush();
#ifndef ROCKSDB_LITE
ASSERT_EQ("1,0,0,0,0,0,2", FilesPerLevel());
#endif // !ROCKSDB_LITE
// And also add some values to the memtable
for (; i < 40; i++) {
Put(key, value + std::to_string(i));
auto snapshot = dbfull()->GetSnapshot();
snapshots.push_back(snapshot);
}
class TestReadCallback : public ReadCallback {
public:
explicit TestReadCallback(SequenceNumber snapshot)
: ReadCallback(snapshot), snapshot_(snapshot) {}
bool IsVisibleFullCheck(SequenceNumber seq) override {
return seq <= snapshot_;
}
private:
SequenceNumber snapshot_;
};
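  // Each GetImpl() below consults the callback, whose IsVisibleFullCheck()
  // only admits versions with seq <= snapshot_, so the lookup must surface
  // exactly the value written at that sequence number.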
for (int seq = 1; seq < i; seq++) {
PinnableSlice pinnable_val;
ReadOptions roptions;
TestReadCallback callback(seq);
bool dont_care = true;
DBImpl::GetImplOptions get_impl_options;
get_impl_options.column_family = dbfull()->DefaultColumnFamily();
get_impl_options.value = &pinnable_val;
get_impl_options.value_found = &dont_care;
get_impl_options.callback = &callback;
Status s = dbfull()->GetImpl(roptions, key, get_impl_options);
ASSERT_TRUE(s.ok());
    // Assuming that each Put increases the seq by one, the value and the seq
    // number must match since we also increment the value suffix by 1 per Put.
ASSERT_EQ(value + std::to_string(seq), pinnable_val.ToString());
}
for (auto snapshot : snapshots) {
dbfull()->ReleaseSnapshot(snapshot);
}
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, LiveFilesOmitObsoleteFiles) {
// Regression test for race condition where an obsolete file is returned to
// user as a "live file" but then deleted, all while file deletions are
// disabled.
//
// It happened like this:
//
// 1. [flush thread] Log file "x.log" found by FindObsoleteFiles
// 2. [user thread] DisableFileDeletions, GetSortedWalFiles are called and the
// latter returned "x.log"
// 3. [flush thread] PurgeObsoleteFiles deleted "x.log"
// 4. [user thread] Reading "x.log" failed
//
// Unfortunately the only regression test I can come up with involves sleep.
// We cannot set SyncPoints to repro since, once the fix is applied, the
// SyncPoints would cause a deadlock as the repro's sequence of events is now
// prohibited.
//
// Instead, if we sleep for a second between Find and Purge, and ensure the
// read attempt happens after purge, then the sequence of events will almost
// certainly happen on the old code.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
{"DBImpl::BackgroundCallFlush:FilesFound",
"DBTest2::LiveFilesOmitObsoleteFiles:FlushTriggered"},
{"DBImpl::PurgeObsoleteFiles:End",
"DBTest2::LiveFilesOmitObsoleteFiles:LiveFilesCaptured"},
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"DBImpl::PurgeObsoleteFiles:Begin",
[&](void* /*arg*/) { env_->SleepForMicroseconds(1000000); });
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Put("key", "val");
FlushOptions flush_opts;
flush_opts.wait = false;
db_->Flush(flush_opts);
TEST_SYNC_POINT("DBTest2::LiveFilesOmitObsoleteFiles:FlushTriggered");
db_->DisableFileDeletions();
VectorLogPtr log_files;
db_->GetSortedWalFiles(log_files);
TEST_SYNC_POINT("DBTest2::LiveFilesOmitObsoleteFiles:LiveFilesCaptured");
for (const auto& log_file : log_files) {
ASSERT_OK(env_->FileExists(LogFileName(dbname_, log_file->LogNumber())));
}
db_->EnableFileDeletions();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, TestNumPread) {
Options options = CurrentOptions();
bool prefetch_supported =
test::IsPrefetchSupported(env_->GetFileSystem(), dbname_);
// disable block cache
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
Reopen(options);
env_->count_random_reads_ = true;
env_->random_file_open_counter_.store(0);
ASSERT_OK(Put("bar", "foo"));
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Flush());
if (prefetch_supported) {
// After flush, we'll open the file and read footer, meta block,
// property block and index block.
ASSERT_EQ(4, env_->random_read_counter_.Read());
} else {
// With prefetch not supported, we will do a single read into a buffer
ASSERT_EQ(1, env_->random_read_counter_.Read());
}
ASSERT_EQ(1, env_->random_file_open_counter_.load());
  // One pread per normal data block read
env_->random_file_open_counter_.store(0);
env_->random_read_counter_.Reset();
ASSERT_EQ("bar", Get("foo"));
ASSERT_EQ(1, env_->random_read_counter_.Read());
// All files are already opened.
ASSERT_EQ(0, env_->random_file_open_counter_.load());
env_->random_file_open_counter_.store(0);
env_->random_read_counter_.Reset();
ASSERT_OK(Put("bar2", "foo2"));
ASSERT_OK(Put("foo2", "bar2"));
ASSERT_OK(Flush());
if (prefetch_supported) {
// After flush, we'll open the file and read footer, meta block,
// property block and index block.
ASSERT_EQ(4, env_->random_read_counter_.Read());
} else {
// With prefetch not supported, we will do a single read into a buffer
ASSERT_EQ(1, env_->random_read_counter_.Read());
}
ASSERT_EQ(1, env_->random_file_open_counter_.load());
env_->random_file_open_counter_.store(0);
env_->random_read_counter_.Reset();
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
if (prefetch_supported) {
// Compaction needs two input blocks, which requires 2 preads, and
// generate a new SST file which needs 4 preads (footer, meta block,
// property block and index block). In total 6.
ASSERT_EQ(6, env_->random_read_counter_.Read());
} else {
// With prefetch off, compaction needs two input blocks,
// followed by a single buffered read. In total 3.
ASSERT_EQ(3, env_->random_read_counter_.Read());
}
// All compaction input files should have already been opened.
ASSERT_EQ(1, env_->random_file_open_counter_.load());
  // One pread per normal data block read
env_->random_file_open_counter_.store(0);
env_->random_read_counter_.Reset();
ASSERT_EQ("foo2", Get("bar2"));
ASSERT_EQ(1, env_->random_read_counter_.Read());
// SST files are already opened.
ASSERT_EQ(0, env_->random_file_open_counter_.load());
}
TEST_F(DBTest2, TraceAndReplay) {
Options options = CurrentOptions();
options.merge_operator = MergeOperators::CreatePutOperator();
ReadOptions ro;
WriteOptions wo;
TraceOptions trace_opts;
EnvOptions env_opts;
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
Iterator* single_iter = nullptr;
ASSERT_TRUE(db_->EndTrace().IsIOError());
std::string trace_filename = dbname_ + "/rocksdb.trace";
std::unique_ptr<TraceWriter> trace_writer;
ASSERT_OK(NewFileTraceWriter(env_, env_opts, trace_filename, &trace_writer));
ASSERT_OK(db_->StartTrace(trace_opts, std::move(trace_writer)));
ASSERT_OK(Put(0, "a", "1"));
ASSERT_OK(Merge(0, "b", "2"));
ASSERT_OK(Delete(0, "c"));
ASSERT_OK(SingleDelete(0, "d"));
ASSERT_OK(db_->DeleteRange(wo, dbfull()->DefaultColumnFamily(), "e", "f"));
WriteBatch batch;
ASSERT_OK(batch.Put("f", "11"));
ASSERT_OK(batch.Merge("g", "12"));
ASSERT_OK(batch.Delete("h"));
ASSERT_OK(batch.SingleDelete("i"));
ASSERT_OK(batch.DeleteRange("j", "k"));
ASSERT_OK(db_->Write(wo, &batch));
single_iter = db_->NewIterator(ro);
single_iter->Seek("f");
single_iter->SeekForPrev("g");
delete single_iter;
ASSERT_EQ("1", Get(0, "a"));
ASSERT_EQ("12", Get(0, "g"));
ASSERT_OK(Put(1, "foo", "bar"));
ASSERT_OK(Put(1, "rocksdb", "rocks"));
ASSERT_EQ("NOT_FOUND", Get(1, "leveldb"));
ASSERT_OK(db_->EndTrace());
  // These should not get into the trace file as they are issued after EndTrace.
Put("hello", "world");
Merge("foo", "bar");
// Open another db, replay, and verify the data
std::string value;
std::string dbname2 = test::PerThreadDBPath(env_, "/db_replay");
ASSERT_OK(DestroyDB(dbname2, options));
// Using a different name than db2, to pacify infer's use-after-lifetime
// warnings (http://fbinfer.com).
DB* db2_init = nullptr;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbname2, &db2_init));
ColumnFamilyHandle* cf;
ASSERT_OK(
db2_init->CreateColumnFamily(ColumnFamilyOptions(), "pikachu", &cf));
delete cf;
delete db2_init;
DB* db2 = nullptr;
std::vector<ColumnFamilyDescriptor> column_families;
ColumnFamilyOptions cf_options;
cf_options.merge_operator = MergeOperators::CreatePutOperator();
column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
column_families.push_back(
ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
std::vector<ColumnFamilyHandle*> handles;
DBOptions db_opts;
db_opts.env = env_;
ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
env_->SleepForMicroseconds(100);
// Verify that the keys don't already exist
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "g", &value).IsNotFound());
std::unique_ptr<TraceReader> trace_reader;
ASSERT_OK(NewFileTraceReader(env_, env_opts, trace_filename, &trace_reader));
Replayer replayer(db2, handles_, std::move(trace_reader));
ASSERT_OK(replayer.Replay());
ASSERT_OK(db2->Get(ro, handles[0], "a", &value));
ASSERT_EQ("1", value);
ASSERT_OK(db2->Get(ro, handles[0], "g", &value));
ASSERT_EQ("12", value);
ASSERT_TRUE(db2->Get(ro, handles[0], "hello", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "world", &value).IsNotFound());
ASSERT_OK(db2->Get(ro, handles[1], "foo", &value));
ASSERT_EQ("bar", value);
ASSERT_OK(db2->Get(ro, handles[1], "rocksdb", &value));
ASSERT_EQ("rocks", value);
for (auto handle : handles) {
delete handle;
}
delete db2;
ASSERT_OK(DestroyDB(dbname2, options));
}
TEST_F(DBTest2, TraceWithLimit) {
Options options = CurrentOptions();
options.merge_operator = MergeOperators::CreatePutOperator();
ReadOptions ro;
WriteOptions wo;
TraceOptions trace_opts;
EnvOptions env_opts;
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
  // Test the max trace file size option
trace_opts.max_trace_file_size = 5;
std::string trace_filename = dbname_ + "/rocksdb.trace1";
std::unique_ptr<TraceWriter> trace_writer;
ASSERT_OK(NewFileTraceWriter(env_, env_opts, trace_filename, &trace_writer));
ASSERT_OK(db_->StartTrace(trace_opts, std::move(trace_writer)));
ASSERT_OK(Put(0, "a", "1"));
ASSERT_OK(Put(0, "b", "1"));
ASSERT_OK(Put(0, "c", "1"));
ASSERT_OK(db_->EndTrace());
std::string dbname2 = test::PerThreadDBPath(env_, "/db_replay2");
std::string value;
ASSERT_OK(DestroyDB(dbname2, options));
// Using a different name than db2, to pacify infer's use-after-lifetime
// warnings (http://fbinfer.com).
DB* db2_init = nullptr;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbname2, &db2_init));
ColumnFamilyHandle* cf;
ASSERT_OK(
db2_init->CreateColumnFamily(ColumnFamilyOptions(), "pikachu", &cf));
delete cf;
delete db2_init;
DB* db2 = nullptr;
std::vector<ColumnFamilyDescriptor> column_families;
ColumnFamilyOptions cf_options;
cf_options.merge_operator = MergeOperators::CreatePutOperator();
column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
column_families.push_back(
ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
std::vector<ColumnFamilyHandle*> handles;
DBOptions db_opts;
db_opts.env = env_;
ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
env_->SleepForMicroseconds(100);
// Verify that the keys don't already exist
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "b", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "c", &value).IsNotFound());
std::unique_ptr<TraceReader> trace_reader;
ASSERT_OK(NewFileTraceReader(env_, env_opts, trace_filename, &trace_reader));
Replayer replayer(db2, handles_, std::move(trace_reader));
ASSERT_OK(replayer.Replay());
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "b", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "c", &value).IsNotFound());
for (auto handle : handles) {
delete handle;
}
delete db2;
ASSERT_OK(DestroyDB(dbname2, options));
}
TEST_F(DBTest2, TraceWithSampling) {
Options options = CurrentOptions();
ReadOptions ro;
WriteOptions wo;
TraceOptions trace_opts;
EnvOptions env_opts;
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
  // Test the trace file sampling option
trace_opts.sampling_frequency = 2;
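  // With sampling_frequency = 2 only every other request is recorded, so
  // after replay just the 2nd and 4th Puts ("b" and "d") should exist in db2,
  // which is what the checks below assert.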
std::string trace_filename = dbname_ + "/rocksdb.trace_sampling";
std::unique_ptr<TraceWriter> trace_writer;
ASSERT_OK(NewFileTraceWriter(env_, env_opts, trace_filename, &trace_writer));
ASSERT_OK(db_->StartTrace(trace_opts, std::move(trace_writer)));
ASSERT_OK(Put(0, "a", "1"));
ASSERT_OK(Put(0, "b", "2"));
ASSERT_OK(Put(0, "c", "3"));
ASSERT_OK(Put(0, "d", "4"));
ASSERT_OK(Put(0, "e", "5"));
ASSERT_OK(db_->EndTrace());
std::string dbname2 = test::PerThreadDBPath(env_, "/db_replay_sampling");
std::string value;
ASSERT_OK(DestroyDB(dbname2, options));
// Using a different name than db2, to pacify infer's use-after-lifetime
// warnings (http://fbinfer.com).
DB* db2_init = nullptr;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbname2, &db2_init));
ColumnFamilyHandle* cf;
ASSERT_OK(
db2_init->CreateColumnFamily(ColumnFamilyOptions(), "pikachu", &cf));
delete cf;
delete db2_init;
DB* db2 = nullptr;
std::vector<ColumnFamilyDescriptor> column_families;
ColumnFamilyOptions cf_options;
column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
column_families.push_back(
ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
std::vector<ColumnFamilyHandle*> handles;
DBOptions db_opts;
db_opts.env = env_;
ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
env_->SleepForMicroseconds(100);
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "b", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "c", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "d", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "e", &value).IsNotFound());
std::unique_ptr<TraceReader> trace_reader;
ASSERT_OK(NewFileTraceReader(env_, env_opts, trace_filename, &trace_reader));
Replayer replayer(db2, handles_, std::move(trace_reader));
ASSERT_OK(replayer.Replay());
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_FALSE(db2->Get(ro, handles[0], "b", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "c", &value).IsNotFound());
ASSERT_FALSE(db2->Get(ro, handles[0], "d", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "e", &value).IsNotFound());
for (auto handle : handles) {
delete handle;
}
delete db2;
ASSERT_OK(DestroyDB(dbname2, options));
}
TEST_F(DBTest2, TraceWithFilter) {
Options options = CurrentOptions();
options.merge_operator = MergeOperators::CreatePutOperator();
ReadOptions ro;
WriteOptions wo;
TraceOptions trace_opts;
EnvOptions env_opts;
CreateAndReopenWithCF({"pikachu"}, options);
Random rnd(301);
Iterator* single_iter = nullptr;
trace_opts.filter = TraceFilterType::kTraceFilterWrite;
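  // kTraceFilterWrite drops write operations from the trace, so replaying it
  // must not recreate any of the keys written below.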
std::string trace_filename = dbname_ + "/rocksdb.trace";
std::unique_ptr<TraceWriter> trace_writer;
ASSERT_OK(NewFileTraceWriter(env_, env_opts, trace_filename, &trace_writer));
ASSERT_OK(db_->StartTrace(trace_opts, std::move(trace_writer)));
ASSERT_OK(Put(0, "a", "1"));
ASSERT_OK(Merge(0, "b", "2"));
ASSERT_OK(Delete(0, "c"));
ASSERT_OK(SingleDelete(0, "d"));
ASSERT_OK(db_->DeleteRange(wo, dbfull()->DefaultColumnFamily(), "e", "f"));
WriteBatch batch;
ASSERT_OK(batch.Put("f", "11"));
ASSERT_OK(batch.Merge("g", "12"));
ASSERT_OK(batch.Delete("h"));
ASSERT_OK(batch.SingleDelete("i"));
ASSERT_OK(batch.DeleteRange("j", "k"));
ASSERT_OK(db_->Write(wo, &batch));
single_iter = db_->NewIterator(ro);
single_iter->Seek("f");
single_iter->SeekForPrev("g");
delete single_iter;
ASSERT_EQ("1", Get(0, "a"));
ASSERT_EQ("12", Get(0, "g"));
ASSERT_OK(Put(1, "foo", "bar"));
ASSERT_OK(Put(1, "rocksdb", "rocks"));
ASSERT_EQ("NOT_FOUND", Get(1, "leveldb"));
ASSERT_OK(db_->EndTrace());
  // These should not get into the trace file as they are issued after EndTrace.
Put("hello", "world");
Merge("foo", "bar");
// Open another db, replay, and verify the data
std::string value;
std::string dbname2 = test::TmpDir(env_) + "/db_replay";
ASSERT_OK(DestroyDB(dbname2, options));
// Using a different name than db2, to pacify infer's use-after-lifetime
// warnings (http://fbinfer.com).
DB* db2_init = nullptr;
options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbname2, &db2_init));
ColumnFamilyHandle* cf;
ASSERT_OK(
db2_init->CreateColumnFamily(ColumnFamilyOptions(), "pikachu", &cf));
delete cf;
delete db2_init;
DB* db2 = nullptr;
std::vector<ColumnFamilyDescriptor> column_families;
ColumnFamilyOptions cf_options;
cf_options.merge_operator = MergeOperators::CreatePutOperator();
column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
column_families.push_back(
ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
std::vector<ColumnFamilyHandle*> handles;
DBOptions db_opts;
db_opts.env = env_;
ASSERT_OK(DB::Open(db_opts, dbname2, column_families, &handles, &db2));
env_->SleepForMicroseconds(100);
// Verify that the keys don't already exist
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "g", &value).IsNotFound());
std::unique_ptr<TraceReader> trace_reader;
ASSERT_OK(NewFileTraceReader(env_, env_opts, trace_filename, &trace_reader));
Replayer replayer(db2, handles_, std::move(trace_reader));
ASSERT_OK(replayer.Replay());
// None of the key-values should be present since we filtered out the WRITE ops.
ASSERT_TRUE(db2->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "g", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "hello", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "world", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "foo", &value).IsNotFound());
ASSERT_TRUE(db2->Get(ro, handles[0], "rocksdb", &value).IsNotFound());
for (auto handle : handles) {
delete handle;
}
delete db2;
ASSERT_OK(DestroyDB(dbname2, options));
// Set up a new db.
std::string dbname3 = test::TmpDir(env_) + "/db_not_trace_read";
ASSERT_OK(DestroyDB(dbname3, options));
DB* db3_init = nullptr;
options.create_if_missing = true;
ColumnFamilyHandle* cf3;
ASSERT_OK(DB::Open(options, dbname3, &db3_init));
ASSERT_OK(
db3_init->CreateColumnFamily(ColumnFamilyOptions(), "pikachu", &cf3));
delete cf3;
delete db3_init;
column_families.clear();
column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
column_families.push_back(
ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
handles.clear();
DB* db3 = nullptr;
ASSERT_OK(DB::Open(db_opts, dbname3, column_families, &handles, &db3));
env_->SleepForMicroseconds(100);
// Verify that the keys don't already exist
ASSERT_TRUE(db3->Get(ro, handles[0], "a", &value).IsNotFound());
ASSERT_TRUE(db3->Get(ro, handles[0], "g", &value).IsNotFound());
// The tracer will not record the READ ops.
trace_opts.filter = TraceFilterType::kTraceFilterGet;
std::string trace_filename3 = dbname_ + "/rocksdb.trace_3";
std::unique_ptr<TraceWriter> trace_writer3;
ASSERT_OK(
NewFileTraceWriter(env_, env_opts, trace_filename3, &trace_writer3));
ASSERT_OK(db3->StartTrace(trace_opts, std::move(trace_writer3)));
ASSERT_OK(db3->Put(wo, handles[0], "a", "1"));
ASSERT_OK(db3->Merge(wo, handles[0], "b", "2"));
ASSERT_OK(db3->Delete(wo, handles[0], "c"));
ASSERT_OK(db3->SingleDelete(wo, handles[0], "d"));
ASSERT_OK(db3->Get(ro, handles[0], "a", &value));
ASSERT_EQ(value, "1");
ASSERT_TRUE(db3->Get(ro, handles[0], "c", &value).IsNotFound());
ASSERT_OK(db3->EndTrace());
for (auto handle : handles) {
delete handle;
}
delete db3;
ASSERT_OK(DestroyDB(dbname3, options));
std::unique_ptr<TraceReader> trace_reader3;
ASSERT_OK(
NewFileTraceReader(env_, env_opts, trace_filename3, &trace_reader3));
// Count the number of records in the trace file;
int count = 0;
std::string data;
Status s;
while (true) {
s = trace_reader3->Read(&data);
if (!s.ok()) {
break;
}
count += 1;
}
// We also need to count the header and footer
// 4 WRITE + HEADER + FOOTER = 6
ASSERT_EQ(count, 6);
}
#endif // ROCKSDB_LITE
TEST_F(DBTest2, PinnableSliceAndMmapReads) {
Options options = CurrentOptions();
options.env = env_;
if (!IsMemoryMappedAccessSupported()) {
ROCKSDB_GTEST_SKIP("Test requires default environment");
return;
}
options.allow_mmap_reads = true;
options.max_open_files = 100;
options.compression = kNoCompression;
Reopen(options);
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Flush());
PinnableSlice pinned_value;
ASSERT_EQ(Get("foo", &pinned_value), Status::OK());
// It is not safe to pin mmap files as they might disappear due to compaction
ASSERT_FALSE(pinned_value.IsPinned());
ASSERT_EQ(pinned_value.ToString(), "bar");
dbfull()->TEST_CompactRange(0 /* level */, nullptr /* begin */,
nullptr /* end */, nullptr /* column_family */,
true /* disallow_trivial_move */);
// Ensure pinned_value doesn't rely on memory munmap'd by the above
// compaction. It crashes if it does.
ASSERT_EQ(pinned_value.ToString(), "bar");
#ifndef ROCKSDB_LITE
pinned_value.Reset();
// Unsafe to pin mmap files when they could be kicked out of table cache
Close();
ASSERT_OK(ReadOnlyReopen(options));
ASSERT_EQ(Get("foo", &pinned_value), Status::OK());
ASSERT_FALSE(pinned_value.IsPinned());
ASSERT_EQ(pinned_value.ToString(), "bar");
pinned_value.Reset();
// In read-only mode with infinite capacity on table cache it should pin the
// value and avoid the memcpy
Close();
options.max_open_files = -1;
ASSERT_OK(ReadOnlyReopen(options));
ASSERT_EQ(Get("foo", &pinned_value), Status::OK());
ASSERT_TRUE(pinned_value.IsPinned());
ASSERT_EQ(pinned_value.ToString(), "bar");
#endif
}
TEST_F(DBTest2, DISABLED_IteratorPinnedMemory) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
BlockBasedTableOptions bbto;
bbto.no_block_cache = false;
bbto.cache_index_and_filter_blocks = false;
bbto.block_cache = NewLRUCache(100000);
bbto.block_size = 400; // small block size
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
Reopen(options);
Random rnd(301);
std::string v = rnd.RandomString(400);
// Since v is the size of a block, each key should take a block
// of 400+ bytes.
Put("1", v);
Put("3", v);
Put("5", v);
Put("7", v);
ASSERT_OK(Flush());
ASSERT_EQ(0, bbto.block_cache->GetPinnedUsage());
// Verify that iterators don't pin more than one data block in block cache
// at a time.
{
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
iter->SeekToFirst();
for (int i = 0; i < 4; i++) {
ASSERT_TRUE(iter->Valid());
// Block cache should contain exactly one block.
ASSERT_GT(bbto.block_cache->GetPinnedUsage(), 0);
ASSERT_LT(bbto.block_cache->GetPinnedUsage(), 800);
iter->Next();
}
ASSERT_FALSE(iter->Valid());
iter->Seek("4");
ASSERT_TRUE(iter->Valid());
ASSERT_GT(bbto.block_cache->GetPinnedUsage(), 0);
ASSERT_LT(bbto.block_cache->GetPinnedUsage(), 800);
iter->Seek("3");
ASSERT_TRUE(iter->Valid());
ASSERT_GT(bbto.block_cache->GetPinnedUsage(), 0);
ASSERT_LT(bbto.block_cache->GetPinnedUsage(), 800);
}
ASSERT_EQ(0, bbto.block_cache->GetPinnedUsage());
// Test compaction case
Put("2", v);
Put("5", v);
Put("6", v);
Put("8", v);
ASSERT_OK(Flush());
// Clear existing data in block cache
bbto.block_cache->SetCapacity(0);
bbto.block_cache->SetCapacity(100000);
// Verify compaction input iterators don't hold more than one data block at
// a time.
std::atomic<bool> finished(false);
std::atomic<int> block_newed(0);
std::atomic<int> block_destroyed(0);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Block::Block:0", [&](void* /*arg*/) {
if (finished) {
return;
}
// Two iterators. At most 2 outstanding blocks.
EXPECT_GE(block_newed.load(), block_destroyed.load());
EXPECT_LE(block_newed.load(), block_destroyed.load() + 1);
block_newed.fetch_add(1);
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"Block::~Block", [&](void* /*arg*/) {
if (finished) {
return;
}
// Two iterators. At most 2 outstanding blocks.
EXPECT_GE(block_newed.load(), block_destroyed.load() + 1);
EXPECT_LE(block_newed.load(), block_destroyed.load() + 2);
block_destroyed.fetch_add(1);
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"CompactionJob::Run:BeforeVerify",
[&](void* /*arg*/) { finished = true; });
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
// Two input files. Each of them has 4 data blocks.
ASSERT_EQ(8, block_newed.load());
ASSERT_EQ(8, block_destroyed.load());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, TestBBTTailPrefetch) {
std::atomic<bool> called(false);
size_t expected_lower_bound = 512 * 1024;
size_t expected_higher_bound = 512 * 1024;
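// With no previously opened files to learn from, the first table open is
// expected to prefetch exactly 512 KB of tail data (both bounds are equal).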
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"BlockBasedTable::Open::TailPrefetchLen", [&](void* arg) {
size_t* prefetch_size = static_cast<size_t*>(arg);
EXPECT_LE(expected_lower_bound, *prefetch_size);
EXPECT_GE(expected_higher_bound, *prefetch_size);
called = true;
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Put("1", "1");
Put("9", "1");
Flush();
expected_lower_bound = 0;
expected_higher_bound = 8 * 1024;
Put("1", "1");
Put("9", "1");
Flush();
Put("1", "1");
Put("9", "1");
Flush();
// Full compaction to make sure there is no L0 file after the open.
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_TRUE(called.load());
called = false;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
std::atomic<bool> first_call(true);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
"BlockBasedTable::Open::TailPrefetchLen", [&](void* arg) {
size_t* prefetch_size = static_cast<size_t*>(arg);
if (first_call) {
EXPECT_EQ(4 * 1024, *prefetch_size);
first_call = false;
} else {
EXPECT_GE(4 * 1024, *prefetch_size);
}
called = true;
});
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
Options options = CurrentOptions();
options.max_file_opening_threads = 1; // one thread
BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.max_open_files = -1;
Reopen(options);
Put("1", "1");
Put("9", "1");
Flush();
Put("1", "1");
Put("9", "1");
Flush();
ASSERT_TRUE(called.load());
called = false;
// Parallel loading SST files
options.max_file_opening_threads = 16;
Reopen(options);
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_TRUE(called.load());
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
TEST_F(DBTest2, TestGetColumnFamilyHandleUnlocked) {
// Setup sync point dependency to reproduce the race condition of
// DBImpl::GetColumnFamilyHandleUnlocked
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
{"TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked1",
"TestGetColumnFamilyHandleUnlocked::PreGetColumnFamilyHandleUnlocked2"},
{"TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked2",
"TestGetColumnFamilyHandleUnlocked::ReadColumnFamilyHandle1"},
});
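// The dependency above makes thread2 start its lookup only after thread1 has
// obtained its handle, and makes thread1 re-read that handle only after
// thread2's lookup finished, so both threads hit the unlocked handle map
// concurrently.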
SyncPoint::GetInstance()->EnableProcessing();
CreateColumnFamilies({"test1", "test2"}, Options());
ASSERT_EQ(handles_.size(), 2);
DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
port::Thread user_thread1([&]() {
auto cfh = dbi->GetColumnFamilyHandleUnlocked(handles_[0]->GetID());
ASSERT_EQ(cfh->GetID(), handles_[0]->GetID());
TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked1");
TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::ReadColumnFamilyHandle1");
ASSERT_EQ(cfh->GetID(), handles_[0]->GetID());
});
port::Thread user_thread2([&]() {
TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::PreGetColumnFamilyHandleUnlocked2");
auto cfh = dbi->GetColumnFamilyHandleUnlocked(handles_[1]->GetID());
ASSERT_EQ(cfh->GetID(), handles_[1]->GetID());
TEST_SYNC_POINT("TestGetColumnFamilyHandleUnlocked::GetColumnFamilyHandleUnlocked2");
ASSERT_EQ(cfh->GetID(), handles_[1]->GetID());
});
user_thread1.join();
user_thread2.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, TestCompactFiles) {
// Setup sync point dependency to reproduce the race between CompactFiles
// and IngestExternalFile.
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({
{"TestCompactFiles::IngestExternalFile1",
"TestCompactFiles::IngestExternalFile2"},
});
SyncPoint::GetInstance()->EnableProcessing();
Options options;
options.env = env_;
options.num_levels = 2;
options.disable_auto_compactions = true;
Reopen(options);
auto* handle = db_->DefaultColumnFamily();
ASSERT_EQ(db_->NumberLevels(handle), 2);
ROCKSDB_NAMESPACE::SstFileWriter sst_file_writer{
ROCKSDB_NAMESPACE::EnvOptions(), options};
std::string external_file1 = dbname_ + "/test_compact_files1.sst_t";
std::string external_file2 = dbname_ + "/test_compact_files2.sst_t";
std::string external_file3 = dbname_ + "/test_compact_files3.sst_t";
ASSERT_OK(sst_file_writer.Open(external_file1));
ASSERT_OK(sst_file_writer.Put("1", "1"));
ASSERT_OK(sst_file_writer.Put("2", "2"));
ASSERT_OK(sst_file_writer.Finish());
ASSERT_OK(sst_file_writer.Open(external_file2));
ASSERT_OK(sst_file_writer.Put("3", "3"));
ASSERT_OK(sst_file_writer.Put("4", "4"));
ASSERT_OK(sst_file_writer.Finish());
ASSERT_OK(sst_file_writer.Open(external_file3));
ASSERT_OK(sst_file_writer.Put("5", "5"));
ASSERT_OK(sst_file_writer.Put("6", "6"));
ASSERT_OK(sst_file_writer.Finish());
ASSERT_OK(db_->IngestExternalFile(handle, {external_file1, external_file3},
IngestExternalFileOptions()));
ASSERT_EQ(NumTableFilesAtLevel(1, 0), 2);
std::vector<std::string> files;
GetSstFiles(env_, dbname_, &files);
ASSERT_EQ(files.size(), 2);
port::Thread user_thread1(
[&]() { db_->CompactFiles(CompactionOptions(), handle, files, 1); });
port::Thread user_thread2([&]() {
ASSERT_OK(db_->IngestExternalFile(handle, {external_file2},
IngestExternalFileOptions()));
TEST_SYNC_POINT("TestCompactFiles::IngestExternalFile1");
});
user_thread1.join();
user_thread2.join();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
}
#endif // ROCKSDB_LITE
// TODO: figure out why this test fails in appveyor
#ifndef OS_WIN
TEST_F(DBTest2, MultiDBParallelOpenTest) {
const int kNumDbs = 2;
Options options = CurrentOptions();
std::vector<std::string> dbnames;
for (int i = 0; i < kNumDbs; ++i) {
dbnames.emplace_back(test::TmpDir(env_) + "/db" + ToString(i));
ASSERT_OK(DestroyDB(dbnames.back(), options));
}
// Verify empty DBs can be created in parallel
std::vector<std::thread> open_threads;
std::vector<DB*> dbs{static_cast<unsigned int>(kNumDbs), nullptr};
options.create_if_missing = true;
for (int i = 0; i < kNumDbs; ++i) {
open_threads.emplace_back(
[&](int dbnum) {
ASSERT_OK(DB::Open(options, dbnames[dbnum], &dbs[dbnum]));
},
i);
}
// Now add some data and close, so next we can verify non-empty DBs can be
// recovered in parallel
for (int i = 0; i < kNumDbs; ++i) {
open_threads[i].join();
ASSERT_OK(dbs[i]->Put(WriteOptions(), "xi", "gua"));
delete dbs[i];
}
// Verify non-empty DBs can be recovered in parallel
dbs.clear();
open_threads.clear();
for (int i = 0; i < kNumDbs; ++i) {
open_threads.emplace_back(
[&](int dbnum) {
ASSERT_OK(DB::Open(options, dbnames[dbnum], &dbs[dbnum]));
},
i);
}
// Wait and cleanup
for (int i = 0; i < kNumDbs; ++i) {
open_threads[i].join();
delete dbs[i];
ASSERT_OK(DestroyDB(dbnames[i], options));
}
}
#endif // OS_WIN
namespace {
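// Minimal Statistics stub that only counts how often recordTick() and
// measureTime() are called, so the test below can verify that events still
// reach the old statistics interface.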
class DummyOldStats : public Statistics {
public:
uint64_t getTickerCount(uint32_t /*ticker_type*/) const override { return 0; }
void recordTick(uint32_t /* ticker_type */, uint64_t /* count */) override {
num_rt++;
}
void setTickerCount(uint32_t /*ticker_type*/, uint64_t /*count*/) override {}
uint64_t getAndResetTickerCount(uint32_t /*ticker_type*/) override {
return 0;
}
void measureTime(uint32_t /*histogram_type*/, uint64_t /*count*/) override {
num_mt++;
}
void histogramData(
uint32_t /*histogram_type*/,
ROCKSDB_NAMESPACE::HistogramData* const /*data*/) const override {}
std::string getHistogramString(uint32_t /*type*/) const override {
return "";
}
bool HistEnabledForType(uint32_t /*type*/) const override { return false; }
std::string ToString() const override { return ""; }
std::atomic<int> num_rt{0};
std::atomic<int> num_mt{0};
};
} // namespace
TEST_F(DBTest2, OldStatsInterface) {
DummyOldStats* dos = new DummyOldStats();
std::shared_ptr<Statistics> stats(dos);
Options options = CurrentOptions();
options.create_if_missing = true;
options.statistics = stats;
Reopen(options);
Put("foo", "bar");
ASSERT_EQ("bar", Get("foo"));
ASSERT_OK(Flush());
ASSERT_EQ("bar", Get("foo"));
ASSERT_GT(dos->num_rt, 0);
ASSERT_GT(dos->num_mt, 0);
}
TEST_F(DBTest2, CloseWithUnreleasedSnapshot) {
const Snapshot* ss = db_->GetSnapshot();
for (auto h : handles_) {
db_->DestroyColumnFamilyHandle(h);
}
handles_.clear();
ASSERT_NOK(db_->Close());
db_->ReleaseSnapshot(ss);
ASSERT_OK(db_->Close());
delete db_;
db_ = nullptr;
}
TEST_F(DBTest2, PrefixBloomReseek) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.prefix_extractor.reset(NewCappedPrefixTransform(3));
BlockBasedTableOptions bbto;
bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
bbto.whole_key_filtering = false;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyAndReopen(options);
// Construct two L1 files with keys:
// f1:[aaa1 ccc1] f2:[ddd0]
ASSERT_OK(Put("aaa1", ""));
ASSERT_OK(Put("ccc1", ""));
ASSERT_OK(Flush());
ASSERT_OK(Put("ddd0", ""));
ASSERT_OK(Flush());
CompactRangeOptions cro;
cro.bottommost_level_compaction = BottommostLevelCompaction::kSkip;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
ASSERT_OK(Put("bbb1", ""));
Iterator* iter = db_->NewIterator(ReadOptions());
// When seeking into f1, the iterator checks the bloom filter, which causes
// the file iterator to be invalidated; the cursor is then placed in f2,
// with the next key being "ddd0".
iter->Seek("bbb1");
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("bbb1", iter->key().ToString());
// Reseek ccc1, the L1 iterator needs to go back to f1 and reseek.
iter->Seek("ccc1");
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("ccc1", iter->key().ToString());
delete iter;
}
TEST_F(DBTest2, PrefixBloomFilteredOut) {
Options options = CurrentOptions();
options.create_if_missing = true;
options.prefix_extractor.reset(NewCappedPrefixTransform(3));
BlockBasedTableOptions bbto;
bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
bbto.whole_key_filtering = false;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyAndReopen(options);
// Construct two L1 files with keys:
// f1:[aaa1 ccc1] f2:[ddd0]
ASSERT_OK(Put("aaa1", ""));
ASSERT_OK(Put("ccc1", ""));
ASSERT_OK(Flush());
ASSERT_OK(Put("ddd0", ""));
ASSERT_OK(Flush());
CompactRangeOptions cro;
cro.bottommost_level_compaction = BottommostLevelCompaction::kSkip;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
Iterator* iter = db_->NewIterator(ReadOptions());
// The seek is filtered out by f1's bloom filter.
// This is just one of several valid positions following the contract.
// Positioning to ccc1 or ddd0 would also be valid. This only validates
// the behavior of the current implementation; if the underlying
// implementation changes, the test might fail here.
iter->Seek("bbb1");
ASSERT_FALSE(iter->Valid());
delete iter;
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, RowCacheSnapshot) {
Options options = CurrentOptions();
options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
options.row_cache = NewLRUCache(8 * 8192);
DestroyAndReopen(options);
ASSERT_OK(Put("foo", "bar1"));
const Snapshot* s1 = db_->GetSnapshot();
ASSERT_OK(Put("foo", "bar2"));
ASSERT_OK(Flush());
ASSERT_OK(Put("foo2", "bar"));
const Snapshot* s2 = db_->GetSnapshot();
ASSERT_OK(Put("foo3", "bar"));
const Snapshot* s3 = db_->GetSnapshot();
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 0);
ASSERT_EQ(Get("foo"), "bar2");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
ASSERT_EQ(Get("foo"), "bar2");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1);
ASSERT_EQ(Get("foo", s1), "bar1");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 2);
ASSERT_EQ(Get("foo", s2), "bar2");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 2);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 2);
ASSERT_EQ(Get("foo", s1), "bar1");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 3);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 2);
ASSERT_EQ(Get("foo", s3), "bar2");
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 4);
ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 2);
db_->ReleaseSnapshot(s1);
db_->ReleaseSnapshot(s2);
db_->ReleaseSnapshot(s3);
}
#endif // ROCKSDB_LITE
// When a DB is reopened with multiple column families, the manifest file
// is written after the first CF is flushed, and it is written again
// after each subsequent flush. If the DB crashes between the flushes, the
// already-flushed CF will have advanced past the latest log file, which we
// now require not to be corrupted, otherwise a corruption report is
// triggered. This test exercises that recovery path.
TEST_F(DBTest2, CrashInRecoveryMultipleCF) {
const std::vector<std::string> sync_points = {
"DBImpl::RecoverLogFiles:BeforeFlushFinalMemtable",
"VersionSet::ProcessManifestWrites:BeforeWriteLastVersionEdit:0"};
for (const auto& test_sync_point : sync_points) {
Options options = CurrentOptions();
// First destroy original db to ensure a clean start.
DestroyAndReopen(options);
options.create_if_missing = true;
options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
CreateAndReopenWithCF({"pikachu"}, options);
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Flush());
ASSERT_OK(Put(1, "foo", "bar"));
ASSERT_OK(Flush(1));
ASSERT_OK(Put("foo", "bar"));
ASSERT_OK(Put(1, "foo", "bar"));
// The value is large enough to be split across two blocks.
std::string large_value(400, ' ');
ASSERT_OK(Put("foo1", large_value));
ASSERT_OK(Put("foo2", large_value));
Close();
// Corrupt the log file in the middle, so that it is not corrupted
// in the tail.
std::vector<std::string> filenames;
ASSERT_OK(env_->GetChildren(dbname_, &filenames));
for (const auto& f : filenames) {
uint64_t number;
FileType type;
if (ParseFileName(f, &number, &type) && type == FileType::kWalFile) {
std::string fname = dbname_ + "/" + f;
std::string file_content;
ASSERT_OK(ReadFileToString(env_, fname, &file_content));
file_content[400] = 'h';
file_content[401] = 'a';
ASSERT_OK(WriteStringToFile(env_, file_content, fname));
break;
}
}
// Reopen and freeze the file system after the first manifest write.
FaultInjectionTestEnv fit_env(options.env);
options.env = &fit_env;
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
test_sync_point,
[&](void* /*arg*/) { fit_env.SetFilesystemActive(false); });
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
ASSERT_NOK(TryReopenWithColumnFamilies(
{kDefaultColumnFamilyName, "pikachu"}, options));
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
fit_env.SetFilesystemActive(true);
// If we continue using the fault injection Env, it complains about
// something when renaming the CURRENT file, which is not expected. This
// still needs to be investigated.
options.env = env_;
ASSERT_OK(TryReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"},
options));
}
}
TEST_F(DBTest2, SeekFileRangeDeleteTail) {
Options options = CurrentOptions();
options.prefix_extractor.reset(NewCappedPrefixTransform(1));
options.num_levels = 3;
DestroyAndReopen(options);
ASSERT_OK(Put("a", "a"));
const Snapshot* s1 = db_->GetSnapshot();
ASSERT_OK(
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "f"));
ASSERT_OK(Put("b", "a"));
ASSERT_OK(Flush());
ASSERT_OK(Put("x", "a"));
ASSERT_OK(Put("z", "a"));
ASSERT_OK(Flush());
CompactRangeOptions cro;
cro.change_level = true;
cro.target_level = 2;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
{
ReadOptions ro;
ro.total_order_seek = true;
std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
iter->Seek("e");
ASSERT_TRUE(iter->Valid());
ASSERT_EQ("x", iter->key().ToString());
}
db_->ReleaseSnapshot(s1);
}
TEST_F(DBTest2, BackgroundPurgeTest) {
Options options = CurrentOptions();
options.write_buffer_manager =
std::make_shared<ROCKSDB_NAMESPACE::WriteBufferManager>(1 << 20);
options.avoid_unnecessary_blocking_io = true;
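// With avoid_unnecessary_blocking_io set, deleting an iterator defers the
// cleanup of the pinned memtable to a background purge, which this test
// observes through the write buffer manager's memory usage.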
DestroyAndReopen(options);
size_t base_value = options.write_buffer_manager->memory_usage();
ASSERT_OK(Put("a", "a"));
Iterator* iter = db_->NewIterator(ReadOptions());
ASSERT_OK(Flush());
size_t value = options.write_buffer_manager->memory_usage();
ASSERT_GT(value, base_value);
db_->GetEnv()->SetBackgroundThreads(1, Env::Priority::HIGH);
test::SleepingBackgroundTask sleeping_task_after;
db_->GetEnv()->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
&sleeping_task_after, Env::Priority::HIGH);
delete iter;
Env::Default()->SleepForMicroseconds(100000);
value = options.write_buffer_manager->memory_usage();
ASSERT_GT(value, base_value);
sleeping_task_after.WakeUp();
sleeping_task_after.WaitUntilDone();
test::SleepingBackgroundTask sleeping_task_after2;
db_->GetEnv()->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
&sleeping_task_after2, Env::Priority::HIGH);
sleeping_task_after2.WakeUp();
sleeping_task_after2.WaitUntilDone();
value = options.write_buffer_manager->memory_usage();
ASSERT_EQ(base_value, value);
}
TEST_F(DBTest2, SwitchMemtableRaceWithNewManifest) {
Options options = CurrentOptions();
DestroyAndReopen(options);
options.max_manifest_file_size = 10;
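// A tiny manifest size limit forces a new manifest file to be created on
// almost every version edit, making the memtable-switch / new-manifest race
// easier to hit.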
options.create_if_missing = true;
CreateAndReopenWithCF({"pikachu"}, options);
ASSERT_EQ(2, handles_.size());
ASSERT_OK(Put("foo", "value"));
const int kL0Files = options.level0_file_num_compaction_trigger;
for (int i = 0; i < kL0Files; ++i) {
ASSERT_OK(Put(/*cf=*/1, "a", std::to_string(i)));
ASSERT_OK(Flush(/*cf=*/1));
}
port::Thread thread([&]() { ASSERT_OK(Flush()); });
ASSERT_OK(dbfull()->TEST_WaitForCompact());
thread.join();
}
TEST_F(DBTest2, SameSmallestInSameLevel) {
// This test validates the fractional cascading logic when several files at
// one level contain only the same user key.
Options options = CurrentOptions();
options.merge_operator = MergeOperators::CreateStringAppendOperator();
DestroyAndReopen(options);
ASSERT_OK(Put("key", "1"));
ASSERT_OK(Put("key", "2"));
ASSERT_OK(db_->Merge(WriteOptions(), "key", "3"));
ASSERT_OK(db_->Merge(WriteOptions(), "key", "4"));
Flush();
CompactRangeOptions cro;
cro.change_level = true;
cro.target_level = 2;
ASSERT_OK(dbfull()->CompactRange(cro, db_->DefaultColumnFamily(), nullptr,
nullptr));
ASSERT_OK(db_->Merge(WriteOptions(), "key", "5"));
Flush();
ASSERT_OK(db_->Merge(WriteOptions(), "key", "6"));
Flush();
ASSERT_OK(db_->Merge(WriteOptions(), "key", "7"));
Flush();
ASSERT_OK(db_->Merge(WriteOptions(), "key", "8"));
Flush();
dbfull()->TEST_WaitForCompact(true);
#ifndef ROCKSDB_LITE
ASSERT_EQ("0,4,1", FilesPerLevel());
#endif // ROCKSDB_LITE
ASSERT_EQ("2,3,4,5,6,7,8", Get("key"));
}
TEST_F(DBTest2, FileConsistencyCheckInOpen) {
Put("foo", "bar");
Flush();
SyncPoint::GetInstance()->SetCallBack(
"VersionBuilder::CheckConsistencyBeforeReturn", [&](void* arg) {
Status* ret_s = static_cast<Status*>(arg);
*ret_s = Status::Corruption("fcc");
});
SyncPoint::GetInstance()->EnableProcessing();
Options options = CurrentOptions();
options.force_consistency_checks = true;
ASSERT_NOK(TryReopen(options));
SyncPoint::GetInstance()->DisableProcessing();
}
TEST_F(DBTest2, BlockBasedTablePrefixIndexSeekForPrev) {
// create a DB with block prefix index
BlockBasedTableOptions table_options;
Options options = CurrentOptions();
table_options.block_size = 300;
table_options.index_type = BlockBasedTableOptions::kHashSearch;
table_options.index_shortening =
BlockBasedTableOptions::IndexShorteningMode::kNoShortening;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
Reopen(options);
Random rnd(301);
std::string large_value = rnd.RandomString(500);
ASSERT_OK(Put("a1", large_value));
ASSERT_OK(Put("x1", large_value));
ASSERT_OK(Put("y1", large_value));
Flush();
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
iterator->SeekForPrev("x3");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("x1", iterator->key().ToString());
iterator->SeekForPrev("a3");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("a1", iterator->key().ToString());
iterator->SeekForPrev("y3");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("y1", iterator->key().ToString());
// Query more than one non-existing prefix to cover both the empty hash
// bucket case and the hash bucket conflict case.
iterator->SeekForPrev("b1");
// Result should be not valid or "a1".
if (iterator->Valid()) {
ASSERT_EQ("a1", iterator->key().ToString());
}
iterator->SeekForPrev("c1");
// Result should be not valid or "a1".
if (iterator->Valid()) {
ASSERT_EQ("a1", iterator->key().ToString());
}
iterator->SeekForPrev("d1");
// Result should be not valid or "a1".
if (iterator->Valid()) {
ASSERT_EQ("a1", iterator->key().ToString());
}
iterator->SeekForPrev("y3");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("y1", iterator->key().ToString());
}
}
TEST_F(DBTest2, PartitionedIndexPrefetchFailure) {
Options options = last_options_;
options.env = env_;
options.max_open_files = 20;
BlockBasedTableOptions bbto;
bbto.index_type = BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
bbto.metadata_block_size = 128;
bbto.block_size = 128;
bbto.block_cache = NewLRUCache(16777216);
bbto.cache_index_and_filter_blocks = true;
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyAndReopen(options);
// Force no table cache so every read will preload the SST file.
dbfull()->TEST_table_cache()->SetCapacity(0);
bbto.block_cache->SetCapacity(0);
Random rnd(301);
for (int i = 0; i < 4096; i++) {
ASSERT_OK(Put(Key(i), rnd.RandomString(32)));
}
ASSERT_OK(Flush());
// Try different random failures in table open for 300 times.
for (int i = 0; i < 300; i++) {
env_->num_reads_fails_ = 0;
env_->rand_reads_fail_odd_ = 8;
std::string value;
Status s = dbfull()->Get(ReadOptions(), Key(1), &value);
if (env_->num_reads_fails_ > 0) {
ASSERT_NOK(s);
} else {
ASSERT_OK(s);
}
}
env_->rand_reads_fail_odd_ = 0;
}
TEST_F(DBTest2, ChangePrefixExtractor) {
for (bool use_partitioned_filter : {true, false}) {
// create a DB with a prefix bloom filter
BlockBasedTableOptions table_options;
Options options = CurrentOptions();
// Sometimes the filter is checked based on the upper bound. Assert the
// counters in that case; otherwise, only check data correctness.
#ifndef ROCKSDB_LITE
bool expect_filter_check = !use_partitioned_filter;
#else
bool expect_filter_check = false;
#endif
table_options.partition_filters = use_partitioned_filter;
if (use_partitioned_filter) {
table_options.index_type =
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
}
table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.statistics = CreateDBStatistics();
options.prefix_extractor.reset(NewFixedPrefixTransform(2));
DestroyAndReopen(options);
Random rnd(301);
ASSERT_OK(Put("aa", ""));
ASSERT_OK(Put("xb", ""));
ASSERT_OK(Put("xx1", ""));
ASSERT_OK(Put("xz1", ""));
ASSERT_OK(Put("zz", ""));
Flush();
// After reopening the DB with the prefix size changed from 2 to 1, the new
// prefix extractor won't take effect unless using it cannot change the
// results for the given upper bound and seek key.
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
Reopen(options);
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
iterator->Seek("xa");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
// It's a known bug that the BLOOM_FILTER_PREFIX_CHECKED counter is not
// correct in this case, so don't check counters here.
if (expect_filter_check) {
ASSERT_EQ(0, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
iterator->Seek("xz");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xz1", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(0, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
std::string ub_str = "xg9";
Slice ub(ub_str);
ReadOptions ro;
ro.iterate_upper_bound = &ub;
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
// SeekForPrev() never uses prefix bloom if it is changed.
iterator->SeekForPrev("xg0");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(0, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
ub_str = "xx9";
ub = Slice(ub_str);
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("x");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(0, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
iterator->Seek("xx0");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xx1", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(1, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
CompactRangeOptions compact_range_opts;
compact_range_opts.bottommost_level_compaction =
BottommostLevelCompaction::kForce;
ASSERT_OK(db_->CompactRange(compact_range_opts, nullptr, nullptr));
ASSERT_OK(db_->CompactRange(compact_range_opts, nullptr, nullptr));
// Re-execute similar queries after a full compaction
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ReadOptions()));
iterator->Seek("x");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(2, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
iterator->Seek("xg");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xx1", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(3, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
iterator->Seek("xz");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xz1", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(4, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->SeekForPrev("xx0");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(5, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
iterator->Seek("xx0");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xx1", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(6, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
ub_str = "xg9";
ub = Slice(ub_str);
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->SeekForPrev("xg0");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("xb", iterator->key().ToString());
if (expect_filter_check) {
ASSERT_EQ(7, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
}
}
}
TEST_F(DBTest2, BlockBasedTablePrefixGetIndexNotFound) {
// create a DB with block prefix index
BlockBasedTableOptions table_options;
Options options = CurrentOptions();
table_options.block_size = 300;
table_options.index_type = BlockBasedTableOptions::kHashSearch;
table_options.index_shortening =
BlockBasedTableOptions::IndexShorteningMode::kNoShortening;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.level0_file_num_compaction_trigger = 8;
Reopen(options);
ASSERT_OK(Put("b1", "ok"));
Flush();
// Flush several files so that the chance that the hash bucket for "b" is
// empty in at least one of the files is high.
ASSERT_OK(Put("a1", ""));
ASSERT_OK(Put("c1", ""));
Flush();
ASSERT_OK(Put("a2", ""));
ASSERT_OK(Put("c2", ""));
Flush();
ASSERT_OK(Put("a3", ""));
ASSERT_OK(Put("c3", ""));
Flush();
ASSERT_OK(Put("a4", ""));
ASSERT_OK(Put("c4", ""));
Flush();
ASSERT_OK(Put("a5", ""));
ASSERT_OK(Put("c5", ""));
Flush();
ASSERT_EQ("ok", Get("b1"));
}
#ifndef ROCKSDB_LITE
TEST_F(DBTest2, AutoPrefixMode1) {
// create a DB with a prefix bloom filter
BlockBasedTableOptions table_options;
Options options = CurrentOptions();
table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.statistics = CreateDBStatistics();
Reopen(options);
Random rnd(301);
std::string large_value = rnd.RandomString(500);
ASSERT_OK(Put("a1", large_value));
ASSERT_OK(Put("x1", large_value));
ASSERT_OK(Put("y1", large_value));
Flush();
ReadOptions ro;
ro.total_order_seek = false;
ro.auto_prefix_mode = true;
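// auto_prefix_mode lets the iterator decide per Seek whether the prefix bloom
// filter can be used safely for the given seek key and iterate_upper_bound;
// the cases below track that decision via BLOOM_FILTER_PREFIX_CHECKED.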
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("b1");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("x1", iterator->key().ToString());
ASSERT_EQ(0, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
std::string ub_str = "b9";
Slice ub(ub_str);
ro.iterate_upper_bound = &ub;
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("b1");
ASSERT_FALSE(iterator->Valid());
ASSERT_EQ(1, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
ub_str = "z";
ub = Slice(ub_str);
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("b1");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("x1", iterator->key().ToString());
ASSERT_EQ(1, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
ub_str = "c";
ub = Slice(ub_str);
{
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("b1");
ASSERT_FALSE(iterator->Valid());
ASSERT_EQ(2, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
}
// The same queries without recreating iterator
{
ub_str = "b9";
ub = Slice(ub_str);
ro.iterate_upper_bound = &ub;
std::unique_ptr<Iterator> iterator(db_->NewIterator(ro));
iterator->Seek("b1");
ASSERT_FALSE(iterator->Valid());
ASSERT_EQ(3, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
ub_str = "z";
ub = Slice(ub_str);
iterator->Seek("b1");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("x1", iterator->key().ToString());
ASSERT_EQ(3, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
ub_str = "c";
ub = Slice(ub_str);
iterator->Seek("b1");
ASSERT_FALSE(iterator->Valid());
ASSERT_EQ(4, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
ub_str = "b9";
ub = Slice(ub_str);
ro.iterate_upper_bound = &ub;
iterator->SeekForPrev("b1");
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("a1", iterator->key().ToString());
ASSERT_EQ(4, TestGetTickerCount(options, BLOOM_FILTER_PREFIX_CHECKED));
ub_str = "zz";
ub = Slice(ub_str);
ro.iterate_upper_bound = &ub;
iterator->SeekToLast();
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("y1", iterator->key().ToString());
iterator->SeekToFirst();
ASSERT_TRUE(iterator->Valid());
ASSERT_EQ("a1", iterator->key().ToString());
}
}
#endif // ROCKSDB_LITE
// WAL recovery mode is WALRecoveryMode::kPointInTimeRecovery.
TEST_F(DBTest2, PointInTimeRecoveryWithIOErrorWhileReadingWal) {
Options options = CurrentOptions();
DestroyAndReopen(options);
ASSERT_OK(Put("foo", "value0"));
Close();
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->ClearAllCallBacks();
bool should_inject_error = false;
SyncPoint::GetInstance()->SetCallBack(
"DBImpl::RecoverLogFiles:BeforeReadWal",
[&](void* /*arg*/) { should_inject_error = true; });
SyncPoint::GetInstance()->SetCallBack(
"LogReader::ReadMore:AfterReadFile", [&](void* arg) {
if (should_inject_error) {
ASSERT_NE(nullptr, arg);
*reinterpret_cast<Status*>(arg) = Status::IOError("Injected IOError");
}
});
SyncPoint::GetInstance()->EnableProcessing();
options.avoid_flush_during_recovery = true;
options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
Status s = TryReopen(options);
ASSERT_TRUE(s.IsIOError());
}
} // namespace ROCKSDB_NAMESPACE
#ifdef ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS
extern "C" {
void RegisterCustomObjects(int argc, char** argv);
}
#else
void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {}
#endif // !ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
RegisterCustomObjects(argc, argv);
return RUN_ALL_TESTS();
}
| 35.42041 | 92 | 0.691496 | buu700 |
33c3d12a4605b93d0c8b327c0a3d3fa95b1968cd | 8,167 | cpp | C++ | samples/sample04/sample04.cpp | grim210/tdr | 5c83829fc82ebc9c8125bfce1f5494f037da9510 | ["Zlib"] | null | null | null |
samples/sample04/sample04.cpp | grim210/tdr | 5c83829fc82ebc9c8125bfce1f5494f037da9510 | ["Zlib"] | null | null | null |
samples/sample04/sample04.cpp | grim210/tdr | 5c83829fc82ebc9c8125bfce1f5494f037da9510 | ["Zlib"] | null | null | null |
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <sstream>
#include <glad/glad.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_access.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <SDL2/SDL.h>
#include <tdrmain.h>
#include <tdrrenderer.h>
class Model : public RenderObject {
public:
Model(void) { };
virtual ~Model(void) { };
static std::shared_ptr<Model> Create(void);
static void Delete(std::shared_ptr<Model> model);
bool addTexture(std::shared_ptr<DirectDrawTexture> ddt);
/* overridden from RenderObject */
void draw(void);
glm::vec3 getPosition(void);
bool setPosition(glm::vec3 location);
bool translate(glm::vec3 offset);
void update(double elapsed, glm::mat4 view, glm::mat4 proj);
private:
std::shared_ptr<ShaderProgram> m_program;
std::shared_ptr<GLTexture> m_texture;
std::shared_ptr<TDRMesh> m_mesh;
/* raw vertex and uv data */
std::vector<float> m_vertices;
std::vector<float> m_uvs;
GLint m_color_attrib;
GLint m_uv_attrib;
GLint m_vpos_attrib;
GLint m_mvp_uniform;
GLint m_tex_uniform;
GLuint m_uvbuffer;
GLuint m_vbuffer;
GLuint m_texid;
glm::vec3 m_pos;
glm::mat4 m_model;
glm::mat4 m_mvp;
void dump_doubles(std::vector<float> arr);
void dump_triplets(std::vector<float> arr);
/* used in game logic */
double last;
char direction;
};
int main(int argc, char* argv[])
{
SDL_Init(SDL_INIT_EVENTS);
std::unique_ptr<Window> window = Window::Initialize(800, 600, false);
if (window == nullptr) {
std::cerr << "nullptr returned. Aborting." << std::endl;
return -1;
}
window->setClearColor(0.2f, 0.2f, 0.2f, 1.0f);
std::shared_ptr<Timer> clock = Timer::Create();
std::shared_ptr<Model> model = Model::Create();
std::shared_ptr<DirectDrawTexture> ddtex(
new DirectDrawTexture("textures/uvtemplate.dds"));
model->addTexture(ddtex);
std::shared_ptr<Camera> camera = Camera::Create();
glm::mat4 proj = glm::perspective(45.0f, 800.0f / 600.0f, 0.1f, 100.0f);
clock->start();
bool running = true;
while (running) {
SDL_Event ev;
while (SDL_PollEvent(&ev)) {
if (ev.type == SDL_QUIT) {
running = false;
}
}
double elapsed = clock->getTime();
model->update(elapsed, camera->getView(), proj);
window->clear();
model->draw();
window->swap();
}
Model::Delete(model);
Window::Destroy(window.get());
SDL_Quit();
return 0;
}
std::shared_ptr<Model> Model::Create(void)
{
std::shared_ptr<Model> model(new Model());
std::string modeldata = load_text_file("./cube.json");
model->m_mesh = TDRMesh::Load2(modeldata.c_str(), modeldata.length());
std::stringstream vsp;
vsp << "./shaders/" << model->m_mesh->getData(0,
TDRMesh::VertexShaderPath);
std::string vshader = load_text_file(vsp.str());
std::stringstream fsp;
fsp << "./shaders/" << model->m_mesh->getData(0,
TDRMesh::FragmentShaderPath);
std::string fshader = load_text_file(fsp.str());
std::stringstream texp;
texp << "./textures/" << model->m_mesh->getData(0,
TDRMesh::TexturePath);
std::shared_ptr<DirectDrawTexture> ddt(new DirectDrawTexture(texp.str()));
model->m_texture = std::shared_ptr<GLTexture>(GLTexture::Create(ddt));
model->m_program = ShaderProgram::Create();
model->m_program->attachShader(GL_VERTEX_SHADER, vshader);
model->m_program->attachShader(GL_FRAGMENT_SHADER, fshader);
if (!model->m_program->link()) {
#ifdef TDR_DEBUG
std::cerr << "Failed to link shader code. Aborting." << std::endl;
#endif
return nullptr;
}
model->m_uv_attrib = model->m_program->getAttributeLocation("vertexUV");
model->m_vpos_attrib = model->m_program->getAttributeLocation(
"vertexPosition_modelspace");
model->m_mvp_uniform = model->m_program->getUniformLocation("MVP");
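/* Note: m_tex_uniform is read in draw() but is never looked up here,
so it stays uninitialized in this sample. */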
if (model->m_uv_attrib == -1 || model->m_vpos_attrib == -1 ||
model->m_mvp_uniform == -1) {
#ifdef TDR_DEBUG
if (model->m_uv_attrib == -1) {
std::cerr << "Failed to find UV attribute." << std::endl;
}
if (model->m_vpos_attrib == -1) {
std::cerr << "Failed to find vertex position attribute.";
std::cerr << std::endl;
}
if (model->m_mvp_uniform == -1) {
std::cerr << "Failed to find MVP matrix uniform." << std::endl;
}
#endif
return nullptr;
}
std::vector<float> verts = model->m_mesh->get(TDRMesh::Vertex);
std::vector<float> uvs = model->m_mesh->get(TDRMesh::UV);
#ifdef TDR_DEBUG
model->dump_triplets(verts);
model->dump_doubles(uvs);
#endif
glGenBuffers(1, &model->m_vbuffer);
glBindBuffer(GL_ARRAY_BUFFER, model->m_vbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * verts.size(), verts.data(),
GL_STATIC_DRAW);
glGenBuffers(1, &model->m_uvbuffer);
glBindBuffer(GL_ARRAY_BUFFER, model->m_uvbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * uvs.size(), uvs.data(),
GL_STATIC_DRAW);
model->m_pos = glm::vec3(0.0f, 0.0f, 0.0f);
model->m_model = glm::translate(glm::mat4(1.0f), model->m_pos);
model->direction = 'u';
model->last = 0.0;
return model;
}
void Model::Delete(std::shared_ptr<Model> model)
{
glDeleteBuffers(1, &model->m_vbuffer);
glDeleteBuffers(1, &model->m_uvbuffer);
ShaderProgram::Destroy(model->m_program);
}
bool Model::addTexture(std::shared_ptr<DirectDrawTexture> ddt)
{
m_texture = std::shared_ptr<GLTexture>(GLTexture::Create(ddt));
if (!m_texture->isValid()) {
return false;
}
return true;
}
void Model::draw(void)
{
glUseProgram(m_program->getProgram());
glUniformMatrix4fv(m_mvp_uniform, 1, GL_FALSE, glm::value_ptr(m_mvp));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_texture->getID());
glUniform1i(m_tex_uniform, 0);
glEnableVertexAttribArray(m_vpos_attrib);
glBindBuffer(GL_ARRAY_BUFFER, m_vbuffer);
glVertexAttribPointer(m_vpos_attrib, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(m_uv_attrib);
glBindBuffer(GL_ARRAY_BUFFER, m_uvbuffer);
glVertexAttribPointer(m_uv_attrib, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
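/* 12 triangles * 3 vertices each: the cube is drawn as two triangles
per face. */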
glDrawArrays(GL_TRIANGLES, 0, 12*3);
glDisableVertexAttribArray(m_vpos_attrib);
glDisableVertexAttribArray(m_uv_attrib);
}
glm::vec3 Model::getPosition(void)
{
glm::vec4 col = glm::column(m_model, 3);
return glm::vec3(col.x, col.y, col.z);
}
bool Model::setPosition(glm::vec3 position)
{
m_model = glm::translate(glm::mat4(1.0f), position);
return true;
}
bool Model::translate(glm::vec3 offset)
{
m_model = glm::translate(m_model, offset);
return true;
}
void Model::update(double elapsed, glm::mat4 view, glm::mat4 proj)
{
glm::vec3 position = getPosition();
double delta = elapsed - last;
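/* Scale the movement by the frame delta (2 units per second) so the
bounce speed is independent of the frame rate. */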
float change = static_cast<float>(delta) * 2.0f;
/* outrageous logic to ratchet back and forth on the Y axis. */
if (direction == 'u') {
position.y += change;
} else if (direction == 'd') {
position.y -= change;
}
if (position.y > 1.0f) {
position.y = 1.0f;
direction = 'd';
} else if (position.y < -1.0f) {
position.y = -1.0f;
direction = 'u';
}
this->setPosition(position);
m_mvp = proj * view * m_model;
last = elapsed;
}
void Model::dump_triplets(std::vector<float> arr)
{
for (size_t i = 0; i < arr.size(); i++) {
if ((i != 0) && (i % 3 == 0)) {
std::cout << std::endl;
}
fprintf(stdout, "%.4f, ", arr[i]);
}
std::cout << std::endl;
}
void Model::dump_doubles(std::vector<float> arr)
{
for (size_t i = 0; i < arr.size(); i++) {
if ((i != 0) && (i % 2 == 0)) {
std::cout << std::endl;
}
fprintf(stdout, "%.4f, ", arr[i]);
}
std::cout << std::endl;
}
| 27.223333 | 78 | 0.628138 | grim210 |
33c4523b96f7cf5b453da4638a86b9c209da6db5 | 3,115 | cc | C++ | src/ops/matmul.cc | yywing/CTranslate2 | 0459171ee16ca6896633340218d4577845afab64 | ["MIT"] | 259 | 2019-10-09T13:14:30.000Z | 2022-03-28T02:54:28.000Z |
src/ops/matmul.cc | yywing/CTranslate2 | 0459171ee16ca6896633340218d4577845afab64 | ["MIT"] | 197 | 2019-10-10T08:56:29.000Z | 2022-03-31T12:07:04.000Z |
src/ops/matmul.cc | yywing/CTranslate2 | 0459171ee16ca6896633340218d4577845afab64 | ["MIT"] | 69 | 2019-10-09T13:31:10.000Z | 2022-03-09T11:15:08.000Z |
#include "ctranslate2/ops/matmul.h"
#include "dispatch.h"
namespace ctranslate2 {
namespace ops {
MatMul::MatMul(bool trans_a, bool trans_b, float alpha)
: _trans_a(trans_a)
, _trans_b(trans_b)
, _alpha(alpha) {
}
void MatMul::operator()(const StorageView& a, const StorageView& b, StorageView& c) const {
PROFILE("MatMul");
switch (a.dtype()) {
case DataType::FLOAT:
DEVICE_DISPATCH(a.device(), (compute<D, float>(a, b, c)));
break;
#ifdef CT2_WITH_CUDA
case DataType::FLOAT16:
if (a.device() != Device::CUDA)
throw std::invalid_argument("FP16 MatMul is only supported on CUDA");
compute<Device::CUDA, float16_t>(a, b, c);
break;
#endif
default:
throw std::invalid_argument("MatMul: unsupported compute type " + dtype_name(a.dtype()));
}
}
template <Device D, typename T>
void MatMul::compute(const StorageView& a, const StorageView& b, StorageView& c) const {
dim_t m, k_a;
if (_trans_a) {
m = a.dim(-1);
k_a = a.dim(-2);
} else {
m = a.dim(-2);
k_a = a.dim(-1);
}
dim_t k_b, n;
if (_trans_b) {
n = b.dim(-2);
k_b = b.dim(-1);
} else {
n = b.dim(-1);
k_b = b.dim(-2);
}
if (k_a != k_b)
throw std::invalid_argument("MatMul: k dimension of inputs a and b should match");
const dim_t k = k_a;
const dim_t a_batch_size = a.size() / (m * k);
const dim_t b_batch_size = b.size() / (k * n);
if (a_batch_size != b_batch_size)
throw std::invalid_argument("MatMul: batch dimension of inputs a and b should match");
{
Shape output_shape(a.shape());
output_shape[output_shape.size() - 1] = n;
output_shape[output_shape.size() - 2] = m;
c.resize(std::move(output_shape));
}
const dim_t batch_size = a_batch_size;
const dim_t lda = _trans_a ? m : k;
const dim_t ldb = _trans_b ? k : n;
const dim_t ldc = n;
const float beta = 0;
if (batch_size > 1) {
const dim_t stridea = m * k;
const dim_t strideb = k * n;
const dim_t stridec = m * n;
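// The batch items of a, b and c are laid out contiguously, so a single
// strided batched GEMM covers all of them: batch i starts at offsets
// i*stridea, i*strideb and i*stridec respectively.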
primitives<D>::gemm_batch_strided(_trans_a, _trans_b,
m, n, k,
_alpha,
a.data<T>(), lda, stridea,
b.data<T>(), ldb, strideb,
beta,
c.data<T>(), ldc, stridec,
batch_size);
} else {
primitives<D>::gemm(/*a_is_packed=*/false, /*b_is_packed=*/false,
_trans_a, _trans_b,
m, n, k,
_alpha,
a.data<T>(), lda,
b.data<T>(), ldb,
beta,
c.data<T>(), ldc);
}
}
}
}
| 30.841584 | 97 | 0.475441 | yywing |
33cb3a70c6b166ff1489d826ea262e95350be912 | 15,472 | c++ | C++ | src/extern/inventor/apps/tools/ivnorm/Faces.c++ | OpenXIP/xip-libraries | 9f0fef66038b20ff0c81c089d7dd0038e3126e40 | ["Apache-2.0"] | 2 | 2020-05-21T07:06:07.000Z | 2021-06-28T02:14:34.000Z |
src/extern/inventor/apps/tools/ivnorm/Faces.c++ | OpenXIP/xip-libraries | 9f0fef66038b20ff0c81c089d7dd0038e3126e40 | ["Apache-2.0"] | null | null | null |
src/extern/inventor/apps/tools/ivnorm/Faces.c++ | OpenXIP/xip-libraries | 9f0fef66038b20ff0c81c089d7dd0038e3126e40 | ["Apache-2.0"] | 6 | 2016-03-21T19:53:18.000Z | 2021-06-08T18:06:03.000Z |
/*
*
* Copyright (C) 2000 Silicon Graphics, Inc. All Rights Reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan/
*
*/
//
// Some convenient routines for doing stuff with faces.
//
#include <assert.h>
#include <math.h>
#include "Faces.h"
#include "Edges.h"
#include <Inventor/nodes/SoNormal.h>
#include <Inventor/nodes/SoIndexedFaceSet.h>
//
// Find a face's normal, assuming its vertices are in
// counter-clockwise order.
//
void
Face::findNormal(const SbVec3f *verts)
{
// Use Newell's method to find face normal. See Newman & Sproull,
// pg. 499. This is better than the three-point cross-product
// method.
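// Each edge (v1, v2) adds (y1-y2)(z1+z2), (z1-z2)(x1+x2) and (x1-x2)(y1+y2)
// to the x, y and z components; summed over all edges this yields a vector
// proportional to the polygon's area-weighted normal, which stays well
// defined even for slightly non-planar or near-degenerate faces.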
normal[0] = normal[1] = normal[2] = 0.0;
for (int i = 0; i < nv; i++)
{
int iv1 = int(v[i]);
int iv2 = int(v[(i+1)%nv]);
if (iv1 == iv2) continue;
SbVec3f v1 = verts[iv1];
SbVec3f v2 = verts[iv2];
normal[0] += (v1[1] - v2[1])*(v1[2] + v2[2]);
normal[1] += (v1[2] - v2[2])*(v1[0] + v2[0]);
normal[2] += (v1[0] - v2[0])*(v1[1] + v2[1]);
}
if (orientation == Face::CW) normal.negate();
if (normal.length() < 0.00001)
{
degenerate = 1;
}
else
{
normal.normalize();
degenerate = 0;
}
}
//
// Set this face's orientation relative to a given edge
//
void
Face::orientFace(int v1, int v2)
{
int v1_i;
// First, figure out which index is v1
for (v1_i = 0; v1_i < nv; v1_i++)
{
if (v[v1_i] == v1)
{
// Now just have to determine whether v2 is the next or previous
// vertex.
if (v[(v1_i+1)%nv] == v2) // Next
{
orientation = CCW;
break;
}
else if (v[(v1_i+nv-1)%nv] == v2) // Previous
{
orientation = CW;
break;
}
// Otherwise, keep on checking; we may be traversing a
// degenerate edge or strange facet where vertices are repeated.
}
}
assert(v1_i != nv); // Assertion: we found it.
}
FaceList::FaceList()
{
verts = NULL;
faceSet = NULL;
ed = NULL;
vd = NULL;
convex = TRUE;
solid = TRUE;
verbose = FALSE;
}
FaceList::FaceList(const SbVec3f *v, EdgeDict *e)
{
verts = v;
ed = e;
vd = NULL;
faceSet = NULL;
convex = TRUE;
solid = TRUE;
verbose = FALSE;
}
FaceList::FaceList(const SbVec3f *v, SoIndexedFaceSet *fs, SbBool vrb)
{
verts = v;
faceSet = fs; fs->ref();
convex = TRUE;
solid = TRUE;
vd = NULL;
verbose = vrb;
ed = new EdgeDict(1000);
SoMFInt32 *coord_indices = &faceSet->coordIndex;
int ni = coord_indices->getNum();
int32_t *indices = coord_indices->startEditing();
int start_o_face = 0;
// Now fill in face and edge structures
int n_degenerate_faces = 0;
int n_degenerate_edges = 0;
for (int i = 0, ct=0; i < coord_indices->getNum(); i++)
{
if (indices[i] == SO_END_FACE_INDEX
|| i == coord_indices->getNum()-1)
{
Face *f = new Face;
if (indices[i] == SO_END_FACE_INDEX)
f->nv = i - start_o_face;
else
f->nv = i - start_o_face + 1;
f->v = indices+start_o_face;
f->vct = ct; // first vertex index count for this face.
f->vidx = start_o_face;
f->vn = NULL;
f->orientation = Face::UNKNOWN;
f->findNormal(verts);
if (f->degenerate)
{
++n_degenerate_faces;
f->orientation = Face::CCW;
}
else for (int j = 0; j < f->nv; j++)
{
int i1 = (int)f->v[j];
int i2 = (int)f->v[(j+1)%f->nv];
if (i1 != i2)
ed->Add(f, i1, i2);
else ++n_degenerate_edges;
}
start_o_face = i+1;
append(f);
}
else
ct++; // "ct" counts the non-END entries, for use
// as the index into a per_vertex normal set
}
if (n_degenerate_faces != 0)
{
if (verbose) fprintf(stderr, "Detected %d degenerate faces\n",
n_degenerate_faces);
}
if (n_degenerate_edges != 0)
{
if (verbose) fprintf(stderr, "Detected %d degenerate edges\n",
n_degenerate_edges);
}
}
void
FaceList::append(Face *f)
{
SbPList::append((void *)f);
}
//
// This isn't an accurate volume; we only use its sign to decide whether
// the surface is oriented correctly.
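// The code below picks the centroid of all non-degenerate face vertices as a
// common apex, fans each face into triangles, and accumulates signed
// tetrahedron volumes (scaled by 6); only the sign of the sum matters.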
//
float
FaceList::volume()
{
int i, j;
int total_v = 0;
SbVec3f average(0.0, 0.0, 0.0);
for (j = 0; j < getLength(); j++)
{
Face *f = (*this)[j];
if (f->degenerate) continue;
for (i = 0; i < f->nv; i++)
{
average += verts[f->v[i]];
++total_v;
}
}
average /= (float) total_v;
float result = 0.0;
for (j = 0; j < getLength(); j++)
{
Face *f = (*this)[j];
if (f->degenerate) continue;
for (i = 1; i < f->nv-1; i++)
{
SbVec3f v1 = verts[f->v[0]] - average;
SbVec3f v2 = verts[f->v[i]] - average;
SbVec3f v3 = verts[f->v[i+1]] - average;
float t = (v1.cross(v2)).dot(v3);
if (f->orientation == Face::CCW)
{
result += t;
}
else if (f->orientation == Face::CW)
{
result -= t;
}
else
{
assert(0);
}
}
}
return result;
}
FaceList::~FaceList()
{
if (faceSet != NULL)
{
delete ed;
faceSet->coordIndex.finishEditing();
faceSet->unref();
for (int i = 0; i < getLength(); i++)
{
delete (*this)[i];
}
truncate(0);
}
if (vd)
delete [] vd;
}
void
FaceList::reverseOrientation()
{
for (int i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
if (f->orientation == Face::CW)
{
f->orientation = Face::CCW;
}
else if (f->orientation == Face::CCW)
{
f->orientation = Face::CW;
}
else
{
assert(0);
}
}
}
//
// This routine works by starting with a 'seed' face and assumes that
// its orientation is correct. It then visits all neighboring faces
// and makes their orientations consistent with the seed face's.
//
void
FaceList::findOrientation()
{
orientOutward();
correctOrientation();
}
void
FaceList::findFacetNormals(SoNormal *n)
{
assert(n != NULL && faceSet != NULL);
for (int i = 0; i < getLength(); i++)
{
(*this)[i]->findNormal(verts);
n->vector.set1Value(i, (*this)[i]->normal);
}
}
//
// Assuming that the correct orientation of a 'seed' face has been
// discovered, this routine figures out the correct orientation for
// all faces connected to that face.
//
void
FaceList::recursivelyOrient(Face *seed)
{
int i, j;
FaceList others;
if (seed->degenerate) return;
for (i = 0; i < seed->nv; i++)
{
j = (i+1)%seed->nv;
// Find other faces attached to this edge
ed->OtherFaces(seed, seed->v[i], seed->v[j], others);
for (int f = 0; f < others.getLength(); f++)
{
if (others[f]->orientation == Face::UNKNOWN)
{
if (seed->orientation == Face::CW)
others[f]->orientFace((int)seed->v[i], (int)seed->v[j]);
else if (seed->orientation == Face::CCW)
others[f]->orientFace((int)seed->v[j], (int)seed->v[i]);
else assert(0); // Should never happen
append(others[f]);
recursivelyOrient(others[f]);
}
}
}
}
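//
// Consistency rule used above: two faces sharing an edge are oriented
// consistently when they traverse that edge in opposite directions.  For
// example (indices are illustrative only), if the CCW seed walks the shared
// edge as 1 -> 2, a consistent neighbour must walk it as 2 -> 1; that is why
// the CW and CCW cases above swap the argument order passed to orientFace().
//
//   seed:      v = {0, 1, 2}, CCW   edge walked as 1 -> 2
//   neighbour: v = {2, 1, 3}        edge walked as 2 -> 1  => consistent
//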
//
// This routine takes a collection of faces and tries to figure out
// which way is out.
//
void
FaceList::orientOutward()
{
// int num_fragments = 0;
//
// Loop through all the faces; if we find one whose orientation
// hasn't been determined, we will try to determine its orientation.
//
for (int i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
if (f->orientation != Face::UNKNOWN) continue;
// First, take a wild guess...
f->orientation = Face::CCW;
// ++num_fragments;
FaceList fragment(verts, ed);
fragment.append(f);
// Now recursively orient the faces connected to this face.
fragment.recursivelyOrient(f);
// Take a reasonable guess for whether or not that first face
// is oriented correctly:
float v = fragment.volume();
if (v*v < 0.00001*0.00001) // FLAT
{
// Do something...
// fprintf(stderr, "Flat fragment found\n");
}
else if (v < 0.0)
fragment.reverseOrientation();
}
// fprintf(stderr, "There were %d fragments\n", num_fragments);
}
void
FaceList::correctOrientation()
{
for (int i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
if (f->orientation == Face::CW)
{
for (int j = 0; j < f->nv/2; j++)
{
int k = f->nv - j - 1;
int32_t t = f->v[j];
f->v[j] = f->v[k];
f->v[k] = t;
}
f->orientation = Face::CCW;
}
}
}
void
FaceList::findShapeInfo()
{
FaceList others;
convex = TRUE;
solid = TRUE;
for (int k=0; k< getLength(); k++) {
Face *f = (*this)[k];
if (f->degenerate)
continue;
// If a face has more than 3 vertices, assume it's convex.
if (f->nv > 3) {
convex = FALSE;
}
// run through the edges of the face
for (int i = 0; i < f->nv; i++) {
int j = (i+1)%f->nv;
// Find other faces attached to this edge
ed->OtherFaces(f, f->v[i], f->v[j], others);
if (others.getLength() == 0) {
solid = FALSE;
break;
}
}
// we can quit searching if we've found
// out all there is to know.
if (!convex && !solid)
break;
}
}
int
FaceList::findBodies()
{
buildVertexDict();
// clear all faces
int i;
for (i=0; i< getLength(); i++)
(*this)[i]->body = -1;
int bodyN = 0;
for (i=0; i<getLength(); i++) {
Face *f = (*this)[i];
// each time we find an unmarked face,
// it's on a previously-undiscovered body
if (f->body == -1) {
f->body = bodyN++;
recursivelyMarkBody(f);
}
}
return bodyN;
}
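//
// findBodies() is a connected-component labelling pass: faces are connected
// whenever they share a vertex, and each component receives a body id in
// [0, bodyN).  Hedged usage sketch (variable names are illustrative only):
//
#if 0
FaceList faces(coords, indexedFaceSet, FALSE);
int nBodies = faces.findBodies();
for (int i = 0; i < faces.getLength(); i++)
{
    // Every face now carries the id of the body it belongs to
    assert(faces[i]->body >= 0 && faces[i]->body < nBodies);
}
#endif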
void
FaceList::recursivelyMarkBody(Face *f)
{
int i;
assert(f->body != -1); // only recurse on marked faces
for (i = 0; i < f->nv; i++)
{
// get list of faces sharing this vertex
const FaceList &faces = vd[f->v[i]];
for (int j=0; j<faces.getLength(); j++) {
if (faces[j]->body == f->body) {
; // nothing to do if the other face is already part of this body
}
else if (faces[j]->body == -1) {
// the other face is still unmarked, so mark it and recurse
faces[j]->body = f->body;
recursivelyMarkBody(faces[j]);
}
else {
assert(0); // vertex dictionary isn't commutative??
}
}
}
}
void
FaceList::extractBody(int b,
int &bnf, int32_t *fNewFromOld, int32_t *fOldFromNew,
int &bnv, int32_t *vNewFromOld, int32_t *vOldFromNew)
{
// build the face maps
int f, bf;
for (f=0, bf=0; f < getLength(); f++) {
if ((*this)[f]->body == b) {
// f is the old face index
// bf is the new face index
fNewFromOld[f] = bf;
fOldFromNew[bf] = f;
bf++;
}
}
bnf = bf;
// build the vertex maps
int ni, i;
for (ni=0, i=0; i < vdSize; i++) {
const FaceList &faces = vd[i];
SbBool isInBody = FALSE;
for (int j = 0; j < faces.getLength(); j++) if (faces[j]->body == b) {
isInBody = TRUE;
break;
}
if (isInBody) {
// i is the old vertex index
// ni is the new vertex index
vNewFromOld[i] = ni;
vOldFromNew[ni] = i;
ni++;
}
else
vNewFromOld[i] = -1;
}
bnv = ni;
}
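//
// extractBody() only fills in index maps; the caller owns the arrays and
// sizes them for the full face and vertex sets.  Hedged usage sketch
// (array and count names are illustrative only):
//
#if 0
int nBodies = faces.findBodies();   // also builds the vertex dictionary
int32_t *fNew = new int32_t[faces.getLength()];
int32_t *fOld = new int32_t[faces.getLength()];
int32_t *vNew = new int32_t[numCoords];
int32_t *vOld = new int32_t[numCoords];
int bodyFaces, bodyVerts;
faces.extractBody(0, bodyFaces, fNew, fOld, bodyVerts, vNew, vOld);
// fOld[bf] maps body-local face bf back to the original face index;
// vNew[v] is -1 for vertices not used by body 0.
#endif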
void
FaceList::buildVertexDict()
{
// already built?
if (vd) return;
int biggest_index = 0;
int i;
for (i = 0; i < getLength(); i++) {
Face *f = (*this)[i];
for (int j = 0; j < f->nv; j++) {
if (f->v[j] > biggest_index) biggest_index = f->v[j];
}
}
vd = new FaceList[biggest_index+1];
vdSize = biggest_index+1;
// Build list of faces around each vertex
for (i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
for (int j = 0; j < f->nv; j++)
{
vd[f->v[j]].append(f);
}
}
}
void
FaceList::findVertexNormals(SoNormal *norm, SoIndexedFaceSet *ifs,
float creaseAngle)
{
buildVertexDict();
// Initialize all faces
int i;
for (i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
f->findNormal(verts);
f->vn = new int32_t[f->nv];
for (int j = 0; j < f->nv; j++)
{
f->vn[j] = -1;
}
}
// Finally, create normals
norm->vector.deleteValues(0); // get rid of default value
int count = 0;
for (i = 0; i < getLength(); i++)
{
Face *f = (*this)[i];
for (int j = 0; j < f->nv; j++)
{
if (f->degenerate)
{
f->vn[j] = getIdx(norm->vector, SbVec3f(0,0,0));
}
else if (f->vn[j] == -1)
{
// averageNormals() inserts the averaged normal into the normal set and
// fills in f->vn[j] (via the whichV argument) for every face sharing
// this vertex within the crease angle.
vd[f->v[j]].averageNormals(norm->vector, f->normal,
creaseAngle,
f->v[j]);
}
ifs->normalIndex.set1Value(count, f->vn[j]);
++count;
}
ifs->normalIndex.set1Value(count, SO_END_FACE_INDEX);
++count;
}
}
//
// get the index of a vector in a SFVec3f;
// adds the vector if needed.
//
// use: private
//
int
FaceList::getIdx(SoMFVec3f &mf, const SbVec3f &p)
{
int n = mf.getNum();
SbVec3f *v = mf.startEditing();
// search from the end, since recent vectors are likely to be reused
for (int i=n-1; i>=0; i--)
if (p == v[i]) {
mf.finishEditing();
return i;
}
mf.finishEditing();
mf.set1Value(n, p);
return n;
}
//
// Average the normals around a vertex to get a vertex normal. Skip
// faces that are too different (as defined by creaseAngle)
//
void
FaceList::averageNormals(SoMFVec3f &norms, SbVec3f &reference,
float creaseAngle, int whichV)
{
SbVec3f average;
average.setValue(0.0, 0.0, 0.0);
float ca = cos(creaseAngle);
int num = 0;
int max = getLength();
// first, loop through and compute the average
int i;
for (i = 0; i < max; i++)
{
Face *f = (*this)[i];
if (f->degenerate) continue;
float dp = reference.dot(f->normal);
if (dp >= ca)
{
average += f->normal; ++num;
}
}
assert(num != 0);
average /= (float)num;
average.normalize();
// insert the average into the mfield
int index = getIdx(norms, average);
// loop through again and insert the average's index
// into the face arrays
for (i = 0; i < max; i++)
{
Face *f = (*this)[i];
if (f->degenerate) continue;
float dp = reference.dot(f->normal);
if (dp >= ca)
{
for (int j = 0; j < f->nv; j++)
{
if (f->v[j] == whichV)
f->vn[j] = index;
}
}
}
}
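//
// Worked example of the crease test above: with creaseAngle = 30 degrees,
// ca = cos(30deg) ~= 0.866.  A neighbouring face whose normal is 25 degrees
// away from the reference has dot ~= 0.906 >= ca, so it is folded into the
// smooth average; a face 45 degrees away has dot ~= 0.707 < ca, so the
// vertex keeps a hard (faceted) edge against that face.
//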
| 20.795699
| 77
| 0.579046
|
OpenXIP
|
33ccbc62111db3125ab7d8fb2cf5a1137719ee6d
| 5,530
|
cpp
|
C++
|
src/CoLaParameterWriter.cpp
|
diersma/sick_visionary_cpp_shared
|
f86e647886fb1199a2ed063fcf128ed78c6651db
|
[
"Apache-2.0"
] | null | null | null |
src/CoLaParameterWriter.cpp
|
diersma/sick_visionary_cpp_shared
|
f86e647886fb1199a2ed063fcf128ed78c6651db
|
[
"Apache-2.0"
] | null | null | null |
src/CoLaParameterWriter.cpp
|
diersma/sick_visionary_cpp_shared
|
f86e647886fb1199a2ed063fcf128ed78c6651db
|
[
"Apache-2.0"
] | null | null | null |
//
// Copyright note: Redistribution and use in source, with or without modification, are permitted.
//
// Created: October 2018
//
// SICK AG, Waldkirch
// email: TechSupport0905@sick.de
#include "CoLaParameterWriter.h"
#include "MD5.h"
#include "VisionaryEndian.h"
namespace visionary
{
CoLaParameterWriter::CoLaParameterWriter(CoLaCommandType::Enum type, const char * name)
: m_type(type)
, m_name(name)
{
writeHeader(m_type, m_name);
}
CoLaParameterWriter::~CoLaParameterWriter()
{
}
CoLaParameterWriter& CoLaParameterWriter::parameterSInt(const int8_t sInt)
{
m_buffer.push_back(static_cast<uint8_t>(sInt));
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterUSInt(const uint8_t uSInt)
{
m_buffer.push_back(uSInt);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterInt(const int16_t integer)
{
const int16_t bigEndianValue = nativeToBigEndian(integer);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 2);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterUInt(const uint16_t uInt)
{
const uint16_t bigEndianValue = nativeToBigEndian(uInt);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 2);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterDInt(const int32_t dInt)
{
const int32_t bigEndianValue = nativeToBigEndian(dInt);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 4);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterUDInt(const uint32_t uDInt)
{
const uint32_t bigEndianValue = nativeToBigEndian(uDInt);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 4);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterReal(const float real)
{
const float bigEndianValue = nativeToBigEndian(real);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 4);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterLReal(const double lReal)
{
const double bigEndianValue = nativeToBigEndian(lReal);
m_buffer.insert(m_buffer.end(), reinterpret_cast<const uint8_t*>(&bigEndianValue), reinterpret_cast<const uint8_t*>(&bigEndianValue) + 8);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterBool(const bool boolean)
{
*this << static_cast<uint8_t>(boolean);
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::parameterPasswordMD5(const std::string& str)
{
uint32_t valueUDInt = 0;
const unsigned char* byteData = MD5(str).getDigest();
// 128 bit to 32 bit using XOR
int byte0 = byteData[0] ^ byteData[4] ^ byteData[8] ^ byteData[12];
int byte1 = byteData[1] ^ byteData[5] ^ byteData[9] ^ byteData[13];
int byte2 = byteData[2] ^ byteData[6] ^ byteData[10] ^ byteData[14];
int byte3 = byteData[3] ^ byteData[7] ^ byteData[11] ^ byteData[15];
valueUDInt = static_cast<uint32_t>(byte0 | (byte1 << 8) | (byte2 << 16) | (byte3 << 24));
// Add as UDInt, it is already big endian
parameterUDInt(valueUDInt);
return *this;
}
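// Worked example of the fold above: for digest bytes d[0..15],
//   byte0 = d[0]^d[4]^d[8]^d[12], byte1 = d[1]^d[5]^d[9]^d[13],
//   byte2 = d[2]^d[6]^d[10]^d[14], byte3 = d[3]^d[7]^d[11]^d[15],
// and the four bytes are packed as byte0 | byte1<<8 | byte2<<16 | byte3<<24
// before being appended via parameterUDInt().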
CoLaParameterWriter& CoLaParameterWriter::operator<<(const char* str)
{
m_buffer.insert(m_buffer.end(), str, str+strlen(str));
return *this;
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const int8_t sInt)
{
return parameterSInt(sInt);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const uint8_t uSInt)
{
return parameterUSInt(uSInt);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const int16_t integer)
{
return parameterInt(integer);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const uint16_t uInt)
{
return parameterUInt(uInt);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const int32_t dInt)
{
return parameterDInt(dInt);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const uint32_t uDInt)
{
return parameterUDInt(uDInt);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const float real)
{
return parameterReal(real);
}
CoLaParameterWriter & CoLaParameterWriter::operator<<(const double lReal)
{
return parameterLReal(lReal);
}
CoLaParameterWriter& CoLaParameterWriter::operator<<(const bool boolean)
{
return parameterBool(boolean);
}
const CoLaCommand CoLaParameterWriter::build()
{
// Copy buffer
std::vector<uint8_t> buffer = m_buffer;
return CoLaCommand(buffer);
}
void CoLaParameterWriter::writeHeader(CoLaCommandType::Enum type, const char* name)
{
// Write command type
switch (type)
{
case CoLaCommandType::READ_VARIABLE: *this << "sRN "; break;
case CoLaCommandType::READ_VARIABLE_RESPONSE: *this << "sRA "; break;
case CoLaCommandType::WRITE_VARIABLE: *this << "sWN "; break;
case CoLaCommandType::WRITE_VARIABLE_RESPONSE: *this << "sWA "; break;
case CoLaCommandType::METHOD_INVOCATION: *this << "sMN "; break;
case CoLaCommandType::METHOD_RETURN_VALUE: *this << "sAN "; break;
case CoLaCommandType::COLA_ERROR: *this << "sFA"; break;
default: return;
}
// Write command name
*this << name << " ";
}
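// Hedged usage sketch of the builder (variable names and values below are
// illustrative only, not taken from a device description):
#if 0
CoLaCommand readCmd = CoLaParameterWriter(CoLaCommandType::READ_VARIABLE, "integerData").build();
CoLaCommand writeCmd = CoLaParameterWriter(CoLaCommandType::WRITE_VARIABLE, "integerData")
    .parameterInt(42)   // appended big-endian as two bytes after "sWN integerData "
    .build();
#endif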
}
| 29.731183
| 141
| 0.726944
|
diersma
|
33d075de530a093dffd03561ad9ddeafd8c8c007
| 487
|
hpp
|
C++
|
test/test_vector/intf_xcvector_d/xcvector_d.hpp
|
skrzj-dev/libxccc
|
621ce71dcc0ad8fb7a13460aeb3dc250188fb5d2
|
[
"MIT"
] | null | null | null |
test/test_vector/intf_xcvector_d/xcvector_d.hpp
|
skrzj-dev/libxccc
|
621ce71dcc0ad8fb7a13460aeb3dc250188fb5d2
|
[
"MIT"
] | 10
|
2020-02-19T19:41:48.000Z
|
2020-03-08T22:08:35.000Z
|
test/test_vector/intf_xcvector_d/xcvector_d.hpp
|
skrzj-dev/libxccc
|
621ce71dcc0ad8fb7a13460aeb3dc250188fb5d2
|
[
"MIT"
] | null | null | null |
/*
* Copyright block:
*
* Source file of libxccc project
*
* Copyright (c) 2019 Jakub Skrzyniarz, skrzj-dev@protonmail.com
*
* Licensed under: MIT license; See the file "LICENSE" of libxccc project for more information.
*
* Copyright block: end
*/
#ifndef _XCC_TEST_TPL_VECTOR___CSEQ_HPP_
#define _XCC_TEST_TPL_VECTOR___CSEQ_HPP_
#include "_xcvector_d_api_u.hpp"
#include "_xcvector_d_api_s.hpp"
#include "_xcvector_d_api_i.hpp"
#include "_xcvector_d_internal.hpp"
#endif
| 20.291667
| 95
| 0.765914
|
skrzj-dev
|
33d2c7590ec73707d7dee983302c086d137bf682
| 18,448
|
cpp
|
C++
|
Source/AllProjects/Utilities/CIDResEd/CIDResEd_MenuData.cpp
|
MarkStega/CIDLib
|
82014e064eef51cad998bf2c694ed9c1c8cceac6
|
[
"MIT"
] | 216
|
2019-03-09T06:41:28.000Z
|
2022-02-25T16:27:19.000Z
|
Source/AllProjects/Utilities/CIDResEd/CIDResEd_MenuData.cpp
|
MarkStega/CIDLib
|
82014e064eef51cad998bf2c694ed9c1c8cceac6
|
[
"MIT"
] | 9
|
2020-09-27T08:00:52.000Z
|
2021-07-02T14:27:31.000Z
|
Source/AllProjects/Utilities/CIDResEd/CIDResEd_MenuData.cpp
|
MarkStega/CIDLib
|
82014e064eef51cad998bf2c694ed9c1c8cceac6
|
[
"MIT"
] | 29
|
2019-03-09T10:12:24.000Z
|
2021-03-03T22:25:29.000Z
|
//
// FILE NAME: CIDResEd_MenuData.cpp
//
// AUTHOR: Dean Roddey
//
// CREATED: 02/26/2010
//
// COPYRIGHT: Charmed Quark Systems, Ltd @ 2019
//
// This software is copyrighted by 'Charmed Quark Systems, Ltd' and
// the author (Dean Roddey.) It is licensed under the MIT Open Source
// license:
//
// https://opensource.org/licenses/MIT
//
// DESCRIPTION:
//
// This file implements the classes related to the menu resource data.
//
// CAVEATS/GOTCHAS:
//
// LOG:
//
// $_CIDLib_Log_$
//
// ---------------------------------------------------------------------------
// Includes
// ---------------------------------------------------------------------------
#include "CIDResEd.hpp"
// ---------------------------------------------------------------------------
// Magic macros
// ---------------------------------------------------------------------------
RTTIDecls(TResEdMenuItem,TObject)
RTTIDecls(TResEdMenuDesc,TObject)
// ---------------------------------------------------------------------------
// Local types and constants
// ---------------------------------------------------------------------------
namespace CIDResEd_MenuData
{
// -----------------------------------------------------------------------
// Some strings that we look for during parsing
// -----------------------------------------------------------------------
const TString strKW_ITEM(L"ITEM=");
const TString strKW_ITEMEnd(L"END ITEM");
const TString strKW_MENUEnd(L"END MENU");
const TString strKW_SUBMENU(L"SUBMENU=");
const TString strKW_SUBMENUEnd(L"END SUBMENU");
const TString strKW_SEPARATOR(L"SEPARATOR");
const TString strKW_SYMBOL(L"SYMBOL=");
const TString strKW_TEXTSYM(L"TEXTSYM=");
}
// ---------------------------------------------------------------------------
// CLASS: TResEdMenuItem
// PREFIX: remeni
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// TResEdMenuItem: Public, static methods
// ---------------------------------------------------------------------------
// Sort by name comparator
tCIDLib::ESortComps
TResEdMenuItem::eComp( const TResEdMenuItem& remeni1
, const TResEdMenuItem& remeni2)
{
return remeni1.m_strName.eCompare(remeni2.m_strName);
}
// ---------------------------------------------------------------------------
// TResEdMenuItem: Constructors and Destructor
// ---------------------------------------------------------------------------
TResEdMenuItem::TResEdMenuItem() :
m_c4Id(0)
, m_premendSubMenu(nullptr)
, m_eType(tCIDCtrls::EMenuItemTypes::ActionItem)
{
}
TResEdMenuItem::TResEdMenuItem(const TResEdMenuItem& remeniToCopy) :
m_c4Id(remeniToCopy.m_c4Id)
, m_premendSubMenu(nullptr)
, m_eType(remeniToCopy.m_eType)
, m_strName(remeniToCopy.m_strName)
, m_strTextSym(remeniToCopy.m_strTextSym)
{
// If there's a submenu, then replicate it
if (remeniToCopy.m_premendSubMenu)
m_premendSubMenu = new TResEdMenuDesc(*remeniToCopy.m_premendSubMenu);
}
TResEdMenuItem::TResEdMenuItem( const tCIDCtrls::EMenuItemTypes eType
, const TString& strName
, const tCIDLib::TCard4 c4Id
, const TString& strTextSym) :
m_c4Id(c4Id)
, m_premendSubMenu(nullptr)
, m_eType(eType)
, m_strName(strName)
, m_strTextSym(strTextSym)
{
// If submenu type, then create the submenu for later filling in
if (eType == tCIDCtrls::EMenuItemTypes::SubMenu)
m_premendSubMenu = new TResEdMenuDesc;
}
TResEdMenuItem::~TResEdMenuItem()
{
delete m_premendSubMenu;
m_premendSubMenu = nullptr;
}
// ---------------------------------------------------------------------------
// TResEdMenuItem: Public operators
// ---------------------------------------------------------------------------
TResEdMenuItem& TResEdMenuItem::operator=(const TResEdMenuItem& remeniToAssign)
{
if (this != &remeniToAssign)
{
m_c4Id = remeniToAssign.m_c4Id;
m_eType = remeniToAssign.m_eType;
m_strName = remeniToAssign.m_strName;
m_strTextSym = remeniToAssign.m_strTextSym;
//
// If there's a submenu, then replicate it. If there isn't, get rid
// of ours if we have one.
//
if (remeniToAssign.m_premendSubMenu)
{
// The source must have been a submenu type
CIDAssert
(
m_eType != tCIDCtrls::EMenuItemTypes::SubMenu
, L"Non-submenu item type had a submenu"
);
// If we don't have one yet, create it
if (!m_premendSubMenu)
m_premendSubMenu = new TResEdMenuDesc(*remeniToAssign.m_premendSubMenu);
else
*m_premendSubMenu = *remeniToAssign.m_premendSubMenu;
}
else
{
// The source cannot have been a submenu type
CIDAssert
(
m_eType == tCIDCtrls::EMenuItemTypes::SubMenu
, L"Submenu item type had no submenu"
);
delete m_premendSubMenu;
m_premendSubMenu = nullptr;
}
}
return *this;
}
tCIDLib::TBoolean
TResEdMenuItem::operator==(const TResEdMenuItem& remeniToComp) const
{
if ((m_c4Id != remeniToComp.m_c4Id)
|| (m_eType != remeniToComp.m_eType)
|| (m_strName != remeniToComp.m_strName)
|| (m_strTextSym != remeniToComp.m_strTextSym))
{
return kCIDLib::False;
}
// If we both have no submenu, we are equal
if (!m_premendSubMenu && !remeniToComp.m_premendSubMenu)
return kCIDLib::True;
// If we both have a submenu, then recurse and return that
if (m_premendSubMenu && remeniToComp.m_premendSubMenu)
return (*m_premendSubMenu == *remeniToComp.m_premendSubMenu);
// Else we have to be different
return kCIDLib::False;
}
tCIDLib::TBoolean
TResEdMenuItem::operator!=(const TResEdMenuItem& remeniToComp) const
{
return !operator==(remeniToComp);
}
// ---------------------------------------------------------------------------
// TResEdMenuItem: Public, non-virtual methods
// ---------------------------------------------------------------------------
tCIDLib::TCard4 TResEdMenuItem::c4Id() const
{
return m_c4Id;
}
tCIDLib::TCard4 TResEdMenuItem::c4Id(const tCIDLib::TCard4 c4ToSet)
{
m_c4Id = c4ToSet;
return m_c4Id;
}
// Get or set the type of this menu item
tCIDCtrls::EMenuItemTypes TResEdMenuItem::eType() const
{
return m_eType;
}
tCIDCtrls::EMenuItemTypes
TResEdMenuItem::eType(const tCIDCtrls::EMenuItemTypes eType)
{
m_eType = eType;
//
// If not a submenu, remove any submenu we might have. If a submenu,
// then create an empty one if we don't have one.
//
if (m_premendSubMenu && (m_eType != tCIDCtrls::EMenuItemTypes::SubMenu))
{
delete m_premendSubMenu;
m_premendSubMenu = nullptr;
}
else if (!m_premendSubMenu && (m_eType == tCIDCtrls::EMenuItemTypes::SubMenu))
{
m_premendSubMenu = new TResEdMenuDesc;
}
return m_eType;
}
// Provide access to the submenu contents if we are a submenu type
const TResEdMenuDesc& TResEdMenuItem::remendSubMenu() const
{
CIDAssert(m_premendSubMenu != 0, L"There is no sub-menu to return");
return *m_premendSubMenu;
}
TResEdMenuDesc& TResEdMenuItem::remendSubMenu()
{
CIDAssert(m_premendSubMenu != 0, L"There is no sub-menu to return");
return *m_premendSubMenu;
}
// Get or set the symbolic name for this item
const TString& TResEdMenuItem::strName() const
{
return m_strName;
}
const TString& TResEdMenuItem::strName(const TString& strToSet)
{
m_strName = strToSet;
return m_strName;
}
// Get or set the symbolic name for the text of this item
const TString& TResEdMenuItem::strTextSym() const
{
return m_strTextSym;
}
const TString& TResEdMenuItem::strTextSym(const TString& strToSet)
{
m_strTextSym = strToSet;
return m_strTextSym;
}
// Format out contents out to the target output file
tCIDLib::TVoid
TResEdMenuItem::WriteTo( TTextOutStream& strmTar
, const tCIDLib::TCard4 c4Level) const
{
const TTextOutStream::Spaces Space(c4Level * 4);
const TTextOutStream::Spaces Space2((c4Level + 1) * 4);
if (m_eType == tCIDCtrls::EMenuItemTypes::Decoration)
{
strmTar << Space << L"SEPARATOR\n\n";
}
else
{
strmTar << Space << L"ITEM=\n"
<< Space2 << L"SYMBOL=" << m_strName << L", " << m_c4Id
<< kCIDLib::NewLn;
if (!m_strTextSym.bIsEmpty())
strmTar << Space2 << L"TEXTSYM=" << m_strTextSym << kCIDLib::NewLn;
strmTar << Space << L"END ITEM\n\n";
}
}
// ---------------------------------------------------------------------------
// CLASS: TResEdMenuDesc
// PREFIX: remend
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// TResEdMenuItem: Public, static methods
// ---------------------------------------------------------------------------
tCIDLib::ESortComps
TResEdMenuDesc::eComp(const TResEdMenuDesc& remend1, const TResEdMenuDesc& remend2)
{
return remend1.m_strName.eCompare(remend2.m_strName);
}
// ---------------------------------------------------------------------------
// TResEdMenuItem: Constructors and Destructor
// ---------------------------------------------------------------------------
TResEdMenuDesc::TResEdMenuDesc() :
m_c4Id(kCIDLib::c4MaxCard)
{
}
TResEdMenuDesc::TResEdMenuDesc(const TResEdMenuDesc& remendToCopy) :
m_c4Id(remendToCopy.m_c4Id)
, m_colItems(remendToCopy.m_colItems)
, m_strName(remendToCopy.m_strName)
{
}
TResEdMenuDesc::~TResEdMenuDesc()
{
}
// ---------------------------------------------------------------------------
// TResEdMenuDesc: Public operators
// ---------------------------------------------------------------------------
TResEdMenuDesc& TResEdMenuDesc::operator=(const TResEdMenuDesc& remendToAssign)
{
if (this != &remendToAssign)
{
m_c4Id = remendToAssign.m_c4Id;
m_colItems = remendToAssign.m_colItems;
m_strName = remendToAssign.m_strName;
}
return *this;
}
tCIDLib::TBoolean
TResEdMenuDesc::operator==(const TResEdMenuDesc& remendToComp) const
{
return
(
(m_c4Id == remendToComp.m_c4Id)
&& (m_strName == remendToComp.m_strName)
&& (m_colItems == remendToComp.m_colItems)
);
}
tCIDLib::TBoolean
TResEdMenuDesc::operator!=(const TResEdMenuDesc& remeniToComp) const
{
return !operator==(remeniToComp);
}
// ---------------------------------------------------------------------------
// TResEdMenuDesc: Public, non-virtual methods
// ---------------------------------------------------------------------------
// Get or set the top level menu id
tCIDLib::TCard4 TResEdMenuDesc::c4Id() const
{
return m_c4Id;
}
tCIDLib::TCard4 TResEdMenuDesc::c4Id(const tCIDLib::TCard4 c4ToSet)
{
m_c4Id = c4ToSet;
return m_c4Id;
}
//
// Called to parse out a menu block. This can be recursive where sub-menus
// are involved.
//
tCIDLib::TVoid
TResEdMenuDesc::ParseFrom(TResEdSpooler& spoolSrc, const tCIDLib::TCard4 c4Level)
{
tCIDLib::TCard4 c4Id;
TString strInBuf;
TString strTmp;
//
// If this is the outermost menu, level 0, then we have to deal with
// the symbol for the menu itself. After that, the symbol info for
// submenus are in the menu items that contain them.
//
if (!c4Level)
{
spoolSrc.bCheckPrefix(CIDResEd_MenuData::strKW_SYMBOL, strInBuf, kCIDLib::True);
spoolSrc.ParseNameVal(strInBuf, c4Id);
m_strName = strInBuf;
m_c4Id = c4Id;
}
while(kCIDLib::True)
{
spoolSrc.bReadLine(strInBuf);
//
// If we hit the end of the main menu block, we are done.
//
if (strInBuf == CIDResEd_MenuData::strKW_MENUEnd)
{
//
// We should only see this when we are back to the main menu
// level.
//
if (c4Level)
{
facCIDResEd.ThrowErr
(
CID_FILE
, CID_LINE
, kResEdErrs::errcSrcFile_NestedEndMenu
, tCIDLib::ESeverities::Failed
, tCIDLib::EErrClasses::Format
, TCardinal(spoolSrc.c4CurLine())
, spoolSrc.strSrcFile()
);
}
break;
}
else if (strInBuf == CIDResEd_MenuData::strKW_SUBMENUEnd)
{
// We should only see this when in a nested submenu
if (!c4Level)
{
facCIDResEd.ThrowErr
(
CID_FILE
, CID_LINE
, kResEdErrs::errcSrcFile_BadEndSubMenu
, tCIDLib::ESeverities::Failed
, tCIDLib::EErrClasses::Format
, TCardinal(spoolSrc.c4CurLine())
, spoolSrc.strSrcFile()
);
}
break;
}
if (strInBuf == CIDResEd_MenuData::strKW_SEPARATOR)
{
// Add a separator item to our item list
TResEdMenuItem remitNew
(
tCIDCtrls::EMenuItemTypes::Decoration
, TString::strEmpty()
, kCIDLib::c4MaxCard
, TString::strEmpty()
);
m_colItems.objAdd(remitNew);
}
else if (strInBuf == CIDResEd_MenuData::strKW_SUBMENU)
{
// We should see the symbol, id, and the text symbol
spoolSrc.bCheckPrefix(CIDResEd_MenuData::strKW_SYMBOL, strInBuf, kCIDLib::True);
spoolSrc.ParseNameVal(strInBuf, c4Id);
spoolSrc.bCheckPrefix(CIDResEd_MenuData::strKW_TEXTSYM, strTmp, kCIDLib::True);
// Create a temp new menu item with this info
TResEdMenuItem remitNew(tCIDCtrls::EMenuItemTypes::SubMenu, strInBuf, c4Id, strTmp);
// And now recurse on its nested menu
remitNew.remendSubMenu().ParseFrom(spoolSrc, c4Level + 1);
// Add it to the list
m_colItems.objAdd(remitNew);
}
else if (strInBuf == CIDResEd_MenuData::strKW_ITEM)
{
// We should see the symbol, id, and the text symbol
spoolSrc.bCheckPrefix(CIDResEd_MenuData::strKW_SYMBOL, strInBuf, kCIDLib::True);
spoolSrc.ParseNameVal(strInBuf, c4Id);
spoolSrc.bCheckPrefix(CIDResEd_MenuData::strKW_TEXTSYM, strTmp, kCIDLib::True);
// Create a standard item and add it to our list
TResEdMenuItem remitNew(tCIDCtrls::EMenuItemTypes::ActionItem, strInBuf, c4Id, strTmp);
m_colItems.objAdd(remitNew);
// And we should see an end item
spoolSrc.CheckNext(CIDResEd_MenuData::strKW_ITEMEnd);
}
else
{
facCIDResEd.ThrowErr
(
CID_FILE
, CID_LINE
, kResEdErrs::errcSrcFile_Expected
, tCIDLib::ESeverities::Failed
, tCIDLib::EErrClasses::Format
, TString(L"SUBMENU=, ITEM=, SEPARATOR, or END MENU")
, TCardinal(spoolSrc.c4CurLine())
, spoolSrc.strSrcFile()
);
}
}
}
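//
// Illustrative sample of the source text that ParseFrom() consumes (symbol
// names and ids below are made up, not from a real resource file):
//
//      MENU=
//          SYMBOL=MainMenu, 1
//
//          SUBMENU=
//              SYMBOL=Menu_File, 100
//              TEXTSYM=midMenu_File
//
//              ITEM=
//                  SYMBOL=Menu_File_Exit, 101
//                  TEXTSYM=midMenu_File_Exit
//              END ITEM
//
//              SEPARATOR
//          END SUBMENU
//      END MENU
//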
// Access individual menu sub-items
const TResEdMenuItem&
TResEdMenuDesc::remeniAt(const tCIDLib::TCard4 c4At) const
{
return m_colItems[c4At];
}
TResEdMenuItem& TResEdMenuDesc::remeniAt(const tCIDLib::TCard4 c4At)
{
return m_colItems[c4At];
}
// Get or set the symbolic name of the top level menu
const TString& TResEdMenuDesc::strName() const
{
return m_strName;
}
const TString& TResEdMenuDesc::strName(const TString& strToSet)
{
m_strName = strToSet;
return m_strName;
}
//
// Called to write out our contents to the output file. We get a level since
// menus can be recursive. We use it to control indenting.
//
tCIDLib::TVoid
TResEdMenuDesc::WriteTo( TTextOutStream& strmTar
, const tCIDLib::TCard4 c4Level) const
{
const TTextOutStream::Spaces Space((c4Level + 1) * 4);
const TTextOutStream::Spaces Space2((c4Level + 2) * 4);
if (!c4Level)
{
//
// In this case, the overall menu symbol and id are stored
// at our level.
//
strmTar << L"MENU=\n"
<< Space << L"SYMBOL=" << m_strName << L", " << m_c4Id
<< kCIDLib::DNewLn;
}
// Format out all our items. If we hit a sub-menu, we recurse
const tCIDLib::TCard4 c4Count = m_colItems.c4ElemCount();
for (tCIDLib::TCard4 c4Index = 0; c4Index < c4Count; c4Index++)
{
const TResEdMenuItem& remeniCur = m_colItems[c4Index];
if (remeniCur.eType() == tCIDCtrls::EMenuItemTypes::SubMenu)
{
//
// We have to recurse here, but we have to put out the
// symbol and text sym.
//
strmTar << Space << L"SUBMENU=\n"
<< Space2 << L"SYMBOL="
<< remeniCur.strName() << L", " << remeniCur.c4Id()
<< kCIDLib::NewLn;
if (!remeniCur.strTextSym().bIsEmpty())
{
strmTar << Space2 << L"TEXTSYM="
<< remeniCur.strTextSym() << kCIDLib::NewLn;
}
strmTar << kCIDLib::NewLn;
// Now recurse
remeniCur.remendSubMenu().WriteTo(strmTar, c4Level + 1);
// And close off the submenu
strmTar << Space << L"END SUBMENU\n\n";
}
else
{
// Just ask it to write itself out at the next level of indent
remeniCur.WriteTo(strmTar, c4Level + 1);
}
}
// And close off the menu when we exit the main menu
if (!c4Level)
strmTar << L"END MENU\n\n";
}
| 29.28254
| 99
| 0.535614
|
MarkStega
|
33d6568c9c007b8a4874951bcef8560b02d982bf
| 4,230
|
cpp
|
C++
|
lib/fuzzer/FuzzerIO.cpp
|
remote-android/toolchain_compiler-rt
|
2ab6b9526fadddd76a15e0ecedf60462b2de2ed7
|
[
"Apache-2.0"
] | null | null | null |
lib/fuzzer/FuzzerIO.cpp
|
remote-android/toolchain_compiler-rt
|
2ab6b9526fadddd76a15e0ecedf60462b2de2ed7
|
[
"Apache-2.0"
] | null | null | null |
lib/fuzzer/FuzzerIO.cpp
|
remote-android/toolchain_compiler-rt
|
2ab6b9526fadddd76a15e0ecedf60462b2de2ed7
|
[
"Apache-2.0"
] | 3
|
2021-11-28T11:21:54.000Z
|
2022-03-13T11:10:01.000Z
|
//===- FuzzerIO.cpp - IO utils. -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// IO functions.
//===----------------------------------------------------------------------===//
#include "FuzzerDefs.h"
#include "FuzzerExtFunctions.h"
#include "FuzzerIO.h"
#include "FuzzerUtil.h"
#include <algorithm>
#include <cstdarg>
#include <fstream>
#include <iterator>
#include <sys/stat.h>
#include <sys/types.h>
namespace fuzzer {
static FILE *OutputFile = stderr;
long GetEpoch(const std::string &Path) {
struct stat St;
if (stat(Path.c_str(), &St))
return 0; // Can't stat, be conservative.
return St.st_mtime;
}
Unit FileToVector(const std::string &Path, size_t MaxSize, bool ExitOnError) {
std::ifstream T(Path, std::ios::binary);
if (ExitOnError && !T) {
Printf("No such directory: %s; exiting\n", Path.c_str());
exit(1);
}
T.seekg(0, T.end);
auto EndPos = T.tellg();
if (EndPos < 0) return {};
size_t FileLen = EndPos;
if (MaxSize)
FileLen = std::min(FileLen, MaxSize);
T.seekg(0, T.beg);
Unit Res(FileLen);
T.read(reinterpret_cast<char *>(Res.data()), FileLen);
return Res;
}
std::string FileToString(const std::string &Path) {
std::ifstream T(Path, std::ios::binary);
return std::string((std::istreambuf_iterator<char>(T)),
std::istreambuf_iterator<char>());
}
void CopyFileToErr(const std::string &Path) {
Printf("%s", FileToString(Path).c_str());
}
void WriteToFile(const Unit &U, const std::string &Path) {
// Use raw C interface because this function may be called from a sig handler.
FILE *Out = fopen(Path.c_str(), "w");
if (!Out) return;
fwrite(U.data(), sizeof(U[0]), U.size(), Out);
fclose(Out);
}
void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
long *Epoch, size_t MaxSize, bool ExitOnError) {
long E = Epoch ? *Epoch : 0;
Vector<std::string> Files;
ListFilesInDirRecursive(Path, Epoch, &Files, /*TopDir*/true);
size_t NumLoaded = 0;
for (size_t i = 0; i < Files.size(); i++) {
auto &X = Files[i];
if (Epoch && GetEpoch(X) < E) continue;
NumLoaded++;
if ((NumLoaded & (NumLoaded - 1)) == 0 && NumLoaded >= 1024)
Printf("Loaded %zd/%zd files from %s\n", NumLoaded, Files.size(), Path);
auto S = FileToVector(X, MaxSize, ExitOnError);
if (!S.empty())
V->push_back(S);
}
}
void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V) {
Vector<std::string> Files;
ListFilesInDirRecursive(Dir, 0, &Files, /*TopDir*/true);
for (auto &File : Files)
if (size_t Size = FileSize(File))
V->push_back({File, Size});
}
std::string DirPlusFile(const std::string &DirPath,
const std::string &FileName) {
return DirPath + GetSeparator() + FileName;
}
void DupAndCloseStderr() {
int OutputFd = DuplicateFile(2);
if (OutputFd > 0) {
FILE *NewOutputFile = OpenFile(OutputFd, "w");
if (NewOutputFile) {
OutputFile = NewOutputFile;
if (EF->__sanitizer_set_report_fd)
EF->__sanitizer_set_report_fd(
reinterpret_cast<void *>(GetHandleFromFd(OutputFd)));
DiscardOutput(2);
}
}
}
void CloseStdout() {
DiscardOutput(1);
}
void Printf(const char *Fmt, ...) {
va_list ap;
va_start(ap, Fmt);
vfprintf(OutputFile, Fmt, ap);
va_end(ap);
fflush(OutputFile);
}
void VPrintf(bool Verbose, const char *Fmt, ...) {
if (!Verbose) return;
va_list ap;
va_start(ap, Fmt);
vfprintf(OutputFile, Fmt, ap);
va_end(ap);
fflush(OutputFile);
}
void RmDirRecursive(const std::string &Dir) {
IterateDirRecurisve(
Dir, [](const std::string &Path) {},
[](const std::string &Path) { RmDir(Path); },
[](const std::string &Path) { RemoveFile(Path); });
}
std::string TempPath(const char *Extension) {
return DirPlusFile(TmpDir(),
"libFuzzerTemp." + std::to_string(GetPid()) + Extension);
}
} // namespace fuzzer
| 28.013245
| 80
| 0.60922
|
remote-android
|
33d7dd879cf0042ce2eb234c7f38e828de1fce7e
| 1,287
|
cpp
|
C++
|
src/main/cpp/platform-wrapper-tests/TestBRAMMasked.cpp
|
DM-PRO-17-A/fpga-tidbits
|
017226cb8d56353454e6a84e9c2dfda176ca4865
|
[
"BSD-2-Clause"
] | 87
|
2015-09-17T21:22:40.000Z
|
2022-03-11T08:06:53.000Z
|
src/main/cpp/platform-wrapper-tests/TestBRAMMasked.cpp
|
DM-PRO-17-A/fpga-tidbits
|
017226cb8d56353454e6a84e9c2dfda176ca4865
|
[
"BSD-2-Clause"
] | 4
|
2016-03-04T02:10:21.000Z
|
2021-01-08T21:48:58.000Z
|
src/main/cpp/platform-wrapper-tests/TestBRAMMasked.cpp
|
DM-PRO-17-A/fpga-tidbits
|
017226cb8d56353454e6a84e9c2dfda176ca4865
|
[
"BSD-2-Clause"
] | 26
|
2015-11-16T05:44:49.000Z
|
2021-04-07T07:50:42.000Z
|
#include <iostream>
#include "platform.h"
#include "TestBRAMMasked.hpp"
#include <string>
using namespace std;
int main() {
WrapperRegDriver * platform = initPlatform();
TestBRAMMasked t(platform);
cout << "Signature: " << hex << t.get_signature() << dec << endl;
string cmd;
unsigned int addr, dat, writeMask;
cin >> cmd;
// commands:
// r [address] -- read data from [address] and print out
// w [address] [data] [writeMask] -- write data to address with given write mask
while(cmd != "q") {
if(cmd == "r") {
cin >> addr;
t.set_ports_0_req_addr(addr);
cout << "addr " << addr << " = " << t.get_ports_0_rsp_readData() << endl;
} else if (cmd == "w") {
cin >> addr >> dat >> writeMask;
t.set_ports_0_req_addr(addr);
t.set_ports_0_req_writeData(dat);
t.set_ports_0_req_writeMask_0(writeMask & 1);
t.set_ports_0_req_writeMask_1((writeMask & 2) >> 1);
t.set_ports_0_req_writeMask_2((writeMask & 4) >> 2);
t.set_ports_0_req_writeMask_3((writeMask & 8) >> 3);
t.set_ports_0_req_writeEn(1);
t.set_ports_0_req_writeEn(0);
cout << "wrote " << dat << " to " << addr << endl;
} else cout << "unrecognized" << endl;
cin >> cmd;
}
return 0;
}
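// Illustrative session (values made up): write 0xCAFEBABE (3405691582) to
// address 4 with all four byte lanes enabled, then read it back and quit.
// Assumes the BRAM returns the previously written word on the read port:
//
//   w 4 3405691582 15
//   wrote 3405691582 to 4
//   r 4
//   addr 4 = 3405691582
//   q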
| 27.382979
| 82
| 0.596737
|
DM-PRO-17-A
|
33e0933d389414c6edf3ddb1169092262e464a0f
| 4,577
|
cpp
|
C++
|
test/vector3_test.cpp
|
bluelightning32/walnut
|
c259e62dad5a22b95978e28afe18ebe230849cc6
|
[
"MIT"
] | 3
|
2021-10-16T16:22:52.000Z
|
2022-02-07T21:41:34.000Z
|
test/vector3_test.cpp
|
bluelightning32/walnut
|
c259e62dad5a22b95978e28afe18ebe230849cc6
|
[
"MIT"
] | 2
|
2021-10-17T10:25:35.000Z
|
2022-01-30T18:32:19.000Z
|
test/vector3_test.cpp
|
bluelightning32/walnut
|
c259e62dad5a22b95978e28afe18ebe230849cc6
|
[
"MIT"
] | null | null | null |
#include "walnut/vector3.h"
#include "gtest/gtest.h"
#include "walnut/point3.h"
namespace walnut {
TEST(Vector3, IsSameDir) {
Vector3 v1a(0, 0, 1);
Vector3 v1b(0, 0, 2);
Vector3 v2a(1, 0, 0);
Vector3 v2b(10, 0, 0);
EXPECT_TRUE(v1a.IsSameDir(v1b));
EXPECT_TRUE(v1b.IsSameDir(v1a));
EXPECT_FALSE(v1a.IsSameDir(v2a));
EXPECT_TRUE(v2a.IsSameDir(v2b));
EXPECT_TRUE(v2b.IsSameDir(v2a));
EXPECT_FALSE(v2a.IsSameDir(v1a));
}
TEST(Vector3, OppositeDirIsNotSame) {
Vector3 v1(1, 1, 1);
Vector3 v2(-1, -1, -1);
Vector3 v3(-2, -2, -2);
EXPECT_FALSE(v1.IsSameDir(v2));
EXPECT_FALSE(v1.IsSameDir(v3));
EXPECT_TRUE(v2.IsSameDir(v3));
}
TEST(Vector3, IsSameOrOppositeDir) {
Vector3 v1a(0, 0, -1);
Vector3 v1b(0, 0, -2);
Vector3 v1c(0, 0, 1);
Vector3 v1d(0, 0, 2);
Vector3 v2(1, 0, 0);
EXPECT_TRUE(v1a.IsSameOrOppositeDir(v1b));
EXPECT_TRUE(v1b.IsSameOrOppositeDir(v1c));
EXPECT_TRUE(v1c.IsSameOrOppositeDir(v1d));
EXPECT_TRUE(v1d.IsSameOrOppositeDir(v1a));
EXPECT_FALSE(v1a.IsSameOrOppositeDir(v2));
EXPECT_FALSE(v1c.IsSameOrOppositeDir(v2));
}
TEST(Vector3, Minus) {
Vector3 v1(1, 10, 100);
Vector3 v2(2, 20, 200);
EXPECT_EQ(v1 - v2, Vector3(-1, -10, -100));
}
TEST(Vector3, MinusMax) {
static constexpr int coord_bits = 640;
BigInt min_value = BigInt::min_value(coord_bits - 1);
BigInt max_value = BigInt::max_value(coord_bits - 1);
Vector3 v1(min_value, min_value, min_value);
Vector3 v2(max_value, max_value, max_value);
BigInt expected = min_value - max_value;
EXPECT_EQ(v1 - v2, Vector3(expected, expected, expected));
}
TEST(Vector3, Dot) {
Vector3 v1(5, 0, 0);
Vector3 v2(10, 0, 0);
EXPECT_EQ(v1.Dot(v2), 50);
}
TEST(Vector3, GetScaleSquared) {
Vector3 v1(1, 2, 3);
EXPECT_EQ(v1.GetScaleSquared(), 1*1 + 2*2 + 3*3);
}
TEST(Vector3, GetScaleSquaredMax) {
static constexpr int coord_bits = 640;
BigInt min_value = BigInt::min_value(coord_bits - 1);
// abs(int_min) > abs(int_max), so a vector with all int_min coordinates will
// have the biggest scale.
Vector3 min_vector(min_value, min_value, min_value);
BigInt expected_scale = min_value;
expected_scale = expected_scale * min_value;
expected_scale = expected_scale * 3;
EXPECT_EQ(min_vector.GetScaleSquared(), expected_scale);
}
TEST(Vector3, Scale) {
Vector3 v1(0, 2, 1);
EXPECT_EQ(v1, v1.Scale(1));
EXPECT_TRUE(v1.IsSameDir(v1.Scale(2)));
EXPECT_EQ(v1.Scale(2).GetScaleSquared(), v1.GetScaleSquared() * 2 * 2);
EXPECT_TRUE(v1.IsSameDir(v1.Scale(10)));
EXPECT_EQ(v1.Scale(10).GetScaleSquared(), v1.GetScaleSquared() * 10 * 10);
}
TEST(Vector3, DotNegMax) {
static constexpr int coord_bits = 640;
BigInt min_value = BigInt::min_value(coord_bits - 1);
Vector3 min_vector(min_value, min_value, min_value);
BigInt expected_scale = min_value;
expected_scale = expected_scale * min_value;
expected_scale = expected_scale * 3;
EXPECT_EQ(min_vector.Dot(min_vector), expected_scale);
}
TEST(Vector3, DotPosMax) {
static constexpr int coord_bits = 640;
BigInt max_value = BigInt::max_value(coord_bits - 1);
Vector3 max_vector(max_value, max_value, max_value);
BigInt expected_scale = max_value;
expected_scale = expected_scale * max_value;
expected_scale = expected_scale * 3;
EXPECT_EQ(max_vector.Dot(max_vector), expected_scale);
}
TEST(Vector3, CrossMax) {
static constexpr int coord_bits = 640;
BigInt min_value = BigInt::min_value(coord_bits - 1);
BigInt max_value = BigInt::max_value(coord_bits - 1);
Vector3 v1(min_value, min_value, BigInt(0));
Vector3 v2(max_value, min_value, BigInt(0));
// Calculate the z coordinate of the cross product using an extra large
// integer type, then verify it produces the same value.
BigInt casted_min_value(min_value);
BigInt casted_max_value(max_value);
BigInt expected_z = casted_min_value*casted_min_value -
casted_min_value*casted_max_value;
EXPECT_EQ(v1.Cross(v2).z(), expected_z);
}
TEST(Vector3, DropDimensionSideness) {
Vector3 a3(1, 2, 1);
Vector3 b3(2, 1, 3);
Vector3 c3 = a3.Cross(b3);
for (int dimension = 0; dimension < 3; ++dimension) {
Vector2 a2 = a3.DropDimension(dimension);
Vector2 b2 = b3.DropDimension(dimension);
BigInt c = a2.Cross(b2);
EXPECT_FALSE(c3.components()[dimension].IsZero())
<< "dimension=" << dimension;
if (c3.components()[dimension] > 0) {
EXPECT_GT(c, 0) << "dimension=" << dimension;
} else {
EXPECT_LT(c, 0) << "dimension=" << dimension;
}
}
}
} // walnut
| 26.766082
| 79
| 0.694341
|
bluelightning32
|
33e1207e4732c3afbd10abd6bc9be226d1d8978a
| 5,719
|
cpp
|
C++
|
moveit_ros/planning/planning_components_tools/src/evaluate_collision_checking_speed.cpp
|
lilustga/moveit2
|
036454faf63292c3a273d0dc91f865cfdaf442a8
|
[
"BSD-3-Clause"
] | 1
|
2021-04-26T00:46:04.000Z
|
2021-04-26T00:46:04.000Z
|
moveit_ros/planning/planning_components_tools/src/evaluate_collision_checking_speed.cpp
|
lilustga/moveit2
|
036454faf63292c3a273d0dc91f865cfdaf442a8
|
[
"BSD-3-Clause"
] | null | null | null |
moveit_ros/planning/planning_components_tools/src/evaluate_collision_checking_speed.cpp
|
lilustga/moveit2
|
036454faf63292c3a273d0dc91f865cfdaf442a8
|
[
"BSD-3-Clause"
] | 1
|
2020-07-21T19:17:47.000Z
|
2020-07-21T19:17:47.000Z
|
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2012, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: Ioan Sucan, Sachin Chitta */
#include <chrono>
#include <moveit/planning_scene_monitor/planning_scene_monitor.h>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/thread.hpp>
using namespace std::chrono_literals;
static const std::string ROBOT_DESCRIPTION = "robot_description";
static const rclcpp::Logger LOGGER = rclcpp::get_logger("evaluate_collision_checking_speed");
void runCollisionDetection(unsigned int id, unsigned int trials, const planning_scene::PlanningScene* scene,
const moveit::core::RobotState* state)
{
RCLCPP_INFO(LOGGER, "Starting thread %u", id);
rclcpp::Clock clock(RCL_ROS_TIME);
collision_detection::CollisionRequest req;
rclcpp::Time start = clock.now();
for (unsigned int i = 0; i < trials; ++i)
{
collision_detection::CollisionResult res;
scene->checkCollision(req, res, *state);
}
double duration = (clock.now() - start).seconds();
RCLCPP_INFO(LOGGER, "Thread %u performed %lf collision checks per second", id, (double)trials / duration);
}
int main(int argc, char** argv)
{
rclcpp::init(argc, argv);
auto node = rclcpp::Node::make_shared("evaluate_collision_checking_speed");
unsigned int nthreads = 2;
unsigned int trials = 10000;
boost::program_options::options_description desc;
desc.add_options()("nthreads", boost::program_options::value<unsigned int>(&nthreads)->default_value(nthreads),
"Number of threads to use")(
"trials", boost::program_options::value<unsigned int>(&trials)->default_value(trials),
"Number of collision checks to perform with each thread")("wait",
"Wait for a user command (so the planning scene can be "
"updated in thre background)")("help", "this screen");
boost::program_options::variables_map vm;
boost::program_options::parsed_options po = boost::program_options::parse_command_line(argc, argv, desc);
boost::program_options::store(po, vm);
boost::program_options::notify(vm);
if (vm.count("help"))
{
std::cout << desc << std::endl;
return 0;
}
rclcpp::executors::MultiThreadedExecutor executor;
executor.add_node(node);
planning_scene_monitor::PlanningSceneMonitor psm(node, ROBOT_DESCRIPTION);
if (psm.getPlanningScene())
{
if (vm.count("wait"))
{
psm.startWorldGeometryMonitor();
psm.startSceneMonitor();
std::cout << "Listening to planning scene updates. Press Enter to continue ..." << std::endl;
std::cin.get();
}
else
rclcpp::sleep_for(500ms);
std::vector<moveit::core::RobotStatePtr> states;
RCLCPP_INFO(LOGGER, "Sampling %u valid states...", nthreads);
for (unsigned int i = 0; i < nthreads; ++i)
{
// sample a valid state
moveit::core::RobotState* state = new moveit::core::RobotState(psm.getPlanningScene()->getRobotModel());
collision_detection::CollisionRequest req;
do
{
state->setToRandomPositions();
state->update();
collision_detection::CollisionResult res;
psm.getPlanningScene()->checkCollision(req, res);
if (!res.collision)
break;
} while (true);
states.push_back(moveit::core::RobotStatePtr(state));
}
std::vector<boost::thread*> threads;
runCollisionDetection(10, trials, psm.getPlanningScene().get(), states[0].get());
for (unsigned int i = 0; i < states.size(); ++i)
threads.push_back(new boost::thread(
std::bind(&runCollisionDetection, i, trials, psm.getPlanningScene().get(), states[i].get())));
for (unsigned int i = 0; i < states.size(); ++i)
{
threads[i]->join();
delete threads[i];
}
}
else
RCLCPP_ERROR(LOGGER, "Planning scene not configured");
return 0;
}
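// Hedged example invocations (how the binary is launched depends on the
// install; the flags are the ones defined above):
//
//   evaluate_collision_checking_speed --nthreads 4 --trials 20000
//   evaluate_collision_checking_speed --wait    (update the scene, then press Enter)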
| 40.274648
| 120
| 0.668299
|
lilustga
|
33e346ac3c3e7d077487530e43fe736c9cb58580
| 24,802
|
hpp
|
C++
|
glfw3_app/rx_gui_emu/RX/CALC_app/calc_gui.hpp
|
hirakuni45/glfw3_app
|
d9ceeef6d398229fda4849afe27f8b48d1597fcf
|
[
"BSD-3-Clause"
] | 9
|
2015-09-22T21:36:57.000Z
|
2021-04-01T09:16:53.000Z
|
glfw3_app/rx_gui_emu/RX/CALC_app/calc_gui.hpp
|
hirakuni45/glfw3_app
|
d9ceeef6d398229fda4849afe27f8b48d1597fcf
|
[
"BSD-3-Clause"
] | null | null | null |
glfw3_app/rx_gui_emu/RX/CALC_app/calc_gui.hpp
|
hirakuni45/glfw3_app
|
d9ceeef6d398229fda4849afe27f8b48d1597fcf
|
[
"BSD-3-Clause"
] | 2
|
2019-02-21T04:22:13.000Z
|
2021-03-02T17:24:32.000Z
|
#pragma once
//=====================================================================//
/*! @file
@brief GUI scientific calculator class @n
RX65N Envision Kit @n
RX72N Envision Kit @n
calc_gui
@author Kunihito Hiramatsu (hira@rvf-rc45.net)
@copyright Copyright (C) 2020 Kunihito Hiramatsu @n
Released under the MIT license @n
https://github.com/hirakuni45/RX/blob/master/LICENSE
*/
//=====================================================================//
#ifndef EMU
#include "common/renesas.hpp"
#include "common/fixed_fifo.hpp"
#include "common/sci_i2c_io.hpp"
#include "chip/FT5206.hpp"
#endif
#include "graphics/font8x16.hpp"
#include "graphics/kfont.hpp"
#include "graphics/graphics.hpp"
#include "graphics/simple_dialog.hpp"
#include "graphics/widget_director.hpp"
#include "graphics/scaling.hpp"
#include "common/format.hpp"
#include "common/fixed_string.hpp"
#include "common/basic_arith.hpp"
#include "common/mpfr.hpp"
#include "calc_func.hpp"
#include "calc_symbol.hpp"
#include "resource.hpp"
namespace app {
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
/*!
@brief CALC GUI class
*/
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
class calc_gui {
public:
static const uint32_t CALC_NUM = 250; ///< calculation precision in digits (250)
static const uint32_t ANS_NUM = 60; ///< number of digits shown in the result
static const int16_t DISP_OFS_X = 4;
static const int16_t DISP_OFS_Y = 6;
static const int16_t LCD_X = 480;
static const int16_t LCD_Y = 272;
static const auto PIX = graphics::pixel::TYPE::RGB565;
#ifndef EMU
typedef utils::fixed_fifo<uint8_t, 64> RB64;
typedef utils::fixed_fifo<uint8_t, 64> SB64;
#endif
#if defined(SIG_RX65N)
typedef device::PORT<device::PORT6, device::bitpos::B3> LCD_DISP;
typedef device::PORT<device::PORT6, device::bitpos::B6> LCD_LIGHT;
static const uint32_t LCD_ORG = 0x0000'0100;
typedef device::PORT<device::PORT0, device::bitpos::B7> FT5206_RESET;
typedef device::sci_i2c_io<device::SCI6, RB64, SB64, device::port_map::option::FIRST_I2C> FT5206_I2C;
typedef device::glcdc_mgr<device::GLCDC, LCD_X, LCD_Y, PIX> GLCDC;
#elif defined(SIG_RX72N)
typedef device::PORT<device::PORTB, device::bitpos::B3> LCD_DISP;
typedef device::PORT<device::PORT6, device::bitpos::B7> LCD_LIGHT;
static const uint32_t LCD_ORG = 0x0080'0000;
typedef device::PORT<device::PORT6, device::bitpos::B6> FT5206_RESET;
typedef device::sci_i2c_io<device::SCI6, RB64, SB64, device::port_map::option::THIRD_I2C> FT5206_I2C;
typedef device::glcdc_mgr<device::GLCDC, LCD_X, LCD_Y, PIX> GLCDC;
#endif
#ifdef EMU
template <uint32_t LCDX, uint32_t LCDY>
class glcdc_emu {
public:
static const uint32_t width = LCDX;
static const uint32_t height = LCDY;
static const uint32_t line_width = width;
private:
uint16_t fb_[LCDX * LCDY];
public:
void sync_vpos() { }
void* get_fbp() { return fb_; }
};
typedef glcdc_emu<LCD_X, LCD_Y> GLCDC;
#endif
typedef graphics::font8x16 AFONT;
typedef graphics::kfont<16, 16> KFONT;
typedef graphics::font<AFONT, KFONT> FONT;
// Software renderer
typedef graphics::render<GLCDC, FONT> RENDER;
// Standard color instance
typedef graphics::def_color DEF_COLOR;
typedef graphics::def_color DEF_COLOR;
private:
GLCDC glcdc_;
AFONT afont_;
KFONT kfont_;
FONT font_;
RENDER render_;
#ifndef EMU
FT5206_I2C ft5206_i2c_;
typedef chip::FT5206<FT5206_I2C> TOUCH;
#else
class touch_emu {
public:
struct touch_t {
vtx::spos pos;
};
private:
touch_t touch_[4];
uint32_t num_;
public:
touch_emu() : num_(0) { }
uint32_t get_touch_num() const { return num_; }
const auto& get_touch_pos(uint32_t idx) const {
if(idx >= 4) idx = 0;
return touch_[idx];
}
void update() { }
void set_pos(const vtx::spos& pos)
{
touch_[0].pos = pos;
num_ = 1;
}
void reset() { num_ = 0; }
};
typedef touch_emu TOUCH;
#endif
TOUCH touch_;
// RX65N Envision Kit: INT to P02(IRQ10), not used
// RX72N Envision Kit: INT to P34(IRQ4), not used
typedef gui::simple_dialog<RENDER, TOUCH> DIALOG;
DIALOG dialog_;
typedef gui::widget_director<RENDER, TOUCH, 60> WIDD;
WIDD widd_;
static const int16_t BTN_W = 41;
static const int16_t BTN_H = 38;
static const int16_t ORG_X = 8;
static const int16_t ORG_Y = 94;
static const int16_t SPC_X = 47;
static const int16_t SPC_Y = 44;
constexpr int16_t LOC_X(int16_t x)
{
return ORG_X + SPC_X * x;
}
constexpr int16_t LOC_Y(int16_t y)
{
return ORG_Y + SPC_Y * y;
}
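// Worked example of the grid helpers above: LOC_X(5) = 8 + 47 * 5 = 243 and
// LOC_Y(3) = 94 + 44 * 3 = 226, so the "0" button defined below occupies a
// 41 x 38 pixel rectangle starting at (243, 226).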
typedef gui::button BUTTON;
BUTTON no0_;
BUTTON no1_;
BUTTON no2_;
BUTTON no3_;
BUTTON no4_;
BUTTON no5_;
BUTTON no6_;
BUTTON no7_;
BUTTON no8_;
BUTTON no9_;
BUTTON del_;
BUTTON ac_;
BUTTON mul_;
BUTTON div_;
BUTTON add_;
BUTTON sub_;
BUTTON poi_; // .
BUTTON x10_;
BUTTON ans_;
BUTTON equ_; // =
BUTTON sin_;
BUTTON cos_;
BUTTON tan_;
BUTTON pai_;
BUTTON sqr_; // x^2
BUTTON sqrt_; // √
BUTTON pow_; // x^y
BUTTON log_;
BUTTON ln_;
BUTTON inv_; // 1/x
BUTTON fc_;
BUTTON angt_;
BUTTON left_;
BUTTON right_;
BUTTON pin_; // (
BUTTON pot_; // )
BUTTON setup_;
BUTTON sym_;
BUTTON sym_in_;
BUTTON sym_out_;
// Numeric value class
typedef mpfr::value<CALC_NUM> NVAL;
typedef utils::calc_symbol<NVAL> SYMBOL;
SYMBOL symbol_;
typedef utils::calc_func<NVAL> FUNC;
FUNC func_;
typedef utils::basic_arith<NVAL, SYMBOL, FUNC> ARITH;
ARITH arith_;
typedef utils::fixed_string<256> STR;
STR cbackup_;
STR cbuff_;
uint32_t cbuff_pos_;
uint32_t del_len_;
static const int16_t limit_ = 3;
vtx::spos cur_pos_;
bool fc_mode_;
int nest_;
uint32_t symbol_idx_;
int shift_;
void clear_win_()
{
cbackup_.clear();
cbuff_.clear();
cbuff_pos_ = 0;
del_len_ = 0;
cur_pos_.set(0);
render_.set_fore_color(DEF_COLOR::Darkgray);
render_.round_box(vtx::srect(0, 0, 480, 16 * 5 + 6), 8);
nest_ = 0;
symbol_.set_value(SYMBOL::NAME::ANS, NVAL(0));
for(uint8_t i = static_cast<uint8_t>(SYMBOL::NAME::V0); i <= static_cast<uint8_t>(SYMBOL::NAME::V9); i++) {
symbol_.set_value(static_cast<SYMBOL::NAME>(i), NVAL(0));
}
symbol_idx_ = 0;
shift_ = 0;
}
typedef utils::fixed_string<512> OUTSTR;
void conv_cha_(char ch, OUTSTR& out)
{
auto code = static_cast<uint8_t>(ch);
if(code >= 0x80 && code < 0xc0) {
out += symbol_.get_name(static_cast<SYMBOL::NAME>(code));
return;
} else if(code >= 0xc0) {
out += func_.get_name(static_cast<FUNC::NAME>(code));
return;
}
switch(code) {
case '0': out += "0"; break;
case '1': out += "1"; break;
case '2': out += "2"; break;
case '3': out += "3"; break;
case '4': out += "4"; break;
case '5': out += "5"; break;
case '6': out += "6"; break;
case '7': out += "7"; break;
case '8': out += "8"; break;
case '9': out += "9"; break;
case '+': out += "+"; break;
case '-': out += "-"; break;
case '/': out += "÷"; break;
case '*': out += "×"; break;
case '?': out += "?"; break;
default:
if(ch >= 0) {
out += ch;
}
break;
}
}
uint32_t size_cha_(char ch)
{
auto code = static_cast<uint8_t>(ch);
uint32_t l = 8;
if(code >= 0x80 && code < 0xc0) {
// symbol_.get_name(static_cast<SYMBOL::NAME>(code));
l = 16;
} else if(code >= 0xc0) {
l = strlen(func_.get_name(static_cast<FUNC::NAME>(code)));
l *= 8;
} else if(ch >= '0' && ch <= '9') {
l = 16;
} else if(ch == '+' || ch == '-' || ch == '/' || ch == '*') {
l = 16;
}
return l;
}
OUTSTR conv_str_(const char* str)
{
OUTSTR out;
char ch;
while((ch = *str++) != 0) {
conv_cha_(ch, out);
}
return out;
}
void update_calc_()
{
if(cbuff_pos_ == cbuff_.size()) return;
if(cbuff_pos_ > cbuff_.size()) {
if(del_len_ > 0) {
auto x = cur_pos_.x - del_len_;
render_.set_fore_color(DEF_COLOR::Darkgray);
render_.fill_box(vtx::srect(DISP_OFS_X + x, DISP_OFS_Y + cur_pos_.y * 20, del_len_, 16));
cur_pos_.x -= del_len_;
del_len_ = 0;
}
} else {
render_.set_fore_color(DEF_COLOR::White);
auto i = cbuff_pos_;
int x = cur_pos_.x + DISP_OFS_X;
while(i < cbuff_.size()) {
OUTSTR tmp;
conv_cha_(cbuff_[i], tmp);
x = render_.draw_text(vtx::spos(x, DISP_OFS_Y + cur_pos_.y * 20), tmp.c_str());
++i;
}
cur_pos_.x = x - DISP_OFS_X;
}
cbuff_pos_ = cbuff_.size();
}
void parse_cbuff_(OUTSTR& out)
{
uint32_t i = 0;
while(i < cbuff_.size()) {
conv_cha_(cbuff_[i], out);
++i;
}
}
// Display the answer
void draw_ans_(const NVAL& in, bool ok)
{
char tmp[ANS_NUM+1];
if(ok) {
NVAL ans = in;
if(shift_ != 0) {
auto exp = NVAL::exp10(NVAL(shift_));
ans *= exp;
}
ans(ANS_NUM, tmp, sizeof(tmp));
// zero suppress: trim trailing zeros
auto l = strlen(tmp);
while(l > 0) {
--l;
if(tmp[l] != '0') break;
else {
tmp[l] = 0;
}
}
if(l > 0) { if(tmp[l] == '.') tmp[l] = 0; }
} else {
utils::sformat("?", tmp, sizeof(tmp));
}
utils::format(" %s\n") % tmp;
auto out = conv_str_(tmp);
render_.set_fore_color(DEF_COLOR::Darkgray);
render_.round_box(vtx::srect(0, DISP_OFS_Y + 20 * 3, 480, 20), 6, false, true);
render_.set_fore_color(DEF_COLOR::White);
render_.draw_text(vtx::spos(DISP_OFS_X, DISP_OFS_Y + 20 * 3), out.c_str());
if(shift_ != 0) { // draw the exponent
render_.set_fore_color(DEF_COLOR::Black);
render_.fill_box(vtx::srect(480 - DISP_OFS_X - 24 - 2, DISP_OFS_Y + 20 * 3, 32, 20));
char tmp[8];
utils::sformat("%+d", tmp, sizeof(tmp)) % -shift_;
render_.set_fore_color(DEF_COLOR::White);
render_.draw_text(vtx::spos(480 - DISP_OFS_X - 24, DISP_OFS_Y + 20 * 3 + 1), tmp);
}
}
// Evaluate the expression and display the answer
void update_equ_()
{
if(cbuff_.empty()) {
cbuff_ = cbackup_;
return;
}
// Automatically close any parentheses that are still open
while(nest_ > 0) { cbuff_ += ')'; nest_--; }
update_calc_();
auto ok = arith_.analize(cbuff_.c_str());
auto ans = arith_();
symbol_.set_value(SYMBOL::NAME::ANS, ans);
draw_ans_(ans, ok);
OUTSTR cmd;
parse_cbuff_(cmd);
utils::format("%s\n") % cmd.c_str();
cbackup_ = cbuff_;
cbuff_.clear();
cur_pos_.x = 0;
cur_pos_.y++;
if(cur_pos_.y >= limit_) {
render_.move(vtx::srect(DISP_OFS_X, DISP_OFS_Y + 20, 480 - DISP_OFS_X * 2, 20 * 2), vtx::spos(DISP_OFS_X, DISP_OFS_Y));
render_.set_fore_color(DEF_COLOR::Darkgray);
render_.fill_box(vtx::srect(DISP_OFS_X, DISP_OFS_Y + 20 * 2, 480 - DISP_OFS_X * 2, 20));
cur_pos_.y = limit_ - 1;
}
shift_ = 0;
}
void update_fc_()
{
if(fc_mode_) {
sin_.set_title("asin");
cos_.set_title("acos");
tan_.set_title("atan");
// pai_.set_title("е");
} else {
sin_.set_title("sin");
cos_.set_title("cos");
tan_.set_title("tan");
// pai_.set_title("π");
}
}
public:
//-------------------------------------------------------------//
/*!
@brief  calc_gui constructor
*/
//-------------------------------------------------------------//
calc_gui() noexcept :
#ifndef EMU
glcdc_(nullptr, reinterpret_cast<void*>(LCD_ORG)),
#else
glcdc_(),
#endif
afont_(), kfont_(), font_(afont_, kfont_),
render_(glcdc_, font_),
#ifndef EMU
ft5206_i2c_(), touch_(ft5206_i2c_),
#else
touch_(),
#endif
dialog_(render_, touch_),
widd_(render_, touch_),
no0_(vtx::srect(LOC_X(5), LOC_Y(3), BTN_W, BTN_H), "0"),
no1_(vtx::srect(LOC_X(5), LOC_Y(2), BTN_W, BTN_H), "1"),
no2_(vtx::srect(LOC_X(6), LOC_Y(2), BTN_W, BTN_H), "2"),
no3_(vtx::srect(LOC_X(7), LOC_Y(2), BTN_W, BTN_H), "3"),
no4_(vtx::srect(LOC_X(5), LOC_Y(1), BTN_W, BTN_H), "4"),
no5_(vtx::srect(LOC_X(6), LOC_Y(1), BTN_W, BTN_H), "5"),
no6_(vtx::srect(LOC_X(7), LOC_Y(1), BTN_W, BTN_H), "6"),
no7_(vtx::srect(LOC_X(5), LOC_Y(0), BTN_W, BTN_H), "7"),
no8_(vtx::srect(LOC_X(6), LOC_Y(0), BTN_W, BTN_H), "8"),
no9_(vtx::srect(LOC_X(7), LOC_Y(0), BTN_W, BTN_H), "9"),
del_(vtx::srect(LOC_X(8), LOC_Y(0), BTN_W, BTN_H), "DEL"),
ac_ (vtx::srect(LOC_X(9), LOC_Y(0), BTN_W, BTN_H), "AC"),
mul_(vtx::srect(LOC_X(8), LOC_Y(1), BTN_W, BTN_H), "×"),
div_(vtx::srect(LOC_X(9), LOC_Y(1), BTN_W, BTN_H), "÷"),
add_(vtx::srect(LOC_X(8), LOC_Y(2), BTN_W, BTN_H), "+"),
sub_(vtx::srect(LOC_X(9), LOC_Y(2), BTN_W, BTN_H), "-"),
poi_(vtx::srect(LOC_X(6), LOC_Y(3), BTN_W, BTN_H), "・"),
x10_(vtx::srect(LOC_X(7), LOC_Y(3), BTN_W, BTN_H), "X10"),
ans_(vtx::srect(LOC_X(8), LOC_Y(3), BTN_W, BTN_H), "Ans"),
equ_(vtx::srect(LOC_X(9), LOC_Y(3), BTN_W, BTN_H), "="),
sin_(vtx::srect(LOC_X(4), LOC_Y(0), BTN_W, BTN_H), "sin"),
cos_(vtx::srect(LOC_X(4), LOC_Y(1), BTN_W, BTN_H), "cos"),
tan_(vtx::srect(LOC_X(4), LOC_Y(2), BTN_W, BTN_H), "tan"),
pai_(vtx::srect(LOC_X(4), LOC_Y(3), BTN_W, BTN_H), "π"),
sqr_ (vtx::srect(LOC_X(3), LOC_Y(0), BTN_W, BTN_H)),
sqrt_(vtx::srect(LOC_X(3), LOC_Y(1), BTN_W, BTN_H), "√"),
pow_ (vtx::srect(LOC_X(3), LOC_Y(2), BTN_W, BTN_H)),
log_(vtx::srect(LOC_X(2), LOC_Y(0), BTN_W, BTN_H), "log"),
ln_ (vtx::srect(LOC_X(2), LOC_Y(1), BTN_W, BTN_H), "ln"),
inv_(vtx::srect(LOC_X(2), LOC_Y(2), BTN_W, BTN_H)),
fc_ (vtx::srect(LOC_X(0), LOC_Y(0), BTN_W, BTN_H), "FC"),
angt_(vtx::srect(LOC_X(0), LOC_Y(1), BTN_W, BTN_H), "Deg"),
left_ (vtx::srect(LOC_X(0), LOC_Y(3), BTN_W, BTN_H), "<--"),
right_(vtx::srect(LOC_X(1), LOC_Y(3), BTN_W, BTN_H), "-->"),
pin_(vtx::srect(LOC_X(2), LOC_Y(3), BTN_W, BTN_H), "("),
pot_(vtx::srect(LOC_X(3), LOC_Y(3), BTN_W, BTN_H), ")"),
setup_ (vtx::srect(LOC_X(0), LOC_Y(2), BTN_W, BTN_H), "@"),
sym_ (vtx::srect(LOC_X(1), LOC_Y(0), BTN_W, BTN_H), "Sm0"),
sym_in_ (vtx::srect(LOC_X(1), LOC_Y(1), BTN_W, BTN_H), "Min"),
sym_out_ (vtx::srect(LOC_X(1), LOC_Y(2), BTN_W, BTN_H), "Rcl"),
symbol_(), func_(), arith_(symbol_, func_),
cbackup_(), cbuff_(), cbuff_pos_(0), del_len_(0), cur_pos_(0),
fc_mode_(false), nest_(0), symbol_idx_(0), shift_(0)
{ }
//-------------------------------------------------------------//
/*!
@brief  Add a widget
@param[in] w widget
@return "true" on success
*/
//-------------------------------------------------------------//
bool insert_widget(gui::widget* w) noexcept {
return widd_.insert(w);
}
//-------------------------------------------------------------//
/*!
@brief  Remove a widget
@param[in] w widget
*/
//-------------------------------------------------------------//
void remove_widget(gui::widget* w) noexcept {
widd_.remove(w);
}
//-------------------------------------------------------------//
/*!
@brief  Reference to the GLCDC class
@return GLCDC class
*/
//-------------------------------------------------------------//
auto& at_glcdc() noexcept { return glcdc_; }
//-------------------------------------------------------------//
/*!
@brief  Reference to the TOUCH class
@return TOUCH class
*/
//-------------------------------------------------------------//
auto& at_touch() noexcept { return touch_; }
//-------------------------------------------------------------//
/*!
@brief  Reference to the renderer
@return Renderer
*/
//-------------------------------------------------------------//
RENDER& at_render() noexcept { return render_; }
//-------------------------------------------------------------//
/*!
@brief  Start
*/
//-------------------------------------------------------------//
void start() noexcept
{
#ifndef EMU
{ // Initialize the GLCDC
LCD_DISP::DIR = 1;
LCD_LIGHT::DIR = 1;
LCD_DISP::P = 0; // DISP Disable
LCD_LIGHT::P = 0; // BackLight Disable (No PWM)
if(glcdc_.start()) {
utils::format("Start GLCDC\n");
LCD_DISP::P = 1; // DISP Enable
LCD_LIGHT::P = 1; // BackLight Enable (No PWM)
if(!glcdc_.control(GLCDC::CONTROL_CMD::START_DISPLAY)) {
utils::format("GLCDC ctrl fail...\n");
}
} else {
utils::format("GLCDC Fail\n");
}
}
{ // FT5206 touch screen controller
TOUCH::reset<FT5206_RESET>();
uint8_t intr_lvl = 1;
if(!ft5206_i2c_.start(FT5206_I2C::SPEED::STANDARD, intr_lvl)) {
utils::format("FT5206 I2C Start Fail...\n");
}
if(!touch_.start()) {
utils::format("FT5206 Start Fail...\n");
}
}
#endif
}
//-------------------------------------------------------------//
/*!
@brief  Touch panel setup
*/
//-------------------------------------------------------------//
void setup_touch_panel() noexcept
{
render_.sync_frame();
dialog_.modal(vtx::spos(400, 60),
"Touch panel device wait...\nPlease touch it with some screen.");
uint8_t nnn = 0;
while(1) {
render_.sync_frame();
touch_.update();
auto num = touch_.get_touch_num();
if(num == 0) {
++nnn;
if(nnn >= 60) break;
} else {
nnn = 0;
}
}
render_.clear(DEF_COLOR::Black);
}
//-------------------------------------------------------------//
/*!
@brief  GUI setup
*/
//-------------------------------------------------------------//
void setup() noexcept
{
// for(int y = 0; y < 80; ++y) {
// render_.line_h(16*16+(y << 4), 16*16+4, 16*300);
// }
// render_.round_box(vtx::srect(16, 16, 300, 100), 16);
// return;
no0_.enable();
no0_.at_select_func() = [=](uint32_t id) { cbuff_ += '0'; };
no1_.enable();
no1_.at_select_func() = [=](uint32_t id) { cbuff_ += '1'; };
no2_.enable();
no2_.at_select_func() = [=](uint32_t id) { cbuff_ += '2'; };
no3_.enable();
no3_.at_select_func() = [=](uint32_t id) { cbuff_ += '3'; };
no4_.enable();
no4_.at_select_func() = [=](uint32_t id) { cbuff_ += '4'; };
no5_.enable();
no5_.at_select_func() = [=](uint32_t id) { cbuff_ += '5'; };
no6_.enable();
no6_.at_select_func() = [=](uint32_t id) { cbuff_ += '6'; };
no7_.enable();
no7_.at_select_func() = [=](uint32_t id) { cbuff_ += '7'; };
no8_.enable();
no8_.at_select_func() = [=](uint32_t id) { cbuff_ += '8'; };
no9_.enable();
no9_.at_select_func() = [=](uint32_t id) { cbuff_ += '9'; };
del_.enable();
del_.set_base_color(graphics::def_color::Orange);
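// DEL: remove the last token from cbuff_ and record its rendered pixel
// width in del_len_ so that update_calc_() can erase it from the display.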
del_.at_select_func() = [=](uint32_t id) {
if(cbuff_.empty()) return;
auto code = static_cast<uint8_t>(cbuff_.back());
cbuff_.pop_back();
if(code == '(') {
del_len_ = 8;
nest_--;
if(!cbuff_.empty()) {
auto code = static_cast<uint8_t>(cbuff_.back());
if(code >= 0xc0) {
del_len_ += func_.get_name_size(static_cast<FUNC::NAME>(code));
cbuff_.pop_back();
}
}
} else if(code == ')') {
del_len_ = 8;
nest_++;
} else if(code == '.' || code == '^') {
del_len_ = 8;
} else if(code >= '0' && code <= '9') {
del_len_ = 16;
} else if(code == '+' || code == '-' || code == '*' || code == '/') {
del_len_ = 16;
} else if(code >= 0x80 && code < 0xc0) {
del_len_ = symbol_.get_name_size(static_cast<SYMBOL::NAME>(code));
} else if(code >= 0xc0) {
del_len_ = func_.get_name_size(static_cast<FUNC::NAME>(code));
}
};
ac_.enable();
ac_.set_base_color(graphics::def_color::Orange);
ac_.at_select_func() = [=](uint32_t id) { clear_win_(); };
mul_.enable();
mul_.at_select_func() = [=](uint32_t id) { cbuff_ += '*'; };
div_.enable();
div_.at_select_func() = [=](uint32_t id) { cbuff_ += '/'; };
add_.enable();
add_.at_select_func() = [=](uint32_t id) { cbuff_ += '+'; };
sub_.enable();
sub_.at_select_func() = [=](uint32_t id) { cbuff_ += '-'; };
poi_.enable();
poi_.at_select_func() = [=](uint32_t id) { cbuff_ += '.'; };
left_.enable();
left_.at_select_func() = [=](uint32_t id) {
shift_++;
NVAL ans;
symbol_(SYMBOL::NAME::ANS, ans);
draw_ans_(ans, true);
};
right_.enable();
right_.at_select_func() = [=](uint32_t id) {
shift_--;
NVAL ans;
symbol_(SYMBOL::NAME::ANS, ans);
draw_ans_(ans, true);
};
pin_.enable();
pin_.at_select_func() = [=](uint32_t id) { cbuff_ += '('; nest_++; };
pot_.enable();
pot_.at_select_func() = [=](uint32_t id) { cbuff_ += ')'; nest_--; };
x10_.enable();
x10_.at_select_func() = [=](uint32_t id) {
cbuff_ += '*';
cbuff_ += static_cast<char>(FUNC::NAME::EXP10);
cbuff_ += '(';
nest_++;
};
ans_.enable();
ans_.at_select_func() = [=](uint32_t id) {
NVAL tmp;
symbol_(SYMBOL::NAME::ANS, tmp);
if(tmp != 0) {
cbuff_ += static_cast<char>(SYMBOL::NAME::ANS);
}
};
equ_.enable();
equ_.at_select_func() = [=](uint32_t id) { update_equ_(); };
sin_.enable();
sin_.set_base_color(graphics::def_color::EmeraldGreen);
sin_.at_select_func() = [=](uint32_t id) {
if(fc_mode_) {
cbuff_ += static_cast<char>(FUNC::NAME::ASIN);
} else {
cbuff_ += static_cast<char>(FUNC::NAME::SIN);
}
cbuff_ += '('; nest_++;
};
cos_.enable();
cos_.set_base_color(graphics::def_color::EmeraldGreen);
cos_.at_select_func() = [=](uint32_t id) {
if(fc_mode_) {
cbuff_ += static_cast<char>(FUNC::NAME::ACOS);
} else {
cbuff_ += static_cast<char>(FUNC::NAME::COS);
}
cbuff_ += '('; nest_++;
};
tan_.enable();
tan_.set_base_color(graphics::def_color::EmeraldGreen);
tan_.at_select_func() = [=](uint32_t id) {
if(fc_mode_) {
cbuff_ += static_cast<char>(FUNC::NAME::ATAN);
} else {
cbuff_ += static_cast<char>(FUNC::NAME::TAN);
}
cbuff_ += '('; nest_++;
};
pai_.enable();
pai_.set_base_color(graphics::def_color::LightPink);
pai_.at_select_func() = [=](uint32_t id) {
// if(fc_mode_) {
// cbuff_ += static_cast<char>(SYMBOL::NAME::LOG2);
// } else {
cbuff_ += static_cast<char>(SYMBOL::NAME::PI);
// }
};
sqr_.enable();
sqr_.set_mobj(resource::bitmap::x_2_);
sqr_.at_select_func() = [=](uint32_t id) {
cbuff_ += '^';
cbuff_ += '2';
};
sqrt_.enable();
sqrt_.at_select_func() = [=](uint32_t id) {
cbuff_ += static_cast<char>(FUNC::NAME::SQRT);
cbuff_ += '('; nest_++;
};
pow_.enable();
pow_.set_mobj(resource::bitmap::x_y_);
pow_.at_select_func() = [=](uint32_t id) {
cbuff_ += '^';
};
log_.enable();
log_.at_select_func() = [=](uint32_t id) {
cbuff_ += static_cast<char>(FUNC::NAME::LOG);
cbuff_ += '('; nest_++;
};
ln_.enable();
ln_.at_select_func() = [=](uint32_t id) {
cbuff_ += static_cast<char>(FUNC::NAME::LN);
cbuff_ += '('; nest_++;
};
inv_.enable();
inv_.set_mobj(resource::bitmap::x_m1_);
inv_.at_select_func() = [=](uint32_t id) {
cbuff_ += '^';
cbuff_ += '-';
cbuff_ += '1';
};
fc_.enable(); // function key
fc_.set_base_color(graphics::def_color::SafeColor);
fc_.at_select_func() = [=](uint32_t id) {
fc_mode_ = !fc_mode_;
update_fc_();
};
angt_.enable(); // angle type (Deg/Rad/Grad)
angt_.set_base_color(graphics::def_color::SafeColor);
angt_.at_select_func() = [=](uint32_t id) {
switch(func_.get_atype()) {
case FUNC::ATYPE::Deg:
func_.set_atype(FUNC::ATYPE::Rad);
angt_.set_title("Rad");
break;
case FUNC::ATYPE::Rad:
func_.set_atype(FUNC::ATYPE::Grad);
angt_.set_title("Grad");
break;
case FUNC::ATYPE::Grad:
func_.set_atype(FUNC::ATYPE::Deg);
angt_.set_title("Deg");
break;
}
};
setup_.enable(); // settings
setup_.set_base_color(graphics::def_color::SafeColor);
setup_.at_select_func() = [=](uint32_t id) {
};
sym_.enable(); // change symbol slot
sym_.set_base_color(graphics::def_color::SafeColor);
sym_.at_select_func() = [=](uint32_t id) {
symbol_idx_++;
symbol_idx_ %= 10;
auto name = static_cast<SYMBOL::NAME>(static_cast<uint32_t>(SYMBOL::NAME::V0) + symbol_idx_);
sym_.set_title(symbol_.get_name(name));
};
sym_in_.enable(); // symbol (in)
sym_in_.set_base_color(graphics::def_color::SafeColor);
sym_in_.at_select_func() = [=](uint32_t id) {
NVAL tmp;
symbol_(SYMBOL::NAME::ANS, tmp);
auto name = static_cast<SYMBOL::NAME>(
static_cast<uint32_t>(SYMBOL::NAME::V0) + symbol_idx_);
symbol_.set_value(name, tmp);
};
sym_out_.enable(); // symbol (out)
sym_out_.set_base_color(graphics::def_color::SafeColor);
sym_out_.at_select_func() = [=](uint32_t id) {
cbuff_ += static_cast<char>(
static_cast<uint32_t>(SYMBOL::NAME::V0) + symbol_idx_);
};
clear_win_();
}
//-------------------------------------------------------------//
/*!
@brief  Update
*/
//-------------------------------------------------------------//
void update() noexcept
{
render_.sync_frame();
touch_.update();
widd_.update();
update_calc_();
}
};
}
| 26.497863
| 123
| 0.557576
|
hirakuni45
|
33e51175a328ccd7a1fc4e2653f84464ca0b11e5
| 495
|
cpp
|
C++
|
src/events/MyStopImmCmd.cpp
|
LinuxDroneLab/MyDrone
|
33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e
|
[
"Apache-2.0"
] | 2
|
2021-05-31T09:46:39.000Z
|
2022-02-17T12:33:43.000Z
|
src/events/MyStopImmCmd.cpp
|
LinuxDroneLab/MyLinuxDrone
|
33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e
|
[
"Apache-2.0"
] | 17
|
2018-09-03T05:41:37.000Z
|
2018-11-15T07:48:20.000Z
|
src/events/MyStopImmCmd.cpp
|
LinuxDroneLab/MyLinuxDrone
|
33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e
|
[
"Apache-2.0"
] | null | null | null |
/*
* MyStopImmCmd.cpp
*
* Created on: 17 dic 2015
* Author: andrea
*/
#include <commons/MyPriority.h>
#include <events/MyStopImmCmd.h>
MyStopImmCmd::MyStopImmCmd(boost::uuids::uuid origin, boost::uuids::uuid destination) : MyCmd(origin, destination) {
this->setPriority(MyPriority::STOP_IMMEDIATELY_PRIORITY);
}
MyStopImmCmd::~MyStopImmCmd() {
// TODO Auto-generated destructor stub
}
MyEvent::EventType MyStopImmCmd::getType() const {
return MyEvent::EventType::StopImmCmd;
}
| 22.5
| 116
| 0.735354
|
LinuxDroneLab
|
33e56173897c6d3c0ffb6ae8763041e62eaa6e19
| 4,526
|
cxx
|
C++
|
Core/GraphicsKit/GraphicsHandler.cxx
|
broken-bytes/Cyanite-Engine
|
5733e1fc010b375bbafc9ebfa41e248845f945b3
|
[
"MIT"
] | null | null | null |
Core/GraphicsKit/GraphicsHandler.cxx
|
broken-bytes/Cyanite-Engine
|
5733e1fc010b375bbafc9ebfa41e248845f945b3
|
[
"MIT"
] | null | null | null |
Core/GraphicsKit/GraphicsHandler.cxx
|
broken-bytes/Cyanite-Engine
|
5733e1fc010b375bbafc9ebfa41e248845f945b3
|
[
"MIT"
] | 1
|
2020-12-20T07:35:05.000Z
|
2020-12-20T07:35:05.000Z
|
#include "pch.hxx"
#include "GraphicsHandler.hxx"
#include "TypeConverter.hxx"
#include "../../Libs/ImGUI/imgui.h"
#include "../../Libs/ImGUI/imgui_impl_dx12.h"
#include "../../Libs/ImGUI/imgui_impl_win32.h"
auto open = true;
namespace Cyanite::GraphicsKit {
GraphicsHandler::GraphicsHandler(HWND window) {
SetDebugMode();
_device = std::make_unique<Gpu>(window);
_relay = std::make_unique<EventKit::EventRelay>(
[this](EventKit::IEvent event) {
this->EventHandler(event);
}
);
}
GraphicsHandler::~GraphicsHandler() {}
auto GraphicsHandler::Initialize() -> void {
_list = _device->CreateCommandList(
_device->DirectAlloc(
0,
0
)
);
_list->Close();
IMGUI_CHECKVERSION();
ImGui::CreateContext();
ImGuiIO& io = ImGui::GetIO(); (void)io;
ImGui::StyleColorsLight();
ImGui_ImplWin32_Init(_window);
ImGui_ImplDX12_Init(
_device->Device().get(),
Frames,
DXGI_FORMAT_R8G8B8A8_UNORM,
_device->SrvHeap().get(),
_device->SrvHeap().get()->GetCPUDescriptorHandleForHeapStart(),
_device->SrvHeap().get()->GetGPUDescriptorHandleForHeapStart()
);
}
auto GraphicsHandler::Deinitialize() -> void {
// wait for the gpu to finish all frames
for (int x = 0; x < Frames; ++x)
{
AwaitFrameCompletion();
}
delete _device.release();
_swapChain = nullptr;
_list = nullptr;
_rtvHeap = nullptr;
for (int x = 0; x < Frames; ++x)
{
_renderTargets[x] = nullptr;
};
}
auto GraphicsHandler::Update() -> void {}
auto GraphicsHandler::Render() -> void {
float color[] = { 0,0,0,0 };
UpdatePipeline();
ImGui_ImplDX12_NewFrame();
ImGui_ImplWin32_NewFrame();
ImGui::NewFrame();
ImGui::Begin("Test");
ImGui::Text("Test");
ImGui::End();
ImGui::Render();
ImGui_ImplDX12_RenderDrawData(
ImGui::GetDrawData(),
_list.get()
);
ImGui::EndFrame();
auto imgui = ImGui::GetDrawData();
ImGui_ImplDX12_RenderDrawData(imgui, _list.get());
_device->ExecuteDirect({ _list });
_device->Draw();
}
auto GraphicsHandler::Resize(uint32_t width, uint32_t height) -> void {
ImGui_ImplDX12_InvalidateDeviceObjects();
ImGui_ImplDX12_CreateDeviceObjects();
//_device->Resize(width, height);
}
auto GraphicsHandler::SetDebugMode() -> void {
#if defined(_DEBUG)
// Always enable the debug layer before doing anything DX12 related
// so all possible errors generated while creating DX12 objects
// are caught by the debug layer.
winrt::com_ptr<ID3D12Debug> debugInterface;
winrt::check_hresult(
D3D12GetDebugInterface(
IID_PPV_ARGS(debugInterface.put()
)
)
);
debugInterface->EnableDebugLayer();
#endif
}
auto GraphicsHandler::Flush(winrt::com_ptr<ID3D12CommandQueue> commandQueue, winrt::com_ptr<ID3D12Fence> fence,
uint64_t& fenceValue, HANDLE fenceEvent) -> void {}
auto GraphicsHandler::FrameStart() -> void {}
auto GraphicsHandler::FrameMid() const -> void {}
auto GraphicsHandler::FrameEnd() -> void {}
auto GraphicsHandler::UpdatePipeline() -> void {
_device->Update(_list);
}
auto GraphicsHandler::SetDefaultPipeline(winrt::com_ptr<ID3D12GraphicsCommandList> commands) -> void {}
auto GraphicsHandler::LoadPipeline() -> void {}
auto GraphicsHandler::LoadAssets() -> void {}
auto GraphicsHandler::CreateRootSignature() -> void {}
auto GraphicsHandler::CreatePipelineState() -> void {}
auto GraphicsHandler::CreateRenderTargetViews() -> void {}
auto GraphicsHandler::CreateDepthStencil() -> void {}
auto GraphicsHandler::CreateVertexBuffer() -> void {}
auto GraphicsHandler::CopyToVertexBuffer(winrt::com_ptr<ID3D12GraphicsCommandList> list,
D3D12_SUBRESOURCE_DATA vertexData) -> void {}
auto GraphicsHandler::CreateIndexBuffer() -> void {}
auto GraphicsHandler::CopyToIndexBuffer(winrt::com_ptr<ID3D12GraphicsCommandList> list,
D3D12_SUBRESOURCE_DATA indexData) -> void {}
auto GraphicsHandler::CreateShaderResource() -> void {}
auto GraphicsHandler::UploadTextures(std::vector<Components::Texture> textures) -> void {}
auto GraphicsHandler::PopulateCommandList() -> void {}
auto GraphicsHandler::AwaitFrameCompletion() -> void {
// wait until the GPU has finished with the current frame
_device->Wait();
}
auto GraphicsHandler::Worker(uint8_t id) -> void {}
auto GraphicsHandler::EventHandler(EventKit::IEvent event) -> void {
if (event.Type == EventKit::EventType::WindowResize) {
MathKit::Types::Vector2<uint32_t> dim{ 0,0 };
dim = *static_cast<MathKit::Types::Vector2<uint32_t>*>(event.Data);
Resize(dim.X, dim.Y);
}
}
}
| 30.173333
| 112
| 0.710782
|
broken-bytes
|
33e8ceff1a3e3a2641d9d9bb4cfe401acbc9da34
| 8,738
|
cpp
|
C++
|
src/storage/mutate/DeleteVerticesProcessor.cpp
|
whitewum/nebula-storage
|
3830820e8453a1c8457acb06380f0570a673cbe8
|
[
"Apache-2.0"
] | null | null | null |
src/storage/mutate/DeleteVerticesProcessor.cpp
|
whitewum/nebula-storage
|
3830820e8453a1c8457acb06380f0570a673cbe8
|
[
"Apache-2.0"
] | null | null | null |
src/storage/mutate/DeleteVerticesProcessor.cpp
|
whitewum/nebula-storage
|
3830820e8453a1c8457acb06380f0570a673cbe8
|
[
"Apache-2.0"
] | null | null | null |
/* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/StorageFlags.h"
#include "storage/mutate/DeleteVerticesProcessor.h"
#include "utils/IndexKeyUtils.h"
#include "utils/NebulaKeyUtils.h"
#include "utils/OperationKeyUtils.h"
namespace nebula {
namespace storage {
void DeleteVerticesProcessor::process(const cpp2::DeleteVerticesRequest& req) {
spaceId_ = req.get_space_id();
const auto& partVertices = req.get_parts();
CHECK_NOTNULL(env_->schemaMan_);
auto ret = env_->schemaMan_->getSpaceVidLen(spaceId_);
if (!ret.ok()) {
LOG(ERROR) << ret.status();
for (auto& part : partVertices) {
pushResultCode(cpp2::ErrorCode::E_INVALID_SPACEVIDLEN, part.first);
}
onFinished();
return;
}
spaceVidLen_ = ret.value();
callingNum_ = partVertices.size();
CHECK_NOTNULL(env_->indexMan_);
auto iRet = env_->indexMan_->getTagIndexes(spaceId_);
if (iRet.ok()) {
indexes_ = std::move(iRet).value();
}
CHECK_NOTNULL(env_->kvstore_);
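// Two paths: if the space has no tag indexes, the vertex keys are simply
// collected per part and removed with a batch delete; otherwise every part
// goes through an atomic op (deleteVertices) so that the index entries are
// removed consistently with the vertex data.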
if (indexes_.empty()) {
// Operate on every part; the graph layer guarantees the uniqueness of the vid
std::vector<std::string> keys;
keys.reserve(32);
for (auto& part : partVertices) {
auto partId = part.first;
const auto& vertexIds = part.second;
keys.clear();
for (auto& vid : vertexIds) {
if (!NebulaKeyUtils::isValidVidLen(spaceVidLen_, vid.getStr())) {
LOG(ERROR) << "Space " << spaceId_ << ", vertex length invalid, "
<< " space vid len: " << spaceVidLen_ << ", vid is " << vid;
pushResultCode(cpp2::ErrorCode::E_INVALID_VID, partId);
onFinished();
return;
}
auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid.getStr());
std::unique_ptr<kvstore::KVIterator> iter;
auto retRes = env_->kvstore_->prefix(spaceId_, partId, prefix, &iter);
if (retRes != kvstore::ResultCode::SUCCEEDED) {
VLOG(3) << "Error! ret = " << static_cast<int32_t>(retRes)
<< ", spaceID " << spaceId_;
handleErrorCode(retRes, spaceId_, partId);
onFinished();
return;
}
while (iter->valid()) {
auto key = iter->key();
if (NebulaKeyUtils::isVertex(spaceVidLen_, key)) {
auto tagId = NebulaKeyUtils::getTagId(spaceVidLen_, key);
// Evict vertices from cache
if (FLAGS_enable_vertex_cache && vertexCache_ != nullptr) {
VLOG(3) << "Evict vertex cache for VID " << vid
<< ", TagID " << tagId;
vertexCache_->evict(std::make_pair(vid.getStr(), tagId));
}
keys.emplace_back(key.str());
}
iter->next();
}
}
doRemove(spaceId_, partId, keys);
}
} else {
std::for_each(partVertices.begin(), partVertices.end(), [this](auto &pv) {
auto partId = pv.first;
auto atomic = [partId, v = std::move(pv.second),
this]() -> folly::Optional<std::string> {
return deleteVertices(partId, v);
};
auto callback = [partId, this](kvstore::ResultCode code) {
VLOG(3) << "partId:" << partId << ", code:" << static_cast<int32_t>(code);
handleAsync(spaceId_, partId, code);
};
env_->kvstore_->asyncAtomicOp(spaceId_, partId, atomic, callback);
});
}
}
folly::Optional<std::string>
DeleteVerticesProcessor::deleteVertices(PartitionID partId,
const std::vector<Value>& vertices) {
env_->onFlyingRequest_.fetch_add(1);
std::unique_ptr<kvstore::BatchHolder> batchHolder = std::make_unique<kvstore::BatchHolder>();
for (auto& vertex : vertices) {
auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vertex.getStr());
std::unique_ptr<kvstore::KVIterator> iter;
auto ret = env_->kvstore_->prefix(spaceId_, partId, prefix, &iter);
if (ret != kvstore::ResultCode::SUCCEEDED) {
VLOG(3) << "Error! ret = " << static_cast<int32_t>(ret)
<< ", spaceId " << spaceId_;
return folly::none;
}
TagID latestTagId = -1;
while (iter->valid()) {
auto key = iter->key();
if (!NebulaKeyUtils::isVertex(spaceVidLen_, key)) {
iter->next();
continue;
}
auto tagId = NebulaKeyUtils::getTagId(spaceVidLen_, key);
if (FLAGS_enable_vertex_cache && vertexCache_ != nullptr) {
VLOG(3) << "Evict vertex cache for vertex ID " << vertex << ", tagId " << tagId;
vertexCache_->evict(std::make_pair(vertex.getStr(), tagId));
}
/**
* For example, the prefix scan returns keys like:
* V1_tag1_version3
* V1_tag1_version2
* V1_tag1_version1
* V1_tag2_version3
* V1_tag2_version2
* V1_tag2_version1
* V1_tag3_version3
* V1_tag3_version2
* V1_tag3_version1
* Because an index entry depends only on the latest version of a tag,
* only V1_tag1_version3, V1_tag2_version3 and V1_tag3_version3 are needed.
* latestTagId is used to detect whether the current key holds that latest version.
*/
if (latestTagId != tagId) {
RowReaderWrapper reader;
for (auto& index : indexes_) {
if (index->get_schema_id().get_tag_id() == tagId) {
auto indexId = index->get_index_id();
if (reader == nullptr) {
reader = RowReaderWrapper::getTagPropReader(env_->schemaMan_,
spaceId_,
tagId,
iter->val());
if (reader == nullptr) {
LOG(WARNING) << "Bad format row";
return folly::none;
}
}
const auto& cols = index->get_fields();
auto valuesRet = IndexKeyUtils::collectIndexValues(reader.get(),
cols);
if (!valuesRet.ok()) {
continue;
}
auto indexKey = IndexKeyUtils::vertexIndexKey(spaceVidLen_,
partId,
indexId,
vertex.getStr(),
std::move(valuesRet).value());
// Check whether the index is being rebuilt for the specified partition
auto indexState = env_->getIndexState(spaceId_, partId, indexId);
if (env_->checkRebuilding(indexState)) {
auto deleteOpKey = OperationKeyUtils::deleteOperationKey(partId);
batchHolder->put(std::move(deleteOpKey), std::move(indexKey));
} else if (env_->checkIndexLocked(indexState)) {
LOG(ERROR) << "The index has been locked: " << index->get_index_name();
return folly::none;
} else {
batchHolder->remove(std::move(indexKey));
}
}
}
latestTagId = tagId;
}
batchHolder->remove(key.str());
iter->next();
}
}
return encodeBatchValue(batchHolder->getBatch());
}
} // namespace storage
} // namespace nebula
| 43.69
| 100
| 0.478027
|
whitewum
|
33e8fbd020ba1a3c6a02e5671f081ac71f6d9aeb
| 1,448
|
cpp
|
C++
|
swapnum.cpp
|
W-YXN/MyNOIPProjects
|
0269a8385a6c8d87511236146f374f39dcdd2b82
|
[
"Apache-2.0"
] | null | null | null |
swapnum.cpp
|
W-YXN/MyNOIPProjects
|
0269a8385a6c8d87511236146f374f39dcdd2b82
|
[
"Apache-2.0"
] | null | null | null |
swapnum.cpp
|
W-YXN/MyNOIPProjects
|
0269a8385a6c8d87511236146f374f39dcdd2b82
|
[
"Apache-2.0"
] | 1
|
2019-01-19T01:05:07.000Z
|
2019-01-19T01:05:07.000Z
|
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <algorithm>
using std::cerr;
using std::cin;
using std::cout;
using std::endl;
using std::string;
int da_biao[] = {1, 1, 2, 6, 24, 120};
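// da_biao holds the factorials 0!..5!; hash() uses them to rank a
// permutation of six distinct characters (Cantor expansion / Lehmer code),
// yielding a unique index in [0, 720).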
int hash(string s)
{
int num = 0;
for (int i = 0; i < 6 - 1; i++)
{
int temp = 0;
for (int j = i + 1; j < 6; j++)
{
if (s[j] < s[i])
temp++;
}
num += da_biao[6 - i - 1] * temp;
}
return num;
}
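// Breadth-first search over adjacent-character swaps: returns the minimum
// number of swaps needed to transform src into dest.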
int BFS(string src, string dest)
{
int vis[725] = {0}, step[725] = {0};
int front, rear, s0, s1;
string q[725];
front = rear = 0;
s1 = hash(dest);
q[rear++] = src;
s0 = hash(src);
vis[hash(src)] = 1;
step[s0] = 0;
while (front < rear)
{
string cur = q[front++];
s0 = hash(cur);
if (s0 == s1)
return step[s0];
for (int i = 0; i < 6 - 1; i++)
{
string next = cur;
char tmp = next[i];
next[i] = next[i + 1];
next[i + 1] = tmp;
int ts = hash(next);
if (vis[ts] == 0)
{
vis[ts] = 1;
step[ts] = step[s0] + 1;
q[rear++] = next;
}
}
}
// fallback: not reached when src and dest are permutations of each other
return -1;
}
int main()
{
string src, dest;
while (cin >> src >> dest)
{
cout << BFS(src, dest) << endl;
}
return 0;
}
| 18.329114
| 41
| 0.413674
|
W-YXN
|
33ead74ff815f1fee0453e0b5ce078d7a329d22f
| 8,158
|
cpp
|
C++
|
core/crc32.cpp
|
granitepenguin/wwiv
|
30669b21f45c5f534014a251f55568c86904f87e
|
[
"Apache-2.0"
] | 157
|
2015-07-08T18:29:22.000Z
|
2022-03-10T10:22:58.000Z
|
core/crc32.cpp
|
granitepenguin/wwiv
|
30669b21f45c5f534014a251f55568c86904f87e
|
[
"Apache-2.0"
] | 1,037
|
2015-07-18T03:09:12.000Z
|
2022-03-13T17:39:55.000Z
|
core/crc32.cpp
|
granitepenguin/wwiv
|
30669b21f45c5f534014a251f55568c86904f87e
|
[
"Apache-2.0"
] | 74
|
2015-07-08T19:42:19.000Z
|
2021-12-22T06:15:46.000Z
|
/*
* Crc - 32 BIT ANSI X3.66 CRC checksum files
*/
#include "core/file.h"
#include <memory>
#include <string>
namespace wwiv::core {
/**********************************************************************\
|* *|
|* Demonstration program to compute the 32-bit CRC used as the frame *|
|* check sequence in ADCCP (ANSI X3.66, also known as FIPS PUB 71 *|
|* and FED-STD-1003, the U.S. versions of CCITT's X.25 link-level *|
|* protocol). The 32-bit FCS was added via the Federal Register, *|
|* 1 June 1982, p.23798. I presume but don't know for certain that *|
|* this polynomial is or will be included in CCITT V.41, which *|
|* defines the 16-bit CRC (often called CRC-CCITT) polynomial. FIPS *|
|* PUB 78 says that the 32-bit FCS reduces otherwise undetected *|
|* errors by a factor of 10^-5 over 16-bit FCS. *|
|* *|
\**********************************************************************/
/*
* Copyright (C) 1986 Gary S. Brown. You may use this program, or
* code or tables extracted from it, as desired without restriction.
*/
/* First, the polynomial itself and its table of feedback terms. The */
/* polynomial is */
/* X^32+X^26+X^23+X^22+X^16+X^12+X^11+X^10+X^8+X^7+X^5+X^4+X^2+X^1+X^0 */
/* Note that we take it "backwards" and put the highest-order term in */
/* the lowest-order bit. The X^32 term is "implied"; the LSB is the */
/* X^31 term, etc. The X^0 term (usually shown as "+1") results in */
/* the MSB being 1. */
/* Note that the usual hardware shift register implementation, which */
/* is what we're using (we're merely optimizing it by doing eight-bit */
/* chunks at a time) shifts bits into the lowest-order term. In our */
/* implementation, that means shifting towards the right. Why do we */
/* do it this way? Because the calculated CRC must be transmitted in */
/* order from highest-order term to lowest-order term. UARTs transmit */
/* characters in order from LSB to MSB. By storing the CRC this way, */
/* we hand it to the UART in the order low-byte to high-byte; the UART */
/* sends each low-bit to high-bit; and the result is transmission bit */
/* by bit from highest- to lowest-order term without requiring any bit */
/* shuffling on our part. Reception works similarly. */
/* The feedback terms table consists of 256, 32-bit entries. Notes: */
/* */
/* 1. The table can be generated at runtime if desired; code to do so */
/* is shown later. It might not be obvious, but the feedback */
/* terms simply represent the results of eight shift/xor opera- */
/* tions for all combinations of data and CRC register values. */
/* */
/* 2. The CRC accumulation logic is the same for all CRC polynomials, */
/* be they sixteen or thirty-two bits wide. You simply choose the */
/* appropriate table. Alternatively, because the table can be */
/* generated at runtime, you can start by generating the table for */
/* the polynomial in question and use exactly the same "updcrc", */
/* if your application needn't simultaneously handle two CRC */
/* polynomials. (Note, however, that XMODEM is strange.) */
/* */
/* 3. For 16-bit CRCs, the table entries need be only 16 bits wide; */
/* of course, 32-bit entries work OK if the high 16 bits are zero. */
/* */
/* 4. The values must be right-shifted by eight bits by the "updcrc" */
/* logic; the shift must be unsigned (bring in zeroes). On some */
/* hardware you could probably optimize the shift in assembler by */
/* using byte-swap instructions. */
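// Minimal sketch (ours, not part of the original file): the notes above say
// the feedback table can be generated at runtime; one way to build the same
// crc_32_tab values from the reflected polynomial 0xedb88320 is shown below.
// The helper name is hypothetical and nothing in this file calls it.
inline void crc32_build_table(uint32_t (&table)[256]) {
  for (uint32_t n = 0; n < 256; ++n) {
    uint32_t c = n;
    for (int k = 0; k < 8; ++k) {
      // Shift one bit toward the low-order end; if a 1 fell off, fold in
      // the polynomial (this mirrors the hardware shift register).
      c = (c & 1u) ? (0xedb88320u ^ (c >> 1)) : (c >> 1);
    }
    table[n] = c;
  }
}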
static uint32_t crc_32_tab[] = { /* CRC polynomial 0xedb88320 */
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
#define UPDC32(octet, crc) (crc_32_tab[((crc) ^ (octet)) & 0xff] ^ ((crc) >> 8))
uint32_t crc32file(const std::filesystem::path& path) {
uint32_t crc = 0xFFFFFFFF;
File file(path);
if (!file.Open(File::modeReadOnly | File::modeBinary, File::shareDenyWrite)) {
return false;
}
const auto size = file.length();
const auto buffer = std::make_unique<uint8_t[]>(size);
if (!file.Read(buffer.get(), size)) {
return false;
}
for (auto i = 0; i < size; i++) {
crc = UPDC32(buffer[i], crc);
}
return ~crc;
}
uint32_t crc32string(const std::string& contents) {
uint32_t crc = 0xFFFFFFFF;
for (const auto& c : contents) {
crc = UPDC32(c, crc);
}
return ~crc;
}
}
| 60.880597
| 99
| 0.655185
|
granitepenguin
|
33eeea67b7f9d30147d29e772a2845d08078de67
| 9,985
|
hpp
|
C++
|
boost/config/compiler/visualc.hpp
|
UnPourTous/boost-159-for-rn
|
47e2c37fcbd5e1b25561e5a4fc81bc4f31d2cbf4
|
[
"BSL-1.0"
] | 2
|
2021-08-08T02:06:56.000Z
|
2021-12-20T02:16:44.000Z
|
include/boost/config/compiler/visualc.hpp
|
Acidburn0zzz/PopcornTorrent-1
|
c12a30ef9e971059dae5f7ce24a8c37fef83c0c4
|
[
"MIT"
] | null | null | null |
include/boost/config/compiler/visualc.hpp
|
Acidburn0zzz/PopcornTorrent-1
|
c12a30ef9e971059dae5f7ce24a8c37fef83c0c4
|
[
"MIT"
] | 1
|
2017-04-09T17:04:14.000Z
|
2017-04-09T17:04:14.000Z
|
// (C) Copyright John Maddock 2001 - 2003.
// (C) Copyright Darin Adler 2001 - 2002.
// (C) Copyright Peter Dimov 2001.
// (C) Copyright Aleksey Gurtovoy 2002.
// (C) Copyright David Abrahams 2002 - 2003.
// (C) Copyright Beman Dawes 2002 - 2003.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org for most recent version.
//
// Microsoft Visual C++ compiler setup:
//
// We need to be careful with the checks in this file, as contrary
// to popular belief there are versions with _MSC_VER with the final
// digit non-zero (mainly the MIPS cross compiler).
//
// So we either test _MSC_VER >= XXXX or else _MSC_VER < XXXX.
// No other comparisons (==, >, or <=) are safe.
//
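//
// Illustrative example (ours, not part of the original header): version
// tests in this file follow the half-open pattern above; for instance, a
// block intended only for VC++ 2015 (cl 19.00) and later would be guarded as
//
#if _MSC_VER >= 1900
// ... configuration specific to VC++ 14.0 (2015) or later would go here ...
#endif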
#define BOOST_MSVC _MSC_VER
//
// Helper macro BOOST_MSVC_FULL_VER for use in Boost code:
//
#if _MSC_FULL_VER > 100000000
# define BOOST_MSVC_FULL_VER _MSC_FULL_VER
#else
# define BOOST_MSVC_FULL_VER (_MSC_FULL_VER * 10)
#endif
// Attempt to suppress VC6 warnings about the length of decorated names (obsolete):
#pragma warning( disable : 4503 ) // warning: decorated name length exceeded
#define BOOST_HAS_PRAGMA_ONCE
//
// versions check:
// we don't support Visual C++ prior to version 7.1:
#if _MSC_VER < 1310
# error "Compiler not supported or configured - please reconfigure"
#endif
#if _MSC_FULL_VER < 180020827
# define BOOST_NO_FENV_H
#endif
#if _MSC_VER < 1400
// although a conforming signature for swprint exists in VC7.1
// it appears not to actually work:
# define BOOST_NO_SWPRINTF
// Our extern template tests also fail for this compiler:
# define BOOST_NO_CXX11_EXTERN_TEMPLATE
// Variadic macros do not exist for VC7.1 and lower
# define BOOST_NO_CXX11_VARIADIC_MACROS
#endif
#if _MSC_VER < 1500 // 140X == VC++ 8.0
# define BOOST_NO_MEMBER_TEMPLATE_FRIENDS
#endif
#if _MSC_VER < 1600 // 150X == VC++ 9.0
// A bug in VC9:
# define BOOST_NO_ADL_BARRIER
#endif
#ifndef _NATIVE_WCHAR_T_DEFINED
# define BOOST_NO_INTRINSIC_WCHAR_T
#endif
//
// check for exception handling support:
#if !defined(_CPPUNWIND) && !defined(BOOST_NO_EXCEPTIONS)
# define BOOST_NO_EXCEPTIONS
#endif
//
// __int64 support:
//
#define BOOST_HAS_MS_INT64
#if defined(_MSC_EXTENSIONS) || (_MSC_VER >= 1400)
# define BOOST_HAS_LONG_LONG
#else
# define BOOST_NO_LONG_LONG
#endif
#if (_MSC_VER >= 1400) && !defined(_DEBUG)
# define BOOST_HAS_NRVO
#endif
#if _MSC_VER >= 1600 // 160X == VC++ 10.0
# define BOOST_HAS_PRAGMA_DETECT_MISMATCH
#endif
//
// disable Win32 API's if compiler extensions are
// turned off:
//
#if !defined(_MSC_EXTENSIONS) && !defined(BOOST_DISABLE_WIN32)
# define BOOST_DISABLE_WIN32
#endif
#if !defined(_CPPRTTI) && !defined(BOOST_NO_RTTI)
# define BOOST_NO_RTTI
#endif
//
// TR1 features:
//
#if _MSC_VER >= 1700
// # define BOOST_HAS_TR1_HASH // don't know if this is true yet.
// # define BOOST_HAS_TR1_TYPE_TRAITS // don't know if this is true yet.
# define BOOST_HAS_TR1_UNORDERED_MAP
# define BOOST_HAS_TR1_UNORDERED_SET
#endif
//
// C++0x features
//
// See above for BOOST_NO_LONG_LONG
// C++ features supported by VC++ 10 (aka 2010)
//
#if _MSC_VER < 1600
# define BOOST_NO_CXX11_AUTO_DECLARATIONS
# define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS
# define BOOST_NO_CXX11_LAMBDAS
# define BOOST_NO_CXX11_RVALUE_REFERENCES
# define BOOST_NO_CXX11_STATIC_ASSERT
# define BOOST_NO_CXX11_NULLPTR
# define BOOST_NO_CXX11_DECLTYPE
#endif // _MSC_VER < 1600
#if _MSC_VER >= 1600
# define BOOST_HAS_STDINT_H
#endif
// C++11 features supported by VC++ 11 (aka 2012)
//
#if _MSC_VER < 1700
# define BOOST_NO_CXX11_FINAL
# define BOOST_NO_CXX11_RANGE_BASED_FOR
# define BOOST_NO_CXX11_SCOPED_ENUMS
#endif // _MSC_VER < 1700
// C++11 features supported by VC++ 12 (aka 2013).
//
#if _MSC_FULL_VER < 180020827
# define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
# define BOOST_NO_CXX11_DELETED_FUNCTIONS
# define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
# define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
# define BOOST_NO_CXX11_RAW_LITERALS
# define BOOST_NO_CXX11_TEMPLATE_ALIASES
# define BOOST_NO_CXX11_TRAILING_RESULT_TYPES
# define BOOST_NO_CXX11_VARIADIC_TEMPLATES
# define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
# define BOOST_NO_CXX11_DECLTYPE_N3276
#endif
// C++11 features supported by VC++ 14 (aka 2015)
//
#if (_MSC_FULL_VER < 190023026)
# define BOOST_NO_CXX11_NOEXCEPT
# define BOOST_NO_CXX11_REF_QUALIFIERS
# define BOOST_NO_CXX11_USER_DEFINED_LITERALS
# define BOOST_NO_CXX11_ALIGNAS
# define BOOST_NO_CXX11_INLINE_NAMESPACES
# define BOOST_NO_CXX11_CHAR16_T
# define BOOST_NO_CXX11_CHAR32_T
# define BOOST_NO_CXX11_UNICODE_LITERALS
# define BOOST_NO_CXX14_DECLTYPE_AUTO
# define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
# define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION
# define BOOST_NO_CXX14_BINARY_LITERALS
# define BOOST_NO_CXX14_GENERIC_LAMBDAS
# define BOOST_NO_CXX14_DIGIT_SEPARATORS
#endif
// MSVC including version 14 has not yet completely
// implemented value-initialization, as is reported:
// "VC++ does not value-initialize members of derived classes without
// user-declared constructor", reported in 2009 by Sylvester Hesp:
// https://connect.microsoft.com/VisualStudio/feedback/details/484295
// "Presence of copy constructor breaks member class initialization",
// reported in 2009 by Alex Vakulenko:
// https://connect.microsoft.com/VisualStudio/feedback/details/499606
// "Value-initialization in new-expression", reported in 2005 by
// Pavel Kuznetsov (MetaCommunications Engineering):
// https://connect.microsoft.com/VisualStudio/feedback/details/100744
// Reported again by John Maddock in 2015 for VC14:
// https://connect.microsoft.com/VisualStudio/feedback/details/1582233/c-subobjects-still-not-value-initialized-correctly
// See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues
// (Niels Dekker, LKEB, May 2010)
#define BOOST_NO_COMPLETE_VALUE_INITIALIZATION
// C++11 features not supported by any versions
#define BOOST_NO_SFINAE_EXPR
#define BOOST_NO_TWO_PHASE_NAME_LOOKUP
//
// This is somewhat supported in VC14, but we may need to wait for
// a service release before enabling:
//
#define BOOST_NO_CXX11_CONSTEXPR
// C++ 14:
#if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)
# define BOOST_NO_CXX14_AGGREGATE_NSDMI
#endif
#if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)
# define BOOST_NO_CXX14_CONSTEXPR
#endif
#if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)
# define BOOST_NO_CXX14_VARIABLE_TEMPLATES
#endif
//
// prefix and suffix headers:
//
#ifndef BOOST_ABI_PREFIX
# define BOOST_ABI_PREFIX "boost/config/abi/msvc_prefix.hpp"
#endif
#ifndef BOOST_ABI_SUFFIX
# define BOOST_ABI_SUFFIX "boost/config/abi/msvc_suffix.hpp"
#endif
#ifndef BOOST_COMPILER
// TODO:
// these things are mostly bogus. 1200 means version 12.0 of the compiler. The
// artificial versions assigned to them only refer to the versions of some IDE
// these compilers have been shipped with, and even that is not all of it. Some
// were shipped with freely downloadable SDKs, others as crosscompilers in eVC.
// IOW, you can't use these 'versions' in any sensible way. Sorry.
# if defined(UNDER_CE)
# if _MSC_VER < 1400
// Note: I'm not aware of any CE compiler with version 13xx
# if defined(BOOST_ASSERT_CONFIG)
# error "Unknown EVC++ compiler version - please run the configure tests and report the results"
# else
# pragma message("Unknown EVC++ compiler version - please run the configure tests and report the results")
# endif
# elif _MSC_VER < 1500
# define BOOST_COMPILER_VERSION evc8
# elif _MSC_VER < 1600
# define BOOST_COMPILER_VERSION evc9
# elif _MSC_VER < 1700
# define BOOST_COMPILER_VERSION evc10
# elif _MSC_VER < 1800
# define BOOST_COMPILER_VERSION evc11
# elif _MSC_VER < 1900
# define BOOST_COMPILER_VERSION evc12
# elif _MSC_VER < 2000
# define BOOST_COMPILER_VERSION evc14
# else
# if defined(BOOST_ASSERT_CONFIG)
# error "Unknown EVC++ compiler version - please run the configure tests and report the results"
# else
# pragma message("Unknown EVC++ compiler version - please run the configure tests and report the results")
# endif
# endif
# else
# if _MSC_VER < 1310
// Note: Versions up to 7.0 aren't supported.
# define BOOST_COMPILER_VERSION 5.0
# elif _MSC_VER < 1300
# define BOOST_COMPILER_VERSION 6.0
# elif _MSC_VER < 1310
# define BOOST_COMPILER_VERSION 7.0
# elif _MSC_VER < 1400
# define BOOST_COMPILER_VERSION 7.1
# elif _MSC_VER < 1500
# define BOOST_COMPILER_VERSION 8.0
# elif _MSC_VER < 1600
# define BOOST_COMPILER_VERSION 9.0
# elif _MSC_VER < 1700
# define BOOST_COMPILER_VERSION 10.0
# elif _MSC_VER < 1800
# define BOOST_COMPILER_VERSION 11.0
# elif _MSC_VER < 1900
# define BOOST_COMPILER_VERSION 12.0
# elif _MSC_VER < 2000
# define BOOST_COMPILER_VERSION 14.0
# else
# define BOOST_COMPILER_VERSION _MSC_VER
# endif
# endif
# define BOOST_COMPILER "Microsoft Visual C++ version " BOOST_STRINGIZE(BOOST_COMPILER_VERSION)
#endif
//
// last known and checked version is 19.00.23026 (VC++ 2015 RTM):
#if (_MSC_VER > 1900)
# if defined(BOOST_ASSERT_CONFIG)
# error "Unknown compiler version - please run the configure tests and report the results"
# else
# pragma message("Unknown compiler version - please run the configure tests and report the results")
# endif
#endif
| 33.506711
| 122
| 0.734902
|
UnPourTous
|
33f141e0eaf03e25006ee0a24735eb009857f070
| 5,564
|
cpp
|
C++
|
test/allocator.cpp
|
cburgard/c4core
|
e7b7ece50b456c3ae598b8b932825ae6ea913b84
|
[
"MIT"
] | null | null | null |
test/allocator.cpp
|
cburgard/c4core
|
e7b7ece50b456c3ae598b8b932825ae6ea913b84
|
[
"MIT"
] | null | null | null |
test/allocator.cpp
|
cburgard/c4core
|
e7b7ece50b456c3ae598b8b932825ae6ea913b84
|
[
"MIT"
] | null | null | null |
// This is an open source non-commercial project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include "c4/allocator.hpp"
#include "c4/test.hpp"
#include <vector>
#include <string>
#include <map>
C4_BEGIN_NAMESPACE(c4)
template< class T > using small_adapter = c4::small_allocator< T >;
template< class T > using small_adapter_mr = c4::small_allocator_mr< T >;
#define _c4definealloctypes(Alloc) \
using AllocInt = typename Alloc::template rebind<int>::other;\
using AllocChar = typename Alloc::template rebind<char>::other;\
using _string = std::basic_string< char, std::char_traits<char>, AllocChar >;\
using AllocString = typename Alloc::template rebind<_string>::other;\
using AllocPair = typename Alloc::template rebind<std::pair<const _string,int>>::other;\
using _vector_int = std::vector<int, AllocInt >;\
using _vector_string = std::vector<_string, AllocString >;\
using _map_string_int = std::map<_string, int, std::less<_string>, AllocPair >;
//-----------------------------------------------------------------------------
template< class Alloc >
void test_traits_compat_construct(typename Alloc::value_type const& val, Alloc &a)
{
using atraits = std::allocator_traits< Alloc >;
using value_type = typename Alloc::value_type;
value_type *mem = a.allocate(1);
atraits::construct(a, mem, val);
EXPECT_EQ(*mem, val);
atraits::destroy(a, mem);
a.deallocate(mem, 1);
}
TEST(allocator, traits_compat_construct)
{
allocator<int> a;
test_traits_compat_construct(1, a);
}
TEST(small_allocator, traits_compat_construct)
{
small_allocator<int> a;
test_traits_compat_construct(1, a);
}
TEST(allocator_mr_global, traits_compat_construct)
{
allocator_mr<int> a;
test_traits_compat_construct(1, a);
}
TEST(allocator_mr_linear, traits_compat_construct)
{
MemoryResourceLinear mr(1024);
allocator_mr<int> a(&mr);
test_traits_compat_construct(1, a);
}
TEST(allocator_mr_linear_arr, traits_compat_construct)
{
MemoryResourceLinearArr<1024> mr;
allocator_mr<int> a(&mr);
test_traits_compat_construct(1, a);
}
TEST(small_allocator_mr_global, traits_compat_construct)
{
small_allocator_mr<int> a;
test_traits_compat_construct(1, a);
}
TEST(small_allocator_mr_linear, traits_compat_construct)
{
MemoryResourceLinear mr(1024);
small_allocator_mr<int> a(&mr);
test_traits_compat_construct(1, a);
}
TEST(small_allocator_mr_linear_arr, traits_compat_construct)
{
MemoryResourceLinearArr<1024> mr;
small_allocator_mr<int> a(&mr);
test_traits_compat_construct(1, a);
}
//-----------------------------------------------------------------------------
template< class Alloc >
void clear_mr(Alloc a)
{
auto mrl = dynamic_cast<MemoryResourceLinear*>(a.resource());
if(mrl)
{
mrl->clear();
}
}
template< class Alloc >
void do_std_containers_test(Alloc alloc)
{
_c4definealloctypes(Alloc);
{
_string v(alloc);
v.reserve(256);
v = "adskjhsdfkjdflkjsdfkjhsdfkjh";
EXPECT_EQ(v, "adskjhsdfkjdflkjsdfkjhsdfkjh");
}
clear_mr(alloc);
{
int arr[128];
for(int &i : arr)
{
i = 42;
}
_vector_int vi(arr, arr+C4_COUNTOF(arr), alloc);
for(int i : vi)
{
EXPECT_EQ(i, 42);
}
}
clear_mr(alloc);
{
AllocChar a = alloc;
_vector_string v({"foo", "bar", "baz", "bat", "bax"}, a);
EXPECT_EQ(v.size(), 5);
EXPECT_EQ(v[0], "foo");
EXPECT_EQ(v[1], "bar");
EXPECT_EQ(v[2], "baz");
EXPECT_EQ(v[3], "bat");
EXPECT_EQ(v[4], "bax");
}
clear_mr(alloc);
{
AllocString a = alloc;
_vector_string v(a);
v.resize(4);
int count = 0;
for(auto &s : v)
{
s = _string(64, (char)('0' + count++));
}
}
clear_mr(alloc);
{
AllocPair a = alloc;
_map_string_int v(a);
EXPECT_EQ(v.size(), 0);
v["foo"] = 0;
v["bar"] = 1;
v["baz"] = 2;
v["bat"] = 3;
EXPECT_EQ(v.size(), 4);
EXPECT_EQ(v["foo"], 0);
EXPECT_EQ(v["bar"], 1);
EXPECT_EQ(v["baz"], 2);
EXPECT_EQ(v["bat"], 3);
}
}
TEST(allocator_global, std_containers)
{
allocator<int> a;
do_std_containers_test(a);
}
TEST(small_allocator_global, std_containers)
{
/* this is failing, investigate
small_allocator<int> a;
do_std_containers_test(a);
*/
}
TEST(allocator_mr_global, std_containers)
{
allocator_mr<int> a;
do_std_containers_test(a);
}
TEST(allocator_mr_linear, std_containers)
{
MemoryResourceLinear mr(1024);
allocator_mr<int> a(&mr);
do_std_containers_test(a);
}
TEST(allocator_mr_linear_arr, std_containers)
{
MemoryResourceLinearArr<1024> mr;
allocator_mr<int> a(&mr);
do_std_containers_test(a);
}
TEST(small_allocator_mr_global, std_containers)
{
/* this is failing, investigate
small_allocator_mr<int> a;
do_std_containers_test(a);
*/
}
TEST(small_allocator_mr_linear, std_containers)
{
/* this is failing, investigate
MemoryResourceLinear mr(1024);
small_allocator_mr<int> a(&mr);
do_std_containers_test(a);
*/
}
TEST(small_allocator_mr_linear_arr, std_containers)
{
/* this is failing, investigate
MemoryResourceLinearArr<1024> mr;
small_allocator_mr<int> a(&mr);
do_std_containers_test(a);
*/
}
C4_END_NAMESPACE(c4)
| 23.476793
| 88
| 0.635155
|
cburgard
|
33f16f6fc92b59515c32e00ad47850b1141d8726
| 5,175
|
cpp
|
C++
|
test/test_EKF_flow.cpp
|
huangwen0907/ecl
|
c91c78dcf67dee85225ada6621b6fd19ec008fd5
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_EKF_flow.cpp
|
huangwen0907/ecl
|
c91c78dcf67dee85225ada6621b6fd19ec008fd5
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T09:27:03.000Z
|
2020-04-24T09:27:03.000Z
|
test/test_EKF_flow.cpp
|
worlddeworld/ecl
|
31ae51ff96d7a07477718692fa709823990c7664
|
[
"BSD-3-Clause"
] | null | null | null |
/****************************************************************************
*
* Copyright (c) 2019 ECL Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/**
* Test the optical flow fusion
* @author Kamil Ritz <ka.ritz@hotmail.com>
*/
#include <gtest/gtest.h>
#include "EKF/ekf.h"
#include "sensor_simulator/sensor_simulator.h"
#include "sensor_simulator/ekf_wrapper.h"
#include "test_helper/reset_logging_checker.h"
class EkfFlowTest : public ::testing::Test {
public:
EkfFlowTest(): ::testing::Test(),
_ekf{std::make_shared<Ekf>()},
_sensor_simulator(_ekf),
_ekf_wrapper(_ekf) {};
std::shared_ptr<Ekf> _ekf;
SensorSimulator _sensor_simulator;
EkfWrapper _ekf_wrapper;
// Setup the Ekf with synthetic measurements
void SetUp() override
{
_ekf->init(0);
_sensor_simulator.runSeconds(2);
}
// Use this method to clean up any memory, network etc. after each test
void TearDown() override
{
}
};
TEST_F(EkfFlowTest, resetToFlowVelocityInAir)
{
ResetLoggingChecker reset_logging_checker(_ekf);
// WHEN: simulate being 5m above ground
const float simulated_distance_to_ground = 5.f;
_sensor_simulator._rng.setData(simulated_distance_to_ground, 100);
_sensor_simulator._rng.setLimits(0.1f, 9.f);
_sensor_simulator.startRangeFinder();
_ekf->set_in_air_status(true);
_sensor_simulator.runSeconds(1.5f);
const float estimated_distance_to_ground = _ekf->getTerrainVertPos();
EXPECT_FLOAT_EQ(estimated_distance_to_ground, simulated_distance_to_ground);
reset_logging_checker.capturePreResetState();
// WHEN: start fusing flow data
const Vector2f simulated_horz_velocity(0.5f, -0.2f);
flowSample flow_sample = _sensor_simulator._flow.dataAtRest();
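// Optical flow measures angular rate: over one integration interval dt the
// accumulated flow is flow_xy_rad = (-v_y, v_x) * dt / height_above_ground,
// so the sample below encodes the simulated horizontal velocity at 5 m AGL.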
flow_sample.flow_xy_rad =
Vector2f(- simulated_horz_velocity(1) * flow_sample.dt / estimated_distance_to_ground,
simulated_horz_velocity(0) * flow_sample.dt / estimated_distance_to_ground);
_sensor_simulator._flow.setData(flow_sample);
_ekf_wrapper.enableFlowFusion();
_sensor_simulator.startFlow();
_sensor_simulator.runSeconds(0.2);
// THEN: estimated velocity should match simulated velocity
const Vector2f estimated_horz_velocity = Vector2f(_ekf->getVelocity());
EXPECT_FALSE(isEqual(estimated_horz_velocity, simulated_horz_velocity)); // TODO: This needs to change
// AND: the reset in velocity should be saved correctly
reset_logging_checker.capturePostResetState();
EXPECT_TRUE(reset_logging_checker.isHorizontalVelocityResetCounterIncreasedBy(1));
EXPECT_TRUE(reset_logging_checker.isVerticalVelocityResetCounterIncreasedBy(0));
EXPECT_TRUE(reset_logging_checker.isVelocityDeltaLoggedCorrectly(1e-9f));
}
TEST_F(EkfFlowTest, resetToFlowVelocityOnGround)
{
ResetLoggingChecker reset_logging_checker(_ekf);
// WHEN: being on ground
const float estimated_distance_to_ground = _ekf->getTerrainVertPos();
EXPECT_LT(estimated_distance_to_ground, 0.3f);
reset_logging_checker.capturePreResetState();
// WHEN: start fusing flow data
_ekf_wrapper.enableFlowFusion();
_sensor_simulator.startFlow();
_sensor_simulator.runSeconds(1.0);
// THEN: estimated velocity should match simulated velocity
const Vector2f estimated_horz_velocity = Vector2f(_ekf->getVelocity());
EXPECT_TRUE(isEqual(estimated_horz_velocity, Vector2f(0.f, 0.f)));
// AND: the reset in velocity should be saved correctly
reset_logging_checker.capturePostResetState();
EXPECT_TRUE(reset_logging_checker.isHorizontalVelocityResetCounterIncreasedBy(1));
EXPECT_TRUE(reset_logging_checker.isVerticalVelocityResetCounterIncreasedBy(0));
EXPECT_TRUE(reset_logging_checker.isVelocityDeltaLoggedCorrectly(1e-9f));
}
| 38.333333 | 103 | 0.766957 | huangwen0907 |
33f1d451fc143ef55fe01c5133849398ca573f8a | 1,223 | cpp | C++ |
aws-cpp-sdk-personalize-events/source/model/User.cpp | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | ["Apache-2.0"] | 1 | 2022-02-10T08:06:54.000Z | 2022-02-10T08:06:54.000Z |
aws-cpp-sdk-personalize-events/source/model/User.cpp | perfectrecall/aws-sdk-cpp | fb8cbebf2fd62720b65aeff841ad2950e73d8ebd | ["Apache-2.0"] | 1 | 2021-10-14T16:57:00.000Z | 2021-10-18T10:47:24.000Z |
aws-cpp-sdk-personalize-events/source/model/User.cpp | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | ["Apache-2.0"] | 1 | 2021-11-09T12:02:58.000Z | 2021-11-09T12:02:58.000Z |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/personalize-events/model/User.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace PersonalizeEvents
{
namespace Model
{
User::User() :
m_userIdHasBeenSet(false),
m_propertiesHasBeenSet(false)
{
}
User::User(JsonView jsonValue) :
m_userIdHasBeenSet(false),
m_propertiesHasBeenSet(false)
{
*this = jsonValue;
}
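// Populates the fields from the JSON view, recording which fields were present.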
User& User::operator =(JsonView jsonValue)
{
if(jsonValue.ValueExists("userId"))
{
m_userId = jsonValue.GetString("userId");
m_userIdHasBeenSet = true;
}
if(jsonValue.ValueExists("properties"))
{
m_properties = jsonValue.GetString("properties");
m_propertiesHasBeenSet = true;
}
return *this;
}
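// Serializes only the fields that have been set into a JSON payload.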
JsonValue User::Jsonize() const
{
JsonValue payload;
if(m_userIdHasBeenSet)
{
payload.WithString("userId", m_userId);
}
if(m_propertiesHasBeenSet)
{
payload.WithString("properties", m_properties);
}
return payload;
}
} // namespace Model
} // namespace PersonalizeEvents
} // namespace Aws
| 16.306667 | 69 | 0.703189 | perfectrecall |
33f374dcd7ebb7c3a7ca3d8469a50a8e0b614e6d | 1,582 | hpp | C++ |
sdk/identity/azure-identity/inc/azure/identity/version.hpp | chidozieononiwu/azure-sdk-for-cpp | 7d9032fcc815523231d6ff3e1d96d6212e94b079 | ["MIT"] | null | null | null |
sdk/identity/azure-identity/inc/azure/identity/version.hpp | chidozieononiwu/azure-sdk-for-cpp | 7d9032fcc815523231d6ff3e1d96d6212e94b079 | ["MIT"] | null | null | null |
sdk/identity/azure-identity/inc/azure/identity/version.hpp | chidozieononiwu/azure-sdk-for-cpp | 7d9032fcc815523231d6ff3e1d96d6212e94b079 | ["MIT"] | null | null | null |
// Copyright (c) Microsoft Corporation. All rights reserved.
// SPDX-License-Identifier: MIT
/**
* @file
* @brief Provides version information.
*/
#pragma once
#include "azure/identity/dll_import_export.hpp"
#include <string>
#define AZURE_IDENTITY_VERSION_MAJOR 1
#define AZURE_IDENTITY_VERSION_MINOR 0
#define AZURE_IDENTITY_VERSION_PATCH 0
#define AZURE_IDENTITY_VERSION_PRERELEASE "beta.5"
namespace Azure { namespace Identity {
/**
* @brief Provides version information.
*/
class PackageVersion {
public:
/// Major numeric identifier.
static constexpr int Major = AZURE_IDENTITY_VERSION_MAJOR;
/// Minor numeric identifier.
static constexpr int Minor = AZURE_IDENTITY_VERSION_MINOR;
/// Patch numeric identifier.
static constexpr int Patch = AZURE_IDENTITY_VERSION_PATCH;
/// Optional pre-release identifier. SDK is in a pre-release state when not empty.
AZ_IDENTITY_DLLEXPORT static std::string const PreRelease;
/**
* @brief The version in string format used for telemetry following the `semver.org` standard
* (https://semver.org).
*/
static std::string VersionString();
private:
// To avoid leaking out the #define values we smuggle out the value
// which will later be used to initialize the PreRelease std::string
static constexpr char const* secret = AZURE_IDENTITY_VERSION_PRERELEASE;
};
}} // namespace Azure::Identity
#undef AZURE_IDENTITY_VERSION_MAJOR
#undef AZURE_IDENTITY_VERSION_MINOR
#undef AZURE_IDENTITY_VERSION_PATCH
#undef AZURE_IDENTITY_VERSION_PRERELEASE
| 27.754386 | 97 | 0.75158 | chidozieononiwu |
33f441ae14a72517805f746798f4212571761954 | 87,097 | cc | C++ |
tensorflow/compiler/jit/mark_for_compilation_pass.cc | yennster/tensorflow | 0cc38aaa4064fd9e79101994ce9872c6d91f816b | ["Apache-2.0"] | 11 | 2018-01-03T15:11:09.000Z | 2021-04-13T05:47:27.000Z |
tensorflow/compiler/jit/mark_for_compilation_pass.cc | yennster/tensorflow | 0cc38aaa4064fd9e79101994ce9872c6d91f816b | ["Apache-2.0"] | 2 | 2021-08-25T16:14:40.000Z | 2022-02-10T03:25:19.000Z |
tensorflow/compiler/jit/mark_for_compilation_pass.cc | yennster/tensorflow | 0cc38aaa4064fd9e79101994ce9872c6d91f816b | ["Apache-2.0"] | 10 | 2018-07-31T10:56:21.000Z | 2019-10-07T08:05:21.000Z |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include <atomic>
#include <deque>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/service/graphcycles/graphcycles.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/union_find.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
using DeadnessPredicate = DeadnessAnalysis::DeadnessPredicate;
using jit::DeviceId;
using jit::DeviceSet;
using xla::StatusOr;
// The clusters we create here are eventually lowered into an
// _XlaCompile/_XlaRun pair with a TF executor "fallback" that uses the
// PartitionedCall op to execute the cluster in the regular graph executor if
// need be. PartitionedCall, however, reruns the entire TF graph optimization
// pipeline over the cluster which includes this mark for compilation pass. To
// avoid endlessly recursing we tag nodes that we've already visited with this
// attribute so that we can bail out if we see them a second time.
//
// TODO(sanjoy): This method is not robust since it is possible that the
// optimizations run by PartitionedCall can mutate the cluster arbitrarily,
// dropping the kXlaAlreadyClustered attributes from all nodes in the process.
// The correct fix is to use the ConfigProto to pass in some sort of flag into
// the PartitionedCall kernel that tells it to not rerun auto-clustering on the
// cluster.
const char* kXlaAlreadyClustered = "_XlaAlreadyClustered";
class MarkForCompilationPassImpl {
public:
struct DebugOptions {
// If true, do not respect the results of deadness analysis.
bool ignore_deadness_checks;
// If true, do not do safety checks to preserve TensorFlow's resource
// variable concurrency semantics.
bool ignore_resource_variable_checks;
// If true, do not respect the _XlaCompile=false attribute.
bool ignore_xla_compile_attr;
int max_cluster_size;
int min_cluster_size;
// Compiler fuel for the auto-clustering algorithm.
//
// We decrement this value by one every time we choose a compilation
// candidate, and we stop clustering when it hits zero. This means the
// initial value for this variable (via --tf_xla_clustering_fuel=N)
// effectively acts as a "cap" for how much we cluster and we can bisect
// over this initial value to discover clustering decisions that cause a
// miscompile or a performance regression.
std::atomic<int64>* fuel;
bool dump_graphs;
};
MarkForCompilationPassImpl(DebugOptions debug_options, Graph* graph,
FunctionLibraryDefinition* flib_def, Env* env,
OptimizerOptions::GlobalJitLevel global_jit_level)
: debug_options_(debug_options),
graph_(graph),
flib_def_(flib_def),
env_(env),
global_jit_level_(global_jit_level) {}
Status Run();
private:
// Represents a "cluster" or a connected subgraph of a TensorFlow graph.
class Cluster {
public:
// Constructs a trivial cluster representing a single TF node.
Cluster(int tf_graph_node_id, int effective_cluster_size,
bool has_functional_control_flow, DeviceSet devices,
absl::optional<DeviceId> resource_op_device,
absl::optional<int> resource_var_operation_node_id,
absl::optional<DeadnessPredicate> deadness_predicate,
bool is_xla_compile_attr_true, absl::optional<string> xla_scope)
: cycles_graph_node_id_(tf_graph_node_id),
effective_cluster_size_(effective_cluster_size),
has_functional_control_flow_(has_functional_control_flow),
devices_(std::move(devices)),
resource_op_device_(resource_op_device),
deadness_predicate_(deadness_predicate),
is_xla_compile_attr_true_(is_xla_compile_attr_true),
xla_scope_(std::move(xla_scope)) {
if (resource_var_operation_node_id.has_value()) {
resource_var_operation_node_ids_.push_back(
*resource_var_operation_node_id);
}
}
// Merges `other` into this cluster, and clears `other`. This method is
// closely tied with the implementation of `MarkForCompilationPassImpl`.
void Merge(Cluster* other);
// If this is a trivial cluster containing only one node then return the ID
// of that node. May not be called otherwise.
int GetIdOfOnlyNode() const {
DCHECK_EQ(cluster_size(), 1);
return cycles_graph_node_id();
}
// The number of TF nodes in this cluster.
int cluster_size() const { return cluster_size_; }
// The ID of the cluster as represented in `cycles_graph_`.
int cycles_graph_node_id() const { return cycles_graph_node_id_; }
// Sets the ID of the cluster as represented in `cycles_graph_`.
void set_cycles_graph_node_id(int cycles_graph_node_id) {
cycles_graph_node_id_ = cycles_graph_node_id;
}
// The size of the cluster excluding constant and identity nodes.
int effective_cluster_size() const { return effective_cluster_size_; }
// True if the cluster has functional control flow like `If` and `While`.
bool has_functional_control_flow() const {
return has_functional_control_flow_;
}
// The set of devices nodes in the cluster are placed on.
const DeviceSet& devices() const { return devices_; }
// If the cluster has a resource operation then the device the resource
// operation is placed on. A cluster may have resource ops placed only on a
// single device.
const absl::optional<DeviceId>& resource_op_device() const {
return resource_op_device_;
}
// If not nullopt then this is a predicate that is true iff the cluster is alive.
// Otherwise the user has (unsafely) disabled deadness analysis. If this is
// unset on a single Cluster instance then it is unset on all Cluster
// instances.
const absl::optional<DeadnessPredicate>& deadness_predicate() const {
return deadness_predicate_;
}
// If true then the cluster has a XlaCompile=true attribute on one of its
// nodes.
bool is_xla_compile_attr_true() const { return is_xla_compile_attr_true_; }
// If not nullopt then all nodes in the cluster either do not have the
// XlaScope attribute set or have it set to the value returned.
const absl::optional<string>& xla_scope() const { return xla_scope_; }
// Returns the TF graph node IDs for the resource variable operations in
// this cluster.
absl::Span<const int> resource_var_operation_node_ids() const {
return resource_var_operation_node_ids_;
}
string DebugString(const Graph& graph) const {
Node* node = graph.FindNodeId(cycles_graph_node_id());
if (!node) {
// This should never happen but we try to be resilient because this is a
// debugging aid.
return absl::StrCat("NULL NODE IN #", cycles_graph_node_id());
}
if (cluster_size() == 1) {
return absl::StrCat("<", node->name(), " #", cycles_graph_node_id(),
">");
}
return absl::StrCat("<", node->name(), " + ", cluster_size() - 1,
" others #", cycles_graph_node_id(), ">");
}
private:
int cluster_size_ = 1;
int cycles_graph_node_id_;
int effective_cluster_size_;
bool has_functional_control_flow_;
DeviceSet devices_;
absl::optional<DeviceId> resource_op_device_;
absl::optional<DeadnessPredicate> deadness_predicate_;
bool is_xla_compile_attr_true_;
absl::optional<string> xla_scope_;
std::vector<int> resource_var_operation_node_ids_;
TF_DISALLOW_COPY_AND_ASSIGN(Cluster);
};
// If `cluster` has only a single node then returns that, otherwise returns
// nullptr.
Node* GetOnlyNodeIn(const Cluster& cluster);
// Returns true if `cluster` is a trivial cluster containing a "sink like"
// node -- a NoOp node that only the Sink node control depends on.
bool IsSinkLike(const Cluster& cluster);
// Returns true if `cluster` looks like an "i++" operation on an integer
// scalar resource variable.
bool IsScalarIntegerResourceOperation(const Cluster& cluster);
// ---------------------------------------------------------------------------
// The pass proceeds in four steps, out of which `RunEdgeContractionLoop` and
// `CreateClusters` do most of the heavy lifting.
// Initializes some internal data structures.
//
// If this returns false then Initialize exited early (either because there is
// nothing to do or we saw a graph that we can't handle) and not all the
// fields in this MarkForCompilationPassImpl instance are set up.
StatusOr<bool> Initialize();
// Runs through the entire cluster graph in post-order and calls `fn(from,
// to)` on each edge. `fn(from, to)` is expected to return true if it was
// able to contract `from`->`to`.
//
// Returns true if `fn` returned true for any edge.
template <typename FnTy>
StatusOr<bool> ForEachEdgeInPostOrder(FnTy fn);
// Contracts as many edges as possible to create XLA clusters. After this
// finishes the clustering decisions made are implicitly stored in
// `clusters_`.
Status RunEdgeContractionLoop();
// Manifests the clustering decisions into the TF graph by tagging nodes with
// an `_XlaCluster` attribute. Some basic filtering logic, like
// tf_xla_min_cluster_size, is also applied here.
Status CreateClusters();
Status DumpDebugInfo();
bool IsCompilationCandidate(Node* n) const {
return compilation_candidates_.find(n) != compilation_candidates_.end();
}
// Tries to contract the edge from cluster `from` to cluster `to`. Returns
// true if successful.
StatusOr<bool> TryToContractEdge(Cluster* from, Cluster* to);
// Nodes that XLA can compile are put in `compilation_candidates_`.
Status FindCompilationCandidates();
bool CompilationDisallowedByXlaCompileAttr(Node* node);
// Populates `clusters_`.
Status BuildInitialClusterSet();
StatusOr<bool> ShouldCompileClusterImpl(const Cluster& cluster);
StatusOr<bool> ShouldCompileCluster(const Cluster& cluster);
StatusOr<bool> ClusteringWillIntroduceInterDeviceDependency(
const Cluster& from, const Cluster& to);
// Returns true if the devices in `cluster_a` and `cluster_b` are compatible
// and therefore not a hindrance for combining the two clusters into a larger
// cluster.
StatusOr<bool> AreDevicesCompatible(const Cluster& cluster_a,
const Cluster& cluster_b);
void DumpPostClusteringGraphs();
void VLogClusteringSummary();
Cluster* MakeNewCluster(int cycles_graph_node_id, int effective_cluster_size,
bool has_functional_control_flow,
const DeviceSet& device_set,
absl::optional<DeviceId> resource_op_device,
absl::optional<int> resource_var_operation_node_id,
absl::optional<DeadnessPredicate> deadness_predicate,
bool is_xla_compile_attr_true,
absl::optional<string> xla_scope) {
cluster_storage_.push_back(absl::make_unique<Cluster>(
cycles_graph_node_id, effective_cluster_size,
has_functional_control_flow, device_set, resource_op_device,
resource_var_operation_node_id, deadness_predicate,
is_xla_compile_attr_true, xla_scope));
return cluster_storage_.back().get();
}
absl::optional<string> GetXlaScope(Node* n);
// Returns the cluster for node `n`. If two nodes, N1 and N2, are placed in
// the same cluster by the clustering algorithm then this function will return
// the same Cluster instance for N1 and N2.
//
// Returns nullptr if `n` is not a compilation candidate.
Cluster* GetClusterForNode(Node* n) {
return cluster_for_node_[n->id()].Get();
}
// Returns the cluster for a node in `cycles_graph_`. This uses the same
// underlying map because of how we set things up, but we can do an additional
// CHECK in this accessor.
//
// Returns nullptr if `node_id` is not a compilation candidate.
Cluster* GetClusterForCyclesGraphNode(int node_id) {
// We have to check `graph_->FindNodeId(node) == nullptr` because we add all
// nodes in [0, graph_->num_node_ids()) to the cycle detection graph but the
// TF graph may be missing some node ids.
if (node_id >= graph_->num_node_ids() ||
graph_->FindNodeId(node_id) == nullptr) {
return nullptr;
}
Cluster* cluster = cluster_for_node_[node_id].Get();
if (cluster) {
DCHECK_EQ(cluster->cycles_graph_node_id(), node_id);
}
return cluster;
}
bool LogNotContractableAndReturnFalse(Cluster* from, Cluster* to,
absl::string_view reason);
// Finds a path in `cycles_graph_` from `from` to `to` that is not a direct
// edge from `from` to `to`.
//
// Tries to find a path that contains at least one unclusterable node.
std::vector<int> FindAlternatePathForDebugging(int from, int to);
// Returns a string representing `cycles_graph_node_id`. If the node is
// unclusterable (either it is a phantom "frame" node or is not a compilation
// candidate) then set `*found_unclustered` to true.
string DebugStringForCyclesGraphNode(int node_id, bool* found_unclustered);
// We could not contract the edge from `from` to `to`. Return a string
// describing an alternate path from `from` to `to` (besides the direct edge
// from `from` to `to`) which would have created a cycle had we contracted the
// edge.
//
// Tries (if possible) to find a path that contains at least one unclusterable
// node as it is surprising to the user if we print "A->B could not be
// contracted because of the path [P,Q,R]" where P, Q and R are all clusters
// since in that case a natural question is why we could not form a {A, P, Q,
// R, B} cluster.
string DescribePotentialCycle(int from, int to);
// Merge the clusters `cluster_from` and `cluster_to`. After this step the
// larger combined cluster is represented by `cluster_from`, but can have
// `cycles_graph_`'s ID of either `cluster_from` or `cluster_to` depending on
// which way will require less operations.
bool MergeClusters(Cluster* cluster_from, Cluster* cluster_to) {
int from = cluster_from->cycles_graph_node_id();
int to = cluster_to->cycles_graph_node_id();
auto optional_merged_node = cycles_graph_.ContractEdge(from, to);
if (!optional_merged_node.has_value()) {
VLOG(3) << "Could not contract " << cluster_from->DebugString(*graph_)
<< " -> " << cluster_to->DebugString(*graph_)
<< " because contracting the edge would create a cycle via "
<< DescribePotentialCycle(from, to) << ".";
return false;
}
// Merge the clusters.
cluster_from->Merge(cluster_to);
// Update `cycle_graph_`'s ID.
cluster_from->set_cycles_graph_node_id(optional_merged_node.value());
// Merge the UnionFind<Cluster*>.
cluster_for_node_[from].Merge(&cluster_for_node_[to]);
return true;
}
string EdgeContractionFailureMsg(Cluster* from, Cluster* to,
absl::string_view reason) {
return absl::StrCat("Could not contract ", from->DebugString(*graph_),
" -> ", to->DebugString(*graph_), " because ", reason,
".");
}
DebugOptions debug_options_;
Graph* graph_;
FunctionLibraryDefinition* flib_def_;
Env* env_;
OptimizerOptions::GlobalJitLevel global_jit_level_;
absl::flat_hash_map<const Cluster*, bool> should_compile_cluster_cache_;
jit::DeviceInfoCache device_info_cache_;
bool initialized_ = false;
bool edges_contracted_ = false;
bool clusters_created_ = false;
std::vector<std::unique_ptr<Cluster>> cluster_storage_;
std::vector<UnionFind<Cluster*>> cluster_for_node_;
GraphCycles cycles_graph_;
OrderedNodeSet compilation_candidates_;
std::unique_ptr<DeadnessAnalysis> deadness_analysis_;
int64 iteration_count_ = 0;
absl::flat_hash_set<std::pair<int, int>> unsafe_resource_deps_;
};
std::vector<int> MarkForCompilationPassImpl::FindAlternatePathForDebugging(
int from, int to) {
std::vector<int> rpo = cycles_graph_.AllNodesInPostOrder();
absl::c_reverse(rpo);
// best_pred_for_node[n] contains a predecessor of `n` that has an
// unclusterable node in some path from `from` to itself.
// best_pred_for_node[n] is unpopulated for nodes that are not reachable from
// `from`. We build this table up inductively by traversing the cycles graph
// in RPO.
absl::flat_hash_map<int, int> best_pred_for_node;
best_pred_for_node[from] = -1;
int rpo_index = 0, current_rpo_node;
do {
current_rpo_node = rpo[rpo_index++];
absl::optional<int> some_pred, preferred_pred;
for (int pred : cycles_graph_.Predecessors(current_rpo_node)) {
if (!best_pred_for_node.contains(pred)) {
continue;
}
// Ignore the from->to edge since we're trying to find an alternate path.
if (current_rpo_node == to && pred == from) {
continue;
}
some_pred = pred;
if (GetClusterForCyclesGraphNode(pred) == nullptr) {
preferred_pred = pred;
}
}
if (some_pred || preferred_pred) {
best_pred_for_node[current_rpo_node] =
preferred_pred.has_value() ? *preferred_pred : *some_pred;
}
} while (current_rpo_node != to);
auto get_best_pred = [&](int n) {
auto it = best_pred_for_node.find(n);
CHECK(it != best_pred_for_node.end());
return it->second;
};
std::vector<int> path;
int current_path_node = get_best_pred(to);
while (current_path_node != from) {
path.push_back(current_path_node);
current_path_node = get_best_pred(current_path_node);
}
absl::c_reverse(path);
return path;
}
string MarkForCompilationPassImpl::DebugStringForCyclesGraphNode(
int cycles_graph_node_id, bool* found_unclustered) {
Cluster* cluster = GetClusterForCyclesGraphNode(cycles_graph_node_id);
if (cluster) {
return cluster->DebugString(*graph_);
}
*found_unclustered = true;
if (cycles_graph_node_id >= graph_->num_node_ids()) {
return absl::StrCat("<oob #", cycles_graph_node_id, ">");
}
Node* node = graph_->FindNodeId(cycles_graph_node_id);
if (!node) {
return absl::StrCat("<bad #", cycles_graph_node_id, ">");
}
return node->name();
}
string MarkForCompilationPassImpl::DescribePotentialCycle(int from, int to) {
std::vector<string> path_str;
bool found_unclustered = false;
absl::c_transform(FindAlternatePathForDebugging(from, to),
std::back_inserter(path_str), [&](int node_id) {
return DebugStringForCyclesGraphNode(node_id,
&found_unclustered);
});
return absl::StrCat(!found_unclustered ? "(all clusters) " : "", "[",
absl::StrJoin(path_str, ","), "]");
}
void MarkForCompilationPassImpl::Cluster::Merge(Cluster* other) {
// We keep our own cycles_graph_node_id_ to mirror what GraphCycles does.
// Clearing out data structures in `other` is just a memory saving
// optimization and not needed for correctness.
cluster_size_ += other->cluster_size_;
effective_cluster_size_ += other->effective_cluster_size_;
has_functional_control_flow_ |= other->has_functional_control_flow_;
devices_.UnionWith(other->devices_);
DCHECK(!(resource_op_device_.has_value() &&
other->resource_op_device_.has_value()) ||
*resource_op_device_ == *other->resource_op_device_)
<< "AreDevicesCompatible should have returned false otherwise!";
if (!resource_op_device_.has_value()) {
resource_op_device_ = other->resource_op_device_;
}
is_xla_compile_attr_true_ |= other->is_xla_compile_attr_true_;
if (!xla_scope_.has_value()) {
xla_scope_ = std::move(other->xla_scope_);
}
resource_var_operation_node_ids_.reserve(
resource_var_operation_node_ids_.size() +
other->resource_var_operation_node_ids_.size());
absl::c_copy(other->resource_var_operation_node_ids_,
std::back_inserter(resource_var_operation_node_ids_));
other->resource_var_operation_node_ids_.clear();
}
Status IgnoreResourceOpForSafetyAnalysis(
jit::DeviceInfoCache* device_info_cache, const Node& n, bool* ignore) {
// If a resource operation is assigned to XLA_CPU or XLA_GPU explicitly then
// ignore it during resource operation safety analysis. We need this hack
// because of two reasons:
//
// 1. Operations assigned to XLA_CPU and XLA_GPU have to always be compiled.
// 2. We don't support live-out values of type DT_RESOURCE and live-in values
// of type DT_RESOURCE that are not resource variables.
//
// Together these imply we cannot let resource variable safety analysis
// constrain e.g. a TensorArrayV3->TensorArrayAssignV3 edge to be in different
// clusters: both of them will have to be clustered because of (1) and we
// won't be able to keep the edge between the two as neither the input to the
// second XLA cluster nor the output from the first XLA cluster are supported
// because of (2).
//
// TODO(b/113100872): This can be fixed if the TensorFlow representation for
// TensorArray and Stack on the XLA_{C|G}PU devices were the same in XLA; then
// (2) would no longer hold.
if (n.assigned_device_name().empty()) {
*ignore = false;
return Status::OK();
}
TF_ASSIGN_OR_RETURN(
const XlaOpRegistry::DeviceRegistration* registration,
device_info_cache->GetCompilationDevice(n.assigned_device_name()));
if (!registration) {
*ignore = true;
} else {
*ignore = registration->cluster_resource_variable_ops_unsafely;
}
return Status::OK();
}
StatusOr<bool> MarkForCompilationPassImpl::Initialize() {
TF_RET_CHECK(!initialized_ && !edges_contracted_ && !clusters_created_);
initialized_ = true;
TF_RETURN_IF_ERROR(FindCompilationCandidates());
if (compilation_candidates_.empty()) {
VLOG(2) << "No compilable candidates";
return false;
}
TF_ASSIGN_OR_RETURN(bool cycle_detection_graph_ok,
CreateCycleDetectionGraph(graph_, &cycles_graph_));
if (!cycle_detection_graph_ok) {
// TODO(sanjoy): This should be logged via the XLA activity listener.
VLOG(2) << "Could not form cycle detection graph";
return false;
}
if (!debug_options_.ignore_deadness_checks) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("DeadnessAnalysis", 1);
TF_RETURN_IF_ERROR(DeadnessAnalysis::Run(*graph_, &deadness_analysis_));
}
// Each compilation candidate belongs to a cluster. The cluster's
// representative names the node in the 'cycles' graph that represents the
// cluster.
TF_RETURN_IF_ERROR(BuildInitialClusterSet());
return true;
}
template <typename FnTy>
StatusOr<bool> MarkForCompilationPassImpl::ForEachEdgeInPostOrder(FnTy fn) {
bool changed = false;
for (int32 node : cycles_graph_.AllNodesInPostOrder()) {
Cluster* cluster_from = GetClusterForCyclesGraphNode(node);
if (!cluster_from) {
continue;
}
// Make a copy of the set of successors because we may modify the graph in
// TryToContractEdge.
std::vector<int32> successors_copy =
cycles_graph_.SuccessorsCopy(cluster_from->cycles_graph_node_id());
for (int to : successors_copy) {
iteration_count_++;
Cluster* cluster_to = GetClusterForCyclesGraphNode(to);
if (!cluster_to) {
continue;
}
TF_ASSIGN_OR_RETURN(bool contracted_edge, fn(cluster_from, cluster_to));
changed |= contracted_edge;
}
}
return changed;
}
Node* MarkForCompilationPassImpl::GetOnlyNodeIn(const Cluster& cluster) {
return cluster.cluster_size() == 1
? graph_->FindNodeId(cluster.GetIdOfOnlyNode())
: nullptr;
}
bool MarkForCompilationPassImpl::IsSinkLike(const Cluster& cluster) {
if (Node* n = GetOnlyNodeIn(cluster)) {
return n->type_string() == "NoOp" && n->out_edges().size() == 1 &&
(*n->out_edges().begin())->dst()->IsSink();
}
return false;
}
bool MarkForCompilationPassImpl::IsScalarIntegerResourceOperation(
const Cluster& cluster) {
Node* n = GetOnlyNodeIn(cluster);
if (!n) {
return false;
}
if (n->type_string() != "AssignAddVariableOp" &&
n->type_string() != "AssignSubVariableOp") {
return false;
}
DataType dtype;
if (!TryGetNodeAttr(n->def(), "dtype", &dtype) || !DataTypeIsInteger(dtype)) {
return false;
}
Node* const_input = nullptr;
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsConstant()) {
const_input = e->src();
break;
}
}
if (!const_input) {
return false;
}
const TensorProto* proto = nullptr;
if (!TryGetNodeAttr(const_input->def(), "value", &proto)) {
return false;
}
return TensorShapeUtils::IsScalar(proto->tensor_shape());
}
Status MarkForCompilationPassImpl::RunEdgeContractionLoop() {
TF_RET_CHECK(initialized_ && !edges_contracted_ && !clusters_created_);
edges_contracted_ = true;
// TODO(hpucha): Handle the case where kXlaClusterAttr is already set (for
// example, from the Grappler fusion pass).
// In general there are multiple maximal clusterings, but they are not all
// equally performant. Some clustering decisions are likely to improve
// performance much more than others, yet we cannot order contractions by such
// a cost function, nor can we look at global information while deciding on
// individual edges to contract. Instead, we first make decisions on these
// important edges and then make decisions on all other edges, which gives the
// most important edges the highest chance of being contracted.
//
// An example of where this might occur is with a digraph:
// {A -> B, B -> C, A -> X, X -> C} where B is a Size operation and X is
// not-compilable. In this case, the valid clusterings are {A,B} or {B,C}. B
// should be clustered with A because it will prevent a potentially large
// tensor from A being computed and copied.
//
// To choose better maximal clusterings we make multiple iterations over the
// graph in post-order, where each such iteration is called a "phase".
// Phase 0: contract metadata operations with their producer.
VLOG(4) << "Running phase 0";
TF_RETURN_IF_ERROR(
ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) -> StatusOr<bool> {
// Shape consuming operations are desirable to cluster with their
// operands because they return a small set of scalar values after
// consuming a large amount of data. For example, given a graph X -> Y
// -> Size -> Z, where the possible clustering is [{X, Y, Size}, {Z}] or
// [{X, Y}, {Size, Z}], the better clustering is Size with Y because the
// output of size will be a small tensor while Y is a potentially large
// tensor that must be computed and possible transposed/copied before
// the second cluster executes.
Node* n = GetOnlyNodeIn(*to);
bool is_shape_consumer_op = n && IsShapeConsumerOp(*n);
if (!is_shape_consumer_op) {
return false;
}
return TryToContractEdge(from, to);
}).status());
// Phase 1: apply a heuristic to ensure that we don't mess up clustering due
// to "group_deps". After this phase most edges should have been contracted.
VLOG(4) << "Running phase 1";
TF_RETURN_IF_ERROR(
ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) -> StatusOr<bool> {
// We split out this phase to get good clustering in the presence of a
// specific pattern seen in some graphs:
//
// digraph {
// ApplyWeightUpdates_0 -> "iteration++"
// ApplyWeightUpdates_1 -> "iteration++"
// ApplyWeightUpdates_2 -> "iteration++"
// ApplyWeightUpdates_0 -> Computation_A
// ApplyWeightUpdates_1 -> Computation_B
// ApplyWeightUpdates_2 -> Computation_C
// Computation_A -> NoOp
// Computation_B -> NoOp
// Computation_C -> NoOp
// "iteration++" -> NoOp
// }
//
// In the graph above we can't cluster iteration++ with any of the
// gradient update operations since that will break the TF resource
// variable memory model. Given that constraint the ideal clustering
// would be to put all the gradient updates and all of the Computation_*
// nodes in one cluster, and leave iteration++ and NoOp unclustered.
//
// A naive post-order traversal would not create this good clustering,
// however. Instead it will first create a cluster that puts
// Computation_* nodes, the NoOp and iteration++ node in a single
// cluster, after which it will fail to put any of the
// ApplyWeightUpdates_* nodes into this cluster. To avoid this fate we
// instead run a pass that avoids contracting edges _into_ NoOps like
// the above, and avoid clustering edges _from_ "iteration++" like the
// above. Then we run a second pass that contracts the edges we could
// not contract the first time around.
if (IsSinkLike(*to)) {
return false;
}
if (IsScalarIntegerResourceOperation(*from)) {
return false;
}
return TryToContractEdge(from, to);
}).status());
// Phase 2: contract any remaining edges. After this phase we should have a
// maximal clustering:
//
// A. We visit a cluster only after maximally clustering all its children.
// B. By the time we're done with a node all of its children that could have
// been absorbed into the node have been absorbed.
// C. We have an invariant that making a cluster larger does not make edges
// leaving it more contractable. That is, if we have
// digraph { X->Y; Y->Z; } then collapsing X->Y does not make it possible
// to contract Y->Z if Y->Z was not contractible originally.
VLOG(4) << "Running phase 2";
TF_RETURN_IF_ERROR(ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) {
return TryToContractEdge(from, to);
}).status());
// Check that the conclusion made above (that iterating over the graph once in
// post order gives a maximal clustering) holds. Once the linear time
// post-order scheme has been battle tested we can move this to happen only in
// debug builds.
VLOG(2) << "Checking idempotence";
TF_ASSIGN_OR_RETURN(bool changed,
ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) {
return TryToContractEdge(from, to);
}));
TF_RET_CHECK(!changed);
return Status::OK();
}
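// Monotonically increasing counter used to generate unique cluster names of the
// form "cluster_<N>".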
std::atomic<int64> cluster_sequence_num;
int64 GetNextClusterSequenceNumber() { return cluster_sequence_num++; }
Status MarkForCompilationPassImpl::CreateClusters() {
TF_RET_CHECK(initialized_ && edges_contracted_ && !clusters_created_);
clusters_created_ = true;
// Names for each cluster.
std::unordered_map<int, string> cluster_names;
if (debug_options_.dump_graphs) {
DumpGraphToFile("before_mark_for_compilation", *graph_, flib_def_);
}
// Mark clusters for compilation that:
// * are placed on a device that requires compilation (an XlaDevice),
// * are explicitly marked for compilation (_XlaCompile=true), or
// * have more than debug_options_.xla_min_cluster_size elements (applicable
// only if compilation is enabled, otherwise there will be no such
// candidates).
for (Node* n : compilation_candidates_) {
Cluster* cluster = GetClusterForNode(n);
TF_ASSIGN_OR_RETURN(bool should_compile_cluster,
ShouldCompileCluster(*cluster));
if (!should_compile_cluster) {
continue;
}
// We assume that functional If and While nodes have at least
// min_cluster_size non-trivial nodes in them. It would be more principled
// to (recursively) verify this fact, but that's probably not worth the
// trouble.
if (cluster->effective_cluster_size() >= debug_options_.min_cluster_size ||
cluster->has_functional_control_flow() ||
cluster->is_xla_compile_attr_true()) {
string& name = cluster_names[cluster->cycles_graph_node_id()];
if (name.empty()) {
name = absl::StrCat("cluster_", GetNextClusterSequenceNumber());
}
n->AddAttr(kXlaClusterAttr, name);
n->AddAttr(kXlaAlreadyClustered, true);
VLOG(3) << "Assigning node " << n->name() << " to cluster " << name;
}
}
return Status::OK();
}
Status MarkForCompilationPassImpl::DumpDebugInfo() {
TF_RET_CHECK(initialized_ && edges_contracted_ && clusters_created_);
if (debug_options_.dump_graphs) {
DumpPostClusteringGraphs();
}
VLogClusteringSummary();
return Status::OK();
}
StatusOr<bool>
MarkForCompilationPassImpl::ClusteringWillIntroduceInterDeviceDependency(
const Cluster& cluster_from, const Cluster& cluster_to) {
// If any of the consumer's producers are on a different device, do not
// cluster these nodes. This prevents other work on this device from being
// delayed by work on other devices. We consider predecessors of the entire
// cluster rather than just the inputs to the node to prevent the cluster
// still being combined in cases where the 'to' cluster has multiple
// dependencies on the 'from' cluster and another dependency leads to a
// merging of the clusters.
//
// TODO(b/117085735): We probably want to handle the reciprocal of this case
// where a cluster is producing data for multiple devices.
for (const auto& in_id :
cycles_graph_.Predecessors(cluster_to.cycles_graph_node_id())) {
const Cluster* cluster_in = GetClusterForCyclesGraphNode(in_id);
if (cluster_in) {
TF_ASSIGN_OR_RETURN(bool devices_compatible,
AreDevicesCompatible(cluster_to, *cluster_in));
if (!devices_compatible) {
return true;
}
TF_ASSIGN_OR_RETURN(devices_compatible,
AreDevicesCompatible(cluster_from, *cluster_in));
if (!devices_compatible) {
return true;
}
}
}
return false;
}
absl::optional<string> MarkForCompilationPassImpl::GetXlaScope(Node* node) {
// Look for either _XlaScope or _XlaInternalScope on both nodes to guide
// clustering. If both nodes have a scope and the scopes do not match, do
// not cluster along this edge. If even one of the nodes lacks a scope
// attribute, then it is treated as a "bridge" and a cluster may be created
// along it.
//
// The difference between _XlaScope and _XlaInternalScope is that _XlaScope is
// provided by users through jit_scope APIs, while _XlaInternalScope is
// automatically generated by the ClusterScopingPass when auto_jit is on. As
// such, we respect _XlaScope only when auto_jit is off, while respecting
// _XlaInternalScope only when auto_jit is on.
//
// We may want to restrict the _XlaScope behavior to require all nodes marked
// with _XlaCompile=true to also have a _XlaScope property set (and raise an
// error otherwise); but for now we don't do this.
if (global_jit_level_ != OptimizerOptions::OFF) {
// If global_jit_level_ is ON, respect only _XlaInternalScope.
const string& scope =
GetNodeAttrString(node->attrs(), kXlaInternalScopeAttr);
if (!scope.empty()) {
return scope;
}
} else {
// If global_jit_level_ is OFF, respect only _XlaScope.
const string& scope = GetNodeAttrString(node->attrs(), kXlaScopeAttr);
if (!scope.empty()) {
return scope;
}
}
return absl::nullopt;
}
// Returns true iff the attribute `attr_name` is attached to either the node or
// to its callee.
static bool GetNodeOrFuncAttr(Node* node, FunctionLibraryDefinition* flib_def,
const char* attr_name) {
bool out = false;
bool attr_value;
if (TryGetNodeAttr(node->attrs(), attr_name, &attr_value)) {
out |= attr_value;
}
if (flib_def->GetAttr(*node, attr_name, &attr_value).ok()) {
out |= attr_value;
}
return out;
}
Status MarkForCompilationPassImpl::BuildInitialClusterSet() {
auto ignore_resource_ops = [&](const Node& n, bool* ignore) {
return IgnoreResourceOpForSafetyAnalysis(&device_info_cache_, n, ignore);
};
std::vector<std::pair<int, int>> unsafe_resource_deps_vect;
TF_RETURN_IF_ERROR(ComputeIncompatibleResourceOperationPairs(
*graph_, flib_def_, ignore_resource_ops, &unsafe_resource_deps_vect));
absl::c_copy(
unsafe_resource_deps_vect,
std::inserter(unsafe_resource_deps_, unsafe_resource_deps_.begin()));
cluster_for_node_.resize(graph_->num_node_ids());
for (Node* node : graph_->nodes()) {
if (!IsCompilationCandidate(node)) {
cluster_for_node_[node->id()].Get() = nullptr;
continue;
}
// We want clusters to be big enough that the benefit from XLA's
// optimizations offsets XLA related overhead (for instance we add some
// Switch/Merge nodes into the graph to implement lazy compilation). To
// this end, we don't count Identity and Constant nodes because they do not
// enable interesting optimizations by themselves.
int effective_cluster_size =
(node->IsIdentity() || node->IsConstant()) ? 0 : 1;
bool has_functional_control_flow = node->IsWhileNode() || node->IsIfNode();
absl::optional<DeadnessPredicate> deadness_predicate;
if (deadness_analysis_) {
TF_ASSIGN_OR_RETURN(
deadness_predicate,
deadness_analysis_->GetPredicateFor(node, Graph::kControlSlot));
}
const string& device_name_str = !node->assigned_device_name().empty()
? node->assigned_device_name()
: node->requested_device();
TF_ASSIGN_OR_RETURN(DeviceId device,
device_info_cache_.GetIdFor(device_name_str));
bool is_resource_op = HasResourceInputOrOutput(*node);
absl::optional<DeviceId> resource_op_device;
if (is_resource_op) {
resource_op_device = device;
}
absl::optional<int> resource_var_operation_node_id;
if (is_resource_op || MayCallFunction(*node, flib_def_)) {
resource_var_operation_node_id = node->id();
}
bool is_xla_compile_attr_true =
GetNodeOrFuncAttr(node, flib_def_, kXlaCompileAttr) ||
(global_jit_level_ != OptimizerOptions::OFF &&
GetNodeOrFuncAttr(node, flib_def_, kXlaMustCompileAttr));
DeviceSet devices;
devices.Insert(device);
Cluster* new_cluster = MakeNewCluster(
/*cycles_graph_node_id=*/node->id(),
/*effective_cluster_size=*/effective_cluster_size,
/*has_functional_control_flow=*/has_functional_control_flow, devices,
resource_op_device, resource_var_operation_node_id, deadness_predicate,
/*is_xla_compile_attr_true=*/is_xla_compile_attr_true,
GetXlaScope(node));
cluster_for_node_[node->id()].Get() = new_cluster;
}
return Status::OK();
}
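// Returns true iff `node` is an Identity on the true output of a Switch that is
// driven by a LoopCond and that drives Const nodes through control edges. Such
// Identities are not clustered; see the comment in FindCompilationCandidates().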
StatusOr<bool> IsIdentityDrivingConstsInLoop(Node* node) {
if (!node->IsIdentity()) {
return false;
}
// Check if the Identity is driven by a Switch on its true path.
auto it = absl::c_find_if(node->in_edges(), [](const Edge* e) {
return e->src()->IsSwitch() && e->src_output() == 1;
});
if (it == node->in_edges().end()) {
return false;
}
const Node* switch_node = (*it)->src();
// Check if the Switch is driven by LoopCond.
const Node* maybe_loop_cond;
TF_RETURN_IF_ERROR(switch_node->input_node(1, &maybe_loop_cond));
if (!maybe_loop_cond->IsLoopCond()) {
return false;
}
// Check if the Identity is driving any const nodes through a control edge.
bool driving_any_consts =
absl::c_any_of(node->out_edges(), [](const Edge* e) {
return e->dst()->IsConstant() && e->IsControlEdge();
});
if (!driving_any_consts) {
return false;
}
return true;
}
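// Parses --tf_xla_ops_to_cluster into the set of TF operations that clustering is
// allowed to consider. The special token "FUSIBLE" expands to every op in the
// allowlist table; any other non-empty token is treated as a single op name.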
absl::flat_hash_set<string> GetOrCreateAllowlist() {
absl::flat_hash_map<string, std::vector<string>>* allowlist_table =
tensorflow::GetAllowlistTable();
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
absl::flat_hash_set<string> allowlist;
for (auto s : absl::StrSplit(flags->tf_xla_ops_to_cluster, ',')) {
if (s == "FUSIBLE") {
for (auto pair : *allowlist_table) {
allowlist.insert(pair.second.begin(), pair.second.end());
}
} else if (allowlist_table->contains(s)) {
auto v = allowlist_table->at(s);
allowlist.insert(v.begin(), v.end());
} else if (!s.empty()) {
// Should be a user provided TF operation.
allowlist.insert(string(s));
}
}
if (VLOG_IS_ON(2) && !allowlist.empty()) {
std::vector<string> vallowlist(allowlist.begin(), allowlist.end());
absl::c_sort(vallowlist);
VLOG(2) << "XLA clustering will only consider the following TF operations: "
<< absl::StrJoin(vallowlist, " ");
}
return allowlist;
}
Status MarkForCompilationPassImpl::FindCompilationCandidates() {
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(nullptr, env_, /*config=*/nullptr,
TF_GRAPH_DEF_VERSION, flib_def_, opts));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
std::vector<bool> compile_time_const_nodes(graph_->num_node_ids(), false);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*graph_, /*compile_time_const_arg_indices=*/nullptr,
&compile_time_const_nodes, lib_runtime));
// Iterate over nodes in sorted order so that compiler fuel is deterministic.
// We can't simply pass op_nodes().begin() and op_nodes().end() to the
// std::vector constructor because they're not proper iterators, with
// iterator_traits defined and so on.
std::vector<Node*> sorted_nodes;
for (Node* node : graph_->op_nodes()) {
sorted_nodes.push_back(node);
}
std::sort(sorted_nodes.begin(), sorted_nodes.end(), NodeComparatorID());
if (*debug_options_.fuel >= std::numeric_limits<int64>::max() / 2) {
// The assumption is that if fuel started out as INT64_MAX, it will forever
// stay greater than INT64_MAX / 2.
VLOG(2) << "Starting fuel: infinity";
} else {
VLOG(2) << "Starting fuel: " << *debug_options_.fuel;
}
VLOG(2) << "sorted_nodes.size() = " << sorted_nodes.size();
auto allowlist = GetOrCreateAllowlist();
std::vector<string> vall_ops = XlaOpRegistry::GetAllRegisteredOps();
absl::flat_hash_set<string> all_ops(vall_ops.begin(), vall_ops.end());
// Check that user's provided TF operation really exists.
for (const auto& s : allowlist) {
if (!all_ops.contains(string(s))) {
return errors::InvalidArgument(
"The operation '", s,
"' passed to --tf_xla_ops_to_cluster is not supported by XLA.");
}
}
for (Node* node : sorted_nodes) {
if (*debug_options_.fuel <= 0) {
VLOG(1)
<< "Hit fuel limit; not marking any remaining ops as clusterable.";
break;
}
TF_ASSIGN_OR_RETURN(
const DeviceType& device_type,
device_info_cache_.GetDeviceTypeFor(node->assigned_device_name()));
VLOG(4) << "Device type for " << node->name() << ": "
<< device_type.type_string();
if (CompilationDisallowedByXlaCompileAttr(node)) {
VLOG(2) << "Not clustering " << node->name()
<< ": disallowed by _XlaCompile attribute";
continue;
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(device_type.type(),
&registration)) {
VLOG(2) << "Rejecting " << node->name()
<< ": could not find JIT device for " << device_type.type();
continue;
}
RecursiveCompilabilityChecker::OperationFilter filter =
CreateOperationFilter(*registration);
filter.require_always_compilable = true;
RecursiveCompilabilityChecker checker(
filter, DeviceType{registration->compilation_device_name});
if (!checker.IsCompilableNode(*node, lib_runtime)) {
continue;
}
if (!allowlist.empty() && !allowlist.contains(node->def().op())) {
VLOG(1) << "Rejecting TF operation " << node->def().op()
<< " as it is not listed in --tf_xla_ops_to_cluster.";
continue;
}
if (compile_time_const_nodes[node->id()]) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(
graph_->op_registry()->LookUpOpDef(node->type_string(), &op_def));
if (op_def->is_stateful()) {
// It is easiest to demonstrate the problem we're trying to solve with
// an example. Say we have this graph:
//
// shape = RandomUniformInt();
// reshape = Reshape(input, shape)
//
// Both RandomUniformInt and Reshape are compilable by XLA so, absent
// any other reason, we will try to put both shape and reshape in the
// same cluster. However, since XLA only supports statically shaped
// values, it will expect to be able to constant fold `shape` to get a
// static shape for `reshape`. This is a problem because side-effecting
// ops like RandomUniformInt() cannot be constant folded. We fix this
// by putting `shape` and `reshape` in different clusters, which results
// in us recompiling `reshape`'s cluster for every new value of `shape`,
// making `reshape` statically sized within each compilation. We
// simplify the solution even further by disallowing operations like
// `shape` from being part of *any* non-trivial cluster. They're either
// not compiled by XLA altogether or, if assigned to an XLA_* device
// with "must compile" semantics, compiled into a trivial single-op
// cluster. This approach leaves some room for improvement, and we can
// consider implementing a more aggressive data-flow-analysis based
// solution in the future if needed.
//
// One ugly problem we have to contend with: certain sets of ops *have*
// to be in the same cluster because values flowing between them have
// types that can't be live-in or live-out of a cluster. These ops are:
//
// - TensorArray ops operating on the same TensorArray instance.
// - Stack ops operating on the same Stack instance.
//
// To work around this we avoid isolating these specific ops. Because
// of this concession it is unsound to auto-cluster them because then
// we'd create clusters we could not compile (because we can't constant
// fold, say, a TensorArrayRead or a StackPopV2). But we don't
// auto-cluster these operations today so we're good for now.
const XlaResourceOpInfo* op_info =
GetResourceOpInfoForOp(node->type_string());
bool is_tensor_array_or_stack_op =
op_info && op_info->resource_kind() != XlaResourceKind::kVariable;
if (!is_tensor_array_or_stack_op) {
VLOG(2) << "Isolating " << node->name()
<< ": must-be-constant stateful op";
continue;
}
}
}
// This is a heuristic to avoid creating dependency between while loop
// condition and body computations. Dependency between them can be created
// if a special Identity node in the following pattern is clustered in.
// That is, an Identity node in the loop cond computation is used to drive
// const nodes consumed by the loop body. If this Identity node goes into
// the same cluster with nodes from the loop body, extra dependency is
// created between the loop cond and body computations and it hinders the
// progression of the loop cond computation at runtime with significant
// overhead. Specifically, we look for the below pattern and do not cluster
// in this Identity to avoid the described issue. Since Identity has low
// execution cost in native TF, the fact that this heuristic gives up these
// special Identity nodes as candidates should not harm any performance. If
// other considerations emerge in the future, we can revisit the heuristic
// and only disallow these Identities to go into the cluster with nodes from
// the loop body but still consider them candidates.
//
// LoopCond ->
// Merge -> Switch -> Identity -> i++ -> ... -> NextIteration
// ..> Const -> LoopBody
// (control edge)
TF_ASSIGN_OR_RETURN(bool is_identity_driving_consts_in_loop,
IsIdentityDrivingConstsInLoop(node));
if (is_identity_driving_consts_in_loop) {
VLOG(2) << "Rejecting " << node->name()
<< ": including it can create dependencies between while loop "
"condition and body computations with runtime overhead.";
continue;
}
compilation_candidates_.insert(node);
--(*debug_options_.fuel);
}
VLOG(2) << "compilation_candidates_.size() = "
<< compilation_candidates_.size();
return Status::OK();
}
bool MarkForCompilationPassImpl::CompilationDisallowedByXlaCompileAttr(
Node* node) {
if (debug_options_.ignore_xla_compile_attr) {
return false;
}
// If there is a _XlaCompile annotation, use its value.
bool compile = false;
Status status = GetNodeAttr(node->attrs(), kXlaCompileAttr, &compile);
if (status.ok()) {
if (!compile) {
VLOG(2) << "Rejecting " << node->name() << ": kXlaCompileAttr("
<< kXlaCompileAttr << ") is false.";
}
return !compile;
}
status = flib_def_->GetAttr(*node, kXlaCompileAttr, &compile);
if (status.ok()) {
if (!compile) {
VLOG(2) << "Rejecting " << node->name() << ": kXlaCompileAttr("
<< kXlaCompileAttr << ") on callee is false.";
}
return !compile;
}
return false;
}
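// VLOGs why the edge `from` -> `to` could not be contracted and returns false so
// callers can return the result directly.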
bool MarkForCompilationPassImpl::LogNotContractableAndReturnFalse(
Cluster* from, Cluster* to, absl::string_view reason) {
VLOG(3) << EdgeContractionFailureMsg(from, to, reason);
return false;
}
StatusOr<bool> MarkForCompilationPassImpl::TryToContractEdge(Cluster* from,
Cluster* to) {
DCHECK(from->deadness_predicate().has_value() ==
to->deadness_predicate().has_value());
if (from->deadness_predicate() != to->deadness_predicate()) {
VLOG(3) << EdgeContractionFailureMsg(
from, to,
absl::StrCat(
"the two nodes have mismatching deadness: ",
deadness_analysis_->DebugString(*from->deadness_predicate()),
" and ",
deadness_analysis_->DebugString(*to->deadness_predicate())));
return false;
}
TF_ASSIGN_OR_RETURN(bool devices_compatible,
AreDevicesCompatible(*from, *to));
if (!devices_compatible) {
return LogNotContractableAndReturnFalse(
from, to, "the two nodes have incompatible devices");
}
if (from->xla_scope().has_value() && to->xla_scope().has_value() &&
*from->xla_scope() != *to->xla_scope()) {
return LogNotContractableAndReturnFalse(
from, to, "the two nodes have mismatching XLA scopes");
}
// Don't exceed the maximum cluster size.
if (from->cluster_size() + to->cluster_size() >
debug_options_.max_cluster_size) {
return LogNotContractableAndReturnFalse(
from, to, "the new cluster will be larger than the max cluster size");
}
TF_ASSIGN_OR_RETURN(bool will_introduce_cross_device_dependency,
ClusteringWillIntroduceInterDeviceDependency(*from, *to));
if (will_introduce_cross_device_dependency) {
return LogNotContractableAndReturnFalse(
from, to, "the new cluster will introduce a cross device dependency");
}
// Check if contracting this edge will break the resource variable concurrency
// semantics. In theory this is quadratic in the number of nodes, but seems
// to not be a problem in practice so far.
if (!debug_options_.ignore_resource_variable_checks) {
for (int resource_var_from : from->resource_var_operation_node_ids()) {
for (int resource_var_to : to->resource_var_operation_node_ids()) {
// If unsafe_resource_deps_ contains {A, B} then
//
// a. A and B are resource operations.
// b. A and B cannot be placed in the same cluster.
// c. There is no path from B to A in the cycles graph (but there may
// be a path from A to B).
//
// So check the legality of the edge contraction by checking if any of
// the n^2 pairs of resource variable operations are forbidden.
if (unsafe_resource_deps_.contains(
{resource_var_from, resource_var_to})) {
return LogNotContractableAndReturnFalse(
from, to,
"the new cluster would break resource variable semantics");
}
}
}
}
return MergeClusters(from, to);
}
Status MarkForCompilationPassImpl::Run() {
// Make sure that kernels have been registered on the JIT device.
XlaOpRegistry::RegisterCompilationKernels();
// Start the timer after XlaOpRegistry::RegisterCompilationKernels which does
// some one-time work.
XLA_SCOPED_LOGGING_TIMER_LEVEL("MarkForCompilationPassImpl::Run", 1);
TF_ASSIGN_OR_RETURN(bool initialized, Initialize());
if (!initialized) {
// Initialization exited early which means this instance of
// MarkForCompilationPassImpl is not set up to run the subsequent phases.
return Status::OK();
}
TF_RETURN_IF_ERROR(RunEdgeContractionLoop());
TF_RETURN_IF_ERROR(CreateClusters());
TF_RETURN_IF_ERROR(DumpDebugInfo());
return Status::OK();
}
void MarkForCompilationPassImpl::DumpPostClusteringGraphs() {
DumpGraphToFile("mark_for_compilation", *graph_, flib_def_);
// We also dump out an annotated version of the TF graph where the nodes
// names are prefixed with the cluster names. This can help visualizing the
// clustering decisions on TensorBoard.
Graph new_graph(graph_->op_registry());
CopyGraph(*graph_, &new_graph);
for (Node* n : new_graph.nodes()) {
if (absl::optional<absl::string_view> cluster_name =
GetXlaClusterForNode(*n)) {
n->set_name(absl::StrCat(*cluster_name, "/", n->name()));
} else if (n->type_string() == "VarHandleOp") {
n->set_name(absl::StrCat("varhandle/", n->name()));
} else {
// There is room for improvement here. In particular, it may help to
// split these unclustered nodes into classes where every node in a
// specific class has edges to and from the same set of clusters.
n->set_name(absl::StrCat("unclustered/", n->name()));
}
}
DumpGraphToFile("mark_for_compilation_annotated", new_graph, flib_def_);
}
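// Formats a ratio together with its percentage, e.g. RatioToString(3, 12)
// returns "3 / 12 (25.00%)".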
string RatioToString(int numerator, int denominator) {
return absl::StrFormat("%d / %d (%.2f%%)", numerator, denominator,
(100.0 * numerator) / denominator);
}
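// Logs a summary of the clustering decisions (cluster sizes, per-op histograms
// and inter-cluster edges) at VLOG levels 2-4.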
void MarkForCompilationPassImpl::VLogClusteringSummary() {
if (!VLOG_IS_ON(2)) {
return;
}
XlaAutoClusteringSummary auto_clustering_info =
GetXlaAutoClusteringSummary(*graph_);
VLOG(2) << "*** Clustering info for graph of size " << graph_->num_nodes();
VLOG(2) << " Built " << auto_clustering_info.clusters_size()
<< " clusters, size "
<< RatioToString(auto_clustering_info.clustered_node_count(),
graph_->num_nodes());
for (const XlaAutoClusteringSummary::Cluster& cluster :
auto_clustering_info.clusters()) {
absl::string_view cluster_name = cluster.name();
int size = cluster.size();
VLOG(2) << " " << cluster_name << " "
<< RatioToString(size, graph_->num_nodes());
for (const XlaAutoClusteringSummary::OpAndCount& op_count :
cluster.op_histogram()) {
VLOG(3) << " " << op_count.op() << ": " << op_count.count()
<< " instances";
}
}
if (!auto_clustering_info.unclustered_op_histogram().empty()) {
VLOG(2) << " Unclustered nodes: "
<< RatioToString(auto_clustering_info.unclustered_node_count(),
graph_->num_nodes());
for (const XlaAutoClusteringSummary::OpAndCount& op_count :
auto_clustering_info.unclustered_op_histogram()) {
VLOG(3) << " " << op_count.op() << ": " << op_count.count()
<< " instances";
}
}
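// Helper for grouping and printing inter-cluster edges by (node name, cluster name).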
struct EdgeInfo {
absl::string_view node_name;
absl::optional<absl::string_view> cluster_name;
absl::string_view GetClusterName() const {
return cluster_name ? *cluster_name : "[none]";
}
std::pair<absl::string_view, absl::optional<absl::string_view>> AsPair()
const {
return {node_name, cluster_name};
}
bool operator<(const EdgeInfo& other) const {
return AsPair() < other.AsPair();
}
};
using EdgeInfoMap = std::map<absl::string_view, std::map<EdgeInfo, int64>>;
EdgeInfoMap incoming_edge_infos;
EdgeInfoMap outgoing_edge_infos;
std::set<absl::string_view> cluster_names_to_print;
for (const Edge* e : graph_->edges()) {
const Node* from = e->src();
absl::optional<absl::string_view> from_cluster_name =
GetXlaClusterForNode(*from);
const Node* to = e->dst();
absl::optional<absl::string_view> to_cluster_name =
GetXlaClusterForNode(*to);
if (to_cluster_name == from_cluster_name) {
continue;
}
if (to_cluster_name) {
incoming_edge_infos[*to_cluster_name]
[EdgeInfo{from->name(), from_cluster_name}]++;
cluster_names_to_print.insert(*to_cluster_name);
}
if (from_cluster_name) {
outgoing_edge_infos[*from_cluster_name][{to->name(), to_cluster_name}]++;
cluster_names_to_print.insert(*from_cluster_name);
}
}
VLOG(4) << "*** Inter-Cluster edges:";
if (cluster_names_to_print.empty()) {
VLOG(4) << " [none]";
}
auto print_edge_info_set_for_cluster = [&](absl::string_view cluster_name,
const EdgeInfoMap& edge_info_map,
absl::string_view desc) {
auto it = edge_info_map.find(cluster_name);
if (it != edge_info_map.end()) {
VLOG(4) << " " << it->second.size() << " " << desc << " edges";
for (const auto& edge_info_count_pair : it->second) {
VLOG(4) << " " << edge_info_count_pair.first.GetClusterName() << " "
<< edge_info_count_pair.first.node_name << " # "
<< edge_info_count_pair.second;
}
} else {
VLOG(4) << " No " << desc << " edges.";
}
};
for (absl::string_view cluster_name : cluster_names_to_print) {
VLOG(4) << " ** Cluster " << cluster_name;
print_edge_info_set_for_cluster(cluster_name, incoming_edge_infos,
"incoming");
print_edge_info_set_for_cluster(cluster_name, outgoing_edge_infos,
"outgoing");
}
}
StatusOr<bool> MarkForCompilationPassImpl::AreDevicesCompatible(
const Cluster& cluster_a, const Cluster& cluster_b) {
DeviceSet devices = cluster_a.devices();
devices.UnionWith(cluster_b.devices());
TF_ASSIGN_OR_RETURN(
absl::optional<jit::DeviceId> maybe_chosen_device,
MaybePickDeviceForXla(device_info_cache_, devices,
/*allow_mixing_unknown_and_cpu=*/false));
if (!maybe_chosen_device.has_value()) {
return false;
}
jit::DeviceId chosen_device = *maybe_chosen_device;
  // If we are able to pick a device `chosen_device` for the larger cluster, the
  // resource operations in `cluster_a` and `cluster_b` must be placed on the
  // same device as `chosen_device`. This is because the _XlaCompile and
  // _XlaRun kernels are going to run on `chosen_device` and will therefore try
  // to access the resource variables from it, which would be an error if the
  // resource variables were placed on some other device.
auto resource_op_device_ok =
[&](absl::optional<DeviceId> resource_op_device) {
return !resource_op_device.has_value() ||
*resource_op_device == chosen_device;
};
return resource_op_device_ok(cluster_a.resource_op_device()) &&
resource_op_device_ok(cluster_b.resource_op_device());
}
// Returns `true` iff we should compile `cluster`.
StatusOr<bool> MarkForCompilationPassImpl::ShouldCompileClusterImpl(
const Cluster& cluster) {
TF_ASSIGN_OR_RETURN(DeviceId chosen_device,
PickDeviceForXla(device_info_cache_, cluster.devices(),
/*allow_mixing_unknown_and_cpu=*/false));
const DeviceType& device_type =
device_info_cache_.GetDeviceTypeFor(chosen_device);
const XlaOpRegistry::DeviceRegistration* registration =
device_info_cache_.GetCompilationDevice(chosen_device);
TF_RET_CHECK(registration)
<< "chosen device = " << device_info_cache_.GetNameFor(chosen_device)
<< "; device type = " << device_type.type() << "; devices ("
<< device_info_cache_.DebugString(cluster.devices());
bool should_compile =
cluster.is_xla_compile_attr_true() ||
registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kAlways ||
(registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally &&
global_jit_level_ != OptimizerOptions::OFF);
if (!should_compile && global_jit_level_ != OptimizerOptions::OFF &&
device_type.type_string() == DEVICE_CPU) {
static absl::once_flag once;
absl::call_once(once, [] {
LOG(WARNING)
<< "(One-time warning): Not using XLA:CPU for cluster because envvar "
"TF_XLA_FLAGS=--tf_xla_cpu_global_jit was not set. If you want "
"XLA:CPU, either set that envvar, or use experimental_jit_scope "
"to enable XLA:CPU. To confirm that XLA is active, pass "
"--vmodule=xla_compilation_cache=1 (as a proper command-line "
"flag, not via TF_XLA_FLAGS) or set the envvar "
"XLA_FLAGS=--xla_hlo_profile.";
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
if (flags->tf_xla_cpu_global_jit) {
LOG(WARNING)
<< "(Although the tf_xla_cpu_global_jit flag is currently enabled, "
"perhaps it wasn't enabled at process startup?)";
}
});
}
VLOG(3) << (should_compile ? "Compiling" : "Not compiling")
<< " cluster with device "
<< device_info_cache_.GetNameFor(chosen_device);
return should_compile;
}
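// Memoized wrapper around ShouldCompileClusterImpl: each cluster's decision is computed
// once and then served from should_compile_cluster_cache_.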
StatusOr<bool> MarkForCompilationPassImpl::ShouldCompileCluster(
const Cluster& cluster) {
auto it = should_compile_cluster_cache_.find(&cluster);
if (it != should_compile_cluster_cache_.end()) {
return it->second;
}
TF_ASSIGN_OR_RETURN(bool should_compile, ShouldCompileClusterImpl(cluster));
should_compile_cluster_cache_.insert({&cluster, should_compile});
return should_compile;
}
Status MarkForCompilation(
const GraphOptimizationPassOptions& options,
const MarkForCompilationPassImpl::DebugOptions& debug_options) {
Graph* graph = options.graph->get();
FunctionLibraryDefinition* flib_def = options.flib_def;
// Deadness analysis expects a graph with source and sink edges properly
// connected but sometimes the incoming graph does not follow this invariant.
// So fix up the source and sink edges before calling into deadness analysis.
FixupSourceAndSinkEdges(graph);
// See explanation on `kXlaAlreadyClustered`.
for (Node* n : graph->nodes()) {
if (n->attrs().Find(kXlaAlreadyClustered)) {
return Status::OK();
}
}
return MarkForCompilationPassImpl{debug_options, graph, flib_def,
options.session_options != nullptr
? options.session_options->env
: Env::Default(),
GetGlobalJitLevelForGraph(options)}
.Run();
}
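// Returns a pointer to a process-wide "fuel" counter, initialized exactly once from the first
// caller's initial_value (see the tf_xla_clustering_fuel flag), that bounds how many clustering
// decisions the pass may make before it stops forming clusters.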
std::atomic<int64>* GetPointerToFuel(int64 initial_value) {
static std::atomic<int64>* fuel = [&]() {
std::atomic<int64>* fuel = new std::atomic<int64>;
*fuel = initial_value;
return fuel;
}();
return fuel;
}
} // anonymous namespace
Status MarkForCompilationPass::Run(
const GraphOptimizationPassOptions& options) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
MarkForCompilationPassImpl::DebugOptions debug_options;
debug_options.ignore_deadness_checks =
flags->tf_xla_disable_deadness_safety_checks_for_debugging;
debug_options.ignore_resource_variable_checks =
flags->tf_xla_disable_resource_variable_safety_checks_for_debugging;
debug_options.ignore_xla_compile_attr = false;
debug_options.max_cluster_size = flags->tf_xla_max_cluster_size;
debug_options.min_cluster_size = flags->tf_xla_min_cluster_size;
debug_options.fuel = GetPointerToFuel(flags->tf_xla_clustering_fuel);
debug_options.dump_graphs = flags->tf_xla_clustering_debug;
return MarkForCompilation(options, debug_options);
}
Status MarkForCompilationPass::RunForTest(
const GraphOptimizationPassOptions& options,
bool disable_deadness_analysis) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
MarkForCompilationPassImpl::DebugOptions debug_options;
debug_options.ignore_deadness_checks = disable_deadness_analysis;
debug_options.ignore_resource_variable_checks =
flags->tf_xla_disable_resource_variable_safety_checks_for_debugging;
debug_options.ignore_xla_compile_attr = true;
debug_options.max_cluster_size = flags->tf_xla_max_cluster_size;
debug_options.min_cluster_size = flags->tf_xla_min_cluster_size;
debug_options.fuel = GetPointerToFuel(flags->tf_xla_clustering_fuel);
debug_options.dump_graphs = flags->tf_xla_clustering_debug;
return MarkForCompilation(options, debug_options);
}
absl::flat_hash_map<string, std::vector<string>>* GetAllowlistTable() {
// Table format: category name: {list of TF operations in that category}
static absl::flat_hash_map<string, std::vector<string>>* result =
new absl::flat_hash_map<string, std::vector<string>>{
// Unary
{"PW",
{"ComplexAbs", "Angle", "Conj", "Abs", "Acos", "Acosh", "Asin",
"Atan", "Atanh", "Ceil", "Cos", "Cosh", "Sin", "Exp", "Expm1",
"Floor", "IsFinite", "IsInf", "IsNan", "Inv", "Reciprocal", "Log",
"Log1p", "Invert", "LogicalNot", "Ndtri", "Neg", "Rint", "Round",
"Rsqrt", "Sigmoid", "Sign", "Sinh", "Softplus", "Softsign", "Sqrt",
"Square", "Tan", "Tanh", "Real", "Imag", "Erf", "Erfc", "Erfinv",
"Lgamma", "Digamma",
// Binary
"Add", "AddV2", "Sub", "Mul", "Div", "Atan2", "Complex", "DivNoNan",
"MulNoNan", "FloorDiv", "Xlogy", "Xlog1py", "Xdivy", "FloorMod",
"BitwiseAnd", "BitwiseOr", "BitwiseXor", "LeftShift", "RightShift",
"LogicalAnd", "LogicalOr", "Mod", "Maximum", "Minimum", "RealDiv",
"ReciprocalGrad", "RsqrtGrad", "SqrtGrad", "TruncateDiv",
"TruncateMod", "Equal", "NotEqual", "Greater", "GreaterEqual",
"Less", "LessEqual", "SigmoidGrad", "SoftplusGrad", "SoftsignGrad",
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual",
// Others
"AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty",
"Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad",
"LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select",
"SelectV2", "Transpose", "ConjugateTranspose",
"_UnaryOpsComposition",
// The following 4 operations are converted to identity
"PlaceholderWithDefault", "PreventGradient", "StopGradient",
"Snapshot"}},
// clang-format off
{"RED",
{"All", "Any", "Min", "Max", "Mean", "Prod", "Sum"}},
// clang-format on
{"PWRED",
{"ArgMax", "ArgMin", "DiagPart", "Softmax",
"SparseSoftmaxCrossEntropyWithLogits", "LogSoftmax"}},
{"REDUCEWINDOW",
{"ArgMax", "ArgMin", "DiagPart", "Softmax",
"SparseSoftmaxCrossEntropyWithLogits", "LogSoftmax"}},
{"REDUCEWINDOWPW", {"BiasAddGrad", "LRN", "LRNGrad"}},
{"BN",
{"FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3",
"_FusedBatchNormEx", "FusedBatchNormGrad", "FusedBatchNormGradV2",
"FusedBatchNormGradV3"}},
{"SORT", {"TopKV2"}}, // XLA version much faster then TF version.
{"MISC",
// clang-format off
{"BroadcastTo", "ExpandDims", "Fill", "NoOp",
"Range", "Rank", "Reshape", "Shape", "ShapeN", "Size", "Squeeze",
"Transpose", "ZerosLike", "OnesLike", "BiasAdd" /*PW + Broadcast*/,
"BroadcastArgs", "BroadcastGradientArgs", "OneHot", "Concat", "ConcatV2",
"ConcatOffset", "Const", "MirrorPad", "MirrorPadGrad", "Pack", "Pad",
"PadV2", "Reverse", "ReverseV2", "ReverseSequence", "Slice", "Split",
"SplitV", "StridedSlice", "StridedSliceGrad",
"ResourceStridedSliceAssign", "Tile", "Transpose", "InvertPermutation",
"Unpack", "DeviceIndex", "TensorStridedSliceUpdate",
}}};
// clang-format on
return result;
}
namespace testing {
void ResetClusterSequenceNumber() { cluster_sequence_num = 0; }
absl::flat_hash_set<string> GetKnownXLAAllowlistOp() {
absl::flat_hash_set<string> result{"AdjustContrastv2",
"AdjustHue",
"AdjustSaturation",
"Asinh",
"Assert",
"AssignAddVariableOp",
"AssignSubVariableOp",
"AssignVariableOp",
"AvgPool",
"AvgPool3D",
"AvgPool3DGrad",
"AvgPoolGrad",
"BatchMatMul",
"BatchMatMulV2",
"BatchToSpace",
"BatchToSpaceND",
"BesselI0e",
"BesselI1e",
"Betainc",
"BiasAddV1",
"Bucketize",
"Case",
"CheckNumerics",
"Cholesky",
"ControlTrigger",
"Conv2D",
"Conv2DBackpropFilter",
"Conv2DBackpropInput",
"Conv3D",
"Conv3DBackpropFilterV2",
"Conv3DBackpropInputV2",
"Cross",
"Cumprod",
"Cumsum",
"DataFormatDimMap",
"DataFormatVecPermute",
"DepthToSpace",
"DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter",
"DepthwiseConv2dNativeBackpropInput",
"Dequantize",
"Diag",
"DynamicStitch",
"Einsum",
"EmptyTensorList",
"EnsureShape",
"ExtractImagePatches",
"Igamma",
"IgammaGradA",
"RandomGammaGrad",
"Igammac",
"FFT",
"FFT2D",
"FFT3D",
"FakeParam",
"FakeQuantWithMinMaxArgs",
"FakeQuantWithMinMaxArgsGradient",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxVarsGradient",
"Gather",
"GatherNd",
"GatherV2",
"HSVToRGB",
"IFFT",
"IFFT2D",
"IFFT3D",
"IRFFT",
"IRFFT2D",
"IRFFT3D",
"If",
"InTopKV2",
"L2Loss",
"LeakyRelu",
"LinSpace",
"ListDiff",
"LogMatrixDeterminant",
"LowerBound",
"MatMul",
"MatrixBandPart",
"MatrixDiag",
"MatrixDiagPart",
"MatrixDiagPartV2",
"MatrixDiagPartV3",
"MatrixDiagV2",
"MatrixDiagV3",
"MatrixInverse",
"MatrixSetDiag",
"MatrixSetDiagV2",
"MatrixSetDiagV3",
"MatrixSolve",
"MatrixTriangularSolve",
"MaxPool",
"MaxPool3D",
"MaxPool3DGrad",
"MaxPool3DGradGrad",
"MaxPoolGrad",
"MaxPoolGradGrad",
"MaxPoolGradGradV2",
"MaxPoolGradV2",
"MaxPoolV2",
"Multinomial",
"NextAfter",
"NonMaxSuppressionV4",
"ParallelDynamicStitch",
"ParameterizedTruncatedNormal",
"PartitionedCall",
"Polygamma",
"PopulationCount",
"Qr",
"QuantizeAndDequantizeV2",
"QuantizeAndDequantizeV3",
"RFFT",
"RFFT2D",
"RFFT3D",
"RGBToHSV",
"RandomShuffle",
"RandomStandardNormal",
"RandomUniform",
"RandomUniformInt",
"ReadVariableOp",
"ResizeBilinear",
"ResizeBilinearGrad",
"ResizeNearestNeighbor",
"ResourceApplyAdaMax",
"ResourceApplyAdadelta",
"ResourceApplyAdagrad",
"ResourceApplyAdagradDA",
"ResourceApplyAdagradV2",
"ResourceApplyAdam",
"ResourceApplyAddSign",
"ResourceApplyCenteredRMSProp",
"ResourceApplyFtrl",
"ResourceApplyFtrlV2",
"ResourceApplyGradientDescent",
"ResourceApplyKerasMomentum",
"ResourceApplyMomentum",
"ResourceApplyPowerSign",
"ResourceApplyProximalAdagrad",
"ResourceApplyProximalGradientDescent",
"ResourceApplyRMSProp",
"ResourceGather",
"ResourceScatterAdd",
"ResourceScatterDiv",
"ResourceScatterMax",
"ResourceScatterMin",
"ResourceScatterMul",
"ResourceScatterNdAdd",
"ResourceScatterNdSub",
"ResourceScatterNdUpdate",
"ResourceScatterSub",
"ResourceScatterUpdate",
"RngReadAndSkip",
"RngSkip",
"Roll",
"ScatterNd",
"SelfAdjointEigV2",
"SoftmaxCrossEntropyWithLogits",
"SpaceToBatch",
"SpaceToBatchND",
"SpaceToDepth",
"SparseMatMul",
"SparseToDense",
"StackCloseV2",
"StackPopV2",
"StackPushV2",
"StackV2",
"StatefulPartitionedCall",
"StatefulStandardNormalV2",
"StatefulTruncatedNormal",
"StatefulUniform",
"StatefulUniformFullInt",
"StatefulUniformInt",
"StatelessCase",
"StatelessIf",
"StatelessMultinomial",
"StatelessRandomGetAlg",
"StatelessRandomGetKeyCounter",
"StatelessRandomGetKeyCounterAlg",
"StatelessRandomNormal",
"StatelessRandomNormalV2",
"StatelessRandomUniform",
"StatelessRandomUniformV2",
"StatelessRandomUniformInt",
"StatelessRandomUniformIntV2",
"StatelessRandomUniformFullInt",
"StatelessRandomUniformFullIntV2",
"StatelessTruncatedNormal",
"StatelessTruncatedNormalV2",
"StatelessWhile",
"Svd",
"SymbolicGradient",
"TensorArrayCloseV3",
"TensorArrayConcatV3",
"TensorArrayGatherV3",
"TensorArrayGradV3",
"TensorArrayReadV3",
"TensorArrayScatterV3",
"TensorArraySizeV3",
"TensorArraySplitV3",
"TensorArrayV3",
"TensorArrayWriteV3",
"TensorListConcatV2",
"TensorListElementShape",
"TensorListFromTensor",
"TensorListGather",
"TensorListGetItem",
"TensorListLength",
"TensorListPopBack",
"TensorListPushBack",
"TensorListReserve",
"TensorListSetItem",
"TensorListSplit",
"TensorListStack",
"TensorScatterAdd",
"TensorScatterMax",
"TensorScatterMin",
"TensorScatterSub",
"TensorScatterUpdate",
"TridiagonalSolve",
"TruncatedNormal",
"Unique",
"UpperBound",
"UnsortedSegmentMax",
"UnsortedSegmentMin",
"UnsortedSegmentProd",
"UnsortedSegmentSum",
"VarIsInitializedOp",
"VariableShape",
"Where",
"While",
"XlaBroadcastHelper",
"XlaConv",
"XlaDequantize",
"XlaDot",
"XlaDynamicSlice",
"XlaDynamicUpdateSlice",
"XlaEinsum",
"XlaGather",
"XlaIf",
"XlaKeyValueSort",
"XlaPad",
"XlaRecv",
"XlaReduce",
"XlaReduceWindow",
"XlaReplicaId",
"XlaScatter",
"XlaSelectAndScatter",
"XlaSelfAdjointEig",
"XlaSend",
"XlaSetBound",
"XlaSetDynamicDimensionSize",
"XlaSharding",
"XlaSort",
"XlaSpmdFullToShardShape",
"XlaSpmdShardToFullShape",
"XlaSvd",
"XlaVariadicReduce",
"XlaWhile",
"Zeta",
"_Arg",
"_ArrayToList",
"_ListToArray",
"_Retval"};
return result;
}
} // namespace testing
} // namespace tensorflow
| 41.753116
| 80
| 0.608379
|
yennster
|
33f7672afb5116b4662ee91006d93a9e073121b7
| 10,859
|
cpp
|
C++
|
app/src/main/cpp/sample/PortraitStayColorExample.cpp
|
volionamdp/NDK_OpenGLES_3_0
|
204b2b9ba8240c4153050da3331b0fb2b6dfe75f
|
[
"Apache-2.0"
] | 1,695
|
2019-08-09T01:24:00.000Z
|
2022-03-31T13:12:28.000Z
|
app/src/main/cpp/sample/PortraitStayColorExample.cpp
|
codingwatching/NDK_OpenGLES_3_0
|
4875cdee3fa8722cf72c470913183085faefc5a4
|
[
"Apache-2.0"
] | 7
|
2020-10-27T03:12:15.000Z
|
2022-03-23T08:43:54.000Z
|
app/src/main/cpp/sample/PortraitStayColorExample.cpp
|
codingwatching/NDK_OpenGLES_3_0
|
4875cdee3fa8722cf72c470913183085faefc5a4
|
[
"Apache-2.0"
] | 449
|
2019-09-02T05:11:15.000Z
|
2022-03-30T06:12:01.000Z
|
/**
 *
 * Created by the WeChat official account 字节流动 (Byte-Flow) on 2022/4/18.
 * https://github.com/githubhaohao/NDK_OpenGLES_3_0
 * The latest articles are published first on the 字节流动 official account. For questions or
 * technical discussion, add WeChat Byte-Flow to receive the video tutorials and join the
 * technical discussion group.
 *
 * */
#include <gtc/matrix_transform.hpp>
#include "PortraitStayColorExample.h"
#include "../util/GLUtils.h"
PortraitStayColorExample::PortraitStayColorExample()
{
m_SamplerLoc = GL_NONE;
m_MVPMatLoc = GL_NONE;
m_TextureId = GL_NONE;
m_VaoId = GL_NONE;
m_AngleX = 0;
m_AngleY = 0;
m_ScaleX = 1.0f;
m_ScaleY = 1.0f;
m_frameIndex = 0;
}
PortraitStayColorExample::~PortraitStayColorExample()
{
NativeImageUtil::FreeNativeImage(&m_RenderImage);
NativeImageUtil::FreeNativeImage(&m_GrayImage);
NativeImageUtil::FreeNativeImage(&m_MappingImage);
}
void PortraitStayColorExample::Init()
{
if(m_ProgramObj)
return;
//create RGBA texture
glGenTextures(1, &m_TextureId);
glBindTexture(GL_TEXTURE_2D, m_TextureId);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
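    //create gray mask texture (sampled as u_texture1 in the fragment shader)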
glGenTextures(1, &m_GrayTexId);
glBindTexture(GL_TEXTURE_2D, m_GrayTexId);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
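    //create mapping texture (bound to u_texture2; the current fragment shader declares but does not sample it)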
glGenTextures(1, &m_MappingTexId);
glBindTexture(GL_TEXTURE_2D, m_MappingTexId);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
char vShaderStr[] =
"#version 300 es\n"
"layout(location = 0) in vec4 a_position;\n"
"layout(location = 1) in vec2 a_texCoord;\n"
"uniform mat4 u_MVPMatrix;\n"
"out vec2 v_texCoord;\n"
"void main()\n"
"{\n"
" gl_Position = u_MVPMatrix * a_position;\n"
" v_texCoord = a_texCoord;\n"
"}";
char fShaderStr[] =
"#version 300 es\n"
"precision mediump float;\n"
"in vec2 v_texCoord;\n"
"layout(location = 0) out vec4 outColor;\n"
"uniform sampler2D u_texture0;//rgba\n"
"uniform sampler2D u_texture1;//gray\n"
"uniform sampler2D u_texture2;//mapping\n"
"uniform float u_offset;\n"
"uniform vec2 u_texSize;\n"
"uniform float u_time;\n"
"void main()\n"
"{\n"
" float gray = texture(u_texture1, v_texCoord).r;\n"
" vec4 rgba = texture(u_texture0, v_texCoord);\n"
" if(gray > 0.01) {\n"
" outColor = rgba;\n"
" }\n"
" else\n"
" {\n"
" vec2 fragCoord = gl_FragCoord.xy;\n"
" vec2 p = (-u_texSize.xy + 2.0*fragCoord)/u_texSize.y;\n"
" float a = atan(p.y,p.x);\n"
" float r = pow( pow(p.x*p.x,4.0) + pow(p.y*p.y,4.0), 1.0/8.0 );\n"
" vec2 uv = vec2( 1.0/r + 0.2*u_time, a );\n"
" float f = cos(12.0*uv.x)*cos(6.0*uv.y);\n"
" vec3 col = 0.5 + 0.5*sin( 3.1416*f + vec3(0.0,0.5,1.0) );\n"
" col = col*r;\n"
" vec4 tunnelColor = vec4(col, 1.0);\n"
"\n"
" float Y = 0.299 * rgba.r + 0.587 * rgba.g + 0.114 * rgba.b;\n"
" vec4 grayColor = vec4(vec3(Y), 1.0);\n"
" outColor = mix(grayColor, tunnelColor, u_offset);\n"
" }\n"
"}";
m_ProgramObj = GLUtils::CreateProgram(vShaderStr, fShaderStr);
if (m_ProgramObj)
{
m_SamplerLoc = glGetUniformLocation(m_ProgramObj, "u_texture0");
m_MVPMatLoc = glGetUniformLocation(m_ProgramObj, "u_MVPMatrix");
}
else
{
LOGCATE("PortraitStayColorExample::Init create program fail");
}
GLfloat verticesCoords[] = {
-1.0f, 1.0f, 0.0f, // Position 0
-1.0f, -1.0f, 0.0f, // Position 1
1.0f, -1.0f, 0.0f, // Position 2
1.0f, 1.0f, 0.0f, // Position 3
};
GLfloat textureCoords[] = {
0.0f, 0.0f, // TexCoord 0
0.0f, 1.0f, // TexCoord 1
1.0f, 1.0f, // TexCoord 2
1.0f, 0.0f // TexCoord 3
};
GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
// Generate VBO Ids and load the VBOs with data
glGenBuffers(3, m_VboIds);
glBindBuffer(GL_ARRAY_BUFFER, m_VboIds[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(verticesCoords), verticesCoords, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, m_VboIds[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(textureCoords), textureCoords, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_VboIds[2]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// Generate VAO Id
glGenVertexArrays(1, &m_VaoId);
glBindVertexArray(m_VaoId);
glBindBuffer(GL_ARRAY_BUFFER, m_VboIds[0]);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (const void *)0);
glBindBuffer(GL_ARRAY_BUFFER, GL_NONE);
glBindBuffer(GL_ARRAY_BUFFER, m_VboIds[1]);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), (const void *)0);
glBindBuffer(GL_ARRAY_BUFFER, GL_NONE);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_VboIds[2]);
glBindVertexArray(GL_NONE);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_TextureId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_RenderImage.width, m_RenderImage.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_RenderImage.ppPlane[0]);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_GrayTexId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_GrayImage.width, m_GrayImage.height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_GrayImage.ppPlane[0]);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, m_MappingTexId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_MappingImage.width, m_MappingImage.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_MappingImage.ppPlane[0]);
glBindTexture(GL_TEXTURE_2D, GL_NONE);
}
void PortraitStayColorExample::LoadImage(NativeImage *pImage)
{
LOGCATE("PortraitStayColorExample::LoadImage pImage = %p", pImage->ppPlane[0]);
if (pImage)
{
m_RenderImage.width = pImage->width;
m_RenderImage.height = pImage->height;
m_RenderImage.format = pImage->format;
NativeImageUtil::CopyNativeImage(pImage, &m_RenderImage);
}
}
void PortraitStayColorExample::Draw(int screenW, int screenH)
{
LOGCATE("PortraitStayColorExample::Draw()");
if(m_ProgramObj == GL_NONE || m_TextureId == GL_NONE) return;
m_frameIndex ++;
UpdateMVPMatrix(m_MVPMatrix, m_AngleX, m_AngleY, (float)screenW / screenH);
// Use the program object
glUseProgram (m_ProgramObj);
glBindVertexArray(m_VaoId);
glUniformMatrix4fv(m_MVPMatLoc, 1, GL_FALSE, &m_MVPMatrix[0][0]);
// Bind the RGBA map
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_TextureId);
GLUtils::setInt(m_ProgramObj, "u_texture0", 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_GrayTexId);
GLUtils::setInt(m_ProgramObj, "u_texture1", 1);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, m_MappingTexId);
GLUtils::setInt(m_ProgramObj, "u_texture2", 2);
GLUtils::setVec2(m_ProgramObj, "u_texSize", m_RenderImage.width, m_RenderImage.height);
//GLUtils::setVec2(m_ProgramObj, "u_mappingTexSize", m_MappingImage.width, m_MappingImage.height);
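    // Map sin() from [-1, 1] into [0, 1] so the grayscale/tunnel blend factor oscillates smoothly
    // (one full cycle every 320 frames).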
float offset = (sin(m_frameIndex * MATH_PI / 160) + 1.0f) / 2.0f;
GLUtils::setFloat(m_ProgramObj, "u_offset", offset);
GLUtils::setFloat(m_ProgramObj, "u_time", m_frameIndex * 0.04f);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, (const void *)0);
}
void PortraitStayColorExample::Destroy()
{
if (m_ProgramObj)
{
glDeleteProgram(m_ProgramObj);
glDeleteBuffers(3, m_VboIds);
glDeleteVertexArrays(1, &m_VaoId);
glDeleteTextures(1, &m_TextureId);
glDeleteTextures(1, &m_GrayTexId);
glDeleteTextures(1, &m_MappingTexId);
}
}
/**
 * @param angleX rotation angle around the X axis, in degrees
 * @param angleY rotation angle around the Y axis, in degrees
 * @param ratio  aspect ratio (width / height)
* */
void PortraitStayColorExample::UpdateMVPMatrix(glm::mat4 &mvpMatrix, int angleX, int angleY, float ratio)
{
LOGCATE("PortraitStayColorExample::UpdateMVPMatrix angleX = %d, angleY = %d, ratio = %f", angleX, angleY, ratio);
angleX = angleX % 360;
angleY = angleY % 360;
    //convert the angles to radians
float radiansX = static_cast<float>(MATH_PI / 180.0f * angleX);
float radiansY = static_cast<float>(MATH_PI / 180.0f * angleY);
// Projection matrix
glm::mat4 Projection = glm::ortho(-1.0f, 1.0f, -1.0f, 1.0f, 0.1f, 100.0f);
//glm::mat4 Projection = glm::frustum(-ratio, ratio, -1.0f, 1.0f, 4.0f, 100.0f);
//glm::mat4 Projection = glm::perspective(45.0f,ratio, 0.1f,100.f);
// View matrix
glm::mat4 View = glm::lookAt(
            glm::vec3(0, 0, 4), // Camera is at (0,0,4), in World Space
glm::vec3(0, 0, 0), // and looks at the origin
glm::vec3(0, 1, 0) // Head is up (set to 0,-1,0 to look upside-down)
);
// Model matrix
glm::mat4 Model = glm::mat4(1.0f);
Model = glm::scale(Model, glm::vec3(m_ScaleX, m_ScaleY, 1.0f));
Model = glm::rotate(Model, radiansX, glm::vec3(1.0f, 0.0f, 0.0f));
Model = glm::rotate(Model, radiansY, glm::vec3(0.0f, 1.0f, 0.0f));
Model = glm::translate(Model, glm::vec3(0.0f, 0.0f, 0.0f));
mvpMatrix = Projection * View * Model;
}
void PortraitStayColorExample::UpdateTransformMatrix(float rotateX, float rotateY, float scaleX, float scaleY)
{
GLSampleBase::UpdateTransformMatrix(rotateX, rotateY, scaleX, scaleY);
m_AngleX = static_cast<int>(rotateX);
m_AngleY = static_cast<int>(rotateY);
m_ScaleX = scaleX;
m_ScaleY = scaleY;
}
void PortraitStayColorExample::LoadMultiImageWithIndex(int index, NativeImage *pImage) {
LOGCATE("PortraitStayColorExample::LoadMultiImageWithIndex pImage = %p,[w=%d,h=%d,f=%d]", pImage->ppPlane[0], pImage->width, pImage->height, pImage->format);
if (pImage)
{
switch (index) {
case 0:
m_GrayImage.width = pImage->width;
m_GrayImage.height = pImage->height;
m_GrayImage.format = pImage->format;
NativeImageUtil::CopyNativeImage(pImage, &m_GrayImage);
//NativeImageUtil::DumpNativeImage(&m_GrayImage, "/sdcard/DCIM", "PortraitStayColorExample");
break;
case 1:
m_MappingImage.width = pImage->width;
m_MappingImage.height = pImage->height;
m_MappingImage.format = pImage->format;
NativeImageUtil::CopyNativeImage(pImage, &m_MappingImage);
break;
}
}
}
| 33.207951
| 158
| 0.699328
|
volionamdp
|
33fe699357e02ad9843eb4a9bffa8c4c67edeafd
| 3,821
|
hpp
|
C++
|
src/pretty_effect.hpp
|
scryptic86/gfx_demo
|
8f744beb84522f1bc24617725a421a2f2b85aaca
|
[
"MIT"
] | 58
|
2021-05-10T23:25:06.000Z
|
2022-03-22T00:59:04.000Z
|
src/pretty_effect.hpp
|
scryptic86/gfx_demo
|
8f744beb84522f1bc24617725a421a2f2b85aaca
|
[
"MIT"
] | 3
|
2021-07-28T16:26:35.000Z
|
2022-02-18T21:45:09.000Z
|
src/pretty_effect.hpp
|
scryptic86/gfx_demo
|
8f744beb84522f1bc24617725a421a2f2b85aaca
|
[
"MIT"
] | 5
|
2021-07-16T12:00:48.000Z
|
2022-02-14T02:04:06.000Z
|
/*
This example code is in the Public Domain (or CC0 licensed, at your option.)
Unless required by applicable law or agreed to in writing, this
software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
*/
#pragma once
#include <stdint.h>
#include <math.h>
#include "sdkconfig.h"
#include "esp_err.h"
#include "gfx_pixel.hpp"
#include "gfx_bitmap.hpp"
#include "gfx_palette.hpp"
#if defined(ARDUINO) || defined(CONFIG_IDF_TARGET_ESP32)
#include "decode_image.hpp"
using pixels_type = gfx::large_bitmap<gfx::rgb_pixel<16>>;
// play with palettes if you want:
//using pixels_type = gfx::large_bitmap<gfx::indexed_pixel<4>,gfx::ega_palette<gfx::rgb_pixel<16>>>;
//using pixels_palette_type = typename pixels_type::palette_type;
//pixels_palette_type pixels_palette;
pixels_type pixels;
//Grab a rgb16 pixel from the esp32_tiles image
static inline typename gfx::rgb_pixel<16> get_bgnd_pixel(int x, int y)
{
//Image has an 8x8 pixel margin, so we can also resolve e.g. [-3, 243]
typename pixels_type::pixel_type px;
pixels.point(gfx::point16(x+8,y+8),&px);
gfx::rgb_pixel<16> result;
// in case this is indexed, we need to convert it here
// while we still have access to the palette
convert_palette_to(pixels,px,&result);
return result;
}
#else
//esp32s2/c3 doesn't have enough memory to hold the decoded image, calculate instead
static inline gfx::rgb_pixel<16> get_bgnd_pixel(int x, int y)
{
gfx::rgb_pixel<16> result;
result.value((x<<3)^(y<<3)^(x*y));
return result;
}
#endif
//This variable is used to detect the next frame.
static int prev_frame=-1;
//Instead of calculating the offsets for each pixel we grab, we pre-calculate the values whenever a frame changes, then re-use
//these as we go through all the pixels in the frame. This is much, much faster.
static int8_t *xofs, *yofs;
static int8_t *xcomp, *ycomp;
//Calculate the pixel data for a set of lines (with implied line size of 320). Pixels go in dest, line is the Y-coordinate of the
//first line to be calculated, linect is the number of lines to calculate. Frame increases by one every time the entire image
//is displayed; this is used to go to the next frame of animation.
template<typename Destination>
void pretty_effect_calc_lines(uint16_t width,uint16_t height,Destination& dest, int line, int frame, int linect)
{
if (frame!=prev_frame) {
//We need to calculate a new set of offset coefficients. Take some random sines as offsets to make everything
//look pretty and fluid-y.
for (int x=0; x<width; x++) xofs[x]=sin(frame*0.15+x*0.06)*4;
for (int y=0; y<height; y++) yofs[y]=sin(frame*0.1+y*0.05)*4;
for (int x=0; x<width; x++) xcomp[x]=sin(frame*0.11+x*0.12)*4;
for (int y=0; y<height; y++) ycomp[y]=sin(frame*0.07+y*0.15)*4;
prev_frame=frame;
}
for (int y=line; y<line+linect; y++) {
for (int x=0; x<width; x++) {
gfx::draw::point(dest,gfx::spoint16(x,y-line),get_bgnd_pixel(x+yofs[y]+xcomp[x], y+xofs[x]+ycomp[y]));
}
}
}
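//Illustrative usage sketch (the names band_bitmap and band_lines below are assumptions, not part of
//this header): after a successful pretty_effect_init(...), a frame could be produced band by band:
//  for (int y = 0; y < height; y += band_lines)
//      pretty_effect_calc_lines(width, height, band_bitmap, y, frame, band_lines);
//blitting band_bitmap to the display after each call and incrementing frame once per full image.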
gfx::gfx_result pretty_effect_init(const char* image,uint16_t image_width,uint16_t image_height,uint16_t width,uint16_t height)
{
xofs = (int8_t*)malloc(width);
assert(xofs!=nullptr);
yofs = (int8_t*)malloc(height);
assert(yofs!=nullptr);
xcomp = (int8_t*)malloc(width);
assert(xcomp!=nullptr);
ycomp = (int8_t*)malloc(height);
assert(ycomp!=nullptr);
#ifdef CONFIG_IDF_TARGET_ESP32
return decode_image(image,image_width,image_height, &pixels/*,&pixels_palette*/);
#elif CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32C3
//esp32s2/c3 doesn't have enough memory to hold the decoded image, calculate instead
return gfx::gfx_result::success;
#endif
}
| 40.221053
| 129
| 0.70793
|
scryptic86
|
33ff09edd27d1f6d40c29c8033c7e16151f36648
| 91
|
cpp
|
C++
|
src/main.cpp
|
trexxet/PSO
|
6cd159b94117e55534b3087e6971b4adda35f37c
|
[
"MIT"
] | null | null | null |
src/main.cpp
|
trexxet/PSO
|
6cd159b94117e55534b3087e6971b4adda35f37c
|
[
"MIT"
] | null | null | null |
src/main.cpp
|
trexxet/PSO
|
6cd159b94117e55534b3087e6971b4adda35f37c
|
[
"MIT"
] | 2
|
2020-03-02T15:53:24.000Z
|
2020-06-22T09:06:23.000Z
|
#include "window/Window.h"
int main () {
Window window;
window.mainLoop();
return 0;
}
| 11.375
| 26
| 0.659341
|
trexxet
|
d506e885184ac0a0dc608690b07ac5e48ce7ed12
| 4,382
|
hpp
|
C++
|
Storage/Tape/Tape.hpp
|
ivanizag/CLK
|
e5d364c4ce20aaf6dbdf19ddce1879e91f4ddbc8
|
[
"MIT"
] | 2
|
2018-09-11T14:59:09.000Z
|
2018-09-12T09:39:46.000Z
|
Storage/Tape/Tape.hpp
|
MaddTheSane/CLK
|
6252859ef5631c7ccbc33e95cf4d888727ff3856
|
[
"MIT"
] | null | null | null |
Storage/Tape/Tape.hpp
|
MaddTheSane/CLK
|
6252859ef5631c7ccbc33e95cf4d888727ff3856
|
[
"MIT"
] | null | null | null |
//
// Tape.hpp
// Clock Signal
//
// Created by Thomas Harte on 18/01/2016.
// Copyright 2016 Thomas Harte. All rights reserved.
//
#ifndef Tape_hpp
#define Tape_hpp
#include <memory>
#include "../../ClockReceiver/ClockReceiver.hpp"
#include "../../ClockReceiver/ClockingHintSource.hpp"
#include "../TimedEventLoop.hpp"
namespace Storage {
namespace Tape {
/*!
Models a tape as a sequence of pulses, each pulse being of arbitrary length and described
	by its relationship with zero:
- high pulses exit from zero upward before returning to it;
- low pulses exit from zero downward before returning to it;
- zero pulses run along zero.
Subclasses should implement at least @c get_next_pulse and @c reset to provide a serial feeding
of pulses and the ability to return to the start of the feed. They may also implement @c seek if
a better implementation than a linear search from the @c reset time can be implemented.
*/
class Tape {
public:
struct Pulse {
enum Type {
High, Low, Zero
} type;
Time length;
Pulse(Type type, Time length) : type(type), length(length) {}
Pulse() {}
};
/*!
If at the start of the tape returns the first stored pulse. Otherwise advances past
the last-returned pulse and returns the next.
@returns the pulse that begins at the current cursor position.
*/
Pulse get_next_pulse();
/// Returns the tape to the beginning.
void reset();
/// @returns @c true if the tape has progressed beyond all recorded content; @c false otherwise.
virtual bool is_at_end() = 0;
/*!
Returns a numerical representation of progression into the tape. Precision is arbitrary but
required to be at least to the whole pulse. Greater numbers are later than earlier numbers,
but not necessarily continuous.
*/
virtual uint64_t get_offset();
/*!
Moves the tape to the first time at which the specified offset would be returned by get_offset.
*/
virtual void set_offset(uint64_t);
/*!
		Calculates and returns the amount of time that has elapsed since the tape began. Potentially expensive.
*/
virtual Time get_current_time();
/*!
Seeks to @c time. Potentially expensive.
*/
virtual void seek(Time &time);
virtual ~Tape() {};
private:
uint64_t offset_;
Tape::Pulse pulse_;
virtual Pulse virtual_get_next_pulse() = 0;
virtual void virtual_reset() = 0;
};
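/*
	Illustrative sketch only (not part of the original interface): a minimal subclass producing an
	endless square wave might implement the hooks along these lines, assuming Time offers a
	(length, clock_rate) constructor:

		class SquareWaveTape: public Tape {
			public:
				bool is_at_end() override { return false; }

			private:
				Pulse virtual_get_next_pulse() override {
					level_ = !level_;
					return Pulse(level_ ? Pulse::High : Pulse::Low, Time(1, 4800));
				}
				void virtual_reset() override { level_ = false; }
				bool level_ = false;
		};
*/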
/*!
Provides a helper for: (i) retaining a reference to a tape; and (ii) running the tape at a certain
input clock rate.
Will call @c process_input_pulse instantaneously upon reaching *the end* of a pulse. Therefore a subclass
can decode pulses into data within process_input_pulse, using the supplied pulse's @c length and @c type.
*/
class TapePlayer: public TimedEventLoop, public ClockingHint::Source {
public:
TapePlayer(int input_clock_rate);
virtual ~TapePlayer() {}
void set_tape(std::shared_ptr<Storage::Tape::Tape> tape);
bool has_tape();
std::shared_ptr<Storage::Tape::Tape> get_tape();
void run_for(const Cycles cycles);
void run_for_input_pulse();
ClockingHint::Preference preferred_clocking() const override;
protected:
virtual void process_next_event() override;
virtual void process_input_pulse(const Tape::Pulse &pulse) = 0;
private:
inline void get_next_pulse();
std::shared_ptr<Storage::Tape::Tape> tape_;
Tape::Pulse current_pulse_;
};
/*!
	A specific subclass of the tape player for machines that sample the tape in such a way as to
	report only either a high or a low current input level.
Such machines can use @c get_input() to get the current level of the input.
They can also provide a delegate to be notified upon any change in the input level.
*/
class BinaryTapePlayer : public TapePlayer {
public:
BinaryTapePlayer(int input_clock_rate);
void set_motor_control(bool enabled);
bool get_motor_control() const;
void set_tape_output(bool set);
bool get_input() const;
void run_for(const Cycles cycles);
class Delegate {
public:
virtual void tape_did_change_input(BinaryTapePlayer *tape_player) = 0;
};
void set_delegate(Delegate *delegate);
ClockingHint::Preference preferred_clocking() const final;
protected:
Delegate *delegate_ = nullptr;
void process_input_pulse(const Storage::Tape::Tape::Pulse &pulse) final;
bool input_level_ = false;
bool motor_is_running_ = false;
};
}
}
#endif /* Tape_hpp */
| 27.049383
| 106
| 0.730717
|
ivanizag
|
d506ead310292411834fc41e58da72e7b772aa64
| 758
|
cc
|
C++
|
firmware/main.cc
|
Millisman/OCPP-NANO
|
e38db48c989e84b4d7eeaf1e90fcf9ef6c9cf5a5
|
[
"Apache-2.0"
] | null | null | null |
firmware/main.cc
|
Millisman/OCPP-NANO
|
e38db48c989e84b4d7eeaf1e90fcf9ef6c9cf5a5
|
[
"Apache-2.0"
] | null | null | null |
firmware/main.cc
|
Millisman/OCPP-NANO
|
e38db48c989e84b4d7eeaf1e90fcf9ef6c9cf5a5
|
[
"Apache-2.0"
] | null | null | null |
#include "mcu/mcu.h"
#include "mcu/watchdog.h"
#include "mcu/serial.h"
#include "gsm/modem_manager.h"
#include "ocpp/ocpp_man.h"
#include "app/man_time.h"
#include "event/loop.h"
#include "events.h"
static uint8_t rx_buff[512];
static uint8_t tx_buff[512];
int main() {
//std::cout.setf(std::ios::unitbuf);
SEI;
mcu::Watchdog::enable();
app::ManTime::init();
gsm::ManGSM::get();
gsm::ManGSM::set_buf(rx_buff, tx_buff, sizeof(rx_buff), sizeof(tx_buff));
ocpp::ManOCPP::get();
ocpp::ManOCPP::set_buf(rx_buff, tx_buff, sizeof(rx_buff), sizeof(tx_buff));
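    // Main loop: post a periodic UPDATE event, dispatch all queued events, then kick the watchdog.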
while (1) {
event::Loop::post(event::Event(events::UPDATE));
event::Loop::dispatch();
mcu::Watchdog::reset();
}
}
| 22.294118
| 79
| 0.618734
|
Millisman
|
d50963006eaefdbb3d210e9a2920c83c336726eb
| 21,459
|
cpp
|
C++
|
Game/Springs.cpp
|
sjohal21/Floating-Sandbox
|
0170e3696ed4f012f5f17fdbbdaef1af4117f495
|
[
"CC-BY-4.0"
] | 44
|
2018-07-08T16:44:53.000Z
|
2022-02-06T14:07:30.000Z
|
Game/Springs.cpp
|
sjohal21/Floating-Sandbox
|
0170e3696ed4f012f5f17fdbbdaef1af4117f495
|
[
"CC-BY-4.0"
] | 31
|
2019-03-24T16:00:38.000Z
|
2022-02-24T20:23:18.000Z
|
Game/Springs.cpp
|
sjohal21/Floating-Sandbox
|
0170e3696ed4f012f5f17fdbbdaef1af4117f495
|
[
"CC-BY-4.0"
] | 24
|
2018-11-08T21:58:53.000Z
|
2022-01-12T12:04:42.000Z
|
/***************************************************************************************
* Original Author: Gabriele Giuseppini
* Created: 2018-05-12
* Copyright: Gabriele Giuseppini (https://github.com/GabrieleGiuseppini)
***************************************************************************************/
#include "Physics.h"
#include <cmath>
namespace Physics {
void Springs::Add(
ElementIndex pointAIndex,
ElementIndex pointBIndex,
Octant factoryPointAOctant,
Octant factoryPointBOctant,
SuperTrianglesVector const & superTriangles,
ElementCount coveringTrianglesCount,
Points const & points)
{
ElementIndex const springIndex = static_cast<ElementIndex>(mIsDeletedBuffer.GetCurrentPopulatedSize());
mIsDeletedBuffer.emplace_back(false);
mEndpointsBuffer.emplace_back(pointAIndex, pointBIndex);
mFactoryEndpointOctantsBuffer.emplace_back(factoryPointAOctant, factoryPointBOctant);
mSuperTrianglesBuffer.emplace_back(superTriangles);
mFactorySuperTrianglesBuffer.emplace_back(superTriangles);
assert(coveringTrianglesCount >= superTriangles.size()); // Covering triangles count includes super triangles
mCoveringTrianglesCountBuffer.emplace_back(coveringTrianglesCount);
// Breaking elongation recalculated later
mBreakingElongationBuffer.emplace_back(0.0f);
mFactoryRestLengthBuffer.emplace_back((points.GetPosition(pointAIndex) - points.GetPosition(pointBIndex)).length());
mRestLengthBuffer.emplace_back((points.GetPosition(pointAIndex) - points.GetPosition(pointBIndex)).length());
// Dynamics coefficients recalculated later, but stiffness grows slowly and shrinks fast, hence we want to start high
mDynamicsCoefficientsBuffer.emplace_back(std::numeric_limits<float>::max(), 0.0f);
// Stiffness is average
float const averageStiffness =
(points.GetStructuralMaterial(pointAIndex).Stiffness + points.GetStructuralMaterial(pointBIndex).Stiffness)
/ 2.0f;
// Strength is average
float const averageStrength =
(points.GetStrength(pointAIndex) + points.GetStrength(pointBIndex))
/ 2.0f;
// Melting temperature is average
float const averageMeltingTemperature =
(points.GetStructuralMaterial(pointAIndex).MeltingTemperature + points.GetStructuralMaterial(pointBIndex).MeltingTemperature)
/ 2.0f;
mMaterialPropertiesBuffer.emplace_back(
averageStiffness,
averageStrength,
averageMeltingTemperature,
CalculateExtraMeltingInducedTolerance(averageStrength));
// Base structural material is arbitrarily the weakest of the two;
// only affects sound and name, anyway
mBaseStructuralMaterialBuffer.emplace_back(
points.GetStructuralMaterial(pointAIndex).Strength < points.GetStructuralMaterial(pointBIndex).Strength
? &(points.GetStructuralMaterial(pointAIndex))
: &(points.GetStructuralMaterial(pointBIndex)));
// If both nodes are rope, then the spring is rope
// (non-rope <-> rope springs are "connections" and not to be treated as ropes)
mIsRopeBuffer.emplace_back(points.IsRope(pointAIndex) && points.IsRope(pointBIndex));
// Spring is permeable by default - will be changed later
mWaterPermeabilityBuffer.emplace_back(1.0f);
// Heat properties are average
float const thermalConductivity =
(points.GetStructuralMaterial(pointAIndex).ThermalConductivity + points.GetStructuralMaterial(pointBIndex).ThermalConductivity)
/ 2.0f;
mMaterialThermalConductivityBuffer.emplace_back(thermalConductivity);
mIsStressedBuffer.emplace_back(false);
// Calculate parameters for this spring
UpdateForDecayAndTemperatureAndGameParameters(
springIndex,
mCurrentNumMechanicalDynamicsIterations,
mCurrentSpringStiffnessAdjustment,
mCurrentSpringDampingAdjustment,
mCurrentSpringStrengthAdjustment,
CalculateSpringStrengthIterationsAdjustment(mCurrentNumMechanicalDynamicsIterationsAdjustment),
mCurrentMeltingTemperatureAdjustment,
points);
}
void Springs::Destroy(
ElementIndex springElementIndex,
DestroyOptions destroyOptions,
GameParameters const & gameParameters,
Points const & points)
{
assert(!IsDeleted(springElementIndex));
// Invoke destroy handler
assert(nullptr != mShipPhysicsHandler);
mShipPhysicsHandler->HandleSpringDestroy(
springElementIndex,
!!(destroyOptions & Springs::DestroyOptions::DestroyAllTriangles),
gameParameters);
// Fire spring break event, unless told otherwise
if (!!(destroyOptions & Springs::DestroyOptions::FireBreakEvent))
{
mGameEventHandler->OnBreak(
GetBaseStructuralMaterial(springElementIndex),
mParentWorld.GetOceanSurface().IsUnderwater(GetEndpointAPosition(springElementIndex, points)), // Arbitrary
1);
}
// Zero out our dynamics coefficients, so that we can still calculate Hooke's
// and damping forces for this spring without running the risk of
// affecting non-deleted points
mDynamicsCoefficientsBuffer[springElementIndex].StiffnessCoefficient = 0.0f;
mDynamicsCoefficientsBuffer[springElementIndex].DampingCoefficient = 0.0f;
// Flag ourselves as deleted
mIsDeletedBuffer[springElementIndex] = true;
}
void Springs::Restore(
ElementIndex springElementIndex,
GameParameters const & gameParameters,
Points const & points)
{
assert(IsDeleted(springElementIndex));
// Clear the deleted flag
mIsDeletedBuffer[springElementIndex] = false;
// Recalculate parameters for this spring
UpdateForDecayAndTemperatureAndGameParameters(
springElementIndex,
mCurrentNumMechanicalDynamicsIterations,
mCurrentSpringStiffnessAdjustment,
mCurrentSpringDampingAdjustment,
mCurrentSpringStrengthAdjustment,
CalculateSpringStrengthIterationsAdjustment(mCurrentNumMechanicalDynamicsIterationsAdjustment),
mCurrentMeltingTemperatureAdjustment,
points);
// Invoke restore handler
assert(nullptr != mShipPhysicsHandler);
mShipPhysicsHandler->HandleSpringRestore(
springElementIndex,
gameParameters);
}
void Springs::UpdateForGameParameters(
GameParameters const & gameParameters,
Points const & points)
{
if (gameParameters.NumMechanicalDynamicsIterations<float>() != mCurrentNumMechanicalDynamicsIterations
|| gameParameters.NumMechanicalDynamicsIterationsAdjustment != mCurrentNumMechanicalDynamicsIterationsAdjustment
|| gameParameters.SpringStiffnessAdjustment != mCurrentSpringStiffnessAdjustment
|| gameParameters.SpringDampingAdjustment != mCurrentSpringDampingAdjustment
|| gameParameters.SpringStrengthAdjustment != mCurrentSpringStrengthAdjustment
|| gameParameters.MeltingTemperatureAdjustment != mCurrentMeltingTemperatureAdjustment)
{
// Recalc
UpdateForDecayAndTemperatureAndGameParameters(
gameParameters,
points);
assert(mCurrentNumMechanicalDynamicsIterations == gameParameters.NumMechanicalDynamicsIterations<float>());
assert(mCurrentNumMechanicalDynamicsIterationsAdjustment == gameParameters.NumMechanicalDynamicsIterationsAdjustment);
assert(mCurrentSpringStiffnessAdjustment == gameParameters.SpringStiffnessAdjustment);
assert(mCurrentSpringDampingAdjustment == gameParameters.SpringDampingAdjustment);
assert(mCurrentSpringStrengthAdjustment == gameParameters.SpringStrengthAdjustment);
assert(mCurrentMeltingTemperatureAdjustment == gameParameters.MeltingTemperatureAdjustment);
}
}
void Springs::UpdateForDecayAndTemperatureAndGameParameters(
GameParameters const & gameParameters,
Points const & points)
{
//
// Assumption: decay and temperature have changed; parameters might have (but most likely not)
//
// Recalc
UpdateForDecayAndTemperatureAndGameParameters(
gameParameters.NumMechanicalDynamicsIterations<float>(),
gameParameters.NumMechanicalDynamicsIterationsAdjustment,
gameParameters.SpringStiffnessAdjustment,
gameParameters.SpringDampingAdjustment,
gameParameters.SpringStrengthAdjustment,
gameParameters.MeltingTemperatureAdjustment,
points);
// Remember the new values
mCurrentNumMechanicalDynamicsIterations = gameParameters.NumMechanicalDynamicsIterations<float>();
mCurrentNumMechanicalDynamicsIterationsAdjustment = gameParameters.NumMechanicalDynamicsIterationsAdjustment;
mCurrentSpringStiffnessAdjustment = gameParameters.SpringStiffnessAdjustment;
mCurrentSpringDampingAdjustment = gameParameters.SpringDampingAdjustment;
mCurrentSpringStrengthAdjustment = gameParameters.SpringStrengthAdjustment;
mCurrentMeltingTemperatureAdjustment = gameParameters.MeltingTemperatureAdjustment;
}
void Springs::UploadElements(
ShipId shipId,
Render::RenderContext & renderContext) const
{
// Either upload all springs, or just the edge springs
bool const doUploadAllSprings =
DebugShipRenderModeType::Springs == renderContext.GetDebugShipRenderMode();
// Ropes are uploaded as springs only if DebugRenderMode is springs or edge springs
bool const doUploadRopesAsSprings =
DebugShipRenderModeType::Springs == renderContext.GetDebugShipRenderMode()
|| DebugShipRenderModeType::EdgeSprings == renderContext.GetDebugShipRenderMode();
auto & shipRenderContext = renderContext.GetShipRenderContext(shipId);
for (ElementIndex i : *this)
{
// Only upload non-deleted springs that are not covered by two super-triangles, unless
// we are in springs render mode
if (!mIsDeletedBuffer[i])
{
if (IsRope(i) && !doUploadRopesAsSprings)
{
shipRenderContext.UploadElementRope(
GetEndpointAIndex(i),
GetEndpointBIndex(i));
}
else if (
mCoveringTrianglesCountBuffer[i] < 2
|| doUploadAllSprings
|| IsRope(i))
{
shipRenderContext.UploadElementSpring(
GetEndpointAIndex(i),
GetEndpointBIndex(i));
}
}
}
}
void Springs::UploadStressedSpringElements(
ShipId shipId,
Render::RenderContext & renderContext) const
{
auto & shipRenderContext = renderContext.GetShipRenderContext(shipId);
for (ElementIndex i : *this)
{
if (!mIsDeletedBuffer[i])
{
if (mIsStressedBuffer[i])
{
shipRenderContext.UploadElementStressedSpring(
GetEndpointAIndex(i),
GetEndpointBIndex(i));
}
}
}
}
void Springs::UpdateForStrains(
GameParameters const & gameParameters,
Points & points)
{
float constexpr StrainHighWatermark = 0.5f; // Greater than this multiplier to be stressed
float constexpr StrainLowWatermark = 0.08f; // Less than this multiplier to become non-stressed
OceanSurface const & oceanSurface = mParentWorld.GetOceanSurface();
// Visit all springs
for (ElementIndex s : *this)
{
// Avoid breaking deleted springs
if (!mIsDeletedBuffer[s])
{
// Calculate strain length
float const strain = fabs(GetLength(s, points) - mRestLengthBuffer[s]);
// Check against breaking elongation
float const breakingElongation = mBreakingElongationBuffer[s];
if (strain > breakingElongation)
{
// It's broken!
// Destroy this spring
this->Destroy(
s,
DestroyOptions::FireBreakEvent // Notify Break
| DestroyOptions::DestroyAllTriangles,
gameParameters,
points);
}
else if (mIsStressedBuffer[s])
{
// Stressed spring...
// ...see if should un-stress it
if (strain < StrainLowWatermark * breakingElongation)
{
// It's not stressed anymore
mIsStressedBuffer[s] = false;
}
}
else
{
// Not stressed spring
// ...see if should stress it
if (strain > StrainHighWatermark * breakingElongation)
{
// It's stressed!
mIsStressedBuffer[s] = true;
// Notify stress
mGameEventHandler->OnStress(
GetBaseStructuralMaterial(s),
oceanSurface.IsUnderwater(GetEndpointAPosition(s, points)), // Arbitrary
1);
}
}
}
}
}
////////////////////////////////////////////////////////////////////
void Springs::UpdateForDecayAndTemperatureAndGameParameters(
float numMechanicalDynamicsIterations,
float numMechanicalDynamicsIterationsAdjustment,
float stiffnessAdjustment,
float dampingAdjustment,
float strengthAdjustment,
float meltingTemperatureAdjustment,
Points const & points)
{
float const strengthIterationsAdjustment
= CalculateSpringStrengthIterationsAdjustment(numMechanicalDynamicsIterationsAdjustment);
// Recalc all parameters
for (ElementIndex s : *this)
{
if (!IsDeleted(s))
{
inline_UpdateForDecayAndTemperatureAndGameParameters(
s,
numMechanicalDynamicsIterations,
stiffnessAdjustment,
dampingAdjustment,
strengthAdjustment,
strengthIterationsAdjustment,
meltingTemperatureAdjustment,
points);
}
}
}
void Springs::UpdateForDecayAndTemperatureAndGameParameters(
ElementIndex springIndex,
float numMechanicalDynamicsIterations,
float stiffnessAdjustment,
float dampingAdjustment,
float strengthAdjustment,
float strengthIterationsAdjustment,
float meltingTemperatureAdjustment,
Points const & points)
{
inline_UpdateForDecayAndTemperatureAndGameParameters(
springIndex,
numMechanicalDynamicsIterations,
stiffnessAdjustment,
dampingAdjustment,
strengthAdjustment,
strengthIterationsAdjustment,
meltingTemperatureAdjustment,
points);
}
void Springs::inline_UpdateForDecayAndTemperatureAndGameParameters(
ElementIndex springIndex,
float numMechanicalDynamicsIterations,
float stiffnessAdjustment,
float dampingAdjustment,
float strengthAdjustment,
float strengthIterationsAdjustment,
float meltingTemperatureAdjustment,
Points const & points)
{
auto const endpointAIndex = GetEndpointAIndex(springIndex);
auto const endpointBIndex = GetEndpointBIndex(springIndex);
float const massFactor =
(points.GetAugmentedMaterialMass(endpointAIndex) * points.GetAugmentedMaterialMass(endpointBIndex))
/ (points.GetAugmentedMaterialMass(endpointAIndex) + points.GetAugmentedMaterialMass(endpointBIndex));
float const dt = GameParameters::SimulationStepTimeDuration<float> / numMechanicalDynamicsIterations;
// Note: in 1.14, the spring temperature was the average of the two points.
// Differences in temperature between adjacent points made it so that springs'
// melting was widely underestimated.
// In reality, a spring is as "soft" as its softness point.
////float const springTemperature =
//// (points.GetTemperature(endpointAIndex) + points.GetTemperature(endpointBIndex)) / 2.0f;
float const springTemperature = std::max(
points.GetTemperature(endpointAIndex),
points.GetTemperature(endpointBIndex));
    // Excess of temperature over the melting temperature; might be negative
// if we're below the melting temperature
float const meltingOverheat =
springTemperature
- GetMaterialMeltingTemperature(springIndex) * meltingTemperatureAdjustment;
//
// Stiffness coefficient
//
// The "stiffness coefficient" is the factor which, once multiplied with the spring displacement,
// yields the spring force, according to Hooke's law.
//
// We calculate the coefficient so that the two forces applied to each of the two masses produce a resulting
// change in position equal to a fraction SpringReductionFraction * adjustment of the spring displacement,
// in the time interval of a single mechanical dynamics simulation.
//
// After one iteration the spring displacement dL = L - L0 is reduced to:
// dL * (1-SRF)
// where SRF is the (adjusted) SpringReductionFraction parameter. After N iterations this would be:
// dL * (1-SRF)^N
//
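    // (Illustrative arithmetic: if SRF were 0.5, then after N = 30 iterations the residual
    // displacement would be dL * 0.5^30, i.e. roughly a billionth of the original displacement.)
    //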
// The reduction adjustment is both the material-specific adjustment and the global game adjustment.
//
// If the endpoints are melting, their temperature also controls the stiffness - the higher the temperature,
// above the melting point, the lower the stiffness; this is achieved with a smoothed multiplier with the following
// edges:
// T <= Tm : 1.0
// T >= Tm + DeltaMeltingTMax : ~< 1.0 (== MinStiffnessFraction, asymptote)
//
// Asymptote
// NOTE: This value should be adjusted based on the number of spring iterations we perform
// per simulation step
float constexpr MinStiffnessFraction = 0.0002f;
// We reach max softness at T+200
float const meltDepthFraction = SmoothStep(0.0f, 200.0f, meltingOverheat);
// 1.0 when not melting, MinStiffnessFraction when melting "a lot"
float const meltMultiplier = Mix(1.0f, MinStiffnessFraction, meltDepthFraction);
// Our desired stiffness coefficient
float const desiredStiffnessCoefficient =
GameParameters::SpringReductionFraction
* GetMaterialStiffness(springIndex)
* stiffnessAdjustment
* massFactor
/ (dt * dt)
* meltMultiplier;
// If the coefficient is growing (spring is becoming more stiff), then
// approach the desired stiffness coefficient slowly,
// or else we have too much discontinuity and might explode
if (desiredStiffnessCoefficient > mDynamicsCoefficientsBuffer[springIndex].StiffnessCoefficient)
{
mDynamicsCoefficientsBuffer[springIndex].StiffnessCoefficient +=
0.03f // 0.03: ~76 steps to 1/10th off target
* (desiredStiffnessCoefficient - mDynamicsCoefficientsBuffer[springIndex].StiffnessCoefficient);
}
else
{
// Sudden decrease
mDynamicsCoefficientsBuffer[springIndex].StiffnessCoefficient = desiredStiffnessCoefficient;
}
//
// Damping coefficient
//
// Magnitude of the drag force on the relative velocity component along the spring.
//
mDynamicsCoefficientsBuffer[springIndex].DampingCoefficient =
GameParameters::SpringDampingCoefficient
* dampingAdjustment
* massFactor
/ dt;
//
// Breaking elongation
//
// The breaking elongation - i.e. the max delta L, aka displacement tolerance - depends on:
// - The material strength and the strength adjustment
// - The spring's decay (which itself is a function of the endpoints' decay)
// - If the endpoints are melting, their temperature - so to keep springs intact while melting makes them longer
// - The actual number of mechanics iterations we'll be performing
//
// The breaking elongation is the strength multiplied with the spring's rest length, so that it's ready to be
// compared against the spring's absolute delta L without having to divide the delta L by the rest length
//
// Decay of spring == avg of two endpoints' decay
float const springDecay =
(points.GetDecay(endpointAIndex) + points.GetDecay(endpointBIndex))
/ 2.0f;
// If we're melting, the current spring length, when longer than the
// previous rest length, is also its new rest length - but no more than a few times
// the factory rest length, or else springs become abnormally-long spikes.
// When cooling again, we leave the rest length at its maximum - modeling permanent deformation.
if (meltingOverheat > 0.0f)
{
SetRestLength(
springIndex,
Clamp(
GetLength(springIndex, points),
GetRestLength(springIndex),
mFactoryRestLengthBuffer[springIndex] * 2.0f));
}
mBreakingElongationBuffer[springIndex] =
GetMaterialStrength(springIndex)
* strengthAdjustment
* 0.839501f // Magic number: from 1.14, after #iterations increased from 24 to 30
        * 0.643389f // Magic number: in 1.15.2 we shortened the simulation time step from 0.2 to 0.156
* strengthIterationsAdjustment
* springDecay
* GetRestLength(springIndex) // To make strain comparison independent from rest length
* (1.0f + GetExtraMeltingInducedTolerance(springIndex) * meltDepthFraction); // When melting, springs are more tolerant to elongation
}
}
| 39.812616
| 141
| 0.691505
|
sjohal21
|
d50bb384b143f01db7d151605fc11a6e73cd7bb6
| 1,778
|
hpp
|
C++
|
__unit_tests/gv_base_unit_test/unit_test_log.hpp
|
dragonsn/gv_game_engine
|
dca6c1fb1f8d96e9a244f157a63f8a69da084b0f
|
[
"MIT"
] | 2
|
2018-12-03T13:17:31.000Z
|
2020-04-08T07:00:02.000Z
|
__unit_tests/gv_base_unit_test/unit_test_log.hpp
|
dragonsn/gv_game_engine
|
dca6c1fb1f8d96e9a244f157a63f8a69da084b0f
|
[
"MIT"
] | null | null | null |
__unit_tests/gv_base_unit_test/unit_test_log.hpp
|
dragonsn/gv_game_engine
|
dca6c1fb1f8d96e9a244f157a63f8a69da084b0f
|
[
"MIT"
] | null | null | null |
namespace unit_test_log
{
// This test is adapted from POCO.
static gv_log* s_log = NULL;
class MyRunnable : public gv_runnable
{
public:
MyRunnable()
: _ran(false)
{
}
void run()
{
GV_PROFILE_EVENT(MyRunnable, 0);
gv_thread* pThread = gv_thread::current();
if (pThread)
_threadName = pThread->get_name();
int i = 0;
while (1)
{
gv_string_tmp s;
s << gv_global::time->get_microsec_from_start() << " " << *_threadName << " " << i++ << " \r\n";
(*s_log) << s;
if (i % 100 == 0)
{
if (_event.try_wait(1))
return;
}
}
}
bool ran() const
{
return _ran;
}
const gv_id& threadName() const
{
return _threadName;
}
void notify()
{
_event.set();
}
static void staticFunc()
{
++_staticVar;
}
static int _staticVar;
private:
bool _ran;
gv_id _threadName;
gv_event _event;
};
int MyRunnable::_staticVar = 0;
void test4_threads()
{
sub_test_timer timer("multi_thread_log");
{
s_log = gv_global::log->create_log("test_log()", 65536 * 10);
gv_thread thread1(gv_id_one);
gv_thread thread2(gv_id_two);
gv_thread thread3(gv_id_three);
gv_thread thread4(gv_id_four);
MyRunnable r1;
MyRunnable r2;
MyRunnable r3;
MyRunnable r4;
thread1.start(&r1);
gv_thread::sleep(2);
thread2.start(&r2);
thread3.start(&r3);
thread4.start(&r4);
gv_thread::sleep(2000);
r4.notify();
thread4.join();
r3.notify();
gv_thread::sleep(200);
thread3.join();
r2.notify();
thread2.join();
r1.notify();
thread1.join();
gv_global::log->close_log(s_log);
}
gv_id::static_purge();
}
void main(gvt_array< gv_string >& args)
{
GVM_LOG(main, "first !!!!" << 12323 << "\r\n");
GVM_LOG(main, "first !!!!" << 12323 << "\r\n");
GVM_LOG(main, "first !!!!" << 12323 << "\r\n");
test4_threads();
}
}
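// ---------------------------------------------------------------------------
// Editor's sketch - not part of the engine source. The test above uses the
// engine's gv_event/gv_thread pair for cooperative shutdown: the worker polls
// the event every 100 iterations, and the owner signals it and then joins.
// The same pattern in standard C++ looks roughly like this; all names below
// are illustrative assumptions.
#include <atomic>
#include <cstdio>
#include <thread>

static void sketch_cooperative_shutdown()
{
    std::atomic<bool> stop{ false };
    std::thread worker([&stop]() {
        int i = 0;
        for (;;)
        {
            std::printf("tick %d\n", i++);
            if (i % 100 == 0 && stop.load()) // plays the role of _event.try_wait(1)
                return;
        }
    });
    stop.store(true); // plays the role of notify()
    worker.join();
}
// ---------------------------------------------------------------------------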
| 16.311927
| 100
| 0.625984
|
dragonsn
|
d50c799a9a01262db84728d004dc551dc514d47f
| 14,484
|
cpp
|
C++
|
sources/gst-plugins/gst-nvinfer/gstnvinfer_meta_utils.cpp
|
xiaomaozhou26/deepstream-4.0.1
|
45f54ebe1db2546bd924b71cd06de1d9ad22e151
|
[
"MIT"
] | 11
|
2020-01-20T02:08:57.000Z
|
2022-02-25T05:35:15.000Z
|
sources/gst-plugins/gst-nvinfer/gstnvinfer_meta_utils.cpp
|
prat96/Deepstream-4.0
|
dfb3d0b082a80f5fafe158d27289311f9ecd9b6f
|
[
"MIT"
] | 1
|
2020-04-06T13:06:34.000Z
|
2020-04-06T13:06:34.000Z
|
sources/gst-plugins/gst-nvinfer/gstnvinfer_meta_utils.cpp
|
prat96/Deepstream-4.0
|
dfb3d0b082a80f5fafe158d27289311f9ecd9b6f
|
[
"MIT"
] | 10
|
2020-02-01T06:44:33.000Z
|
2021-06-04T06:56:42.000Z
|
/**
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#include <cstring>
#include "gstnvinfer_meta_utils.h"
static inline int
get_element_size (NvDsInferDataType data_type)
{
switch (data_type) {
case FLOAT:
return 4;
case HALF:
return 2;
case INT32:
return 4;
case INT8:
return 1;
default:
return 0;
}
}
/**
* Attach metadata for the detector. A new object metadata is added for each detected object.
*/
void
attach_metadata_detector (GstNvInfer * nvinfer, GstMiniObject * tensor_out_object,
GstNvInferFrame & frame, NvDsInferDetectionOutput & detection_output)
{
static gchar font_name[] = "Serif";
NvDsObjectMeta *obj_meta = NULL;
NvDsObjectMeta *parent_obj_meta = frame.obj_meta; /* This will be NULL in case of primary detector */
NvDsFrameMeta *frame_meta = frame.frame_meta;
NvDsBatchMeta *batch_meta = frame_meta->base_meta.batch_meta;
nvds_acquire_meta_lock (batch_meta);
frame_meta->bInferDone = TRUE;
/* Iterate through the inference output for one frame and attach the detected
* bounding boxes. */
for (guint i = 0; i < detection_output.numObjects; i++) {
NvDsInferObject & obj = detection_output.objects[i];
GstNvInferDetectionFilterParams & filter_params =
(*nvinfer->perClassDetectionFilterParams)[obj.classIndex];
/* Scale the bounding boxes proportionally based on how the object/frame was
* scaled during input. */
obj.left /= frame.scale_ratio_x;
obj.top /= frame.scale_ratio_y;
obj.width /= frame.scale_ratio_x;
obj.height /= frame.scale_ratio_y;
/* Check if the scaled box co-ordinates meet the detection filter criteria.
* Skip the box if it does not. */
if (obj.width < filter_params.detectionMinWidth)
continue;
if (obj.height < filter_params.detectionMinHeight)
continue;
if (filter_params.detectionMaxWidth > 0 &&
obj.width > filter_params.detectionMaxWidth)
continue;
if (filter_params.detectionMaxHeight > 0 &&
obj.height > filter_params.detectionMaxHeight)
continue;
if (obj.top < filter_params.roiTopOffset)
continue;
if (obj.top + obj.height >
(frame.input_surf_params->height - filter_params.roiBottomOffset))
continue;
obj_meta = nvds_acquire_obj_meta_from_pool (batch_meta);
obj_meta->unique_component_id = nvinfer->unique_id;
obj_meta->confidence = 0.0;
/* This is an untracked object. Set tracking_id to -1. */
obj_meta->object_id = UNTRACKED_OBJECT_ID;
obj_meta->class_id = obj.classIndex;
NvOSD_RectParams & rect_params = obj_meta->rect_params;
NvOSD_TextParams & text_params = obj_meta->text_params;
/* Assign bounding box coordinates. */
rect_params.left = obj.left;
rect_params.top = obj.top;
rect_params.width = obj.width;
rect_params.height = obj.height;
if(!nvinfer->process_full_frame) {
rect_params.left += parent_obj_meta->rect_params.left;
rect_params.top += parent_obj_meta->rect_params.top;
}
/* Border of width 3. */
rect_params.border_width = 3;
if (obj.classIndex > (gint) nvinfer->perClassColorParams->size()) {
rect_params.has_bg_color = 0;
rect_params.border_color = (NvOSD_ColorParams) {1, 0, 0, 1};
} else {
GstNvInferColorParams &color_params =
(*nvinfer->perClassColorParams)[obj.classIndex];
rect_params.has_bg_color = color_params.have_bg_color;
rect_params.bg_color = color_params.bg_color;
rect_params.border_color = color_params.border_color;
}
if (obj.label)
strncpy (obj_meta->obj_label, obj.label, MAX_LABEL_SIZE);
/* display_text requires heap allocated memory. */
text_params.display_text = g_strdup (obj.label);
/* Display text above the left top corner of the object. */
text_params.x_offset = rect_params.left;
text_params.y_offset = rect_params.top - 10;
/* Set black background for the text. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = (NvOSD_ColorParams) {
0, 0, 0, 1};
/* Font face, size and color. */
text_params.font_params.font_name = font_name;
text_params.font_params.font_size = 11;
text_params.font_params.font_color = (NvOSD_ColorParams) {
1, 1, 1, 1};
nvds_add_obj_meta_to_frame (frame_meta, obj_meta, parent_obj_meta);
}
nvds_release_meta_lock (batch_meta);
}
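// ---------------------------------------------------------------------------
// Editor's sketch - not part of the DeepStream sources. It isolates the
// coordinate rescaling performed above: if a 1920x1080 frame was scaled to a
// 960x544 network input, scale_ratio_x = 960/1920 = 0.5 and
// scale_ratio_y = 544/1080 ~= 0.504, and dividing the network-space box by
// those ratios maps it back to frame coordinates. The struct and function
// names are illustrative assumptions.
struct SketchBox { float left, top, width, height; };

static SketchBox sketch_scale_back_to_frame (SketchBox b, float scale_ratio_x, float scale_ratio_y)
{
  b.left /= scale_ratio_x;   // e.g. 100 / 0.5   -> 200
  b.top /= scale_ratio_y;    // e.g. 50  / 0.504 -> ~99.2
  b.width /= scale_ratio_x;  // e.g. 60  / 0.5   -> 120
  b.height /= scale_ratio_y; // e.g. 40  / 0.504 -> ~79.4
  return b;
}
// ---------------------------------------------------------------------------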
/**
* Update the string label in an existing object metadata. If processing full
* frames, a new object metadata needs to be attached. Assumes only one label per object is generated.
*/
void
attach_metadata_classifier (GstNvInfer * nvinfer, GstMiniObject * tensor_out_object,
GstNvInferFrame & frame, GstNvInferObjectInfo & object_info)
{
NvDsObjectMeta *object_meta = frame.obj_meta;
NvDsBatchMeta *batch_meta = (nvinfer->process_full_frame) ?
frame.frame_meta->base_meta.batch_meta : object_meta->base_meta.batch_meta;
if (object_info.attributes.size () == 0 ||
object_info.label.length() == 0)
return;
nvds_acquire_meta_lock (batch_meta);
if (nvinfer->process_full_frame) {
/* Attach only one object in the meta since this is a full frame
* classification. */
object_meta = nvds_acquire_obj_meta_from_pool (batch_meta);
/* Font to be used for label text. */
static gchar font_name[] = "Serif";
NvOSD_RectParams & rect_params = object_meta->rect_params;
NvOSD_TextParams & text_params = object_meta->text_params;
//frame.object_meta = object_meta;
/* Assign bounding box coordinates. */
rect_params.left = 0;
rect_params.top = 0;
rect_params.width = frame.input_surf_params->width;
rect_params.height = frame.input_surf_params->height;
/* Semi-transparent yellow background. */
rect_params.has_bg_color = 0;
rect_params.bg_color = (NvOSD_ColorParams) {
1, 1, 0, 0.4};
/* Red border of width 6. */
rect_params.border_width = 6;
rect_params.border_color = (NvOSD_ColorParams) {
1, 0, 0, 1};
object_meta->object_id = UNTRACKED_OBJECT_ID;
object_meta->class_id = -1;
/* display_text requires heap allocated memory. Actual string formation
* is done later in the function. */
text_params.display_text = g_strdup ("");
/* Display text above the left top corner of the object. */
text_params.x_offset = rect_params.left;
text_params.y_offset = rect_params.top - 10;
/* Set black background for the text. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = (NvOSD_ColorParams) {
0, 0, 0, 1};
/* Font face, size and color. */
text_params.font_params.font_name = font_name;
text_params.font_params.font_size = 11;
text_params.font_params.font_color = (NvOSD_ColorParams) {
1, 1, 1, 1};
/* Attach the NvDsFrameMeta structure as NvDsMeta to the buffer. Pass the
* function to be called when freeing the meta_data. */
nvds_add_obj_meta_to_frame (frame.frame_meta, object_meta, NULL);
}
std::string string_label = object_info.label;
/* Fill the attribute info structure for the object. */
guint num_attrs = object_info.attributes.size ();
NvDsClassifierMeta *classifier_meta =
nvds_acquire_classifier_meta_from_pool (batch_meta);
classifier_meta->unique_component_id = nvinfer->unique_id;
for (unsigned int i = 0; i < num_attrs; i++) {
NvDsLabelInfo *label_info =
nvds_acquire_label_info_meta_from_pool (batch_meta);
NvDsInferAttribute &attr = object_info.attributes[i];
/* TODO: Check this for multilabel classifier <20-03-19, shaunakg> */
// label_info->result_class_id =
// attributes[i].attributeIndex; /* multilabel classifier, clr, make, model */
label_info->label_id = attr.attributeIndex;
label_info->result_class_id = attr.attributeValue;
label_info->result_prob = attr.attributeConfidence;
if (attr.attributeLabel) {
g_strlcpy (label_info->result_label, attr.attributeLabel, MAX_LABEL_SIZE);
if (object_info.label.length() == 0)
string_label.append (attr.attributeLabel).append(" ");
}
nvds_add_label_info_meta_to_classifier(classifier_meta, label_info);
}
if (string_label.length () > 0 && object_meta) {
gchar *temp = object_meta->text_params.display_text;
object_meta->text_params.display_text =
g_strconcat (temp, " ", string_label.c_str (), nullptr);
g_free (temp);
}
nvds_add_classifier_meta_to_object (object_meta, classifier_meta);
nvds_release_meta_lock (batch_meta);
}
/**
* Given an object's history, merge the new classification results with the
* previous cached results. This can be used to improve the results of
* classification when reinferencing over time. Currently, the function
* just uses the latest results.
*/
void
merge_classification_output (GstNvInferObjectHistory & history,
GstNvInferObjectInfo &new_result)
{
history.cached_info.attributes.assign (new_result.attributes.begin (),
new_result.attributes.end ());
history.cached_info.label.assign (new_result.label);
}
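// ---------------------------------------------------------------------------
// Editor's sketch - hypothetical, not DeepStream behavior. As the comment
// above notes, the shipped function simply keeps the latest results; one
// conceivable refinement when re-inferring over time would be to keep
// whichever attribute carries the higher confidence. The simplified type and
// helper below are illustrative assumptions only.
struct SketchAttribute { unsigned int label_id; unsigned int value; float confidence; };

static SketchAttribute sketch_merge_by_confidence (SketchAttribute const &cached, SketchAttribute const &fresh)
{
  return (fresh.confidence >= cached.confidence) ? fresh : cached;
}
// ---------------------------------------------------------------------------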
static void
release_segmentation_meta (gpointer data, gpointer user_data)
{
NvDsUserMeta *user_meta = (NvDsUserMeta *) data;
NvDsInferSegmentationMeta *meta = (NvDsInferSegmentationMeta *) user_meta->user_meta_data;
if (meta->priv_data) {
gst_mini_object_unref (GST_MINI_OBJECT (meta->priv_data));
} else {
g_free (meta->class_map);
g_free (meta->class_probabilities_map);
}
delete meta;
}
static gpointer
copy_segmentation_meta (gpointer data, gpointer user_data)
{
NvDsUserMeta *src_user_meta = (NvDsUserMeta *) data;
NvDsInferSegmentationMeta *src_meta = (NvDsInferSegmentationMeta *) src_user_meta->user_meta_data;
NvDsInferSegmentationMeta *meta = (NvDsInferSegmentationMeta *) g_malloc (sizeof (NvDsInferSegmentationMeta));
meta->classes = src_meta->classes;
meta->width = src_meta->width;
meta->height = src_meta->height;
meta->class_map = (gint *) g_memdup(src_meta->class_map, meta->width * meta->height * sizeof (gint));
meta->class_probabilities_map = (gfloat *) g_memdup(src_meta->class_probabilities_map, meta->classes * meta->width * meta->height * sizeof (gfloat));
meta->priv_data = NULL;
return meta;
}
void
attach_metadata_segmentation (GstNvInfer * nvinfer, GstMiniObject * tensor_out_object,
GstNvInferFrame & frame, NvDsInferSegmentationOutput & segmentation_output)
{
NvDsBatchMeta *batch_meta = (nvinfer->process_full_frame) ?
frame.frame_meta->base_meta.batch_meta : frame.obj_meta->base_meta.batch_meta;
NvDsUserMeta *user_meta = nvds_acquire_user_meta_from_pool (batch_meta);
NvDsInferSegmentationMeta *meta = (NvDsInferSegmentationMeta *) g_malloc (sizeof (NvDsInferSegmentationMeta));
meta->classes = segmentation_output.classes;
meta->width = segmentation_output.width;
meta->height = segmentation_output.height;
meta->class_map = segmentation_output.class_map;
meta->class_probabilities_map = segmentation_output.class_probability_map;
meta->priv_data = gst_mini_object_ref (tensor_out_object);
user_meta->user_meta_data = meta;
user_meta->base_meta.meta_type = (NvDsMetaType) NVDSINFER_SEGMENTATION_META;
user_meta->base_meta.release_func = release_segmentation_meta;
user_meta->base_meta.copy_func = copy_segmentation_meta;
if (nvinfer->process_full_frame) {
nvds_add_user_meta_to_frame (frame.frame_meta, user_meta);
} else {
nvds_add_user_meta_to_obj (frame.obj_meta, user_meta);
}
}
/* Called when NvDsUserMeta for each frame/object is released. Reduce the
* refcount of the mini_object by 1 and free other memory. */
static void
release_tensor_output_meta (gpointer data, gpointer user_data)
{
NvDsUserMeta *user_meta = (NvDsUserMeta *) data;
NvDsInferTensorMeta *meta = (NvDsInferTensorMeta *) user_meta->user_meta_data;
gst_mini_object_unref (GST_MINI_OBJECT (meta->priv_data));
delete meta->out_buf_ptrs_dev;
delete meta->out_buf_ptrs_host;
delete meta;
}
/* Attaches the raw tensor output to the GstBuffer as metadata. */
void
attach_tensor_output_meta (GstNvInfer *nvinfer, GstMiniObject * tensor_out_object,
GstNvInferBatch *batch, NvDsInferContextBatchOutput *batch_output)
{
NvDsBatchMeta *batch_meta = (nvinfer->process_full_frame) ?
batch->frames[0].frame_meta->base_meta.batch_meta :
batch->frames[0].obj_meta->base_meta.batch_meta;
/* Create and attach NvDsInferTensorMeta for each frame/object. Also
* increment the refcount of GstNvInferTensorOutputObject. */
for (size_t j = 0; j < batch->frames.size(); j++) {
GstNvInferFrame &frame = batch->frames[j];
NvDsInferTensorMeta *meta = new NvDsInferTensorMeta;
meta->unique_id = nvinfer->unique_id;
meta->num_output_layers = nvinfer->output_layers_info->size ();
meta->output_layers_info = nvinfer->output_layers_info->data ();
meta->out_buf_ptrs_host = new void *[meta->num_output_layers];
meta->out_buf_ptrs_dev = new void *[meta->num_output_layers];
meta->gpu_id = nvinfer->gpu_id;
meta->priv_data = gst_mini_object_ref (tensor_out_object);
for (unsigned int i = 0; i < meta->num_output_layers; i++) {
NvDsInferLayerInfo & info = meta->output_layers_info[i];
meta->out_buf_ptrs_dev[i] =
(uint8_t *) batch_output->outputDeviceBuffers[i] +
info.dims.numElements * get_element_size (info.dataType) * j;
meta->out_buf_ptrs_host[i] =
(uint8_t *) batch_output->hostBuffers[info.bindingIndex] +
info.dims.numElements * get_element_size (info.dataType) * j;
}
NvDsUserMeta *user_meta = nvds_acquire_user_meta_from_pool (batch_meta);
user_meta->user_meta_data = meta;
user_meta->base_meta.meta_type =
(NvDsMetaType) NVDSINFER_TENSOR_OUTPUT_META;
user_meta->base_meta.release_func = release_tensor_output_meta;
user_meta->base_meta.copy_func = nullptr;
user_meta->base_meta.batch_meta = batch_meta;
if (nvinfer->process_full_frame) {
nvds_add_user_meta_to_frame (frame.frame_meta, user_meta);
} else {
nvds_add_user_meta_to_obj (frame.obj_meta, user_meta);
}
}
}
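// ---------------------------------------------------------------------------
// Editor's sketch - not part of the DeepStream sources. It isolates the
// pointer arithmetic above that selects frame j's slice of a batched output
// buffer: for a layer with 1000 FLOAT (4-byte) elements and batch index j = 2,
// the slice starts 1000 * 4 * 2 = 8000 bytes into the buffer. The function and
// parameter names are illustrative assumptions.
#include <cstddef>
#include <cstdint>

static void * sketch_frame_slice (void *batched_buffer, std::size_t num_elements,
    std::size_t element_size, std::size_t frame_index)
{
  return static_cast<std::uint8_t *> (batched_buffer) +
      num_elements * element_size * frame_index;
}
// ---------------------------------------------------------------------------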
| 37.91623
| 151
| 0.727285
|
xiaomaozhou26
|
d50ca4db3b173611b34e96d1284ad5266e1af917
| 1,520
|
hpp
|
C++
|
laser_scans_fusion/include/laser_scans_fusion/scan_fusion.hpp
|
Robotion-AI/Autobotware
|
9930cff624c250eaf044e7dfa93587e7e3be89e3
|
[
"Apache-2.0"
] | 17
|
2021-05-11T02:58:37.000Z
|
2021-07-13T09:14:04.000Z
|
laser_scans_fusion/include/laser_scans_fusion/scan_fusion.hpp
|
Robotion-AI/ModuLiDAR
|
9930cff624c250eaf044e7dfa93587e7e3be89e3
|
[
"Apache-2.0"
] | 1
|
2021-05-12T04:12:35.000Z
|
2021-05-18T12:32:06.000Z
|
laser_scans_fusion/include/laser_scans_fusion/scan_fusion.hpp
|
Robotion-AI/ModuLiDAR
|
9930cff624c250eaf044e7dfa93587e7e3be89e3
|
[
"Apache-2.0"
] | 3
|
2021-05-12T02:51:44.000Z
|
2021-06-05T02:22:37.000Z
|
/*
* Desc: Laser Scan fusion.
* Author: khaled elmadawi
* mail: khalid.elmadawi@robotion.ai
* Date: 30 march 2021
*
* Copyright 2020 Robotion-AI. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef Scan_FUSION_HH
#define Scan_FUSION_HH
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_types.h>
#include <pcl/PCLPointCloud2.h>
#include <pcl/conversions.h>
#include <pcl_ros/transforms.h>
#include <pcl/point_cloud.h>
#include <pcl_ros/point_cloud.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>
#include "sensor_msgs/LaserScan.h"
#include "ros/ros.h"
#include "dataContainers.hpp"
class scan_fusion
{
public:
scan_fusion();
~scan_fusion();
void scans_accumulator(std::vector<scan> &fused_ls,
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud, int cloud_offset,
tf::StampedTransform transform);
sensor_msgs::LaserScan fuse_scans(std::vector<scan> fused_ls, fusion_config config);
};
#endif
| 33.043478
| 88
| 0.724342
|
Robotion-AI
|
d51297e81dc022998584a27fd03b693c79db36a1
| 6,972
|
cpp
|
C++
|
src/servicelistdb.cpp
|
Penny-Admixture/smileyCoin
|
2dec63a08984a1fbd6fbdbb82d79a4483f019261
|
[
"MIT"
] | 2
|
2021-06-12T07:38:49.000Z
|
2021-06-16T00:03:28.000Z
|
src/servicelistdb.cpp
|
Penny-Admixture/smileyCoin
|
2dec63a08984a1fbd6fbdbb82d79a4483f019261
|
[
"MIT"
] | null | null | null |
src/servicelistdb.cpp
|
Penny-Admixture/smileyCoin
|
2dec63a08984a1fbd6fbdbb82d79a4483f019261
|
[
"MIT"
] | null | null | null |
//
// Created by Lenovo on 4/24/2020.
//
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "servicelistdb.h"
#include "base58.h"
#include "net.h"
#include "netbase.h"
#include "rpcserver.h"
#include "util.h"
#include <vector>
#include "init.h"
#include "main.h"
#include "sync.h"
#include "wallet.h"
#include "serviceitemlistdb.h"
bool CServiceList::SetForked(const bool &fFork)
{
fForked = fFork;
return true;
}
bool CServiceList::UpdateServiceInfo(const std::map<std::string, std::tuple<std::string, std::string, std::string> > &map)
{
for(std::map<std::string, std::tuple<std::string, std::string, std::string> >::const_iterator it = map.begin(); it!= map.end(); it++)
{
if (get<0>(it->second) == "DS") { // If op_return begins with DS (delete service)
mapServiceList::iterator itService = saddresses.find(it->first);
// If key is found in service list
if (itService != saddresses.end()) {
saddresses.erase(itService);
}
} else if (get<0>(it->second) == "NS") { // If op_return begins with NS (new service)
saddresses.insert(*it);
}
}
return true;
}
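// ---------------------------------------------------------------------------
// Editor's sketch - not part of the wallet sources. Illustrative use of
// UpdateServiceInfo(): an "NS" entry inserts a service under its address key,
// and a later "DS" entry for the same address removes it again. The address
// string and field values are made-up placeholders, and <map>, <string> and
// <tuple> are assumed to be available through the existing includes.
static void SketchUpdateServices(CServiceList &list)
{
    std::map<std::string, std::tuple<std::string, std::string, std::string> > updates;

    // New service: op_return marker "NS", service name, service type "1" (Ticket Sales)
    updates["ExampleServiceAddress"] = std::make_tuple("NS", "Example service", "1");
    list.UpdateServiceInfo(updates);

    // Delete the same service: op_return marker "DS"
    updates.clear();
    updates["ExampleServiceAddress"] = std::make_tuple("DS", "", "");
    list.UpdateServiceInfo(updates);
}
// ---------------------------------------------------------------------------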
// The heights need to be rolled back before new blocks are connected if any were disconnected.
// TODO: We should try to get rid of this and write the height undo information to the disk instead.
bool CServiceList::UpdateServiceAddressHeights()
{
if(!fForked)
return true;
CBlockIndex* pindexSeek = mapBlockIndex.find(pcoinsTip->GetBestBlock())->second;
if(!chainActive.Contains(pindexSeek)) {
return false;
}
CBlock block;
std::map<std::string, std::tuple<std::string, std::string, std::string> > serviceInfo;
mapServiceList mforkedAddresses;
for(mapServiceList::const_iterator it = saddresses.begin(); it!=saddresses.end(); it++)
{
if (get<0>(it->second) == "DS") { // If op_return begins with DS
mapServiceList::iterator itService = saddresses.find(it->first);
// If key is found in service list
if (itService != saddresses.end()) {
saddresses.erase(itService);
}
} else if (get<0>(it->second) == "NS") { // If op_return begins with NS
saddresses.insert(*it);
}
}
if(fDebug) {
LogPrintf("%d addresses seen at fork and need to be relocated\n", mforkedAddresses.size());
}
bool ret;
typedef std::pair<std::string, std::tuple<std::string, std::string, std::string> > pairType;
BOOST_FOREACH(const pairType &pair, serviceInfo) {
ret = pcoinsTip->SetServiceInfo(pair.first, pair.second);
assert(ret);
}
if(!UpdateServiceInfo(serviceInfo))
return false;
return mforkedAddresses.empty();
}
bool CServiceList::GetServiceAddresses(std::multiset<std::pair<std::string, std::tuple<std::string, std::string, std::string>>> &retset) const {
for(std::map<std::string, std::tuple<std::string, std::string, std::string> >::const_iterator it=saddresses.begin(); it!=saddresses.end(); it++)
{
std::string displayType;
if (get<2>(it->second) == "1") {
displayType = "Ticket Sales";
} else if (get<2>(it->second) == "2") {
displayType = "UBI";
} else if (get<2>(it->second) == "3") {
displayType = "Book Chapter";
} else if (get<2>(it->second) == "4") {
displayType = "Traceability";
} else if (get<2>(it->second) == "5") {
displayType = "Nonprofit Organization";
} else if (get<2>(it->second) == "6") {
displayType = "DEX";
} else if (get<2>(it->second) == "7") {
displayType = "Survey";
} else if (get<2>(it->second) == "8") {
displayType = "Non-profit Group";
} else {
displayType = get<2>(it->second);
}
retset.insert(std::make_pair(it->first, std::make_tuple(get<0>(it->second), get<1>(it->second), displayType)));
}
return true;
}
bool CServiceList::GetMyServiceAddresses(std::multiset<std::pair<std::string, std::tuple<std::string, std::string, std::string>>> &retset) const {
for (std::map<std::string, std::tuple<std::string, std::string, std::string> >::const_iterator it = saddresses.begin();it != saddresses.end(); it++)
{
std::string displayType;
if (IsMine(*pwalletMain, CBitcoinAddress(it->first).Get())) {
if (get<2>(it->second) == "1") {
displayType = "Ticket Sales";
} else if (get<2>(it->second) == "2") {
displayType = "UBI";
} else if (get<2>(it->second) == "3") {
displayType = "Book Chapter";
} else if (get<2>(it->second) == "4") {
displayType = "Traceability";
} else if (get<2>(it->second) == "5") {
displayType = "Nonprofit Organization";
} else if (get<2>(it->second) == "6") {
displayType = "DEX";
} else if (get<2>(it->second) == "7") {
displayType = "Survey";
} else if (get<2>(it->second) == "8") {
displayType = "Non-profit Group";
} else {
displayType = get<2>(it->second);
}
retset.insert(std::make_pair(it->first, std::make_tuple(get<0>(it->second), get<1>(it->second), displayType)));
}
}
return true;
}
bool CServiceList::IsService(std::string address) {
for (std::map<std::string, std::tuple<std::string, std::string, std::string> >::const_iterator it = saddresses.begin();it != saddresses.end(); it++)
{
// If address found on service list
if (address == it->first) {
return true;
}
}
return false;
}
bool CServiceList::GetTickets(std::string serviceAddress, std::multiset<std::pair<std::string, std::tuple<std::string, std::string, std::string, std::string, std::string, std::string>>> &retset) const {
std::multiset<std::pair<std::string, std::tuple<std::string, std::string, std::string, std::string, std::string, std::string > > > taddresses;
ServiceItemList.GetTicketList(taddresses);
for(std::set< std::pair< std::string, std::tuple<std::string, std::string, std::string, std::string, std::string, std::string> > >::const_iterator it = taddresses.begin(); it!=taddresses.end(); it++ )
{
// If service address matches ticket service address
if (serviceAddress == get<1>(it->second)) {
retset.insert(std::make_pair(it->first, std::make_tuple(get<0>(it->second), get<1>(it->second), get<2>(it->second), get<3>(it->second), get<4>(it->second), get<5>(it->second))));
}
}
return true;
}
| 39.84
| 204
| 0.588784
|
Penny-Admixture
|
d5130d8983461ef7a544676e181b5c707a63a4c4
| 1,831
|
hpp
|
C++
|
libs/boost_1_72_0/boost/fusion/container/deque/front_extended_deque.hpp
|
henrywarhurst/matrix
|
317a2a7c35c1c7e3730986668ad2270dc19809ef
|
[
"BSD-3-Clause"
] | null | null | null |
libs/boost_1_72_0/boost/fusion/container/deque/front_extended_deque.hpp
|
henrywarhurst/matrix
|
317a2a7c35c1c7e3730986668ad2270dc19809ef
|
[
"BSD-3-Clause"
] | null | null | null |
libs/boost_1_72_0/boost/fusion/container/deque/front_extended_deque.hpp
|
henrywarhurst/matrix
|
317a2a7c35c1c7e3730986668ad2270dc19809ef
|
[
"BSD-3-Clause"
] | null | null | null |
/*=============================================================================
Copyright (c) 2005-2012 Joel de Guzman
Copyright (c) 2005-2006 Dan Marsden
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(BOOST_FUSION_FRONT_EXTENDED_DEQUE_26112006_2209)
#define BOOST_FUSION_FRONT_EXTENDED_DEQUE_26112006_2209
#include <boost/fusion/container/deque/detail/keyed_element.hpp>
#include <boost/fusion/sequence/intrinsic/size.hpp>
#include <boost/fusion/support/config.hpp>
#include <boost/fusion/support/sequence_base.hpp>
#include <boost/mpl/int.hpp>
namespace boost {
namespace fusion {
template <typename Deque, typename T>
struct front_extended_deque
: detail::keyed_element<typename Deque::next_down, T, Deque>,
sequence_base<front_extended_deque<Deque, T>> {
typedef detail::keyed_element<typename Deque::next_down, T, Deque> base;
typedef mpl::int_<(Deque::next_down::value - 1)> next_down;
typedef typename Deque::next_up next_up;
typedef mpl::int_<(result_of::size<Deque>::value + 1)> size;
template <typename Arg>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
front_extended_deque(Deque const &deque, Arg const &val)
: base(val, deque) {}
#if defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
template <typename Arg>
BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED
front_extended_deque(Deque const &deque, Arg &val)
: base(val, deque) {}
#else
template <typename Arg>
BOOST_CXX14_CONSTEXPR BOOST_FUSION_GPU_ENABLED
front_extended_deque(Deque const &deque, Arg &&val)
: base(BOOST_FUSION_FWD_ELEM(Arg, val), deque) {}
#endif
};
} // namespace fusion
} // namespace boost
#endif
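// ---------------------------------------------------------------------------
// Editor's sketch - not part of Boost. A minimal illustration of how the class
// above behaves, assuming the usual Boost.Fusion headers are available: it
// wraps an existing fusion deque and exposes one extra element at the front,
// so front() yields the new element and size() grows by one.
#include <boost/fusion/container/deque.hpp>
#include <boost/fusion/container/deque/front_extended_deque.hpp>
#include <boost/fusion/sequence/intrinsic/front.hpp>
#include <boost/fusion/sequence/intrinsic/size.hpp>

inline void front_extended_deque_sketch()
{
    namespace fusion = boost::fusion;
    fusion::deque<int, char> base(1, 'a');
    fusion::front_extended_deque<fusion::deque<int, char>, double> extended(base, 2.5);
    int const n = fusion::size(extended);     // 3: one element added at the front
    double const d = fusion::front(extended); // 2.5
    (void) n; (void) d;
}
// ---------------------------------------------------------------------------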
| 37.367347
| 80
| 0.697433
|
henrywarhurst
|
d514128d167743bf11b66fc8cfad143f3ded9a35
| 1,167
|
cpp
|
C++
|
qt/ApplicationArchitecture/CustomConfig_v1/abackend.cpp
|
ArboreusSystems/arboreus_examples
|
17d39e18f4b2511c19f97d4e6c07ec9d7087fae8
|
[
"BSD-3-Clause"
] | 17
|
2019-02-19T21:29:22.000Z
|
2022-01-29T11:03:45.000Z
|
qt/ApplicationArchitecture/CustomConfig_v1/abackend.cpp
|
MbohBless/arboreus_examples
|
97f0e25182bbc4b5ffab37c6157514332002aeee
|
[
"BSD-3-Clause"
] | null | null | null |
qt/ApplicationArchitecture/CustomConfig_v1/abackend.cpp
|
MbohBless/arboreus_examples
|
97f0e25182bbc4b5ffab37c6157514332002aeee
|
[
"BSD-3-Clause"
] | 9
|
2021-02-21T05:32:23.000Z
|
2022-02-26T07:51:52.000Z
|
// ----------------------------------------------------------
/*!
\class ABackend
\title
\brief Template file classes/cpp/file.cpp
\list
\li @notice Template file classes/file.h
\li @copyright Arboreus (http://arboreus.systems)
\li @author Alexandr Kirilov (http://alexandr.kirilov.me)
\li @created 05/04/2021 at 20:19:51
\endlist
*/
// ----------------------------------------------------------
// Class header
#include "abackend.h"
// -----------
/*!
\fn
Doc.
*/
ABackend::ABackend(QObject *parent) : QObject(parent) {
ALOG << "ABackend created";
}
// -----------
/*!
\fn
Doc.
*/
ABackend::~ABackend(void) {
ALOG << "ABackend deleted";
}
// -----------
/*!
\fn
Doc.
*/
ABackend &ABackend::mInstance() {
static ABackend oInstance;
return oInstance;
}
// -----------
/*!
\fn
Doc.
*/
void ABackend::mInit(
QGuiApplication* inGuiApplication,
QQmlApplicationEngine* inEngine,
QQmlContext* inRootContext,
QObject* inConfig
) {
pGuiApplication = inGuiApplication;
pEngine = inEngine;
pRootContext = inRootContext;
pConfig = inConfig;
pLogger = new ALogger(pEngine);
pLogger->mInit();
ALOG << "ABackend initiated";
}
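// ----------------------------------------------------------
// Editor's sketch - not part of the example project. Illustrative wiring of
// the singleton above from an application's entry point; the function and
// argument names are assumptions. mInstance() returns a function-local static,
// so every caller after the first shares the same ABackend object.
static void SketchWireBackend(
    QGuiApplication* inApplication,
    QQmlApplicationEngine* inEngine,
    QObject* inConfig
) {
    ABackend::mInstance().mInit(inApplication, inEngine, inEngine->rootContext(), inConfig);
}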
| 13.892857
| 61
| 0.574979
|
ArboreusSystems
|
d514173b88292108380f88cb866c3565c034228b
| 232
|
cxx
|
C++
|
Modules/ThirdParty/VNL/src/vxl/core/vnl/xio/tests/test_template_include.cxx
|
rdsouza10/ITK
|
07cb23f9866768b5f4ee48ebec8766b6e19efc69
|
[
"Apache-2.0"
] | 3
|
2019-11-19T09:47:25.000Z
|
2022-02-24T00:32:31.000Z
|
Modules/ThirdParty/VNL/src/vxl/core/vnl/xio/tests/test_template_include.cxx
|
rdsouza10/ITK
|
07cb23f9866768b5f4ee48ebec8766b6e19efc69
|
[
"Apache-2.0"
] | 1
|
2019-03-18T14:19:49.000Z
|
2020-01-11T13:54:33.000Z
|
Modules/ThirdParty/VNL/src/vxl/core/vnl/xio/tests/test_template_include.cxx
|
rdsouza10/ITK
|
07cb23f9866768b5f4ee48ebec8766b6e19efc69
|
[
"Apache-2.0"
] | 1
|
2022-02-24T00:32:36.000Z
|
2022-02-24T00:32:36.000Z
|
#include <vnl/xio/vnl_xio_matrix.hxx>
#include <vnl/xio/vnl_xio_matrix_fixed.hxx>
#include <vnl/xio/vnl_xio_quaternion.hxx>
#include <vnl/xio/vnl_xio_vector.hxx>
#include <vnl/xio/vnl_xio_vector_fixed.hxx>
int main() { return 0; }
| 29
| 43
| 0.775862
|
rdsouza10
|
d5186d74d92fb642c60cf33179c09de641e142d5
| 15,872
|
cpp
|
C++
|
src/c_glfont.cpp
|
deadline-cxn/dlstorm
|
4dec81a71af5f4bbf7daf227c5fd662b9c613e9c
|
[
"BSD-3-Clause"
] | null | null | null |
src/c_glfont.cpp
|
deadline-cxn/dlstorm
|
4dec81a71af5f4bbf7daf227c5fd662b9c613e9c
|
[
"BSD-3-Clause"
] | null | null | null |
src/c_glfont.cpp
|
deadline-cxn/dlstorm
|
4dec81a71af5f4bbf7daf227c5fd662b9c613e9c
|
[
"BSD-3-Clause"
] | null | null | null |
/***************************************************************
** DLSTORM Deadline's Code Storm Library
** /\
** ---- D/L \----
** \/
** License: BSD
** Copyright: 2020
** File: c_glfont.cpp
** Class: CGLFont
** Description: Fonts for OpenGL
** Author: Seth Parson
** Twitter: @Sethcoder
** Website: www.sethcoder.com
** Email: defectiveseth@gmail.com
***************************************************************/
#include "c_glfont.h"
/////////////////////////////////////// Utility function
extern "C" int CGLFont_StrLen(const char *string) {
int i, j = 0;
// char ch[2];
for (i = 0; i < (int)strlen(string); i++) {
if (string[i] == '^') {
i++;
if (string[i] == 0) return j;
if (string[i] == '^') {
j++;
// ch[0] = string[i];
// ch[1] = 0;
}
if (string[i] == '>') {
i += 6;
}
} else {
j++;
// ch[0] = string[i];
// ch[1] = 0;
}
}
return j;
}
/////////////////////////////////////// CGLFont class
// CGLFont::CGLFont() { Init(); }
// CGLFont::CGLFont(CLog *pInLog) { Init(); pLog = pInLog; }
// CGLFont::CGLFont(CGAF *pInGAF, CLog *pInLog) { Init(); pGAF = pInGAF; pLog = pInLog; }
// CGLFont::CGLFont(char *fn) { Init(); Load(fn); }
CGLFont::CGLFont(C_GFX *pInGFX, CGAF *pInGAF, CLog *pInLog) {
Init();
pGFX = pInGFX;
pGAF = pInGAF;
pLog = pInLog;
}
CGLFont::~CGLFont() { Kill(); }
void CGLFont::Init() {
memset(szFilename, 0, _FILENAME_SIZE);
pGFX = 0;
pGAF = 0;
pLog = 0;
pNext = 0;
pFontTex = 0;
r = 120;
g = 120;
b = 120;
width = 8.5f;
height = 8.5f;
}
GLuint CGLFont::Load(const char *szInFilename) { // Build Our Font Display List
float cx, cy;
int loop;
strcpy(szFilename, szInFilename);
pFontTex = new CGLTexture(pGFX, pGAF, pLog, szFilename);
if (!pFontTex->glBmap) dlcsm_delete(pFontTex);
if (!pFontTex) return 0;
pFontList = glGenLists(256); // Creating 256 Display Lists
glBindTexture(GL_TEXTURE_2D, pFontTex->glBmap); // Select Our Font Texture
for (loop = 0; loop < 256; loop++) { // Loop Through All 256 Lists
cx = float(loop % 16) / 16.0f; // X Position Of Current Character
cy = float(loop / 16) / 16.0f; // Y Position Of Current Character
glNewList(pFontList + loop, GL_COMPILE); // Start Building A List
glBegin(GL_QUADS); // Use A Quad For Each Character
glTexCoord2f(cx, 1 - cy - 0.0625f); // Texture Coord (Bottom Left)
glVertex2i(0, 0); // Vertex Coord (Bottom Left)
glTexCoord2f(cx + 0.0625f, 1 - cy - 0.0625f); // Texture Coord (Bottom Right)
glVertex2i(16, 0); // Vertex Coord (Bottom Right)
glTexCoord2f(cx + 0.0625f, 1 - cy); // Texture Coord (Top Right)
glVertex2i(16, 16); // Vertex Coord (Top Right)
glTexCoord2f(cx, 1 - cy); // Texture Coord (Top Left)
glVertex2i(0, 16); // Vertex Coord (Top Left)
glEnd(); // Done Building Our Quad (Character)
glTranslated(16, 0, 0); // Move To The Right Of The Character
glEndList(); // Done Building The Display List
} // Loop Until All 256 Are Built
return pFontTex->glBmap;
}
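// Editor's sketch - not part of the original source. It isolates the
// texture-coordinate math used in Load() above: the font image is a 16x16 grid
// of glyph cells, so each cell covers 1/16 = 0.0625 of the texture per axis,
// and character code 65 ('A') sits in column 65 % 16 = 1 and row 65 / 16 = 4.
// The struct and function names are illustrative assumptions.
struct SketchGlyphCell { float cx, cy, size; };

static SketchGlyphCell SketchGlyphCellFor(int characterCode) {
    SketchGlyphCell cell;
    cell.cx = float(characterCode % 16) / 16.0f; // 'A' -> 0.0625
    cell.cy = float(characterCode / 16) / 16.0f; // 'A' -> 0.25
    cell.size = 0.0625f;                         // one cell = 1/16 of the atlas on each axis
    return cell;
}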
GLvoid CGLFont::Kill() {
if (pFontList) {
glDeleteLists(pFontList, 256);
pFontList = 0;
}
dlcsm_delete(pFontTex);
}
GLvoid CGLFont::SinPrint(GLint x, GLint y, const char *string, int set, u_char r, u_char g, u_char b) {}
GLvoid CGLFont::PrintSolid(GLint x, GLint y, const char *string, u_char nr, u_char ng, u_char nb) {
int i, j = 0;
char ch[2];
for (i = 0; i < (int)strlen(string); i++) {
if (string[i] == '^') {
i++;
if (string[i] == 0) return;
if (string[i] == '^') {
j++;
ch[0] = string[i];
ch[1] = 0;
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, 1, nr, ng, nb);
}
if (string[i] == '>') {
i += 6;
}
} else {
j++;
ch[0] = string[i];
ch[1] = 0;
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, 1, nr, ng, nb);
}
}
}
GLvoid CGLFont::Print(GLint x, GLint y, const char *string, int set) {
int i, j;
char ch[3]; // large enough for the two-character hex pairs parsed below plus a terminator
j = 0;
u_char nr, ng, nb;
bool shadowed = 0;
bool bolded = 0;
nr = r;
ng = g;
nb = b;
for (i = 0; i < (int)strlen(string); i++) {
if (string[i] == '^') {
i++;
switch (string[i]) {
case 0:
case '\n': return;
case '0':
nr = 0;
ng = 0;
nb = 0;
break;
case '1':
nr = 255;
ng = 255;
nb = 255;
break;
case '2':
nr = 0;
ng = 0;
nb = 255;
break;
case '3':
nr = 0;
ng = 255;
nb = 0;
break;
case '4':
nr = 255;
ng = 0;
nb = 0;
break;
case '5':
nr = 0;
ng = 255;
nb = 255;
break;
case '6':
nr = 255;
ng = 255;
nb = 0;
break;
case '7':
nr = 255;
ng = 0;
nb = 255;
break;
case '8':
nr = 100;
ng = 0;
nb = 255;
break;
case '9':
nr = 100;
ng = 255;
nb = 0;
break;
case 'A':
case 'a':
nr = 255;
ng = 0;
nb = 100;
break;
case 'B':
case 'b':
nr = 100;
ng = 255;
nb = 100;
break;
case 'C':
case 'c':
nr = 200;
ng = 200;
nb = 200;
break;
case 'D':
case 'd':
nr = 150;
ng = 150;
nb = 150;
break;
case 'E':
case 'e':
nr = 100;
ng = 100;
nb = 100;
break;
case 'F':
case 'f':
nr = 50;
ng = 50;
nb = 50;
break;
case 'G':
case 'g':
nr = 50;
ng = 0;
nb = 0;
break;
case 'H':
case 'h':
nr = 0;
ng = 50;
nb = 0;
break;
case 'I':
case 'i':
nr = 0;
ng = 0;
nb = 50;
break;
case 'J':
case 'j':
nr = 50;
ng = 0;
nb = 50;
break;
case 'K':
case 'k':
nr = 50;
ng = 50;
nb = 0;
break;
case 'L':
case 'l':
nr = 0;
ng = 50;
nb = 50;
break;
case '>':
// get the 6 character hex code for the colors
ch[0] = string[i + 1];
ch[1] = string[i + 2];
ch[2] = 0;
nr = dlcs_hex_to_dec(ch);
ch[0] = string[i + 3];
ch[1] = string[i + 4];
ch[2] = 0;
ng = dlcs_hex_to_dec(ch);
ch[0] = string[i + 5];
ch[1] = string[i + 6];
ch[2] = 0;
nb = dlcs_hex_to_dec(ch);
i = i + 6;
break;
case '^':
j++;
ch[0] = string[i];
ch[1] = 0;
if (bolded) BoldPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, 0, 0, 0);
if (shadowed) {
RawPrint((GLint)(x + (j * (width)) + 1), (GLint)y + 1, ch, set, 0, 0, 0);
RawPrint((GLint)(x + (j * (width)) + 2), (GLint)y + 2, ch, set, 0, 0, 0);
}
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, nr, ng, nb);
break;
case '+':
i++;
j++;
if (!shadowed)
shadowed = true;
else
shadowed = false;
ch[0] = string[i];
ch[1] = 0;
if (ch[0] != '^') {
if (bolded) BoldPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, 0, 0, 0);
if (shadowed) {
RawPrint((GLint)(x + (j * (width)) + 1), (GLint)y + 1, ch, set, 0, 0, 0);
RawPrint((GLint)(x + (j * (width)) + 2), (GLint)y + 2, ch, set, 0, 0, 0);
}
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, nr, ng, nb);
} else {
i--;
j--;
}
break;
case '&':
i++;
j++;
if (!bolded)
bolded = true;
else
bolded = false;
ch[0] = string[i];
ch[1] = 0;
if (ch[0] != '^') {
if (bolded) BoldPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, 0, 0, 0);
if (shadowed) {
RawPrint((GLint)(x + (j * (width)) + 1), (GLint)y + 1, ch, set, 0, 0, 0);
RawPrint((GLint)(x + (j * (width)) + 2), (GLint)y + 2, ch, set, 0, 0, 0);
}
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, nr, ng, nb);
} else {
i--;
j--;
}
break;
default:
nr = 255;
ng = 255;
nb = 0;
break;
}
} else {
j++;
ch[0] = string[i];
ch[1] = 0;
if (bolded) BoldPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, 0, 0, 0);
if (shadowed) RawPrint((GLint)(x + (j * (width)) + 2), (GLint)y + 2, ch, set, 0, 0, 0);
RawPrint((GLint)(x + (j * (width))), (GLint)y, ch, set, nr, ng, nb);
}
}
}
GLvoid CGLFont::Stuff(GLenum target, GLint x, GLint y, const char *string, int set, u_char r, u_char g, u_char b) {
GLint tx, ty, sx, sy, sw, sh;
tx = 10;
ty = 10;
sx = 32;
sy = 32;
sw = 32;
sh = 32;
glColor3ub(r, g, b);
glBlendFunc(GL_ONE, GL_ONE);
glEnable(GL_BLEND);
glBindTexture(GL_TEXTURE_2D, pFontTex->glBmap); // Select Our Font Texture
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glTranslated(x, y, 0);
glCopyTexSubImage2D(target, 0, tx, ty, sx, sy, sw, sh);
glMatrixMode(GL_PROJECTION); // Select The Projection Matrix
glPopMatrix(); // Restore The Old Projection Matrix
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glPopMatrix(); // Restore The Old Projection Matrix
glEnable(GL_DEPTH_TEST); // Enables Depth Testing
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
glPopMatrix();
}
GLvoid CGLFont::RawPrint(GLint x, GLint y, const char *string, int wset, u_char r, u_char g, u_char b) {
if (!pFontTex) return;
if (!pFontTex->glBmap) {
pFontTex->LoadGL(va("%s.png", szFilename));
}
y = (-y) + (SDL_GetWindowSurface(pGFX->pWindow)->h) - 16;
if (wset < 2) {
glDisable(GL_DEPTH_TEST); // Disables Depth Testing
glMatrixMode(GL_PROJECTION); // Select The Projection Matrix
glPushMatrix(); // Store The Projection Matrix
glLoadIdentity(); // Reset The Projection Matrix
gluOrtho2D(0, SDL_GetWindowSurface(pGFX->pWindow)->w, 0, SDL_GetWindowSurface(pGFX->pWindow)->h); // Set Up An Ortho Screen
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glPushMatrix(); // Store The Modelview Matrix
glColor3ub(r, g, b);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
glBindTexture(GL_TEXTURE_2D, pFontTex->glBmap); // Select Our Font Texture
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glTranslated(x, y, 0);
glListBase(pFontList - 32 + (128 * wset)); // Choose The Font Set (0 or 1)
glCallLists(strlen(string), GL_BYTE, string); // Write The Text To The Screen
glMatrixMode(GL_PROJECTION); // Select The Projection Matrix
glPopMatrix(); // Restore The Old Projection Matrix
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glPopMatrix(); // Restore The Old Projection Matrix
glEnable(GL_DEPTH_TEST);
}
}
GLvoid CGLFont::BoldPrint(GLint x, GLint y, const char *string, int wset, u_char r, u_char g, u_char b) {
RawPrint(x - 1, y - 1, string, wset, r, g, b);
RawPrint(x - 1, y, string, wset, r, g, b);
RawPrint(x - 1, y + 1, string, wset, r, g, b);
RawPrint(x, y - 1, string, wset, r, g, b);
RawPrint(x, y + 1, string, wset, r, g, b);
RawPrint(x + 1, y - 1, string, wset, r, g, b);
RawPrint(x + 1, y, string, wset, r, g, b);
RawPrint(x + 1, y + 1, string, wset, r, g, b);
}
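// Editor's sketch - not part of the original source. Print() above consumes a
// "^>RRGGBB" escape by feeding two-character hex pairs to dlcs_hex_to_dec; the
// same parsing with only standard-library calls looks roughly like this.
// Assumes the caller has already verified that six hex digits follow the
// escape; the function name is an illustrative assumption.
#include <cstdlib>

static void SketchParseColorEscape(const char *sixHexDigits, u_char &r, u_char &g, u_char &b) {
    char pair[3] = { 0, 0, 0 };
    pair[0] = sixHexDigits[0]; pair[1] = sixHexDigits[1];
    r = (u_char)strtol(pair, NULL, 16);
    pair[0] = sixHexDigits[2]; pair[1] = sixHexDigits[3];
    g = (u_char)strtol(pair, NULL, 16);
    pair[0] = sixHexDigits[4]; pair[1] = sixHexDigits[5];
    b = (u_char)strtol(pair, NULL, 16);
}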
| 37.17096
| 138
| 0.361706
|
deadline-cxn
|
d51a2adcadac7980566c9872d7d6fdd2f3ec196d
| 23,096
|
cpp
|
C++
|
gra.cpp
|
TheDarkPhoenix/2DRacing
|
249fd5fa3533f578557bca318b4577fca406f826
|
[
"MIT"
] | null | null | null |
gra.cpp
|
TheDarkPhoenix/2DRacing
|
249fd5fa3533f578557bca318b4577fca406f826
|
[
"MIT"
] | null | null | null |
gra.cpp
|
TheDarkPhoenix/2DRacing
|
249fd5fa3533f578557bca318b4577fca406f826
|
[
"MIT"
] | null | null | null |
#include "gra.h"
#include "si.h"
#include <SFML/Graphics.hpp>
#include <cstdio>
#include <string>
#include <cmath>
#include <fstream>
using namespace sf;
using namespace std;
Gra::Gra(RenderWindow& window) : world(b2Vec2(0, 0))
{
window.SetFramerateLimit(70);
Gra::NazwyTras.open("Trasy/trasy.txt");
if (Gra::NazwyTras.good())
{
Gra::NazwyTras >> Gra::nLiczbaTras;
}
Gra::tTrasy = new mapa[Gra::nLiczbaTras];
Gra::imgTrasy = new Image[Gra::nLiczbaTras];
for (int i = 0; i < Gra::nLiczbaTras; ++i)
{
Gra::NazwyTras >> tTrasy[i].chNazwaTxt >> tTrasy[i].strNazwaPng;
Gra::imgTrasy[i].LoadFromFile(tTrasy[i].strNazwaPng);
tTrasy[i].trasa.SetImage(imgTrasy[i]);
tTrasy[i].trasa.Resize(window.GetWidth()/2, window.GetWidth()/2);
tTrasy[i].trasa.SetCenter(imgTrasy[i].GetWidth()/2, imgTrasy[i].GetHeight()/2);
tTrasy[i].trasa.SetPosition(window.GetWidth()/2, window.GetHeight()/2);
}
if (Gra::NazwyTras.good())
Gra::NazwyTras >> Gra::nLiczbaSamochodow;
Gra::samochody = new samochod[Gra::nLiczbaSamochodow];
Gra::imgSamochody = new Image[Gra::nLiczbaSamochodow];
for (int i = 0; i < Gra::nLiczbaSamochodow; ++i)
{
Gra::NazwyTras >> Gra::samochody[i].strNazwaPng;
Gra::imgSamochody[i].LoadFromFile(Gra::samochody[i].strNazwaPng);
samochody[i].car.SetImage(imgSamochody[i]);
samochody[i].car.Resize(imgSamochody[i].GetWidth()/1.5, imgSamochody[i].GetHeight()/1.5);
samochody[i].car.SetCenter(imgSamochody[i].GetWidth()/2, imgSamochody[i].GetHeight()/2+50);
samochody[i].car.SetPosition(window.GetWidth()/2, window.GetHeight()/2);
}
Gra::background.LoadFromFile("Obrazy/background.png");
Gra::whiteBackground.Create(640, 480, Color::White);
t1[0] = window.GetWidth() *0.9;
t1[1] = window.GetHeight()/2 - 15;
t1[2] = window.GetWidth() - 15;
t1[3] = window.GetHeight()/2+15;
t2[0] = window.GetWidth() / 10;
t2[1] = window.GetHeight()/2 - 15;
t2[2] = 15;
t2[3] = window.GetHeight()/2+15;
PrzyciskP.AddPoint(t1[0], t1[1]);
PrzyciskP.AddPoint(t1[0], t1[1]+30);
PrzyciskP.AddPoint(t1[2], t1[3]-15);
PrzyciskL.AddPoint(t2[0], t2[1]);
PrzyciskL.AddPoint(t2[2], t2[1]+15);
PrzyciskL.AddPoint(t2[0], t2[3]);
PrzyciskP.SetColor(Color::Red);
PrzyciskL.SetColor(Color::Red);
KoniecGry = false;
Dalej = false;
b2BodyDef bodyDef;
b2Body* m_groundBody = world.CreateBody( &bodyDef );
b2PolygonShape polygonShape;
b2FixtureDef fixtureDef;
fixtureDef.shape = &polygonShape;
fixtureDef.isSensor = true;
polygonShape.SetAsBox(75, 105, b2Vec2(0,0), 0 );
b2Fixture* groundAreaFixture = m_groundBody->CreateFixture(&fixtureDef);
groundAreaFixture->SetUserData( new GroundAreaFUD( 0.5f, false ) );
}
Gra::~Gra()
{
if (tTrasy)
delete[] tTrasy;
if (imgTrasy)
delete[] imgTrasy;
if (samochody)
delete[] samochody;
if (imgSamochody)
delete[] imgSamochody;
}
bool Gra::Menu(const Input& wejscie, Event event, RenderWindow& window)
{
Gra::sBackground.SetImage(Gra::background);
String tekst1;
String tekst2;
String tekst3;
tekst1.SetSize(20);
tekst2.SetSize(20);
tekst3.SetSize(20);
tekst1.SetText("Start gry");
tekst2.SetText("Start gry");
tekst3.SetText("Wyjscie");
tekst1.SetCenter(tekst1.GetRect().Right/2, tekst1.GetRect().Bottom/2);
tekst2.SetCenter(tekst2.GetRect().Right/2, tekst2.GetRect().Bottom/2);
tekst3.SetCenter(tekst3.GetRect().Right/2, tekst3.GetRect().Bottom/2);
tekst1.SetPosition(window.GetWidth()/2, window.GetHeight()/3+25);
tekst2.SetPosition(window.GetWidth()/2, window.GetHeight()/3 + window.GetHeight()/9 + 100);
tekst3.SetPosition(window.GetWidth()/2, window.GetHeight()/3+ (window.GetHeight()/9)*2 +80+25);
float p1[4] = {tekst1.GetRect().Left, tekst1.GetRect().Top, tekst1.GetRect().Right, tekst1.GetRect().Bottom};
float p2[4] = {tekst2.GetRect().Left, tekst2.GetRect().Top, tekst2.GetRect().Right, tekst2.GetRect().Bottom};
float p3[4] = {tekst3.GetRect().Left, tekst3.GetRect().Top, tekst3.GetRect().Right, tekst3.GetRect().Bottom};
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= p3[0] && wejscie.GetMouseX() <= p3[2] && wejscie.GetMouseY() >= p3[1] && wejscie.GetMouseY() <= p3 [3])
{
window.Close();
return false;
}
if (wejscie.GetMouseX() >= p2[0] && wejscie.GetMouseX() <= p2[2] && wejscie.GetMouseY() >= p2[1] && wejscie.GetMouseY() <= p2 [3])
{
window.Clear();
return true;
}
}
}
if (wejscie.GetMouseX() >= p2[0] && wejscie.GetMouseX() <= p2[2] && wejscie.GetMouseY() >= p2[1] && wejscie.GetMouseY() <= p2 [3])
tekst2.SetColor(Color::Blue);
else
tekst2.SetColor(Color::Black);
if (wejscie.GetMouseX() >= p3[0] && wejscie.GetMouseX() <= p3[2] && wejscie.GetMouseY() >= p3[1] && wejscie.GetMouseY() <= p3 [3])
tekst3.SetColor(Color::Blue);
else
tekst3.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(tekst2);
window.Draw(tekst3);
window.Display();
window.Clear();
}
return true;
}
bool Gra::WyborTrasy(const Input& wejscie, Event event, RenderWindow& window)
{
Gra::sBackground.SetImage(Gra::whiteBackground);
int AktualnaTrasa = 3;
String str;
str.SetText("Wybierz");
str.SetCenter(str.GetRect().Right/2, str.GetRect().Bottom/2);
str.SetPosition(window.GetWidth()/2, window.GetHeight()*0.9);
float p1[4] = {str.GetRect().Left, str.GetRect().Top, str.GetRect().Right, str.GetRect().Bottom};
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
{
window.Clear();
if (AktualnaTrasa + 1 < Gra::nLiczbaTras)
++AktualnaTrasa;
else
AktualnaTrasa = 0;
}
else if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
{
window.Clear();
if (AktualnaTrasa - 1 >= 0)
--AktualnaTrasa;
else
AktualnaTrasa = Gra::nLiczbaTras-1;
}
else if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
{
window.Clear();
nTrasa = AktualnaTrasa;
return true;
}
}
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
PrzyciskP.SetColor(Color::Green);
else
PrzyciskP.SetColor(Color::Red);
if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
PrzyciskL.SetColor(Color::Green);
else
PrzyciskL.SetColor(Color::Red);
if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
str.SetColor(Color::Blue);
else
str.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(tTrasy[AktualnaTrasa].trasa);
window.Draw(str);
window.Draw(PrzyciskL);
window.Draw(PrzyciskP);
window.Display();
window.Clear();
}
}
return true;
}
bool Gra::WyborSamochodu(const Input& wejscie, Event event, RenderWindow& window)
{
Gra::sBackground.SetImage(Gra::whiteBackground);
int AktualnySamochod = 0;
String str;
str.SetText("Wybierz");
str.SetCenter(str.GetRect().Right/2, str.GetRect().Bottom/2);
str.SetPosition(window.GetWidth()/2, window.GetHeight()*0.9);
float p1[4] = {str.GetRect().Left, str.GetRect().Top, str.GetRect().Right, str.GetRect().Bottom};
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
{
window.Clear();
if (AktualnySamochod + 1 < nLiczbaSamochodow)
++AktualnySamochod;
else
AktualnySamochod = 0;
}
else if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
{
window.Clear();
if (AktualnySamochod - 1 >= 0)
--AktualnySamochod;
else
AktualnySamochod = nLiczbaSamochodow-1;
}
else if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
{
window.Clear();
nCar = AktualnySamochod;
return true;
}
}
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
PrzyciskP.SetColor(Color::Green);
else
PrzyciskP.SetColor(Color::Red);
if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
PrzyciskL.SetColor(Color::Green);
else
PrzyciskL.SetColor(Color::Red);
if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
str.SetColor(Color::Blue);
else
str.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(samochody[AktualnySamochod].car);
window.Draw(PrzyciskL);
window.Draw(PrzyciskP);
window.Draw(str);
window.Display();
window.Clear();
}
}
return true;
}
bool Gra::Okrazenia(const Input& wejscie, Event event, RenderWindow& window)
{
char ch[50];
nOkrazenia = 1;
Gra::sBackground.SetImage(Gra::whiteBackground);
String str;
String str2;
String str3;
sprintf(ch, "%i", nOkrazenia);
str2.SetText(ch);
str.SetText("Wybierz");
str3.SetText("Liczba okrazen");
str.SetCenter(str.GetRect().Right/2, str.GetRect().Bottom/2);
str2.SetCenter(str2.GetRect().Right/2, str2.GetRect().Bottom/2);
str3.SetCenter(str3.GetRect().Right/2, str3.GetRect().Bottom/2);
str.SetPosition(window.GetWidth()/2, window.GetHeight()*0.9);
str2.SetPosition(window.GetWidth()/2, window.GetHeight()/2);
str3.SetPosition(window.GetWidth()/2, window.GetHeight()/10);
str2.SetColor(Color::Black);
str3.SetColor(Color::Black);
float p1[4] = {str.GetRect().Left, str.GetRect().Top, str.GetRect().Right, str.GetRect().Bottom};
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
{
window.Clear();
++nOkrazenia;
sprintf(ch, "%i", nOkrazenia);
str2.SetText(ch);
}
else if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
{
window.Clear();
if (nOkrazenia - 1 > 1)
--nOkrazenia;
else
nOkrazenia = 1;
sprintf(ch, "%i", nOkrazenia);
str2.SetText(ch);
}
else if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
{
window.Clear();
return true;
}
}
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
PrzyciskP.SetColor(Color::Green);
else
PrzyciskP.SetColor(Color::Red);
if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
PrzyciskL.SetColor(Color::Green);
else
PrzyciskL.SetColor(Color::Red);
if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
str.SetColor(Color::Blue);
else
str.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(PrzyciskL);
window.Draw(PrzyciskP);
window.Draw(str);
window.Draw(str2);
window.Draw(str3);
window.Display();
window.Clear();
}
}
return true;
}
bool Gra::LiczbaPrzeciwnikow(const Input& wejscie, Event event, RenderWindow& window)
{
char ch[50];
int nPrzeciwnicy = 2;
Gra::sBackground.SetImage(Gra::whiteBackground);
String str;
String str2;
String str3;
sprintf(ch, "%i", nPrzeciwnicy);
str2.SetText(ch);
str.SetText("Wybierz");
str3.SetText("Liczba przeciwnikow");
str.SetCenter(str.GetRect().Right/2, str.GetRect().Bottom/2);
str2.SetCenter(str2.GetRect().Right/2, str2.GetRect().Bottom/2);
str3.SetCenter(str3.GetRect().Right/2, str3.GetRect().Bottom/2);
str.SetPosition(window.GetWidth()/2, window.GetHeight()*0.9);
str2.SetPosition(window.GetWidth()/2, window.GetHeight()/2);
str3.SetPosition(window.GetWidth()/2, window.GetHeight()/10);
str2.SetColor(Color::Black);
str3.SetColor(Color::Black);
float p1[4] = {str.GetRect().Left, str.GetRect().Top, str.GetRect().Right, str.GetRect().Bottom};
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
{
window.Clear();
++nPrzeciwnicy;
sprintf(ch, "%i", nPrzeciwnicy);
str2.SetText(ch);
}
else if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
{
window.Clear();
if (nPrzeciwnicy - 1 > 0)
--nPrzeciwnicy;
else
nPrzeciwnicy = 0;
sprintf(ch, "%i", nPrzeciwnicy);
str2.SetText(ch);
}
else if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
{
window.Clear();
nLiczbaPrzeciwnikow = nPrzeciwnicy;
return true;
}
}
if (wejscie.GetMouseX() >= t1[0] && wejscie.GetMouseX() <= t1[2] && wejscie.GetMouseY() >= t1[1] && wejscie.GetMouseY() <= t1[3])
PrzyciskP.SetColor(Color::Green);
else
PrzyciskP.SetColor(Color::Red);
if (wejscie.GetMouseX() >= t2[2] && wejscie.GetMouseX() <= t2[0] && wejscie.GetMouseY() >= t2[1] && wejscie.GetMouseY() <= t2[3])
PrzyciskL.SetColor(Color::Green);
else
PrzyciskL.SetColor(Color::Red);
if (wejscie.GetMouseX() >= p1[0] && wejscie.GetMouseX() <= p1[2] && wejscie.GetMouseY() >= p1[1] && wejscie.GetMouseY() <= p1[3])
str.SetColor(Color::Blue);
else
str.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(PrzyciskL);
window.Draw(PrzyciskP);
window.Draw(str);
window.Draw(str2);
window.Draw(str3);
window.Display();
window.Clear();
}
}
return true;
}
bool Gra::Koniec(float LacznyCzas, float NajlepszeOkrazenie, const Input& wejscie, Event event, RenderWindow& window)
{
Gra::sBackground.SetImage(Gra::whiteBackground);
char ch[50];
String str;
String str2;
String str3;
String str4;
String str5;
sprintf(ch, "%f", LacznyCzas);
str.SetText("Laczny czas");
str2.SetText("Najlepsze okrazenie");
str3.SetText("Dalej");
str4.SetText(ch);
sprintf(ch, "%f", NajlepszeOkrazenie);
str5.SetText(ch);
str.SetCenter(str.GetRect().Right/2, str.GetRect().Bottom/2);
str2.SetCenter(str2.GetRect().Right/2, str2.GetRect().Bottom/2);
str3.SetCenter(str3.GetRect().Right/2, str3.GetRect().Bottom/2);
str4.SetCenter(str4.GetRect().Right/2, str4.GetRect().Bottom/2);
str5.SetCenter(str5.GetRect().Right/2, str5.GetRect().Bottom/2);
str.SetPosition(window.GetWidth()/2, window.GetHeight()*0.25);
str2.SetPosition(window.GetWidth()/2, window.GetHeight()*0.50);
str3.SetPosition(window.GetWidth()/2, window.GetHeight()*0.75);
str4.SetPosition(window.GetWidth()/2, window.GetHeight()*0.35);
str5.SetPosition(window.GetWidth()/2, window.GetHeight()*0.60);
float p3[4] = {str3.GetRect().Left, str3.GetRect().Top, str3.GetRect().Right, str3.GetRect().Bottom};
str.SetColor(Color::Black);
str2.SetColor(Color::Black);
str3.SetColor(Color::Black);
str4.SetColor(Color::Black);
str5.SetColor(Color::Black);
while (window.IsOpened())
{
while (window.GetEvent(event))
{
if (event.Type == Event::Closed)
{
window.Close();
return false;
}
if (wejscie.IsKeyDown(Key::Escape))
{
window.Close();
return false;
}
if (wejscie.IsMouseButtonDown(Mouse::Left))
{
if (wejscie.GetMouseX() >= p3[0] && wejscie.GetMouseX() <= p3[2] && wejscie.GetMouseY() >= p3[1] && wejscie.GetMouseY() <= p3[3])
{
Dalej = true;
return true;
}
}
}
if (wejscie.GetMouseX() >= p3[0] && wejscie.GetMouseX() <= p3[2] && wejscie.GetMouseY() >= p3[1] && wejscie.GetMouseY() <= p3[3])
str3.SetColor(Color::Blue);
else
str3.SetColor(Color::Black);
window.Draw(Gra::sBackground);
window.Draw(str);
window.Draw(str2);
window.Draw(str3);
window.Draw(str4);
window.Draw(str5);
window.Display();
window.Clear();
}
return true;
}
bool Gra::Main(const Input& wejscie, Event event, RenderWindow& window)
{
bool b = true;
for(;;)
{
b = Menu(wejscie, event, window);
if (!b)
break;
b = WyborTrasy(wejscie, event, window);
if (!b)
break;
b = WyborSamochodu(wejscie, event, window);
if (!b)
break;
b = Okrazenia(wejscie, event, window);
if (!b)
break;
b = LiczbaPrzeciwnikow(wejscie, event, window);
if (!b)
break;
b = game(wejscie, event, window);
if (!b)
break;
}
return true;
}
bool Gra::game(const Input& wejscie, Event event, RenderWindow& window)
{
game1 = new Game(world, nOkrazenia, nLiczbaPrzeciwnikow, imgSamochody[nCar], window, nTrasa, tTrasy[nTrasa].strNazwaPng, tTrasy[nTrasa].chNazwaTxt);
float* u1 = new float;
float* u2 = new float;
int j = game1->Gra(world, wejscie, event, window, u1, u2);
View& View = window.GetDefaultView();
View.Move(-View.GetRect().Left, -View.GetRect().Top);
if (j == 0)
{
delete game1;
delete u1;
delete u2;
return true;
}
else if (j == -1)
{
delete game1;
delete u1;
delete u2;
return false;
}
else if (j == 1)
Koniec(*u1, *u2, wejscie, event, window);
delete game1;
delete u1;
delete u2;
return true;
}
| 38.365449 | 152 | 0.526628 | TheDarkPhoenix |
d51e713f2142a93529d4e0b16da18cf492ebc097 | 3,584 | cpp | C++ | vnext/Microsoft.ReactNative/Base/CoreUIManagers.cpp | CoDeRgAnEsh/react-native-windows | c99016ee963168de0088e5d03a22e1f85bf181f6 | ["MIT"] | 1 | 2021-11-09T10:25:13.000Z | 2021-11-09T10:25:13.000Z | vnext/Microsoft.ReactNative/Base/CoreUIManagers.cpp | CoDeRgAnEsh/react-native-windows | c99016ee963168de0088e5d03a22e1f85bf181f6 | ["MIT"] | 1 | 2020-10-12T19:39:41.000Z | 2020-10-12T19:39:41.000Z | vnext/Microsoft.ReactNative/Base/CoreUIManagers.cpp | CoDeRgAnEsh/react-native-windows | c99016ee963168de0088e5d03a22e1f85bf181f6 | ["MIT"] | null | null | null |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "pch.h"
#include <IReactInstance.h>
#include <IUIManager.h>
#include <Modules/NativeUIManager.h>
#include <ViewManagerProvider.h>
// Standard View Managers
#include <Views/ActivityIndicatorViewManager.h>
#include <Views/DatePickerViewManager.h>
#include <Views/FlyoutViewManager.h>
#include <Views/Image/ImageViewManager.h>
#include <Views/PickerViewManager.h>
#include <Views/PopupViewManager.h>
#include <Views/RawTextViewManager.h>
#include <Views/RefreshControlManager.h>
#include <Views/RootViewManager.h>
#include <Views/ScrollContentViewManager.h>
#include <Views/ScrollViewManager.h>
#include <Views/SliderViewManager.h>
#include <Views/SwitchViewManager.h>
#include <Views/TextInputViewManager.h>
#include <Views/TextViewManager.h>
#include <Views/ViewViewManager.h>
#include <Views/VirtualTextViewManager.h>
// Polyester View Managers // TODO: Move Polyester implementations out of this
// library and depot
#include <Polyester/ButtonContentViewManager.h>
#include <Polyester/ButtonViewManager.h>
#include <Polyester/HyperlinkViewManager.h>
#include <Polyester/IconViewManager.h>
namespace react::uwp {
void AddStandardViewManagers(
std::vector<std::unique_ptr<facebook::react::IViewManager>> &viewManagers,
std::shared_ptr<IReactInstance> const &instance) noexcept {
viewManagers.push_back(std::make_unique<ActivityIndicatorViewManager>(instance));
viewManagers.push_back(std::make_unique<DatePickerViewManager>(instance));
viewManagers.push_back(std::make_unique<FlyoutViewManager>(instance));
viewManagers.push_back(std::make_unique<ImageViewManager>(instance));
viewManagers.push_back(std::make_unique<PickerViewManager>(instance));
viewManagers.push_back(std::make_unique<PopupViewManager>(instance));
viewManagers.push_back(std::make_unique<RawTextViewManager>(instance));
viewManagers.push_back(std::make_unique<RootViewManager>(instance));
viewManagers.push_back(std::make_unique<ScrollContentViewManager>(instance));
viewManagers.push_back(std::make_unique<SliderViewManager>(instance));
viewManagers.push_back(std::make_unique<ScrollViewManager>(instance));
viewManagers.push_back(std::make_unique<SwitchViewManager>(instance));
viewManagers.push_back(std::make_unique<TextViewManager>(instance));
viewManagers.push_back(std::make_unique<TextInputViewManager>(instance));
viewManagers.push_back(std::make_unique<ViewViewManager>(instance));
viewManagers.push_back(std::make_unique<VirtualTextViewManager>(instance));
viewManagers.push_back(std::make_unique<RefreshControlViewManager>(instance));
}
void AddPolyesterViewManagers(
std::vector<std::unique_ptr<facebook::react::IViewManager>> &viewManagers,
std::shared_ptr<IReactInstance> const &instance) noexcept {
viewManagers.push_back(std::make_unique<polyester::ButtonViewManager>(instance));
viewManagers.push_back(std::make_unique<polyester::ButtonContentViewManager>(instance));
viewManagers.push_back(std::make_unique<polyester::HyperlinkViewManager>(instance));
viewManagers.push_back(std::make_unique<polyester::IconViewManager>(instance));
}
std::shared_ptr<facebook::react::IUIManager> CreateUIManager2(
Mso::React::IReactContext *context,
std::vector<react::uwp::NativeViewManager> &&viewManagers) noexcept {
// Create UIManager, passing in ViewManagers
return createIUIManager(std::move(viewManagers), new NativeUIManager(context));
}
} // namespace react::uwp
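For context, a hypothetical host-side wiring of these helpers. It assumes the declarations above are visible, that react::uwp::NativeViewManager is an alias for std::unique_ptr<facebook::react::IViewManager> (as the two signatures above suggest), and that the hosting application supplies the instance and context; none of this is part of the file itself.
// Hypothetical caller; not part of CoreUIManagers.cpp.
std::shared_ptr<facebook::react::IUIManager> BuildUIManager(
    std::shared_ptr<react::uwp::IReactInstance> const &instance,
    Mso::React::IReactContext *context) noexcept {
  // Collect the built-in and Polyester view managers, then hand them to the
  // UIManager factory defined above.
  std::vector<react::uwp::NativeViewManager> viewManagers;
  react::uwp::AddStandardViewManagers(viewManagers, instance);
  react::uwp::AddPolyesterViewManagers(viewManagers, instance);
  return react::uwp::CreateUIManager2(context, std::move(viewManagers));
}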
| 45.948718 | 91 | 0.787109 | CoDeRgAnEsh |
d520f8873bb38be108e1aeb0db4be441ab93aa37 | 11,470 | cpp | C++ | src/kits/shared/SettingsMessage.cpp | Yn0ga/haiku | 74e271b2a286c239e60f0ec261f4f197f4727eee | ["MIT"] | 1,338 | 2015-01-03T20:06:56.000Z | 2022-03-26T13:49:54.000Z | src/kits/shared/SettingsMessage.cpp | Yn0ga/haiku | 74e271b2a286c239e60f0ec261f4f197f4727eee | ["MIT"] | 15 | 2015-01-17T22:19:32.000Z | 2021-12-20T12:35:00.000Z | src/kits/shared/SettingsMessage.cpp | Yn0ga/haiku | 74e271b2a286c239e60f0ec261f4f197f4727eee | ["MIT"] | 350 | 2015-01-08T14:15:27.000Z | 2022-03-21T18:14:35.000Z |
/*
* Copyright 2008-2010, Stephan Aßmus <superstippi@gmx.de>.
* Copyright 1998, Eric Shepherd.
* All rights reserved. Distributed under the terms of the Be Sample Code
* license.
*/
//! Be Newsletter Volume II, Issue 35; September 2, 1998 (Eric Shepherd)
#include "SettingsMessage.h"
#include <new>
#include <Autolock.h>
#include <Entry.h>
#include <File.h>
#include <Messenger.h>
#include <String.h>
SettingsMessage::SettingsMessage(directory_which directory,
const char* filename)
:
BMessage('pref'),
fListeners(0)
{
fStatus = find_directory(directory, &fPath);
if (fStatus == B_OK)
fStatus = fPath.Append(filename);
if (fStatus == B_OK)
fStatus = Load();
}
SettingsMessage::~SettingsMessage()
{
Save();
for (int32 i = fListeners.CountItems() - 1; i >= 0; i--)
delete reinterpret_cast<BMessenger*>(fListeners.ItemAtFast(i));
}
status_t
SettingsMessage::InitCheck() const
{
return fStatus;
}
status_t
SettingsMessage::Load()
{
BAutolock _(this);
BFile file(fPath.Path(), B_READ_ONLY);
status_t status = file.InitCheck();
if (status == B_OK)
status = Unflatten(&file);
return status;
}
status_t
SettingsMessage::Save() const
{
BAutolock _(const_cast<SettingsMessage*>(this));
BFile file(fPath.Path(), B_WRITE_ONLY | B_CREATE_FILE | B_ERASE_FILE);
status_t status = file.InitCheck();
if (status == B_OK)
status = Flatten(&file);
return status;
}
bool
SettingsMessage::AddListener(const BMessenger& listener)
{
BAutolock _(this);
BMessenger* listenerCopy = new(std::nothrow) BMessenger(listener);
if (listenerCopy && fListeners.AddItem(listenerCopy))
return true;
delete listenerCopy;
return false;
}
void
SettingsMessage::RemoveListener(const BMessenger& listener)
{
BAutolock _(this);
for (int32 i = fListeners.CountItems() - 1; i >= 0; i--) {
BMessenger* listenerItem = reinterpret_cast<BMessenger*>(
fListeners.ItemAtFast(i));
if (*listenerItem == listener) {
fListeners.RemoveItem(i);
delete listenerItem;
return;
}
}
}
// #pragma mark -
status_t
SettingsMessage::SetValue(const char* name, bool value)
{
status_t ret = ReplaceBool(name, value);
if (ret != B_OK)
ret = AddBool(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, int8 value)
{
status_t ret = ReplaceInt8(name, value);
if (ret != B_OK)
ret = AddInt8(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, int16 value)
{
status_t ret = ReplaceInt16(name, value);
if (ret != B_OK)
ret = AddInt16(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, uint16 value)
{
status_t ret = ReplaceUInt16(name, value);
if (ret != B_OK)
ret = AddUInt16(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, int32 value)
{
status_t ret = ReplaceInt32(name, value);
if (ret != B_OK)
ret = AddInt32(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, uint32 value)
{
status_t ret = ReplaceUInt32(name, value);
if (ret != B_OK)
ret = AddUInt32(name, value);
if (ret == B_BAD_TYPE && HasData(name, B_INT32_TYPE)) {
// For compatibility with older versions of this class, replace an int32
RemoveData(name);
ret = AddUInt32(name, value);
}
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, int64 value)
{
status_t ret = ReplaceInt64(name, value);
if (ret != B_OK)
ret = AddInt64(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, uint64 value)
{
status_t ret = ReplaceUInt64(name, value);
if (ret != B_OK)
ret = AddUInt64(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, float value)
{
status_t ret = ReplaceFloat(name, value);
if (ret != B_OK)
ret = AddFloat(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, double value)
{
status_t ret = ReplaceDouble(name, value);
if (ret != B_OK)
ret = AddDouble(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const char* value)
{
status_t ret = ReplaceString(name, value);
if (ret != B_OK)
ret = AddString(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BString& value)
{
status_t ret = ReplaceString(name, value);
if (ret != B_OK)
ret = AddString(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BPoint& value)
{
status_t ret = ReplacePoint(name, value);
if (ret != B_OK)
ret = AddPoint(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BRect& value)
{
status_t ret = ReplaceRect(name, value);
if (ret != B_OK)
ret = AddRect(name, value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const entry_ref& value)
{
status_t ret = ReplaceRef(name, &value);
if (ret != B_OK)
ret = AddRef(name, &value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BMessage& value)
{
status_t ret = ReplaceMessage(name, &value);
if (ret != B_OK)
ret = AddMessage(name, &value);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BFlattenable* value)
{
status_t ret = ReplaceFlat(name, const_cast<BFlattenable*>(value));
if (ret != B_OK)
ret = AddFlat(name, const_cast<BFlattenable*>(value));
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, type_code type, const void* data,
ssize_t numBytes)
{
status_t ret = ReplaceData(name, type, data, numBytes);
if (ret != B_OK)
ret = AddData(name, type, data, numBytes);
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
status_t
SettingsMessage::SetValue(const char* name, const BFont& value)
{
font_family family;
font_style style;
value.GetFamilyAndStyle(&family, &style);
BMessage fontMessage;
status_t ret = fontMessage.AddString("family", family);
if (ret == B_OK)
ret = fontMessage.AddString("style", style);
if (ret == B_OK)
ret = fontMessage.AddFloat("size", value.Size());
if (ret == B_OK) {
if (ReplaceMessage(name, &fontMessage) != B_OK)
ret = AddMessage(name, &fontMessage);
}
if (ret == B_OK)
_NotifyValueChanged(name);
return ret;
}
// #pragma mark -
bool
SettingsMessage::GetValue(const char* name, bool defaultValue) const
{
bool value;
if (FindBool(name, &value) != B_OK)
return defaultValue;
return value;
}
int8
SettingsMessage::GetValue(const char* name, int8 defaultValue) const
{
int8 value;
if (FindInt8(name, &value) != B_OK)
return defaultValue;
return value;
}
int16
SettingsMessage::GetValue(const char* name, int16 defaultValue) const
{
int16 value;
if (FindInt16(name, &value) != B_OK)
return defaultValue;
return value;
}
uint16
SettingsMessage::GetValue(const char* name, uint16 defaultValue) const
{
uint16 value;
if (FindUInt16(name, &value) != B_OK)
return defaultValue;
return value;
}
int32
SettingsMessage::GetValue(const char* name, int32 defaultValue) const
{
int32 value;
if (FindInt32(name, &value) != B_OK)
return defaultValue;
return value;
}
uint32
SettingsMessage::GetValue(const char* name, uint32 defaultValue) const
{
uint32 value;
if (FindUInt32(name, &value) == B_OK)
return value;
// For compatibility with older versions of this class, also accept an int32
int32 signedValue;
if (FindInt32(name, &signedValue) == B_OK && signedValue >= 0)
return signedValue;
return defaultValue;
}
int64
SettingsMessage::GetValue(const char* name, int64 defaultValue) const
{
int64 value;
if (FindInt64(name, &value) != B_OK)
return defaultValue;
return value;
}
uint64
SettingsMessage::GetValue(const char* name, uint64 defaultValue) const
{
uint64 value;
if (FindUInt64(name, &value) != B_OK)
return defaultValue;
return value;
}
float
SettingsMessage::GetValue(const char* name, float defaultValue) const
{
float value;
if (FindFloat(name, &value) != B_OK)
return defaultValue;
return value;
}
double
SettingsMessage::GetValue(const char* name, double defaultValue) const
{
double value;
if (FindDouble(name, &value) != B_OK)
return defaultValue;
return value;
}
BString
SettingsMessage::GetValue(const char* name, const BString& defaultValue) const
{
BString value;
if (FindString(name, &value) != B_OK)
return defaultValue;
return value;
}
const char*
SettingsMessage::GetValue(const char* name, const char* defaultValue) const
{
const char* value;
if (FindString(name, &value) != B_OK)
return defaultValue;
return value;
}
BPoint
SettingsMessage::GetValue(const char *name, BPoint defaultValue) const
{
BPoint value;
if (FindPoint(name, &value) != B_OK)
return defaultValue;
return value;
}
BRect
SettingsMessage::GetValue(const char* name, BRect defaultValue) const
{
BRect value;
if (FindRect(name, &value) != B_OK)
return defaultValue;
return value;
}
entry_ref
SettingsMessage::GetValue(const char* name, const entry_ref& defaultValue) const
{
entry_ref value;
if (FindRef(name, &value) != B_OK)
return defaultValue;
return value;
}
BMessage
SettingsMessage::GetValue(const char* name, const BMessage& defaultValue) const
{
BMessage value;
if (FindMessage(name, &value) != B_OK)
return defaultValue;
return value;
}
BFont
SettingsMessage::GetValue(const char* name, const BFont& defaultValue) const
{
BMessage fontMessage;
if (FindMessage(name, &fontMessage) != B_OK)
return defaultValue;
const char* family;
const char* style;
float size;
if (fontMessage.FindString("family", &family) != B_OK
|| fontMessage.FindString("style", &style) != B_OK
|| fontMessage.FindFloat("size", &size) != B_OK) {
return defaultValue;
}
BFont value;
if (value.SetFamilyAndStyle(family, style) != B_OK)
return defaultValue;
value.SetSize(size);
return value;
}
void*
SettingsMessage::GetValue(const char* name, type_code type, ssize_t numBytes,
const void** defaultValue) const
{
void* value;
if (FindData(name, type, (const void**)&value, &numBytes) != B_OK)
return defaultValue;
return value;
}
// #pragma mark - private
void
SettingsMessage::_NotifyValueChanged(const char* name) const
{
BMessage message(SETTINGS_VALUE_CHANGED);
message.AddString("name", name);
// Add the value of that name to the notification.
type_code type;
if (GetInfo(name, &type) == B_OK) {
const void* data;
ssize_t numBytes;
if (FindData(name, type, &data, &numBytes) == B_OK)
message.AddData("value", type, data, numBytes);
}
int32 count = fListeners.CountItems();
for (int32 i = 0; i < count; i++) {
BMessenger* listener = reinterpret_cast<BMessenger*>(
fListeners.ItemAtFast(i));
listener->SendMessage(&message);
}
}
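A small usage sketch for context, not part of the Haiku source above; the file name "MyApp_settings" and the key "window frame" are placeholders chosen for this illustration.
#include <FindDirectory.h>
#include <Rect.h>
#include "SettingsMessage.h"

void RememberWindowFrame()
{
	// Settings live under the user settings directory; the destructor saves them.
	SettingsMessage settings(B_USER_SETTINGS_DIRECTORY, "MyApp_settings");
	// On a first run the file does not exist yet, so InitCheck() may not be B_OK.

	// The GetValue() overload is selected by the type of the default value.
	BRect frame = settings.GetValue("window frame", BRect(100, 100, 500, 400));

	// SetValue() replaces or adds the field and notifies listeners registered
	// through AddListener() with a SETTINGS_VALUE_CHANGED message.
	settings.SetValue("window frame", frame.OffsetByCopy(10, 10));
}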
| 19.440678 | 80 | 0.710898 | Yn0ga |
d5224e5b615d71be30c25d1d5145b20814782be7 | 328 | cpp | C++ | pvl/abridged/math/numtheory/extended-euclidean.cpp | bullybutcher/progvar-library | 4d4b351c8a2540c522d00138e1bcf0edc528b540 | ["MIT"] | 3 | 2021-10-16T13:22:58.000Z | 2021-10-29T22:03:44.000Z | pvl/abridged/math/numtheory/extended-euclidean.cpp | bullybutcher/progvar-library | 4d4b351c8a2540c522d00138e1bcf0edc528b540 | ["MIT"] | 19 | 2021-11-27T14:40:00.000Z | 2022-03-30T07:14:59.000Z | pvl/abridged/math/numtheory/extended-euclidean.cpp | bullybutcher/progvar-library | 4d4b351c8a2540c522d00138e1bcf0edc528b540 | ["MIT"] | 2 | 2022-03-11T20:53:41.000Z | 2022-03-20T07:08:46.000Z |
ll mod(ll x, ll m) { // use this instead of x % m
if (m == 0) return 0;
if (m < 0) m *= -1;
return (x%m + m) % m; // always nonnegative
}
ll extended_euclid(ll a, ll b, ll &x, ll &y) {
if (b==0) {x = 1; y = 0; return a;}
ll g = extended_euclid(b, a%b, x, y);
ll z = x - a/b*y;
x = y; y = z; return g;
}
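A self-contained usage sketch, not part of the library file above: computing a modular inverse with the routine shown. The typedef of ll as long long is an assumption (the fragment leaves it to the surrounding template), and the inputs 3 and 7 are arbitrary.
#include <cstdio>
typedef long long ll;                       // assumed definition of ll

ll mod(ll x, ll m) {                        // same helper as above
    if (m == 0) return 0;
    if (m < 0) m *= -1;
    return (x%m + m) % m;
}
ll extended_euclid(ll a, ll b, ll &x, ll &y) {   // same routine as above
    if (b==0) {x = 1; y = 0; return a;}
    ll g = extended_euclid(b, a%b, x, y);
    ll z = x - a/b*y;
    x = y; y = z; return g;
}

// Inverse of a modulo m (m > 0); valid only when gcd(a, m) == 1.
ll mod_inverse(ll a, ll m) {
    ll x, y;
    ll g = extended_euclid(mod(a, m), m, x, y);  // a*x + m*y == g
    return (g == 1) ? mod(x, m) : -1;            // -1 signals "no inverse"
}

int main() {
    printf("%lld\n", mod_inverse(3, 7));    // prints 5, since 3*5 = 15 ≡ 1 (mod 7)
}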
| 27.333333 | 50 | 0.481707 | bullybutcher |