text stringlengths 2 99k | meta dict |
|---|---|
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2013 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "domainDecomposition.H"
#include "dictionary.H"
#include "labelIOList.H"
#include "processorPolyPatch.H"
#include "processorCyclicPolyPatch.H"
#include "fvMesh.H"
#include "OSspecific.H"
#include "Map.H"
#include "globalMeshData.H"
#include "DynamicList.H"
#include "fvFieldDecomposer.H"
#include "IOobjectList.H"
#include "cellSet.H"
#include "faceSet.H"
#include "pointSet.H"
#include "uniformDimensionedFields.H"
// * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
// Record, for every element listed in zoneElems, which zone it belongs to:
// an element seen for the first time gets zoneI; an element already tagged
// with some zone is flagged -2 to mark membership in multiple zones.
void Foam::domainDecomposition::mark
(
    const labelList& zoneElems,
    const label zoneI,
    labelList& elementToZone
)
{
    forAll(zoneElems, i)
    {
        const label elemI = zoneElems[i];

        if (elementToZone[elemI] == -1)
        {
            // Not seen before: remember this zone
            elementToZone[elemI] = zoneI;
        }
        else if (elementToZone[elemI] >= 0)
        {
            // Already tagged with another zone: flag as shared
            elementToZone[elemI] = -2;
        }
    }
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// from components
// Construct from the IOobject of the undecomposed mesh.
// Reads decomposeParDict from the case system/ directory and sizes all the
// per-processor addressing lists from its "numberOfSubdomains" entry.
Foam::domainDecomposition::domainDecomposition(const IOobject& io)
:
    fvMesh(io),
    // Moving-mesh case: when points come from a different time instance than
    // the faces, additionally read the points at the faces instance so the
    // processor meshes can be built with the 'old' geometry. Otherwise unset.
    facesInstancePointsPtr_
    (
        pointsInstance() != facesInstance()
      ? new pointIOField
        (
            IOobject
            (
                "points",
                facesInstance(),
                polyMesh::meshSubDir,
                *this,
                IOobject::MUST_READ,
                IOobject::NO_WRITE,
                false
            )
        )
      : NULL
    ),
    decompositionDict_
    (
        IOobject
        (
            "decomposeParDict",
            time().system(),
            *this,
            IOobject::MUST_READ_IF_MODIFIED,
            IOobject::NO_WRITE
        )
    ),
    nProcs_(readInt(decompositionDict_.lookup("numberOfSubdomains"))),
    distributed_(false),
    cellToProc_(nCells()),
    // One addressing list per processor
    procPointAddressing_(nProcs_),
    procFaceAddressing_(nProcs_),
    procCellAddressing_(nProcs_),
    procPatchSize_(nProcs_),
    procPatchStartIndex_(nProcs_),
    procNeighbourProcessors_(nProcs_),
    procProcessorPatchSize_(nProcs_),
    procProcessorPatchStartIndex_(nProcs_),
    procProcessorPatchSubPatchIDs_(nProcs_),
    procProcessorPatchSubPatchStarts_(nProcs_)
{
    // Optional switch: data already distributed over the processors
    decompositionDict_.readIfPresent("distributed", distributed_);
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
// Destructor - nothing to release explicitly; all members clean up via RAII.
Foam::domainDecomposition::~domainDecomposition()
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
// Build and write one polyMesh per processor (into processorN/ case
// directories), together with the addressing files that map processor
// points/faces/cells/patches back to the undecomposed mesh. Optionally
// decomposes cell/face/point sets and any hexRef8 refinement data found
// at the faces instance. Always returns true.
//   decomposeSets : also read, decompose and write polyMesh/sets
bool Foam::domainDecomposition::writeDecomposition(const bool decomposeSets)
{
    Info<< "\nConstructing processor meshes" << endl;

    // Mark point/faces/cells that are in zones.
    // -1 : not in zone
    // -2 : in multiple zones
    // >= 0 : in single given zone
    // This will give direct lookup of elements that are in a single zone
    // and we'll only have to revert back to searching through all zones
    // for the duplicate elements

    // Point zones
    labelList pointToZone(points().size(), -1);
    forAll(pointZones(), zoneI)
    {
        mark(pointZones()[zoneI], zoneI, pointToZone);
    }

    // Face zones
    labelList faceToZone(faces().size(), -1);
    forAll(faceZones(), zoneI)
    {
        mark(faceZones()[zoneI], zoneI, faceToZone);
    }

    // Cell zones
    labelList cellToZone(nCells(), -1);
    forAll(cellZones(), zoneI)
    {
        mark(cellZones()[zoneI], zoneI, cellToZone);
    }

    // Sets of the undecomposed mesh (only filled when decomposeSets is true)
    PtrList<const cellSet> cellSets;
    PtrList<const faceSet> faceSets;
    PtrList<const pointSet> pointSets;
    if (decomposeSets)
    {
        // Read sets
        IOobjectList objects(*this, facesInstance(), "polyMesh/sets");
        {
            IOobjectList cSets(objects.lookupClass(cellSet::typeName));
            forAllConstIter(IOobjectList, cSets, iter)
            {
                cellSets.append(new cellSet(*iter()));
            }
        }
        {
            IOobjectList fSets(objects.lookupClass(faceSet::typeName));
            forAllConstIter(IOobjectList, fSets, iter)
            {
                faceSets.append(new faceSet(*iter()));
            }
        }
        {
            IOobjectList pSets(objects.lookupClass(pointSet::typeName));
            forAllConstIter(IOobjectList, pSets, iter)
            {
                pointSets.append(new pointSet(*iter()));
            }
        }
    }

    // Optional hexRef8 refinement data: read only if the headers exist.
    autoPtr<labelIOList> cellLevelPtr;
    {
        IOobject io
        (
            "cellLevel",
            facesInstance(),
            polyMesh::meshSubDir,
            *this,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        );
        if (io.headerOk())
        {
            Info<< "Reading hexRef8 data : " << io.name() << endl;
            cellLevelPtr.reset(new labelIOList(io));
        }
    }
    autoPtr<labelIOList> pointLevelPtr;
    {
        IOobject io
        (
            "pointLevel",
            facesInstance(),
            polyMesh::meshSubDir,
            *this,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        );
        if (io.headerOk())
        {
            Info<< "Reading hexRef8 data : " << io.name() << endl;
            pointLevelPtr.reset(new labelIOList(io));
        }
    }
    autoPtr<uniformDimensionedScalarField> level0EdgePtr;
    {
        IOobject io
        (
            "level0Edge",
            facesInstance(),
            polyMesh::meshSubDir,
            *this,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        );
        if (io.headerOk())
        {
            Info<< "Reading hexRef8 data : " << io.name() << endl;
            level0EdgePtr.reset(new uniformDimensionedScalarField(io));
        }
    }

    // Load-balance statistics, accumulated over the processor loop below
    label maxProcCells = 0;
    label totProcFaces = 0;
    label maxProcPatches = 0;
    label totProcPatches = 0;
    label maxProcFaces = 0;

    // Write out the meshes
    for (label procI = 0; procI < nProcs_; procI++)
    {
        // Create processor points
        const labelList& curPointLabels = procPointAddressing_[procI];
        const pointField& meshPoints = points();

        // pointLookup: global point -> local point index (-1 if not on proc)
        labelList pointLookup(nPoints(), -1);

        pointField procPoints(curPointLabels.size());
        forAll(curPointLabels, pointi)
        {
            procPoints[pointi] = meshPoints[curPointLabels[pointi]];
            pointLookup[curPointLabels[pointi]] = pointi;
        }

        // Create processor faces. Face addressing is stored as a 'turning
        // index': (global face + 1), negated when the face is flipped.
        const labelList& curFaceLabels = procFaceAddressing_[procI];
        const faceList& meshFaces = faces();

        labelList faceLookup(nFaces(), -1);

        faceList procFaces(curFaceLabels.size());
        forAll(curFaceLabels, facei)
        {
            // Mark the original face as used
            // Remember to decrement the index by one (turning index)
            //
            label curF = mag(curFaceLabels[facei]) - 1;
            faceLookup[curF] = facei;

            // get the original face
            labelList origFaceLabels;
            if (curFaceLabels[facei] >= 0)
            {
                // face not turned
                origFaceLabels = meshFaces[curF];
            }
            else
            {
                origFaceLabels = meshFaces[curF].reverseFace();
            }

            // translate face labels into local point list
            face& procFaceLabels = procFaces[facei];
            procFaceLabels.setSize(origFaceLabels.size());
            forAll(origFaceLabels, pointi)
            {
                procFaceLabels[pointi] = pointLookup[origFaceLabels[pointi]];
            }
        }

        // Create processor cells (faces renumbered to local indices)
        const labelList& curCellLabels = procCellAddressing_[procI];
        const cellList& meshCells = cells();

        cellList procCells(curCellLabels.size());
        forAll(curCellLabels, celli)
        {
            const labelList& origCellLabels = meshCells[curCellLabels[celli]];

            cell& curCell = procCells[celli];
            curCell.setSize(origCellLabels.size());

            forAll(origCellLabels, cellFaceI)
            {
                curCell[cellFaceI] = faceLookup[origCellLabels[cellFaceI]];
            }
        }

        // Create processor mesh without a boundary
        fileName processorCasePath
        (
            time().caseName()/fileName(word("processor") + Foam::name(procI))
        );

        // make the processor directory
        mkDir(time().rootPath()/processorCasePath);

        // create a database
        Time processorDb
        (
            Time::controlDictName,
            time().rootPath(),
            processorCasePath,
            word("system"),
            word("constant")
        );
        processorDb.setTime(time());

        // create the mesh. Two situations:
        // - points and faces come from the same time ('instance'). The mesh
        //   will get constructed in the same instance.
        // - points come from a different time (moving mesh cases).
        //   It will read the points belonging to the faces instance and
        //   construct the procMesh with it which then gets handled as above.
        //   (so with 'old' geometry).
        //   Only at writing time will it additionally write the current
        //   points.
        autoPtr<polyMesh> procMeshPtr;
        if (facesInstancePointsPtr_.valid())
        {
            // Construct mesh from facesInstance.
            pointField facesInstancePoints
            (
                facesInstancePointsPtr_(),
                curPointLabels
            );
            procMeshPtr.reset
            (
                new polyMesh
                (
                    IOobject
                    (
                        this->polyMesh::name(), // region of undecomposed mesh
                        facesInstance(),
                        processorDb
                    ),
                    xferMove(facesInstancePoints),
                    xferMove(procFaces),
                    xferMove(procCells)
                )
            );
        }
        else
        {
            procMeshPtr.reset
            (
                new polyMesh
                (
                    IOobject
                    (
                        this->polyMesh::name(), // region of undecomposed mesh
                        facesInstance(),
                        processorDb
                    ),
                    xferMove(procPoints),
                    xferMove(procFaces),
                    xferMove(procCells)
                )
            );
        }
        polyMesh& procMesh = procMeshPtr();

        // Create processor boundary patches
        const labelList& curPatchSizes = procPatchSize_[procI];
        const labelList& curPatchStarts = procPatchStartIndex_[procI];
        const labelList& curNeighbourProcessors =
            procNeighbourProcessors_[procI];
        const labelList& curProcessorPatchSizes =
            procProcessorPatchSize_[procI];
        const labelList& curProcessorPatchStarts =
            procProcessorPatchStartIndex_[procI];
        const labelListList& curSubPatchIDs =
            procProcessorPatchSubPatchIDs_[procI];
        const labelListList& curSubStarts =
            procProcessorPatchSubPatchStarts_[procI];

        const polyPatchList& meshPatches = boundaryMesh();

        // Count the number of inter-proc patches
        label nInterProcPatches = 0;
        forAll(curSubPatchIDs, procPatchI)
        {
            //Info<< "For processor " << procI
            //    << " have to destination processor "
            //    << curNeighbourProcessors[procPatchI] << endl;
            //
            //forAll(curSubPatchIDs[procPatchI], i)
            //{
            //    Info<< " from patch:" << curSubPatchIDs[procPatchI][i]
            //        << " starting at:" << curSubStarts[procPatchI][i]
            //        << endl;
            //}

            nInterProcPatches += curSubPatchIDs[procPatchI].size();
        }

        //Info<< "For processor " << procI
        //    << " have " << nInterProcPatches
        //    << " patches to neighbouring processors" << endl;

        List<polyPatch*> procPatches
        (
            curPatchSizes.size()
          + nInterProcPatches, //curProcessorPatchSizes.size(),
            reinterpret_cast<polyPatch*>(0)
        );
        label nPatches = 0;

        // First the ordinary (non-processor) patches, cloned from the
        // undecomposed mesh with faces remapped to local numbering.
        forAll(curPatchSizes, patchi)
        {
            // Get the face labels consistent with the field mapping
            // (reuse the patch field mappers)
            const polyPatch& meshPatch = meshPatches[patchi];
            fvFieldDecomposer::patchFieldDecomposer patchMapper
            (
                SubList<label>
                (
                    curFaceLabels,
                    curPatchSizes[patchi],
                    curPatchStarts[patchi]
                ),
                meshPatch.start()
            );

            // Map existing patches
            procPatches[nPatches] = meshPatch.clone
            (
                procMesh.boundaryMesh(),
                nPatches,
                patchMapper.directAddressing(),
                curPatchStarts[patchi]
            ).ptr();
            nPatches++;
        }

        // Then the inter-processor patches. Each processor-processor pair
        // may be split into sub-patches: subPatchID -1 means faces that were
        // internal, >= 0 means faces that came through a coupled patch.
        forAll(curProcessorPatchSizes, procPatchI)
        {
            const labelList& subPatchID = curSubPatchIDs[procPatchI];
            const labelList& subStarts = curSubStarts[procPatchI];

            label curStart = curProcessorPatchStarts[procPatchI];

            forAll(subPatchID, i)
            {
                // Size of this sub-patch: distance to the next sub-start,
                // or the remainder of the processor patch for the last one.
                label size =
                (
                    i < subPatchID.size()-1
                  ? subStarts[i+1] - subStarts[i]
                  : curProcessorPatchSizes[procPatchI] - subStarts[i]
                );

                // Info<< "From processor:" << procI << endl
                //     << " to processor:" << curNeighbourProcessors[procPatchI]
                //     << endl
                //     << " via patch:" << subPatchID[i] << endl
                //     << " start :" << curStart << endl
                //     << " size :" << size << endl;

                if (subPatchID[i] == -1)
                {
                    // From internal faces
                    procPatches[nPatches] =
                        new processorPolyPatch
                        (
                            word("procBoundary") + Foam::name(procI)
                          + "to"
                          + Foam::name(curNeighbourProcessors[procPatchI]),
                            size,
                            curStart,
                            nPatches,
                            procMesh.boundaryMesh(),
                            procI,
                            curNeighbourProcessors[procPatchI]
                        );
                }
                else
                {
                    const coupledPolyPatch& pcPatch
                        = refCast<const coupledPolyPatch>
                          (
                              boundaryMesh()[subPatchID[i]]
                          );

                    // From cyclic
                    const word& referPatch = pcPatch.name();

                    procPatches[nPatches] =
                        new processorCyclicPolyPatch
                        (
                            word("procBoundary") + Foam::name(procI)
                          + "to"
                          + Foam::name(curNeighbourProcessors[procPatchI])
                          + "through"
                          + referPatch,
                            size,
                            curStart,
                            nPatches,
                            procMesh.boundaryMesh(),
                            procI,
                            curNeighbourProcessors[procPatchI],
                            referPatch,
                            pcPatch.transform()
                        );
                }

                curStart += size;
                nPatches++;
            }
        }

        //forAll(procPatches, patchI)
        //{
        //    Pout<< " " << patchI
        //        << '\t' << "name:" << procPatches[patchI]->name()
        //        << '\t' << "type:" << procPatches[patchI]->type()
        //        << '\t' << "size:" << procPatches[patchI]->size()
        //        << endl;
        //}

        // Add boundary patches
        procMesh.addPatches(procPatches);

        // Create and add zones

        // Point zones
        {
            const pointZoneMesh& pz = pointZones();

            // Go through all the zoned points and find out if they
            // belong to a zone. If so, add it to the zone as
            // necessary
            List<DynamicList<label> > zonePoints(pz.size());

            // Estimate size
            forAll(zonePoints, zoneI)
            {
                zonePoints[zoneI].setCapacity(pz[zoneI].size() / nProcs_);
            }

            // Use the pointToZone map to find out the single zone (if any),
            // use slow search only for shared points.
            forAll(curPointLabels, pointi)
            {
                label curPoint = curPointLabels[pointi];

                label zoneI = pointToZone[curPoint];

                if (zoneI >= 0)
                {
                    // Single zone.
                    zonePoints[zoneI].append(pointi);
                }
                else if (zoneI == -2)
                {
                    // Multiple zones. Lookup.
                    forAll(pz, zoneI)
                    {
                        label index = pz[zoneI].whichPoint(curPoint);

                        if (index != -1)
                        {
                            zonePoints[zoneI].append(pointi);
                        }
                    }
                }
            }

            procMesh.pointZones().clearAddressing();
            procMesh.pointZones().setSize(zonePoints.size());
            forAll(zonePoints, zoneI)
            {
                procMesh.pointZones().set
                (
                    zoneI,
                    pz[zoneI].clone
                    (
                        procMesh.pointZones(),
                        zoneI,
                        zonePoints[zoneI].shrink()
                    )
                );
            }

            if (pz.size())
            {
                // Force writing on all processors
                procMesh.pointZones().writeOpt() = IOobject::AUTO_WRITE;
            }
        }

        // Face zones
        {
            const faceZoneMesh& fz = faceZones();

            // Go through all the zoned face and find out if they
            // belong to a zone. If so, add it to the zone as
            // necessary
            List<DynamicList<label> > zoneFaces(fz.size());
            List<DynamicList<bool> > zoneFaceFlips(fz.size());

            // Estimate size
            forAll(zoneFaces, zoneI)
            {
                label procSize = fz[zoneI].size() / nProcs_;

                zoneFaces[zoneI].setCapacity(procSize);
                zoneFaceFlips[zoneI].setCapacity(procSize);
            }

            // Go through all the zoned faces and find out if they
            // belong to a zone. If so, add it to the zone as
            // necessary
            forAll(curFaceLabels, facei)
            {
                // Remember to decrement the index by one (turning index)
                //
                label curF = mag(curFaceLabels[facei]) - 1;

                label zoneI = faceToZone[curF];

                if (zoneI >= 0)
                {
                    // Single zone. Add the face
                    zoneFaces[zoneI].append(facei);

                    label index = fz[zoneI].whichFace(curF);

                    bool flip = fz[zoneI].flipMap()[index];

                    // A turned face has its flip sense inverted on this proc
                    if (curFaceLabels[facei] < 0)
                    {
                        flip = !flip;
                    }

                    zoneFaceFlips[zoneI].append(flip);
                }
                else if (zoneI == -2)
                {
                    // Multiple zones. Lookup.
                    forAll(fz, zoneI)
                    {
                        label index = fz[zoneI].whichFace(curF);

                        if (index != -1)
                        {
                            zoneFaces[zoneI].append(facei);

                            bool flip = fz[zoneI].flipMap()[index];

                            if (curFaceLabels[facei] < 0)
                            {
                                flip = !flip;
                            }

                            zoneFaceFlips[zoneI].append(flip);
                        }
                    }
                }
            }

            procMesh.faceZones().clearAddressing();
            procMesh.faceZones().setSize(zoneFaces.size());
            forAll(zoneFaces, zoneI)
            {
                procMesh.faceZones().set
                (
                    zoneI,
                    fz[zoneI].clone
                    (
                        zoneFaces[zoneI].shrink(), // addressing
                        zoneFaceFlips[zoneI].shrink(), // flipmap
                        zoneI,
                        procMesh.faceZones()
                    )
                );
            }

            if (fz.size())
            {
                // Force writing on all processors
                procMesh.faceZones().writeOpt() = IOobject::AUTO_WRITE;
            }
        }

        // Cell zones
        {
            const cellZoneMesh& cz = cellZones();

            // Go through all the zoned cells and find out if they
            // belong to a zone. If so, add it to the zone as
            // necessary
            List<DynamicList<label> > zoneCells(cz.size());

            // Estimate size
            forAll(zoneCells, zoneI)
            {
                zoneCells[zoneI].setCapacity(cz[zoneI].size() / nProcs_);
            }

            forAll(curCellLabels, celli)
            {
                label curCellI = curCellLabels[celli];

                label zoneI = cellToZone[curCellI];

                if (zoneI >= 0)
                {
                    // Single zone.
                    zoneCells[zoneI].append(celli);
                }
                else if (zoneI == -2)
                {
                    // Multiple zones. Lookup.
                    forAll(cz, zoneI)
                    {
                        label index = cz[zoneI].whichCell(curCellI);

                        if (index != -1)
                        {
                            zoneCells[zoneI].append(celli);
                        }
                    }
                }
            }

            procMesh.cellZones().clearAddressing();
            procMesh.cellZones().setSize(zoneCells.size());
            forAll(zoneCells, zoneI)
            {
                procMesh.cellZones().set
                (
                    zoneI,
                    cz[zoneI].clone
                    (
                        zoneCells[zoneI].shrink(),
                        zoneI,
                        procMesh.cellZones()
                    )
                );
            }

            if (cz.size())
            {
                // Force writing on all processors
                procMesh.cellZones().writeOpt() = IOobject::AUTO_WRITE;
            }
        }

        // Set the precision of the points data to 10
        IOstream::defaultPrecision(10);

        procMesh.write();

        // Write points if pointsInstance differing from facesInstance
        if (facesInstancePointsPtr_.valid())
        {
            pointIOField pointsInstancePoints
            (
                IOobject
                (
                    "points",
                    pointsInstance(),
                    polyMesh::meshSubDir,
                    procMesh,
                    IOobject::NO_READ,
                    IOobject::NO_WRITE,
                    false
                ),
                xferMove(procPoints)
            );
            pointsInstancePoints.write();
        }

        // Decompose any sets: keep only elements present on this processor,
        // stored with their local (processor) indices.
        if (decomposeSets)
        {
            forAll(cellSets, i)
            {
                const cellSet& cs = cellSets[i];
                cellSet set(procMesh, cs.name(), cs.size()/nProcs_);
                forAll(curCellLabels, i)
                {
                    if (cs.found(curCellLabels[i]))
                    {
                        set.insert(i);
                    }
                }
                set.write();
            }
            forAll(faceSets, i)
            {
                const faceSet& cs = faceSets[i];
                faceSet set(procMesh, cs.name(), cs.size()/nProcs_);
                forAll(curFaceLabels, i)
                {
                    if (cs.found(mag(curFaceLabels[i])-1))
                    {
                        set.insert(i);
                    }
                }
                set.write();
            }
            forAll(pointSets, i)
            {
                const pointSet& cs = pointSets[i];
                pointSet set(procMesh, cs.name(), cs.size()/nProcs_);
                forAll(curPointLabels, i)
                {
                    if (cs.found(curPointLabels[i]))
                    {
                        set.insert(i);
                    }
                }
                set.write();
            }
        }

        // hexRef8 data: subset the refinement levels onto this processor
        if (cellLevelPtr.valid())
        {
            labelIOList
            (
                IOobject
                (
                    cellLevelPtr().name(),
                    facesInstance(),
                    polyMesh::meshSubDir,
                    procMesh,
                    IOobject::NO_READ,
                    IOobject::AUTO_WRITE
                ),
                UIndirectList<label>
                (
                    cellLevelPtr(),
                    procCellAddressing_[procI]
                )()
            ).write();
        }
        if (pointLevelPtr.valid())
        {
            labelIOList
            (
                IOobject
                (
                    pointLevelPtr().name(),
                    facesInstance(),
                    polyMesh::meshSubDir,
                    procMesh,
                    IOobject::NO_READ,
                    IOobject::AUTO_WRITE
                ),
                UIndirectList<label>
                (
                    pointLevelPtr(),
                    procPointAddressing_[procI]
                )()
            ).write();
        }
        if (level0EdgePtr.valid())
        {
            uniformDimensionedScalarField
            (
                IOobject
                (
                    level0EdgePtr().name(),
                    facesInstance(),
                    polyMesh::meshSubDir,
                    procMesh,
                    IOobject::NO_READ,
                    IOobject::AUTO_WRITE
                ),
                level0EdgePtr()
            ).write();
        }

        // Statistics
        Info<< endl
            << "Processor " << procI << nl
            << " Number of cells = " << procMesh.nCells()
            << endl;

        maxProcCells = max(maxProcCells, procMesh.nCells());

        label nBoundaryFaces = 0;
        label nProcPatches = 0;
        label nProcFaces = 0;

        forAll(procMesh.boundaryMesh(), patchi)
        {
            if (isA<processorPolyPatch>(procMesh.boundaryMesh()[patchi]))
            {
                const processorPolyPatch& ppp =
                    refCast<const processorPolyPatch>
                    (
                        procMesh.boundaryMesh()[patchi]
                    );

                Info<< " Number of faces shared with processor "
                    << ppp.neighbProcNo() << " = " << ppp.size() << endl;

                nProcPatches++;
                nProcFaces += ppp.size();
            }
            else
            {
                nBoundaryFaces += procMesh.boundaryMesh()[patchi].size();
            }
        }

        Info<< " Number of processor patches = " << nProcPatches << nl
            << " Number of processor faces = " << nProcFaces << nl
            << " Number of boundary faces = " << nBoundaryFaces << endl;

        totProcFaces += nProcFaces;
        totProcPatches += nProcPatches;
        maxProcPatches = max(maxProcPatches, nProcPatches);
        maxProcFaces = max(maxProcFaces, nProcFaces);

        // create and write the addressing information
        labelIOList pointProcAddressing
        (
            IOobject
            (
                "pointProcAddressing",
                procMesh.facesInstance(),
                procMesh.meshSubDir,
                procMesh,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            procPointAddressing_[procI]
        );
        pointProcAddressing.write();

        // Note: face addressing keeps the turning-index convention
        // (global face + 1, negated when the face is flipped).
        labelIOList faceProcAddressing
        (
            IOobject
            (
                "faceProcAddressing",
                procMesh.facesInstance(),
                procMesh.meshSubDir,
                procMesh,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            procFaceAddressing_[procI]
        );
        faceProcAddressing.write();

        labelIOList cellProcAddressing
        (
            IOobject
            (
                "cellProcAddressing",
                procMesh.facesInstance(),
                procMesh.meshSubDir,
                procMesh,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            procCellAddressing_[procI]
        );
        cellProcAddressing.write();

        // Write patch map for backwards compatibility.
        // (= identity map for original patches, -1 for processor patches)
        label nMeshPatches = curPatchSizes.size();
        labelList procBoundaryAddressing(identity(nMeshPatches));
        procBoundaryAddressing.setSize(nMeshPatches+nProcPatches, -1);

        labelIOList boundaryProcAddressing
        (
            IOobject
            (
                "boundaryProcAddressing",
                procMesh.facesInstance(),
                procMesh.meshSubDir,
                procMesh,
                IOobject::NO_READ,
                IOobject::NO_WRITE
            ),
            procBoundaryAddressing
        );
        boundaryProcAddressing.write();
    }

    scalar avgProcCells = scalar(nCells())/nProcs_;
    scalar avgProcPatches = scalar(totProcPatches)/nProcs_;
    scalar avgProcFaces = scalar(totProcFaces)/nProcs_;

    // In case of all faces on one processor. Just to avoid division by 0.
    if (totProcPatches == 0)
    {
        avgProcPatches = 1;
    }
    if (totProcFaces == 0)
    {
        avgProcFaces = 1;
    }

    // Each inter-processor face appears on both sides, hence the /2.
    Info<< nl
        << "Number of processor faces = " << totProcFaces/2 << nl
        << "Max number of cells = " << maxProcCells
        << " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
        << "% above average " << avgProcCells << ")" << nl
        << "Max number of processor patches = " << maxProcPatches
        << " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
        << "% above average " << avgProcPatches << ")" << nl
        << "Max number of faces between processors = " << maxProcFaces
        << " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
        << "% above average " << avgProcFaces << ")" << nl
        << endl;

    return true;
}
// ************************************************************************* //
| {
"pile_set_name": "Github"
} |
using System;
using System.IO;
using System.Text;
using BenchmarkDotNet.Attributes;
using ServiceStack.Text;
using ServiceStack.Text.Json;
using StackExchange.Profiling;
namespace ServiceStack.Text.Benchmarks
{
// Simple fixture type with seven string properties, used to benchmark
// (de)serialization of string-heavy objects.
public class StringType
{
    public string Value1 { get; set; }
    public string Value2 { get; set; }
    public string Value3 { get; set; }
    public string Value4 { get; set; }
    public string Value5 { get; set; }
    public string Value6 { get; set; }
    public string Value7 { get; set; }

    // Factory: every property set to the same text.
    public static StringType Create()
    {
        const string text = "Hello, world";
        return new StringType
        {
            Value1 = text,
            Value2 = text,
            Value3 = text,
            Value4 = text,
            Value5 = text,
            Value6 = text,
            Value7 = text
        };
    }
}
// Benchmarks for ServiceStack.Text JSON deserialization over three payload
// shapes: builtin-typed model, string-only model, and a deep MiniProfiler.
[MemoryDiagnoser]
public class JsonDeserializationBenchmarks
{
    static ModelWithCommonTypes commonTypesModel = ModelWithCommonTypes.Create(3);
    static MemoryStream stream = new MemoryStream(32768);

    // NOTE(review): the four fields below are not referenced by any benchmark
    // in this class; kept for source compatibility with sibling benchmarks.
    const string serializedString = "this is the test string";
    readonly string serializedString256 = new string('t', 256);
    readonly string serializedString512 = new string('t', 512);
    readonly string serializedString4096 = new string('t', 4096);

    static string commonTypesModelJson;
    static string stringTypeJson;

    // Serialize the fixtures once up front so each benchmark measures only
    // deserialization cost.
    static JsonDeserializationBenchmarks()
    {
        commonTypesModelJson = JsonSerializer.SerializeToString<ModelWithCommonTypes>(commonTypesModel);
        stringTypeJson = JsonSerializer.SerializeToString<StringType>(StringType.Create());
    }

    [Benchmark(Description = "Deserialize Json: class with builtin types")]
    public void DeserializeJsonCommonTypes()
    {
        var result = JsonSerializer.DeserializeFromString<ModelWithCommonTypes>(commonTypesModelJson);
    }

    // Description corrected: StringType declares 7 string properties, not 10.
    [Benchmark(Description = "Deserialize Json: class with 7 string properties")]
    public void DeserializeStringType()
    {
        var result = JsonSerializer.DeserializeFromString<StringType>(stringTypeJson);
    }

    [Benchmark(Description = "Deserialize Json: Complex MiniProfiler")]
    public MiniProfiler ComplexDeserializeServiceStack() => ServiceStack.Text.JsonSerializer.DeserializeFromString<MiniProfiler>(_complexProfilerJson);

    // Declaration order matters: static initializers run in source order, so
    // _complexProfiler must precede _complexProfilerJson.
    private static readonly MiniProfiler _complexProfiler = GetComplexProfiler();
    private static readonly string _complexProfilerJson = _complexProfiler.ToJson();

    // Build a deeply nested profiler (50 steps x 50 substeps x 50 custom
    // timings) to stress complex-graph deserialization.
    private static MiniProfiler GetComplexProfiler()
    {
        var mp = new MiniProfiler("Complex");
        for (var i = 0; i < 50; i++)
        {
            using (mp.Step("Step " + i))
            {
                for (var j = 0; j < 50; j++)
                {
                    using (mp.Step("SubStep " + j))
                    {
                        for (var k = 0; k < 50; k++)
                        {
                            using (mp.CustomTiming("Custom " + k, "YOLO!"))
                            {
                            }
                        }
                    }
                }
            }
        }
        return mp;
    }
}
}
| {
"pile_set_name": "Github"
} |
#! FIELDS idx_arg aver.targetdist-0 index description
#! SET type LinearBasisSet
#! SET ndimensions 1
#! SET ncoeffs_total 21
#! SET shape_arg 21
0 1.000000 0 L0(s)
1 0.000000 1 L1(s)
2 0.000000 2 L2(s)
3 0.000000 3 L3(s)
4 0.000000 4 L4(s)
5 0.000000 5 L5(s)
6 0.000000 6 L6(s)
7 0.000000 7 L7(s)
8 0.000000 8 L8(s)
9 0.000000 9 L9(s)
10 0.000000 10 L10(s)
11 0.000000 11 L11(s)
12 0.000000 12 L12(s)
13 0.000000 13 L13(s)
14 0.000000 14 L14(s)
15 0.000000 15 L15(s)
16 0.000000 16 L16(s)
17 0.000000 17 L17(s)
18 0.000000 18 L18(s)
19 0.000000 19 L19(s)
20 0.000000 20 L20(s)
#!-------------------
| {
"pile_set_name": "Github"
} |
/* Copyright (C) 2015-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
* SPDX-License-Identifier: GPL-3.0-or-later
*/
/**
* @file pack.h
* @brief A length-prefixed list of objects, also an array list.
*
* Each object is prefixed by item length, unlike array this structure
 * permits variable-length data. It is also equivalent to a forward-only list
* backed by an array.
*
* @note Maximum object size is 2^16 bytes, see ::pack_objlen_t
* @todo If some mistake happens somewhere, the access may end up in an infinite loop.
* (equality comparison on pointers)
*
* # Example usage:
*
* @code{.c}
* pack_t pack;
* pack_init(pack);
*
* // Reserve 2 objects, 6 bytes total
* pack_reserve(pack, 2, 4 + 2);
*
* // Push 2 objects
* pack_obj_push(pack, U8("jedi"), 4)
* pack_obj_push(pack, U8("\xbe\xef"), 2);
*
* // Iterate length-value pairs
* uint8_t *it = pack_head(pack);
* while (it != pack_tail(pack)) {
* uint8_t *val = pack_obj_val(it);
* it = pack_obj_next(it);
* }
*
* // Remove object
* pack_obj_del(pack, U8("jedi"), 4);
*
* pack_clear(pack);
* @endcode
*
* \addtogroup generics
* @{
*/
#pragma once
#include <stdint.h>
#include <string.h>
#include "array.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Packed object length type. */
typedef uint16_t pack_objlen_t;
/** Pack is defined as an array of bytes.
 * Layout: each stored object is a pack_objlen_t length immediately followed
 * by that many value bytes, with objects packed back to back. */
typedef array_t(uint8_t) pack_t;
/** Zero-initialize the pack. */
#define pack_init(pack) \
	array_init(pack)
/** Make the pack empty and free pointed-to memory (plain malloc/free). */
#define pack_clear(pack) \
	array_clear(pack)
/** Make the pack empty and free pointed-to memory.
 * Mempool usage: pass mm_free and a knot_mm_t* . */
#define pack_clear_mm(pack, free, baton) \
	array_clear_mm((pack), (free), (baton))
/** Reserve space for *additional* objects in the pack (plain malloc/free).
 * @return 0 if success, <0 on failure */
#define pack_reserve(pack, objs_count, objs_len) \
	pack_reserve_mm((pack), (objs_count), (objs_len), array_std_reserve, NULL)
/** Reserve space for *additional* objects in the pack.
 * Mempool usage: pass kr_memreserve and a knot_mm_t* .
 * Reserves objs_len value bytes plus one length prefix per object.
 * @return 0 if success, <0 on failure */
#define pack_reserve_mm(pack, objs_count, objs_len, reserve, baton) \
	array_reserve_mm((pack), (pack).len + (sizeof(pack_objlen_t)*(objs_count) + (objs_len)), (reserve), (baton))
/** Return pointer to first packed object (NULL when the pack is empty).
 *
 * Recommended way to iterate:
 *   for (uint8_t *it = pack_head(pack); it != pack_tail(pack); it = pack_obj_next(it))
 */
#define pack_head(pack) \
	((pack).len > 0 ? &((pack).at[0]) : NULL)
/** Return pack end pointer (one past the last byte; NULL when empty). */
#define pack_tail(pack) \
	((pack).len > 0 ? &((pack).at[(pack).len]) : NULL)
/** Return packed object length. */
/** Return packed object length (0 for a NULL iterator). */
static inline pack_objlen_t pack_obj_len(uint8_t *it)
{
	if (it == NULL)
		return 0;
	pack_objlen_t len;
	memcpy(&len, it, sizeof(len));
	return len;
}
/** Return packed object value. */
/** Return pointer to a packed object's value bytes (skips the length prefix).
 * A NULL iterator is a caller bug: asserts and returns NULL. */
static inline uint8_t *pack_obj_val(uint8_t *it)
{
	if (it != NULL) {
		return it + sizeof(pack_objlen_t);
	}
	assert(it);
	return NULL;
}
/** Return pointer to next packed object. */
/** Return pointer to the next packed object (value start + value length).
 * A NULL iterator is a caller bug: asserts and returns NULL. */
static inline uint8_t *pack_obj_next(uint8_t *it)
{
	if (it == NULL) {
		assert(it);
		return NULL;
	}
	uint8_t *val = pack_obj_val(it);
	return val + pack_obj_len(it);
}
/** Return pointer to the last packed object. */
/** Return pointer to the last packed object, or NULL for an empty pack.
 * Linear scan: walks the whole pack, O(number of objects). */
static inline uint8_t *pack_last(pack_t pack)
{
	if (pack.len == 0) {
		return NULL;
	}
	uint8_t *const end = pack_tail(pack);
	uint8_t *cur = pack_head(pack);
	for (;;) {
		uint8_t *nxt = pack_obj_next(cur);
		if (nxt == end) {
			return cur;
		}
		cur = nxt;
	}
}
/** Push object to the end of the pack
* @return 0 on success, negative number on failure
*/
/** Push object to the end of the pack.
 * Requires capacity reserved beforehand (see pack_reserve*).
 * @return 0 on success, negative number on failure
 */
static inline int pack_obj_push(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
	if (pack == NULL || obj == NULL) {
		assert(false);
		return kr_error(EINVAL);
	}
	const size_t needed = len + sizeof(len);
	if (pack->len + needed > pack->cap) {
		/* No implicit growth: caller must have reserved space. */
		return kr_error(ENOSPC);
	}
	uint8_t *dst = pack->at + pack->len;
	memcpy(dst, (char *)&len, sizeof(len));
	memcpy(dst + sizeof(len), obj, len);
	pack->len += needed;
	return 0;
}
/** Returns a pointer to packed object.
* @return pointer to packed object or NULL
*/
/** Returns a pointer to a packed object equal to (obj, len).
 * Linear scan comparing length first, then bytes.
 * @return pointer to packed object or NULL
 */
static inline uint8_t *pack_obj_find(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
	if (pack == NULL || obj == NULL) {
		assert(obj != NULL);
		return NULL;
	}
	uint8_t *const end = pack_tail(*pack);
	for (uint8_t *it = pack_head(*pack); it != end; it = pack_obj_next(it)) {
		uint8_t *val = pack_obj_val(it);
		if (pack_obj_len(it) == len && memcmp(obj, val, len) == 0) {
			return it;
		}
	}
	return NULL;
}
/** Delete object from the pack
* @return 0 on success, negative number on failure
*/
/** Delete object from the pack (first occurrence only); remaining objects
 * are shifted down with memmove.
 * @return 0 on success, negative number on failure
 * NOTE(review): the not-found path returns a plain -1, unlike the
 * kr_error(...) convention used elsewhere in this header — confirm callers
 * only test for non-zero before changing it.
 */
static inline int pack_obj_del(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
	if (pack == NULL || obj == NULL) {
		assert(obj != NULL);
		return kr_error(EINVAL);
	}
	/* Capture the tail before shrinking: it bounds the memmove below. */
	uint8_t *endp = pack_tail(*pack);
	uint8_t *it = pack_obj_find(pack, obj, len);
	if (it) {
		/* Matched object has the same length, so prefix + len bytes go. */
		size_t packed_len = len + sizeof(len);
		memmove(it, it + packed_len, endp - it - packed_len);
		pack->len -= packed_len;
		return 0;
	}
	return -1;
}
/** Clone a pack, replacing destination pack; (*dst == NULL) is valid input.
* @return kr_error(ENOMEM) on allocation failure. */
/** Clone a pack, replacing destination pack; (*dst == NULL) is valid input.
 * Allocates *dst from the given pool when absent, then copies src's bytes.
 * @return kr_error(ENOMEM) on allocation failure. */
static inline int pack_clone(pack_t **dst, const pack_t *src, knot_mm_t *pool)
{
	if (!dst || !src) {
		assert(false);
		return kr_error(EINVAL);
	}
	/* Get a valid pack_t. */
	if (!*dst) {
		*dst = mm_alloc(pool, sizeof(pack_t));
		if (!*dst) return kr_error(ENOMEM);
		pack_init(**dst);
		/* Clone data only if needed */
		/* (early return: a fresh zero-initialized pack already equals
		 * an empty src; the reserve/copy below would be a no-op) */
		if (src->len == 0) return kr_ok();
	}
	/* Replace the contents of the pack_t. */
	int ret = array_reserve_mm(**dst, src->len, kr_memreserve, pool);
	if (ret < 0) {
		return kr_error(ENOMEM);
	}
	memcpy((*dst)->at, src->at, src->len);
	(*dst)->len = src->len;
	return kr_ok();
}
#ifdef __cplusplus
}
#endif
/** @} */
| {
"pile_set_name": "Github"
} |
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
// Count the digits after the decimal point in the string form of n
// (0 when there is no decimal point).
function getDecimals(n) {
  var str = n + '';
  var dot = str.indexOf('.');
  return dot < 0 ? 0 : str.length - dot - 1;
}
// CLDR plural operands: v = number of visible fraction digits (capped at 3
// unless opt_precision is given), f = those fraction digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;
  if (v === undefined) {
    v = Math.min(getDecimals(n), 3);
  }
  var base = Math.pow(10, v);
  return {v: v, f: ((n * base) | 0) % base};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"Z.MU.",
"Z.MW."
],
"DAY": [
"Ku w\u2019indwi",
"Ku wa mbere",
"Ku wa kabiri",
"Ku wa gatatu",
"Ku wa kane",
"Ku wa gatanu",
"Ku wa gatandatu"
],
"ERANAMES": [
"Mbere ya Yezu",
"Nyuma ya Yezu"
],
"ERAS": [
"Mb.Y.",
"Ny.Y"
],
"FIRSTDAYOFWEEK": 0,
"MONTH": [
"Nzero",
"Ruhuhuma",
"Ntwarante",
"Ndamukiza",
"Rusama",
"Ruheshi",
"Mukakaro",
"Nyandagaro",
"Nyakanga",
"Gitugutu",
"Munyonyo",
"Kigarama"
],
"SHORTDAY": [
"cu.",
"mbe.",
"kab.",
"gtu.",
"kan.",
"gnu.",
"gnd."
],
"SHORTMONTH": [
"Mut.",
"Gas.",
"Wer.",
"Mat.",
"Gic.",
"Kam.",
"Nya.",
"Kan.",
"Nze.",
"Ukw.",
"Ugu.",
"Uku."
],
"STANDALONEMONTH": [
"Nzero",
"Ruhuhuma",
"Ntwarante",
"Ndamukiza",
"Rusama",
"Ruheshi",
"Mukakaro",
"Nyandagaro",
"Nyakanga",
"Gitugutu",
"Munyonyo",
"Kigarama"
],
"WEEKENDRANGE": [
5,
6
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss",
"mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "d/M/y HH:mm",
"shortDate": "d/M/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "FBu",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a4",
"posPre": "",
"posSuf": "\u00a4"
}
]
},
"id": "rn-bi",
"localeID": "rn_BI",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
| {
"pile_set_name": "Github"
} |
/*
Package eventsource provides server-sent events for a net/http server.
Example:
package main
import (
"gopkg.in/antage/eventsource.v1"
"log"
"net/http"
"strconv"
"time"
)
func main() {
es := eventsource.New(nil, nil)
defer es.Close()
http.Handle("/events", es)
go func() {
id := 1
for {
es.SendEventMessage("tick", "tick-event", strconv.Itoa(id))
id++
time.Sleep(2 * time.Second)
}
}()
log.Fatal(http.ListenAndServe(":8080", nil))
}
*/
package eventsource
| {
"pile_set_name": "Github"
} |
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
package status
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for
// different programming environments, including REST APIs and RPC APIs. It is
// used by [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error
// message, and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes
// if needed. The error message should be a developer-facing English message
// that helps developers *understand* and *resolve* the error. If a localized
// user-facing error message is needed, put the localized message in the error
// details or localize it in the client. The optional error details may contain
// arbitrary information about the error. There is a predefined set of error
// detail types in the package `google.rpc` that can be used for common error
// conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of
// [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
// by the client.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
// XXX_* fields are proto-runtime bookkeeping generated by protoc-gen-go
// (unknown-field preservation and cached size); not for direct use.
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Standard proto plumbing generated by protoc-gen-go; wire (un)marshalling,
// merging, sizing and teardown all delegate to the shared message-info table
// declared below. Do not edit by hand.
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
return fileDescriptor_24d244abaf643bfe, []int{0}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Status.Unmarshal(m, b)
}
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
}
func (m *Status) XXX_Merge(src proto.Message) {
xxx_messageInfo_Status.Merge(m, src)
}
func (m *Status) XXX_Size() int {
return xxx_messageInfo_Status.Size(m)
}
func (m *Status) XXX_DiscardUnknown() {
xxx_messageInfo_Status.DiscardUnknown(m)
}
var xxx_messageInfo_Status proto.InternalMessageInfo
// GetCode returns the status code; safe on a nil receiver (returns 0).
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
// GetMessage returns the developer-facing message; safe on a nil receiver
// (returns "").
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
// GetDetails returns the error-detail messages; safe on a nil receiver
// (returns nil).
func (m *Status) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
// Register the Status message type and its compressed file descriptor with
// the proto runtime at package load time.
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
var fileDescriptor_24d244abaf643bfe = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
| {
"pile_set_name": "Github"
} |
---
title: "Installation"
date:
draft: false
weight: 20
---
## Requirements
* **PostgreSQL 9.5** or later
* **PostGIS 2.4** or later
The tile server depends on the [ST_AsMVT()](https://postgis.net/docs/ST_AsMVT.html) function, which is only available if PostGIS has been compiled with support for the **libprotobuf** library. See the output from [PostGIS_Full_Version](https://postgis.net/docs/PostGIS_Full_Version.html), for example:
```sql
SELECT postgis_full_version()
```
```
POSTGIS="3.0.1" [EXTENSION] PGSQL="121" GEOS="3.8.0-CAPI-1.13.1 "
PROJ="6.1.0" LIBXML="2.9.4" LIBJSON="0.13"
LIBPROTOBUF="1.3.2" WAGYU="0.4.3 (Internal)"
```
You don't need advanced knowledge in Postgres/PostGIS or web mapping to install `pg_tileserv` and set up the examples in this guide. If you are new to functions in Postgres, you might try this [quick interactive course](https://learn.crunchydata.com/postgresql-devel/courses/beyond-basics/basicfunctions/) to better see how you might take advantage of `pg_tileserv`'s capabilities.
We also link to [further resources](/learn-more/) at the end of this guide, for your reference.
## Installation
To install `pg_tileserv`, download the binary file. Alternatively, you may run a container. These first two options will suit most use cases; needing to build the executable from source is rare.
### A. Download binaries
Builds of the latest code:
* [Linux](https://postgisftw.s3.amazonaws.com/pg_tileserv_latest_linux.zip)
* [Windows](https://postgisftw.s3.amazonaws.com/pg_tileserv_latest_windows.zip)
* [OSX](https://postgisftw.s3.amazonaws.com/pg_tileserv_latest_osx.zip)
Unzip the file, copy the `pg_tileserv` binary wherever you wish, or use it in place. If you move the binary, remember to move the `assets/` directory to the same location, or start the server using the `AssetsDir` configuration option.
### B. Run container
A Docker image is available on DockerHub:
* [Docker](https://hub.docker.com/r/pramsey/pg_tileserv/)
When you run the container, provide the database connection information in the `DATABASE_URL` environment variable and map the default service port (7800).
```sh
docker run -e DATABASE_URL=postgres://user:pass@host/dbname -p 7800:7800 pramsey/pg_tileserv
```
### C. Build from source
If you must build from source, install the [Go software development environment](https://golang.org/doc/install). Make sure that the [`GOPATH` environment variable](https://github.com/golang/go/wiki/SettingGOPATH) is also set.
```sh
SRC=$GOPATH/src/github.com/CrunchyData
mkdir -p $SRC
cd $SRC
git clone git@github.com:CrunchyData/pg_tileserv.git
cd pg_tileserv
go build
go install
```
To run the build, set the `DATABASE_URL` environment variable to the database you want to connect to, and run the binary.
```sh
export DATABASE_URL=postgres://user:pass@host/dbname
$GOPATH/bin/pg_tileserv
```
## Deployment
### Basic operation
#### Linux/OSX
```sh
export DATABASE_URL=postgresql://username:password@host/dbname
./pg_tileserv
```
#### Windows
```
SET DATABASE_URL=postgresql://username:password@host/dbname
pg_tileserv.exe
```
### Configuration file
The configuration file will be automatically read from the following locations, if it exists:
* In the system configuration directory, at `/etc/pg_tileserv.toml`
* Relative to the directory from which the program is run, `./pg_tileserv.toml`
If you want to pass a path directly to the configuration file, use the `--config` command line parameter.
Configuration files in other locations will be ignored when using the `--config` option.
```sh
./pg_tileserv --config /opt/pg_tileserv/pg_tileserv.toml
```
The default settings will suit most uses, and the program autodetects values such as the server name.
```toml
# Database connection
DbConnection = "user=you host=localhost dbname=yourdb"
# Close pooled connections after this interval
DbPoolMaxConnLifeTime = "1h"
# Hold no more than this number of connections in the database pool
DbPoolMaxConns = 4
# Look to read html templates from this directory
AssetsPath = "./assets"
# Accept connections on this subnet (default accepts on all subnets)
HttpHost = "0.0.0.0"
# Accept connections on this port
HttpPort = 7800
# Advertise URLs relative to this server name
# default is to look this up from incoming request headers
# UrlBase = "http://yourserver.com/"
# Resolution to quantize vector tiles to
DefaultResolution = 4096
# Rendering buffer to add to vector tiles
DefaultBuffer = 256
# Limit number of features requested (-1 = no limit)
MaxFeaturesPerTile = 10000
# Advertise this minimum zoom level
DefaultMinZoom = 0
# Advertise this maximum zoom level
DefaultMaxZoom = 22
# Allow any page to consume these tiles
CORSOrigins = "*"
# Output extra logging information?
Debug = false
```
| {
"pile_set_name": "Github"
} |
@node pthread_rwlock_trywrlock
@section @code{pthread_rwlock_trywrlock}
@findex pthread_rwlock_trywrlock
POSIX specification:@* @url{https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_trywrlock.html}
Gnulib module: pthread-rwlock
Portability problems fixed by Gnulib:
@itemize
@item
This function is missing on some platforms:
Minix 3.1.8, mingw, MSVC 14, Android 4.3.
But the provided replacement is just a dummy on some of these platforms:
Minix 3.1.8.
@end itemize
Portability problems not fixed by Gnulib:
@itemize
@end itemize
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="Microsoft.Identity.Client" version="1.1.0-preview" targetFramework="net452" />
<package id="Microsoft.SharePointOnline.CSOM" version="16.1.6802.1200" targetFramework="net452" />
<package id="Newtonsoft.Json" version="10.0.3" targetFramework="net452" />
</packages> | {
"pile_set_name": "Github"
} |
/// https://leetcode.com/problems/minimum-size-subarray-sum/description/
/// Author : liuyubobobo
/// Time : 2017-11-13
// Sliding Window
// Another Implementation
// Time Complexity: O(n)
// Space Complexity: O(1)
public class Solution5 {

    /**
     * Returns the minimal length of a contiguous subarray of {@code nums}
     * whose sum is >= {@code s}, or 0 when no such subarray exists.
     *
     * Sliding window nums[l...r]: grow r while the window sum is below s,
     * then shrink from l while the sum stays >= s, tracking the best length.
     *
     * Time Complexity: O(n) -- each index enters and leaves the window once.
     * Space Complexity: O(1).
     *
     * @param s    target sum; must be positive
     * @param nums input array; must be non-null
     * @return the shortest qualifying subarray length, or 0 if none exists
     * @throws IllegalArgumentException if s <= 0 or nums is null
     */
    public int minSubArrayLen(int s, int[] nums) {
        if (s <= 0 || nums == null)
            throw new IllegalArgumentException("Illegal Arguments"); // fixed misspelled "Illigal"

        int l = 0, r = -1; // sliding window: nums[l...r]
        int sum = 0;
        int res = nums.length + 1; // sentinel: no window found yet
        while (r + 1 < nums.length) {
            // Expand the window to the right until it reaches the target sum.
            while (r + 1 < nums.length && sum < s)
                sum += nums[++r];
            if (sum >= s)
                res = Math.min(res, r - l + 1);
            // Shrink from the left while the window still satisfies the target.
            while (l < nums.length && sum >= s) {
                sum -= nums[l++];
                if (sum >= s)
                    res = Math.min(res, r - l + 1);
            }
        }
        return res == nums.length + 1 ? 0 : res;
    }

    /** Smoke test: expects 2 (subarray [4, 3]). */
    public static void main(String[] args) {
        int[] nums = {2, 3, 1, 2, 4, 3};
        int s = 7;
        System.out.println((new Solution5()).minSubArrayLen(s, nums));
    }
}
| {
"pile_set_name": "Github"
} |
(*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*)
open! IStd
module F = Format
module L = Logging
open PulseBasicInterface
module Memory = PulseBaseMemory
module Stack = PulseBaseStack
module AddressAttributes = PulseBaseAddressAttributes
(* {2 Abstract domain description } *)
type t = {heap: Memory.t; stack: Stack.t; attrs: AddressAttributes.t}
(* The initial abstract state: empty heap, stack and attributes. *)
let empty =
{ heap=
Memory.empty
(* TODO: we could record that 0 is an invalid address at this point but this makes the
analysis go a bit overboard with the Nullptr reports. *)
; stack= Stack.empty
; attrs= AddressAttributes.empty }
type cell = Memory.Edges.t * Attributes.t
(* Look up the cell for [addr]: [None] when the address has neither edges nor
   attributes, otherwise both halves with the missing one defaulted to empty. *)
let find_cell_opt addr {heap; attrs} =
  let edges_opt = Memory.find_opt addr heap in
  let attrs_opt = AddressAttributes.find_opt addr attrs in
  match (edges_opt, attrs_opt) with
  | None, None ->
      None
  | _, _ ->
      let edges = Option.value edges_opt ~default:Memory.Edges.empty in
      let attrs = Option.value attrs_opt ~default:Attributes.empty in
      Some (edges, attrs)
(** comparison between two elements of the domain to determine the [<=] relation
Given two states [lhs] and [rhs], try to find a bijection [lhs_to_rhs] (with inverse
[rhs_to_lhs]) between the addresses of [lhs] and [rhs] such that
[lhs_to_rhs(reachable(lhs)) = reachable(rhs)] (where addresses are reachable if they are
reachable from stack variables). *)
module GraphComparison = struct
module AddressMap = PrettyPrintable.MakePPMap (AbstractValue)
(** translation between the abstract values on the LHS and the ones on the RHS *)
type mapping =
{ rhs_to_lhs: AbstractValue.t AddressMap.t (** map from RHS values to LHS *)
; lhs_to_rhs: AbstractValue.t AddressMap.t (** inverse map from [rhs_to_lhs] *) }
(* the empty bijection: no addresses have been matched yet *)
let empty_mapping = {rhs_to_lhs= AddressMap.empty; lhs_to_rhs= AddressMap.empty}
let pp_mapping fmt {rhs_to_lhs; lhs_to_rhs} =
F.fprintf fmt "@[<v>{ rhs_to_lhs=@[<hv2>%a@];@,lhs_to_rhs=@[<hv2>%a@];@,}@]"
(AddressMap.pp ~pp_value:AbstractValue.pp)
rhs_to_lhs
(AddressMap.pp ~pp_value:AbstractValue.pp)
lhs_to_rhs
(** try to add the fact that [addr_lhs] corresponds to [addr_rhs] to the [mapping] *)
let record_equal ~addr_lhs ~addr_rhs mapping =
(* have we seen [addr_lhs] before?.. *)
match AddressMap.find_opt addr_lhs mapping.lhs_to_rhs with
| Some addr_rhs' when not (AbstractValue.equal addr_rhs addr_rhs') ->
(* ...yes, but it was bound to another address *)
L.d_printfln
"Aliasing in LHS not in RHS: LHS address %a in current already bound to %a, not %a@\n\
State=%a"
AbstractValue.pp addr_lhs AbstractValue.pp addr_rhs' AbstractValue.pp addr_rhs pp_mapping
mapping ;
`AliasingLHS
| Some _addr_rhs (* [_addr_rhs = addr_rhs] *) ->
`AlreadyVisited
| None -> (
(* ...and have we seen [addr_rhs] before?.. *)
match AddressMap.find_opt addr_rhs mapping.rhs_to_lhs with
| Some addr_lhs' ->
(* ...yes, but it was bound to another address: [addr_lhs' != addr_lhs] otherwise we would
have found [addr_lhs] in the [lhs_to_rhs] map above *)
L.d_printfln
"Aliasing in RHS not in LHS: RHS address %a in current already bound to %a, not %a@\n\
State=%a"
AbstractValue.pp addr_rhs AbstractValue.pp addr_lhs' AbstractValue.pp addr_lhs
pp_mapping mapping ;
`AliasingRHS
| None ->
(* [addr_rhs] and [addr_lhs] are both new, record that they correspond to each other *)
let mapping' =
{ rhs_to_lhs= AddressMap.add addr_rhs addr_lhs mapping.rhs_to_lhs
; lhs_to_rhs= AddressMap.add addr_lhs addr_rhs mapping.lhs_to_rhs }
in
`NotAlreadyVisited mapping' )
type isograph_relation =
| NotIsomorphic (** no mapping was found that can make LHS the same as the RHS *)
| IsomorphicUpTo of mapping (** [mapping(lhs)] is isomorphic to [rhs] *)
(** can we extend [mapping] so that the subgraph of [lhs] rooted at [addr_lhs] is isomorphic to
the subgraph of [rhs] rooted at [addr_rhs]? *)
let rec isograph_map_from_address ~lhs ~addr_lhs ~rhs ~addr_rhs mapping =
L.d_printfln "%a<->%a@\n" AbstractValue.pp addr_lhs AbstractValue.pp addr_rhs ;
match record_equal mapping ~addr_lhs ~addr_rhs with
| `AlreadyVisited ->
IsomorphicUpTo mapping
| `AliasingRHS | `AliasingLHS ->
NotIsomorphic
| `NotAlreadyVisited mapping -> (
(* a cell with no edges and no attributes is treated as absent on both sides *)
let get_non_empty_cell addr astate =
find_cell_opt addr astate
|> Option.filter ~f:(fun (edges, attrs) ->
not (Memory.Edges.is_empty edges && Attributes.is_empty attrs)
(* this can happen because of [register_address] or because we don't care to delete empty
edges when removing edges *) )
in
let lhs_cell_opt = get_non_empty_cell addr_lhs lhs in
let rhs_cell_opt = get_non_empty_cell addr_rhs rhs in
match (lhs_cell_opt, rhs_cell_opt) with
| None, None ->
IsomorphicUpTo mapping
| Some _, None | None, Some _ ->
NotIsomorphic
| Some (edges_rhs, attrs_rhs), Some (edges_lhs, attrs_lhs) ->
(* continue the comparison recursively on all edges and attributes *)
if Attributes.equal attrs_rhs attrs_lhs then
let bindings_lhs = Memory.Edges.bindings edges_lhs in
let bindings_rhs = Memory.Edges.bindings edges_rhs in
isograph_map_edges ~lhs ~edges_lhs:bindings_lhs ~rhs ~edges_rhs:bindings_rhs mapping
else NotIsomorphic )
(** check that the isograph relation can be extended for all edges *)
and isograph_map_edges ~lhs ~edges_lhs ~rhs ~edges_rhs mapping =
match (edges_lhs, edges_rhs) with
| [], [] ->
IsomorphicUpTo mapping
| (a_lhs, (addr_lhs, _trace_lhs)) :: edges_lhs, (a_rhs, (addr_rhs, _trace_rhs)) :: edges_rhs
when Memory.Access.equal a_lhs a_rhs -> (
(* check isograph relation from the destination addresses *)
match isograph_map_from_address ~lhs ~addr_lhs ~rhs ~addr_rhs mapping with
| IsomorphicUpTo mapping ->
(* ok: continue with the other edges *)
isograph_map_edges ~lhs ~edges_lhs ~rhs ~edges_rhs mapping
| NotIsomorphic ->
NotIsomorphic )
| _ :: _, _ :: _ | [], _ :: _ | _ :: _, [] ->
NotIsomorphic
(** check that the memory graph induced by the addresses in [lhs] reachable from the variables in
[stack_lhs] is a isograph of the same graph in [rhs] starting from [stack_rhs], up to some
[mapping] *)
let rec isograph_map_from_stack ~lhs ~stack_lhs ~rhs ~stack_rhs mapping =
match (stack_lhs, stack_rhs) with
| [], [] ->
IsomorphicUpTo mapping
| (var_lhs, (addr_lhs, _trace_lhs)) :: stack_lhs, (var_rhs, (addr_rhs, _trace_rhs)) :: stack_rhs
when Var.equal var_lhs var_rhs -> (
match isograph_map_from_address ~lhs ~addr_lhs ~rhs ~addr_rhs mapping with
| IsomorphicUpTo mapping ->
isograph_map_from_stack ~lhs ~stack_lhs ~rhs ~stack_rhs mapping
| NotIsomorphic ->
NotIsomorphic )
| _ :: _, _ :: _ | [], _ :: _ | _ :: _, [] ->
NotIsomorphic
(* entry point: walk both stacks in parallel and compare the graphs hanging off
   each matching variable *)
let isograph_map ~lhs ~rhs mapping =
let stack_lhs = Stack.bindings lhs.stack in
let stack_rhs = Stack.bindings rhs.stack in
isograph_map_from_stack ~lhs ~rhs ~stack_lhs ~stack_rhs mapping
(* [true] iff some extension of [mapping] makes [lhs] and [rhs] isomorphic *)
let is_isograph ~lhs ~rhs mapping =
match isograph_map ~lhs ~rhs mapping with IsomorphicUpTo _ -> true | NotIsomorphic -> false
end
(* Pretty-print the whole abstract state: stack roots, heap memory graph, and
   per-address attributes. *)
let pp fmt {heap; stack; attrs} =
F.fprintf fmt "{@[<v1> roots=@[<hv>%a@];@;mem =@[<hv>%a@];@;attrs=@[<hv>%a@];@]}" Stack.pp stack
Memory.pp heap AddressAttributes.pp attrs
module GraphVisit : sig
val fold :
var_filter:(Var.t -> bool)
-> t
-> init:'accum
-> f:
( 'accum
-> AbstractValue.t
-> Var.t
-> Memory.Access.t list
-> ('accum, 'final) Base.Continue_or_stop.t)
-> finish:('accum -> 'final)
-> AbstractValue.Set.t * 'final
(** Generic graph traversal of the memory starting from each variable in the stack that pass
[var_filter], in order. Returns the result of folding over every address in the graph and the
set of addresses that have been visited before [f] returned [Stop] or all reachable addresses
were seen. [f] is passed each address together with the variable from which the address was
reached and the access path from that variable to the address. *)
end = struct
open Base.Continue_or_stop
(* add [address] to [visited], reporting whether it was already there *)
let visit address visited =
if AbstractValue.Set.mem address visited then `AlreadyVisited
else
let visited = AbstractValue.Set.add address visited in
`NotAlreadyVisited visited
(* depth-first visit of [address]: apply [f], then recurse into its outgoing
   heap edges; [rev_accesses] is the access path from [orig_var], reversed *)
let rec visit_address orig_var ~f rev_accesses astate address ((visited, accum) as visited_accum)
=
match visit address visited with
| `AlreadyVisited ->
Continue visited_accum
| `NotAlreadyVisited visited -> (
match f accum address orig_var rev_accesses with
| Continue accum -> (
match Memory.find_opt address astate.heap with
| None ->
Continue (visited, accum)
| Some edges ->
visit_edges orig_var ~f rev_accesses astate ~edges (visited, accum) )
| Stop fin ->
Stop (visited, fin) )
(* fold over every outgoing edge; a [Stop] from below is re-wrapped so the
   outer [fold_until] also stops *)
and visit_edges orig_var ~f rev_accesses ~edges astate visited_accum =
let finish visited_accum = Continue visited_accum in
Container.fold_until edges ~fold:Memory.Edges.fold ~finish ~init:visited_accum
~f:(fun visited_accum (access, (address, _trace)) ->
match visit_address orig_var ~f (access :: rev_accesses) astate address visited_accum with
| Continue _ as cont ->
cont
| Stop fin ->
Stop (Stop fin) )
let fold ~var_filter astate ~init ~f ~finish =
let finish (visited, accum) = (visited, finish accum) in
let init = (AbstractValue.Set.empty, init) in
Container.fold_until astate.stack ~fold:(IContainer.fold_of_pervasives_map_fold Stack.fold)
~init ~finish ~f:(fun visited_accum (var, (address, _loc)) ->
if var_filter var then visit_address var ~f [] astate address visited_accum
else Continue visited_accum )
end
include GraphComparison
(* The set of abstract addresses reachable from any stack variable; the fold
   accumulates nothing, so only the visited-set half of the result is kept. *)
let reachable_addresses astate =
GraphVisit.fold astate
~var_filter:(fun _ -> true)
~init:() ~finish:Fn.id
~f:(fun () _ _ _ -> Continue ())
|> fst
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright © 2012-2015 eBay Software Foundation
* This program is dual licensed under the MIT and Apache 2.0 licenses.
* Please see LICENSE for more information.
*******************************************************************************/
package com.ebay.jetstream.xmlser;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.ebay.jetstream.xmlser.simple.SimpleXMLSerializer;
import com.ebay.jetstream.xmlser.spring.SpringBeanDeserializer;
import com.ebay.jetstream.xmlser.spring.SpringXMLSerializer;
public class XMLSerializationManager {

  // Registries keyed by format name ("xml", "spring", ...).
  // NOTE(review): plain HashMap/ArrayList with no synchronization -- this
  // assumes all registration happens during single-threaded startup; confirm.
  private static final Map<String, XMLSerializer> s_serializers = new HashMap<String, XMLSerializer>();
  private static final Map<String, IXmlDeserializer> s_deserializers = new HashMap<String, IXmlDeserializer>();
  private static final List<Class<?>> s_xserializable = new ArrayList<Class<?>>();

  static {
    registerSerializer("xml", new SimpleXMLSerializer());
    registerSerializer("spring", new SpringXMLSerializer());
    registerDeserializer("spring", new SpringBeanDeserializer());
  }

  /** @return the deserializer registered for {@code format}, or null if none. */
  public static IXmlDeserializer getDeserializer(String format) {
    return s_deserializers.get(format);
  }

  /** @return the serializer registered for {@code format}, or null if none. */
  public static XMLSerializer getSerializer(String format) {
    return s_serializers.get(format);
  }

  /**
   * A getter is hidden from serialization when it is null, annotated
   * {@code @Hidden}, takes parameters (so it is not a bean getter), or is
   * {@code getClass()}.
   */
  public static boolean isHidden(Method getter) {
    return getter == null || getter.isAnnotationPresent(Hidden.class) || getter.getParameterTypes().length != 0
        || getter.getName().equals("getClass");
  }

  /**
   * Returns true if classes of this object should be considered to be XML serializable.
   *
   * @param clazz
   *          the class to check
   * @return true if the class is XSerializable, or is explicitly listed as serializable.
   */
  public static boolean isXSerializable(Class<?> clazz) {
    if (XSerializable.class.isAssignableFrom(clazz)) {
      return true;
    }
    for (Class<?> explicitx : s_xserializable) {
      if (explicitx.isAssignableFrom(clazz)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Registers a deserializer for the given format.
   *
   * @return the deserializer previously registered for {@code format}, or null.
   */
  public static IXmlDeserializer registerDeserializer(String format, IXmlDeserializer deserializer) {
    // Map.put already returns the previous mapping, so the former
    // get-then-put double lookup is unnecessary.
    return s_deserializers.put(format, deserializer);
  }

  /**
   * Registers a serializer for the given format.
   *
   * @return the serializer previously registered for {@code format}, or null.
   */
  public static XMLSerializer registerSerializer(String format, XMLSerializer serializer) {
    return s_serializers.put(format, serializer);
  }

  /**
   * Adds a class to the explicit list of serializable classes. If the class implements XSerializable, it is already
   * implicitly serializable. This mechanism exists for classes that should be serialized but the source is unavailable
   * and extending isn't an appropriate option.
   *
   * @param clazz
   *          the class to add.
   * @return true iff the class is added.
   */
  public static boolean registerXSerializable(Class<?> clazz) {
    boolean add = !s_xserializable.contains(clazz);
    if (add) {
      s_xserializable.add(clazz);
    }
    return add;
  }
}
| {
"pile_set_name": "Github"
} |
The models in this directory are bundled models from the MMD (MikuMikuDance) software,
so I have copied the MMD readme.txt here.
by takahiro
----------------------------------------------------------------------
MikuMikuDance.exe Ver.5.24
3Dポリゴンモデルの振り付け用ツール
■操作方法■
ニコニコ動画にて公開中
http://www.nicovideo.jp/watch/sm2420025
■WAVE出力時の注意点■
WAVEはフレーム0から録画する時にしか出力できません。
■Windows2000での注意点■
Windows2000以前のOS(XPよりも前)で使用する場合には、
MMDxShow.dllをレジストリに登録しなければ使用できません。
Dataフォルダ内にあるWin2000.batを一度実行して下さい。
■お断り■
クリプトン・フューチャー・メディア株式会社様、その他
Vocaloidシリーズに関して権利を有する方からクレームが
あった場合には、本ツールの公開を中止する場合があります。
■JPEGライブラリ■
Independent JPEG Group(http://www.ijg.org/)のJPeg-6b
ライブラリを使用しています。
■免責事項■
本ツールを実行したことによって損害・不利益・事故等が
発生した場合でも、一切の責任を負いません。
ツールを使って製作した画像・動画を公開する場合には、
こちらには連絡や許可申請は必要ありません。
pmm、vpd、vmdファイルの公開もご自由に。
ただし、問題が発生しても一切責任を負いません。
**音楽等の著作権には特に気をつけて下さい!!**
■物理演算モード■
物理エンジンにはBullet Physics Engineを使用しています。
(http://www.bulletphysics.com/wordpress/ )
「ヘルプ」→「モデル拡張」→「物理演算」で、モデルの物理演算化用エディタが開きます
エディタの豆知識
・剛体の形状(位置・回転・大きさ)は画面右下のXYZアイコンで操作できます。
特に『大きさ』は、SHIFTキーを押しながら位置アイコンのドラッグで変更できますので、気付きにくいと思います。
・グループ横の欄は、その剛体と衝突判定をしないグループを羅列します
・「ボーン追従」と「物理演算」において、「物理演算」の剛体は何らかの形で「ボーン追従」の剛体とジョイントで接続されていなければ地面に落ちます
・「物理演算」の「ボーン位置合せ」は、「物理演算」の剛体に繋がっているジョイントの位置は、ボーンの位置からズレることがあるので、それを強制的にボーンの位置に合せるための仕様です。
ボーンの位置からジョイントがズレるとモデルの破綻が起きる場合にのみ使用して下さい(使うと計算が遅くなるのと、動きが不自然になります)
■連絡先■
higuchuu2@yahoo.co.jp 樋口優
http://www.geocities.jp/higuchuu4/index.htm
■バージョン履歴■
Ver.5.24(2010/11/02)
・回転連動ボーンの挙動変更
Ver.5.23(2010/08/27)
・MEIKOモデル追加
・物理演算のジョイント軸を回転させた場合の挙動に関するバグ修正
Ver.5.22a(2010/01/21)
・鏡音リンact2モデル追加
(MMD本体はVer.5.22のまま)
Ver.5.22(2009/10/25)
・カメラ・照明・アクセサリモードでvsq,vpdを読み込むとMMDが落ちるバグ修正
Ver.5.21(2009/10/25)
・フレーム窓内でボーン名をクリックしてボーンを選択する際、トグルする仕様に変更
・フレーム窓内のボーン括りをクリックすると、括り内の全ボーンを選択する仕様追加
・範囲選択に、現在選択中のボーンで設定する『選択ボーン』項目追加
・縦選択を、現在選択中のフレーム全てに適応する仕様に変更
・移動のみでなく、回転時も Shift→10倍、Ctrl→0.1倍 のキー操作追加
・背景BMPの読み込みにJpegも対応
・画面保存にBMPのみでなく、Jpegも対応
・MMDの画面に、各種ファイルをドラッグ&ドロップできる仕様追加
・その他色々バグ修正
Ver.5.20(2009/10/23)
・AVI出力時に、モデル表示フレームが1フレーム遅れていた点を修正
・VistaのUAC作動時に落ちるバグ修正
(Windows7については未確認。直ってるといいなぁ)
・pmmデータ保存時にバッファオーバーフローを起こす危険のあるバグ修正
(Ver.5.19のバグ。Ver.5.19は落ちる可能性がある為、使用しないで下さい)
・その他色々バグ修正
Ver.5.19(2009/10/19)
・色々細かいバグ修正
Ver.5.18(2009/10/17)
・モデルのjpeg対応バグ修正
Ver.5.17(2009/10/17)
・テクスチャにjpegファイルを使用できるよう変更
・その他色々バグ修正
Ver.5.16(2009/09/27)
・Ver.5.15修正によるバグ修正
Ver.5.15(2009/09/27)
・ビットマップ+スフィアマップモデルを拡張モデル保存した際にスフィアが消えるバグ修正
Ver.5.14(2009/09/24)
・スフィアマップアクセサリとモデルを同時表示した際のバグ修正
Ver.5.13(2009/09/15)
・物理エンジンBullet Ver.2.75 正式版公開に伴いBulletバージョン更新
Ver.5.12(2009/09/10)
・テクスチャBMP上にスフィアマップ表示する際のファイル名を
"テクスチャ名.bmp*スフィア名.bmp" に変更。
Ver.5.11で"/"を使っていた方は"*"に変更して下さい。
・スフィアマップのファイル名拡張子"sph"を、"spa"にすることにより
スフィアマップの展開が乗算でなく、加算で行われる仕様を追加
Ver.5.11(2009/09/7)
・スフィアマップの計算法をVer.5.09に戻す
・テクスチャBMP上にスフィアマップ表示できる仕様に変更
テクスチャ名を "テクスチャ名.bmp/スフィア名.bmp"にすることにより
テクスチャ上にスフィアマップが展開されます(例:fuk1.bmp/metal.sph)
*ただし、PMDフォーマットはテクスチャ名の長さが19文字分しか無いため、
モデルで使用する場合はファイル名全体(テクスチャ+スフィア)で19文字以下に
収める必要があります(アクセサリ(xファイル)は256文字までOK)
・その他バグ色々修正
Ver.5.10(2009/08/29)
・スフィアマップ計算法を入射角を考慮する方法に変更
(髪のキューティクル表現等が可能になりました(多分))
Ver.5.09(2009/08/25)
・スフィアマップ機能追加
スフィアマップ用BMPを拡張子sphとしてモデル・アクセサリのテクスチャに指定
することにより、スフィアマップが展開されて表示されます
Ver.5.08(2009/08/09)
・pmm読込時ファイルが見つからなかった場合、ファイルの場所をあらためて聞く仕様を追加
Ver.5.07(2009/08/08)
・アクセサリ拡大・縮小時にアクセサリの明るさが変化するバグ修正
・アイコンによる移動時にCtrlキーを押すことにより移動距離を1/10に減らす機能を追加
Ver.5.06(2009/08/05)
・Ver.5.05のCPU負荷が高すぎたため修正
・背景AVI表示時にも地面影を表示するように変更
Ver.5.05(2009/08/05)
・色々とバグ修正 MMDxShow.dllも修正しています。
(※※旧VerのMMDフォルダを利用する場合にはMMDxShow.dllも書き換えること※※)
・Windows2000以前のOS(XPよりも前)で使用する場合には、MMDxShow.dllをレジストリに登録しなければ
使用できません。Dataフォルダ内にあるWin2000.batを一度実行して下さい(一度実行すればおk)
Ver.5.04(2009/08/02)
・Ver.5.03のMMDxShow.dll実行に伴い、MSVCR71.DLL関連のエラーが出る場合があったため、
MMDxShow.dll修正(※※旧VerのMMDフォルダを利用する場合にはMMDxShow.dllも書き換えること※※)。
・地面影描写が、背景BMP読込時に表示されないバグ修正
Ver.5.03(2009/08/02)
・AVI出力
・DirectShowにて出力する仕様に変更(2GBの壁がなくなりました)
(Dataフォルダ内に新たに'MMDxShow.dll'が追加されています。以前のVerのフォルダに
MikuMikuDance.exeをコピーして使う場合には、上記ファイルもコピーして下さい)
・アルファチャンネルをグラボ・出力画質によらず出力できるよう変更
・WAVEを含む場合にインターリーブ出力するよう変更
(ただしEscキーによる録画中断時にはWaveが映像よりも長くなってしまうので注意してください)
・地面影描画法変更
半透明でも綺麗に描写されます。地面影はモデル描写の時に描かれますので、
アクセサリ編集より、モデルとアクセサリの描写順をうまく設定してください。
・アクセサリ数が128個を超えると、アクセサリ編集が落ちるバグ修正
・その他色々バグ修正
Ver.5.02(2009/07/14)
・PCによって再生時に音ズレが生じるバグを修正
Ver.5.01(2009/07/06)
・AVI出力にモード追加
・画質優先 Ver.5β版の出力法(βよりもさらに画質は良いが遅いです)
・速度優先 Ver.5.00版の出力法
・連番分割出力 2GBを超える場合は分割して出力して下さい。下にフレーム数を入力すると
そのフレーム数出力する毎に連番を付けた別ファイルに記録して行きます。
ただし、WAVEは記録できなくなります。
Ver.5.00(2009/07/02)
・物理演算モード追加(カイトモデルのみ未対応)
・描写深度を10倍まで拡大
・カメラ・モデルのアイコンによる移動時にShiftキーを押すと10倍の距離移動
・AVI出力法を、スピード優先方式に戻しました
・その他バグ色々修正
Ver.4.05(2009/05/27)
・Ver.4.04に伴うバグ修正
Ver.4.04(2009/05/27)
・再生時の動画時間計算を厳密化(音ズレ防止)
Ver.4.03(2009/05/25)
・英語対応(「ヘルプ」→「English Mode」。外国語版OSの場合は自動的に英語モードで立ち上がる)
・モデルの英語化法
1.「ヘルプ」→「モデル拡張」→「英語名編集」にて、モデル名やボーン・表情の英語名を入力
2.「ヘルプ」→「モデル拡張」→「拡張モデル保存」で、英語対応版pmdを保存
・pmdモデル毎にトゥーン用テクスチャを設定できるように変更
1.「ヘルプ」→「モデル拡張」→「トゥーンテクスチャ変更」でテクスチャをデフォルトのものから変更
・テクスチャはモデルファイル(*.pmd)と同じフォルダ内に格納しておく事
・テクスチャは同名のファイルがあった場合には最初に読み込んだもののみが使用されるので、
テクスチャ名はなるべく他と被らない、ユニークな名前を使用して下さい
2.「ヘルプ」→「モデル拡張」→「拡張モデル保存」で、トゥーンテクスチャを変更したモデルを保存
・モデル公開時には、使用したテクスチャをモデル(*.pmd)と一緒に公開して下さい
・ボーンフレームに空のフレーム挿入、削除機能追加
「フレーム編集」→「空フレーム挿入」or「列フレーム削除」
(複数モデルで同じモーションを行う時に、モデル毎に動作の遅れや進みを入れる時等に使用)
・その他バグ色々修正
Ver.4.02(2009/05/08)
・ボーン、カメラの回転・移動に数値入力モード追加(「ボーン編集」→「数値入力」)
・バグ色々修正
Ver.4.0(2009/03/05)
・MMD杯記念に、まささんより初音ミクVer2モデル追加
・アイコンによる回転・移動に、localモードとglobalモード追加
・モデル仕様に捩じり用ボーン追加
・AVI出力時、スピードよりも画質優先の仕様に変更
・screen.bmpの解像度変更
Ver.3.45(2009/01/25)
・エッジの太さを各モデルごとに設定できる仕様に変更
Ver.3.44(2009/01/10)
・アクセサリの地面影表示バグ修正
Ver.3.43(2009/01/08)
・pmmデータを読み込んだ際に地面影色がおかしくなるバグ修正
・エッジのZ深度調整(視野角を極端に小さくしても、前より少しだけ綺麗に表示されるようになりますた)
Ver.3.42(2009/01/07)
・KAITOモデル半目時修正
・加算合成時の表示バグ修正
・地面影に半透明モード追加
地面影は、1.モデル描写前のアクセサリ描写 2.モデル描写 の1と2の間に描写されます。
地面影を透けるようにするには、ステージ等のアクセサリをモデル描写の前に描く必要があります。
あまり綺麗に描けるわけではないので、期待しないでねw
・セーブデータ(pmm)にエッジの太さ、および地面影透けるモードのデータ追加
Ver.3.41(2009/01/05)
・KAITOモデル表情バグ修正
・表情窓バグ修正
Ver.3.40(2009/01/05)
・KAITOモデル追加
・地面影をモデル、アクセサリよりも前に描画する描画順に変更
・モデルのエッジの太さ設定機能追加
Ver.3.30(2008/12/22)
・24bitテクスチャ対応
・別フレームへペースト機能追加
コピー&ペーストの際、操作対象ボーン(画面左上に表示されるボーン)へコピーされた全ボーンの
フレームデータをペーストします。これにより、"前髪1"フレームの内容を"左髪"フレームに
コピーする等、別フレーム間でのコピー&ペーストが可能になります
・アクセサリ編集機能追加
アクセサリ名を自由に変更可能(デフォルトはxファイル名)
アクセサリの描画順序設定可能(半透明のモノを描写する時に使用)
モデル描写の前にアクセサリを描写することも可能です
・アクセサリのSizeに補間計算適用
・アクセサリフレームのコピー&ペースト機能追加
アクセサリのコピー&ペーストは、モデルの”別フレームへペースト機能追加”と同じく現在選択中の
アクセサリにペーストします。これにより別アクセサリ間でのフレームコピーが可能です
・アクセサリ、モデルに加算合成表示機能追加
光の表現強化用。加算合成表示は、フレーム登録はできません。
また、加算合成なのでバックが白だと何も表示されていないように見えるので注意
・アクセサリフレームの範囲選択機能追加
・フレーム位置角度補正機能追加
選択フレームポイントの位置・角度の値をx倍します。 ネンドロイドタイプの手足が
短いモデルにモーションをコピーした際や、動きを大きくしたり小さくする際に利用
・表情大きさ補正機能追加
選択フレームポイントの表情の大きさをx倍します。
リップシンク後大きさを小さくしてささやくような口の動きにしたりする事が可能です
・その他様々バグ修正
Ver.3.23(2008/12/19)
・カメラフレームのバグ修正
Ver.3.22a(2008/11/29)
・咲音メイコモデル修正(手足の長さを短くしました)
Ver.3.22(2008/11/24)
・pmmファイル読込時のアクセサリ関連バグによる強制終了の回避修正
・アクセサリに咲音マイク追加
Ver.3.21b(2008/11/24)
・咲音メイコモデルスキニング修正
Ver.3.21a(2008/11/23)
・咲音メイコモデル追加
Ver.3.21(2008/11/8)
・フレーム窓コピーボタンのモデル間での不具合修正
・地面影の明るさ調整機能追加(「表示」→「地面影色設定」)
Ver.3.20b(2008/10/31)
・亞北ネルモデル不具合修正(右スカート前、後ボーンの修正)
Ver.3.20a(2008/10/30)
・亞北ネルモデル不具合修正
Ver.3.20(2008/10/30)
・亞北ネルモデル追加
・「モデル編集時カメラ・照明(アクセサリ)追従」機能のバグ修正
・モデル編集時、視点パネルの「下面」を「カメラ」に変更する仕様に修正
Ver.3.16(2008/10/09)
・vacファイル読込時ラジアン度数表示バグ修正
・各種窓内数字修正時Deleteキー反応修正
・一部読み込めないvsqファイルに対応
Ver.3.15(2008/09/27)
・アクセサリを含むpmmファイル読込時バグ修正(ver.3.14によるバグ)
Ver.3.14(2008/09/26)
・表示・IKがらみのバグ修正
・pmmロード時、アクセサリxファイルが発見出来なかった場合に
xファイルの場所を指定するように変更
Ver.3.13(2008/09/21)
・メニュー項目ショートカットキー重複修正
・ショートカットキー追加
A:全て選択 S:未登録選択 D:センター位置バイアス付加
・フレーム操作窓Enterキー入力時の登録処理バグ修正
Ver.3.12(2008/09/18)
・ダミーボーン範囲選択時バグ修正
・アクセサリ数128個以上時バグ修正
・フレーム選択窓縦スクロールバーバグ修正
Ver.3.11(2008/09/17)
・wave波形表示ずれ修正
・ダイアログ表示時Enterキーでフレーム登録されるバグ修正
・pmmファイル関連付け起動対応
・レンモデル瞬き・セーラー服修正
Ver.3.10a(2008/09/12)
・鏡音レンモデルフレーム窓バグ修正
Ver.3.10(2008/09/11)
・鏡音レンモデル追加
・ディスプレイ解像度が高すぎる際に生じるエラー対策追加
(樋口の環境では出ないエラーの対策のため、うまくいったかどうかは不明)
Ver.3.05(2008/09/07)
・初音ミクモデル著作権表示修正
・弱音ハクモデル右肩スキニング修正
・アクセサリモデル255個まで追加
Ver.3.04(2008/08/31)
・線形補間ボタン不具合修正
・AVI出力時キャンセルボタン不具合修正
Ver.3.03(2008/08/30)
・ハクモデル不具合修正
・vacファイル読込追加
・その他
Ver.3.02(2008/08/30)
・表情フレーム登録ができないバグ修正
・モデルを消去したあと保存したデータのロード時エラーバグ修正
Ver.3.01(2008/08/30)
・マルチモデル化
・その他多数変更
Ver.2.02(2008/07/16)
・vsqファイル(tempo変更)バグ修正
・AVI出力時のフレームずれ修正
Ver.2.01(2008/04/08)
・トゥーンレンダリングのエッジの太さを距離によって変更させた
・Vista版対応(AVI出力時、高速モードを選択すると画質が落ちます)
・フレーム窓内も、SHIFTキー選択時に選択状態をトグルさせるように変更
Ver.2.00(2008/04/03)
・演出モードの追加
・アクセサリに地面モード追加(詳しくは"ステージ.vac"の中身参照)
・背景AVIを再生時にも同期させるようにした
・その他色々 | {
"pile_set_name": "Github"
} |
// In config ACPI, FFFFFFFF:_CRS to XCRS
// Find: 5F 43 52 53
// Replace: 58 43 52 53
// TgtBridge:FF FF FF FF
#ifndef NO_DEFINITIONBLOCK
DefinitionBlock("", "SSDT", 2, "hack", "CRSpatch", 0)
{
#endif
// Declarations resolved at load time against the host's native ACPI tables.
External(_SB.PCI0.GPI0._STA, MethodObj)
External(_SB.PCI0.I2CX.DEV0, DeviceObj)

Scope (_SB.PCI0.I2CX.DEV0)
{
    // Replacement _CRS for the I2C child device (the native _CRS is renamed
    // to XCRS by the find/replace binary patch described in the header).
    // Returns the I2C connection descriptor combined with either a GpioInt
    // or a plain Interrupt descriptor, depending on whether the GPIO
    // controller reports itself present via _STA.
    // All 0xFFFx values are placeholders meant to be patched per machine.
    Method (_CRS, 0, NotSerialized)
    {
        // I2C connection descriptor; 0xFFF1 is the placeholder slave address.
        Name (XBFB, ResourceTemplate ()
        {
            //0xFFF1 = I2cSerialBusV2
            I2cSerialBusV2 (0xFFF1, ControllerInitiated, 0x00061A80,
                AddressingMode7Bit, "\\_SB.PCI0.I2CX",
                0x00, ResourceConsumer, , Exclusive,
                )
        })
        // Fallback APIC interrupt descriptor; 0xFFF2 is the placeholder IRQ.
        Name (XBFI, ResourceTemplate ()
        {
            Interrupt (ResourceConsumer, Level, ActiveLow, ExclusiveAndWake,,,)
            {
                0xFFF2//Interrupt
            }
        })
        // GPIO interrupt descriptor; 0xFFF3 is the placeholder pin number.
        Name (XBFG, ResourceTemplate ()
        {
            GpioInt (Level, ActiveLow, ExclusiveAndWake, PullDefault, 0x0000,
                "\\_SB.PCI0.GPI0", 0x00, ResourceConsumer, ,
                )
            { // Pin list
                // 0xFFF3 = GPI0's Pin
                0xFFF3
            }
        })
        // Prefer the GPIO interrupt whenever the GPIO controller is enabled.
        Local0 = \_SB.PCI0.GPI0._STA()
        If (Local0!=0)
        {
            Return (ConcatenateResTemplate (XBFB, XBFG))
        }
        Else
        {
            Return (ConcatenateResTemplate (XBFB, XBFI))
        }
    }
}
#ifndef NO_DEFINITIONBLOCK
}
#endif
//EOF | {
"pile_set_name": "Github"
} |
#ifndef KERBEROS_H
#define KERBEROS_H
#include <node.h>
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
#include "nan.h"
#include <node_object_wrap.h>
#include <v8.h>
extern "C" {
#include "kerberosgss.h"
}
using namespace v8;
using namespace node;
// Node.js native addon object exposing the GSSAPI Kerberos client/server
// authentication primitives (implemented in kerberosgss.c) to JavaScript.
// Each AuthGSS* method is registered on the JS prototype by Initialize().
class Kerberos : public Nan::ObjectWrap {
  public:
    Kerberos();
    ~Kerberos() {};

    // Constructor used for creating new Kerberos objects from C++
    static Nan::Persistent<FunctionTemplate> constructor_template;

    // Initialize function for the object: registers the class and its
    // methods on the addon's exports object.
    static void Initialize(Nan::ADDON_REGISTER_FUNCTION_ARGS_TYPE target);

    // Method available
    // Client-side GSSAPI handshake: init/step drive the security context,
    // wrap/unwrap protect message payloads, clean releases the context.
    static NAN_METHOD(AuthGSSClientInit);
    static NAN_METHOD(AuthGSSClientStep);
    static NAN_METHOD(AuthGSSClientUnwrap);
    static NAN_METHOD(AuthGSSClientWrap);
    static NAN_METHOD(AuthGSSClientClean);
    // Server-side counterparts of the client handshake above.
    static NAN_METHOD(AuthGSSServerInit);
    static NAN_METHOD(AuthGSSServerClean);
    static NAN_METHOD(AuthGSSServerStep);

  private:
    // JS-side `new Kerberos()` entry point.
    static NAN_METHOD(New);

    // Handles the uv calls (worker-thread body of an async request).
    static void Process(uv_work_t* work_req);
    // Called after work is done (back on the main loop; resolves the JS callback).
    static void After(uv_work_t* work_req);
};
#endif
| {
"pile_set_name": "Github"
} |
/*
nsjail - CLONE_NEWUTS routines
-----------------------------------------
Copyright 2014 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "uts.h"
#include <string.h>
#include <unistd.h>
#include "logs.h"
namespace uts {

// Applies the UTS-namespace configuration for the jail: when a new UTS
// namespace was requested, set the configured hostname inside it.
// Returns true on success (or when nothing needed to be done).
bool initNs(nsjconf_t* nsjconf) {
	// Nothing to do unless the jail runs in its own UTS namespace.
	if (!nsjconf->clone_newuts) {
		return true;
	}

	const auto& hn = nsjconf->hostname;
	LOG_D("Setting hostname to '%s'", hn.c_str());
	if (sethostname(hn.data(), hn.length()) == -1) {
		PLOG_E("sethostname('%s')", hn.c_str());
		return false;
	}

	return true;
}

}  // namespace uts
| {
"pile_set_name": "Github"
} |
'''
This file is part of Maxfield.
Maxfield is a planning tool for helping Ingress players to determine
an efficient plan to create many in-game fields.
Copyright (C) 2015 by Jonathan Baker: babamots@gmail.com
Maxfield is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Maxfield is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Maxfield. If not, see <http://www.gnu.org/licenses/>.
'''
import geometry
np = geometry.np
# Set to False if only perfectly optimal plans should be produced
ALLOW_SUBOPTIMAL = True
class Deadend(Exception):
    '''
    Raised when the linking plan reaches a state that cannot be completed
    (e.g. a portal already has the maximum 8 outgoing links).

    The human-readable reason is available both as str(exc) and via the
    legacy .explain attribute, which existing callers read.
    '''
    def __init__(self,s):
        # Forward the message to Exception so str(e)/repr(e) and pickling
        # carry the reason explicitly (the original relied on implicit
        # BaseException argument handling and only set .explain).
        Exception.__init__(self, s)
        self.explain = s
def try_reduce_out_degree(a,p):
    # Reverse as many edges out-edges of p as possible.
    # For every successor q of p whose own out-degree is still below the
    # 8-link limit, flip the edge p->q into q->p, carrying the edge's
    # attribute dict along (networkx 1.x `a.edge` adjacency API).
    toremove = []
    for q in a.edge[p]:
        if a.out_degree(q) < 8:
            a.add_edge(q,p)
            # Point the reversed edge at the same attribute dict so its
            # 'order'/'reversible'/'fields' data is preserved.
            a.edge[q][p] = a.edge[p][q]
            toremove.append(q)
    # Delete the original p->q edges only after the loop: removing while
    # iterating over a.edge[p] would mutate the dict being traversed.
    for q in toremove:
        a.remove_edge(p,q)
def try_ordered_edge(a,p,q,reversible):
    # Add the directed link p->q to plan graph `a`, working around the
    # 8-outgoing-links-per-portal limit. If p is saturated, first try to
    # reverse some of its existing out-edges; failing that, fall back to
    # adding the edge as q->p (when allowed). Raises Deadend when no legal
    # placement exists and ALLOW_SUBOPTIMAL is False.
    if a.has_edge(p,q) or a.has_edge(q,p):
        return
    # if reversible and a.out_degree(p) > a.out_degree(q):
    #     p,q = q,p
    if a.out_degree(p) >= 8:
        try_reduce_out_degree(a,p)
    if a.out_degree(p) >= 8:
        # We tried but failed to reduce the out-degree of p
        if not reversible and not ALLOW_SUBOPTIMAL:
            # print '%s already has 8 outgoing'%p
            raise(Deadend('%s already has 8 outgoing'%p))
        # Try the reversed orientation q->p instead.
        if a.out_degree(q) >= 8:
            try_reduce_out_degree(a,q)
        if a.out_degree(q) >= 8 and not ALLOW_SUBOPTIMAL:
            # print '%s and %s already have 8 outgoing'%(p,q)
            raise(Deadend('%s and %s already have 8 outgoing'%(p,q)))
        p,q = q,p
    # Record the link; 'order' is its creation index, 'fields' is filled in
    # later by Triangle.markEdgesWithFields.
    m = a.size()
    a.add_edge(p,q,{'order':m,'reversible':reversible,'fields':[]})
    # a.edgeStack is created lazily on the first added edge.
    try:
        a.edgeStack.append( (p,q) )
    except AttributeError:
        a.edgeStack = [ (p,q) ]
    # print 'adding',p,q
    # print a.edgeStack
class Triangle:
    # Recursive triangulation node: a triangle of three portals that may be
    # split into three child triangles around an interior portal, forming
    # the nested-field structure of a maxfield plan.
    def __init__(self,verts,a,exterior=False):
        '''
        verts should be a 3-list of Portals
        verts[0] should be the final one used in linking
        exterior should be set to true if this triangle has no triangle parent
        the orientation of the outer edges of exterior Triangles do not matter
        '''
        # If this portal is exterior, the final vertex doesn't matter
        self.verts = list(verts)
        self.a = a
        self.exterior = exterior
        # This randomizes the Portal used for the jet link. I am experimenting with having maxfield.triangulate and Triangle.split choose this portal carefully, so don't randomize
        '''
        if exterior:
            # Randomizing should help prevent perimeter nodes from getting too many links
            final = np.random.randint(3)
            tmp = self.verts[final]
            self.verts[final] = self.verts[0]
            self.verts[0] = tmp
        '''
        self.pts = np.array([a.node[p]['xyz'] for p in verts])
        self.children = []
        self.contents = []
        self.center = None

    def findContents(self,candidates=None):
        # Collect every candidate portal that lies strictly inside this
        # triangle (on the sphere). Defaults to scanning all graph nodes.
        if candidates is None:
            candidates = xrange(self.a.order())
        for p in candidates:
            if p in self.verts:
                continue
            if geometry.sphereTriContains(self.pts,self.a.node[p]['xyz']):
                self.contents.append(p)

    def randSplit(self):
        # Recursively split on a uniformly random interior portal.
        if len(self.contents) == 0:
            return
        p = self.contents[np.random.randint(len(self.contents))]
        self.splitOn(p)
        for child in self.children:
            child.randSplit()

    def nearSplit(self):
        # Split on the node closest to final
        if len(self.contents) == 0:
            return
        contentPts = np.array([self.a.node[p]['xyz'] for p in self.contents])
        displaces = contentPts - self.a.node[self.verts[0]]['xyz']
        dists = np.sum(displaces**2,1)
        closest = np.argmin(dists)
        self.splitOn(self.contents[closest])
        for child in self.children:
            child.nearSplit()

    def splitOn(self,p):
        # Splits this Triangle to produce 3 children using portal p
        # p is passed as the first vertex parameter in the construction of 'opposite', so it will be opposite's 'final vertex' unless randomization is used
        # 'opposite' is the child that does not share the final vertex
        # Because of the build order, it's safe for this triangle to believe it is exterior
        opposite = Triangle([p,self.verts[1],\
                             self.verts[2]],self.a,True)
        # The other two children must also use my final as their final
        adjacents = [\
            Triangle([self.verts[0],\
                      self.verts[2],p],self.a),\
            Triangle([self.verts[0],\
                      self.verts[1],p],self.a)\
        ]
        self.children = [opposite]+adjacents
        self.center = p
        # Distribute my interior portals among the three children.
        for child in self.children:
            child.findContents(self.contents)

    def tostr(self):
        # Just a string representation of the triangle
        return str([self.a.node[self.verts[i]]['name'] for i in range(3)])

    def buildFinal(self):
        # Add the two edges incident to the final vertex (verts[0]),
        # then recurse into the two children that share that vertex.
        # print 'building final',self.tostr()
        if self.exterior:
            # Avoid making the final the link origin when possible
            # print self.tostr(),'is exterior'
            try_ordered_edge(self.a,self.verts[1],\
                             self.verts[0],self.exterior)
            try_ordered_edge(self.a,self.verts[2],\
                             self.verts[0],self.exterior)
        else:
            # print self.tostr(),'is NOT exterior'
            try_ordered_edge(self.a,self.verts[0],\
                             self.verts[1],self.exterior)
            try_ordered_edge(self.a,self.verts[0],\
                             self.verts[2],self.exterior)
        if len(self.children) > 0:
            for i in [1,2]:
                self.children[i].buildFinal()

    def buildExceptFinal(self):
        # Build every edge of this subtree except those touching the final
        # vertex (those are deferred to buildFinal).
        # print 'building EXCEPT final',self.tostr()
        if len(self.children) == 0:
            # print 'no children'
            p,q = self.verts[2] , self.verts[1]
            try_ordered_edge(self.a,p,q,True)
            return
        # Child 0 is guaranteed to be the one opposite final
        self.children[0].buildGraph()
        for child in self.children[1:3]:
            child.buildExceptFinal()

    def buildGraph(self):
        # print 'building',self.tostr()
        '''
        TODO
        A first generation triangle could have its final vertex's edges already completed by neighbors.
        This will cause the first generation to be completed when the opposite edge is added which complicates completing inside descendants.
        This could be solved by choosing a new final vertex (or carefully choosing the order of completion of first generation triangles).
        '''
        if ( \
            self.a.has_edge(self.verts[0],self.verts[1]) or \
            self.a.has_edge(self.verts[1],self.verts[0]) \
           ) and \
           ( \
            self.a.has_edge(self.verts[0],self.verts[2]) or \
            self.a.has_edge(self.verts[2],self.verts[0]) \
           ):
            # print 'Final vertex completed!!!'
            raise Deadend('Final vertex completed by neighbors')
        self.buildExceptFinal()
        self.buildFinal()

    def contains(self,pt):
        # NOTE(review): self.orths is never assigned anywhere in this class,
        # so calling contains() raises AttributeError as written — confirm
        # against the geometry module / upstream Maxfield whether orths was
        # meant to be computed in __init__.
        return np.all(np.sum(self.orths*(pt-self.pts),1) < 0)

    # Attach to each edge a list of fields that it completes
    def markEdgesWithFields(self):
        edges = [(0,0)]*3
        for i in range(3):
            p = self.verts[i-1]
            q = self.verts[i-2]
            if not self.a.has_edge(p,q):
                p,q = q,p
            # The graph should have been completed by now, so the edge p,q exists
            edges[i] = (p,q)
            if not self.a.has_edge(p,q):
                # Diagnostic dump: reaching here means the plan graph was not
                # fully built before field marking (a programming error).
                print 'a does NOT have edge',p,q
                print 'there is a programming error'
                print 'a only has the edges:'
                for p,q in self.a.edges_iter():
                    print p,q
                print 'a has %s 1st gen triangles:'%len(self.a.triangulation)
                for t in self.a.triangulation:
                    print t.verts
        # The edge added last (highest 'order') is the one whose creation
        # completes this triangle's field.
        edgeOrders = [self.a.edge[p][q]['order'] for p,q in edges]
        lastInd = np.argmax(edgeOrders)
        # The edge that completes this triangle
        p,q = edges[lastInd]
        self.a.edge[p][q]['fields'].append(self.verts)
        for child in self.children:
            child.markEdgesWithFields()

    def edgesByDepth(self,depth):
        # Return list of edges of triangles at given depth
        # 0 means edges of this very triangle
        # 1 means edges splitting this triangle
        # 2 means edges splitting this triangle's children
        # etc.
        if depth == 0:
            return [ (self.verts[i],self.verts[i-1]) for i in range(3) ]
        if depth == 1:
            if self.center is None:
                return []
            return [ (self.verts[i],self.center) for i in range(3) ]
        return [e for child in self.children\
                for e in child.edgesByDepth(depth-1)]
| {
"pile_set_name": "Github"
} |
// (C) Copyright John Maddock 2007.
// Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt).
//
// See http://www.boost.org/libs/type_traits for most recent version including documentation.
#ifndef BOOST_TT_MAKE_UNSIGNED_HPP_INCLUDED
#define BOOST_TT_MAKE_UNSIGNED_HPP_INCLUDED
#include <boost/type_traits/conditional.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <boost/type_traits/is_signed.hpp>
#include <boost/type_traits/is_unsigned.hpp>
#include <boost/type_traits/is_enum.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/type_traits/remove_cv.hpp>
#include <boost/type_traits/is_const.hpp>
#include <boost/type_traits/is_volatile.hpp>
#include <boost/type_traits/add_const.hpp>
#include <boost/type_traits/add_volatile.hpp>
#include <boost/static_assert.hpp>
namespace mars_boost {} namespace boost = mars_boost; namespace mars_boost {
// Metafunction: make_unsigned<T>::type is the unsigned integer type
// corresponding to T (preserving any const/volatile qualifiers). T must be
// an integral or enum type other than bool.
template <class T>
struct make_unsigned
{
private:
   BOOST_STATIC_ASSERT_MSG((::mars_boost::is_integral<T>::value || ::mars_boost::is_enum<T>::value), "The template argument to make_unsigned must be an integer or enum type.");
   BOOST_STATIC_ASSERT_MSG((! ::mars_boost::is_same<typename remove_cv<T>::type, bool>::value), "The template argument to make_unsigned must not be the type bool");

   typedef typename remove_cv<T>::type t_no_cv;
   // Selection, outermost first:
   //  1. T is already an unsigned integral (and not char/wchar_t/bool,
   //     which need explicit mapping): use T unchanged.
   //  2. T is a signed standard integral: map it to its unsigned sibling
   //     by exact type match (signed char -> unsigned char, etc.).
   //  3. Otherwise (enum, char, wchar_t, extended types): pick the unsigned
   //     integer of the same size.
   typedef typename conditional<
      (::mars_boost::is_unsigned<T>::value && ::mars_boost::is_integral<T>::value
      && ! ::mars_boost::is_same<t_no_cv, char>::value
      && ! ::mars_boost::is_same<t_no_cv, wchar_t>::value
      && ! ::mars_boost::is_same<t_no_cv, bool>::value),
      T,
      typename conditional<
         (::mars_boost::is_integral<T>::value
         && ! ::mars_boost::is_same<t_no_cv, char>::value
         && ! ::mars_boost::is_same<t_no_cv, wchar_t>::value
         && ! ::mars_boost::is_same<t_no_cv, bool>::value),
         typename conditional<
            is_same<t_no_cv, signed char>::value,
            unsigned char,
            typename conditional<
               is_same<t_no_cv, short>::value,
               unsigned short,
               typename conditional<
                  is_same<t_no_cv, int>::value,
                  unsigned int,
                  typename conditional<
                     is_same<t_no_cv, long>::value,
                     unsigned long,
// Widest candidate depends on platform support for long long / __int64 / int128.
#if defined(BOOST_HAS_LONG_LONG)
#ifdef BOOST_HAS_INT128
                     typename conditional<
                        sizeof(t_no_cv) == sizeof(mars_boost::ulong_long_type),
                        mars_boost::ulong_long_type,
                        mars_boost::uint128_type
                     >::type
#else
                     mars_boost::ulong_long_type
#endif
#elif defined(BOOST_HAS_MS_INT64)
                     unsigned __int64
#else
                     unsigned long
#endif
                  >::type
               >::type
            >::type
         >::type,
         // Not a regular integer type:
         // (enums, char, wchar_t: match by size instead of by name)
         typename conditional<
            sizeof(t_no_cv) == sizeof(unsigned char),
            unsigned char,
            typename conditional<
               sizeof(t_no_cv) == sizeof(unsigned short),
               unsigned short,
               typename conditional<
                  sizeof(t_no_cv) == sizeof(unsigned int),
                  unsigned int,
                  typename conditional<
                     sizeof(t_no_cv) == sizeof(unsigned long),
                     unsigned long,
#if defined(BOOST_HAS_LONG_LONG)
#ifdef BOOST_HAS_INT128
                     typename conditional<
                        sizeof(t_no_cv) == sizeof(mars_boost::ulong_long_type),
                        mars_boost::ulong_long_type,
                        mars_boost::uint128_type
                     >::type
#else
                     mars_boost::ulong_long_type
#endif
#elif defined(BOOST_HAS_MS_INT64)
                     unsigned __int64
#else
                     unsigned long
#endif
                  >::type
               >::type
            >::type
         >::type
      >::type
   >::type base_integer_type;

   // Add back any const qualifier:
   typedef typename conditional<
      is_const<T>::value,
      typename add_const<base_integer_type>::type,
      base_integer_type
   >::type const_base_integer_type;

public:
   // Add back any volatile qualifier:
   typedef typename conditional<
      is_volatile<T>::value,
      typename add_volatile<const_base_integer_type>::type,
      const_base_integer_type
   >::type type;
};
} // namespace mars_boost
#endif // BOOST_TT_MAKE_UNSIGNED_HPP_INCLUDED
| {
"pile_set_name": "Github"
} |
require 'active_support/json/encoding'
module CCInitializers
  # Installs a custom ActiveSupport JSON encoder backed by MultiJson.
  # Output is pretty-printed in every environment except test, where the
  # compact form keeps fixtures/assertions stable.
  # The _cc_config argument is accepted for initializer-interface
  # uniformity and is intentionally unused.
  def self.json(_cc_config)
    ActiveSupport.json_encoder = Class.new do
      attr_reader :options

      def initialize(options=nil)
        @options = options || {}
      end

      # Serialize +value+ via its #as_json hook, then dump with MultiJson.
      # options.dup guards the stored options hash against mutation by
      # as_json implementations.
      def encode(value)
        if Rails.env.test?
          MultiJson.dump(value.as_json(options.dup))
        else
          MultiJson.dump(value.as_json(options.dup), options.merge(pretty: true))
        end
      end
    end
  end
end
| {
"pile_set_name": "Github"
} |
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI
OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW
K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl
PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv
BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW
8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7
WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ
R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB
ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh
mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj
1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj
uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY
tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2
OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr
QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r
I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2
utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO
Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi
j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM
kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA
3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s
Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS
xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr
84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq
njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA==
-----END RSA PRIVATE KEY-----
| {
"pile_set_name": "Github"
} |
metalk8s-sosreport for Debian
Metalk8s SOS report custom plugins
-- Yohann Cointe <yohann.cointe@scality.com> Wed, 6 Aug 2019 15:49:02 +0200
| {
"pile_set_name": "Github"
} |
##
# This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild
#
# Authors:: Inge Gutheil <i.gutheil@fz-juelich.de>, Alan O'Cais <a.ocais@fz-juelich.de>
# License:: MIT/GPL
# $Id$
#
##
# EasyBuild easyconfig: builds ELPA with the standard configure/make flow.
easyblock = 'ConfigureMake'

name = 'ELPA'
version = '2016.05.004'

homepage = 'http://elpa.rzg.mpg.de'
description = """Eigenvalue SoLvers for Petaflop-Applications ."""

# Intel compiler/MKL toolchain; ELPA is an MPI-parallel library.
toolchain = {'name': 'intel', 'version': '2017a'}
toolchainopts = {'usempi': True}

source_urls = ['http://elpa.mpcdf.mpg.de/html/Releases/%(version)s/']
sources = [SOURCELOWER_TAR_GZ]

# Patch so the test library (libelpatest) gets installed alongside libelpa.
patches = [
    '%(name)s-%(version)s_install-libelpatest.patch',
]

# Autotools needed at build time because the patch requires regenerating
# the build system (see preconfigopts below).
builddependencies = [
    ('Autotools', '20150215'),
]

preconfigopts = 'autoreconf && '

# Shared configure options: point Fortran at MKL's LP64 module files and
# link against ScaLAPACK.
local_common_configopts = 'FCFLAGS="-I$EBROOTIMKL/mkl/include/intel64/lp64 $FCFLAGS" '
local_common_configopts += 'LIBS="$LIBSCALAPACK" '

# Two passes: OpenMP-enabled build first, plain build second.
configopts = [
    local_common_configopts + '--enable-openmp ',
    # Default version last, so we can get the normal config.h/config-f90.h installed afterwards.
    local_common_configopts,
]

# Verbose make output for easier build-log debugging.
buildopts = ' V=1 '

# Ship the config headers with the examples so they can be rebuilt.
postinstallcmds = [
    'cp config.h config-f90.h %(installdir)s/share/doc/elpa/examples',
]

sanity_check_paths = {
    'files': ['lib/libelpa.a', 'lib/libelpa.%s' % SHLIB_EXT, 'lib/libelpa_openmp.a',
              'lib/libelpa_openmp.%s' % SHLIB_EXT, 'lib/libelpatest.a', 'lib/libelpatest.%s' % SHLIB_EXT,
              'lib/libelpatest_openmp.a', 'lib/libelpatest_openmp.%s' % SHLIB_EXT, 'share/doc/elpa/examples/config.h',
              'share/doc/elpa/examples/config-f90.h'],
    'dirs': ['bin', 'include/elpa-%(version)s/elpa', 'include/elpa-%(version)s/modules', 'lib/pkgconfig'],
}

moduleclass = 'math'
| {
"pile_set_name": "Github"
} |
import { Light } from './Light';
/**
* @author mrdoob / http://mrdoob.com/
*/
/**
 * A light that illuminates the whole scene uniformly from no particular
 * direction. Delegates color/intensity handling to the Light base class.
 *
 * @param {number|string} color - passed through to Light.
 * @param {number} intensity - passed through to Light.
 */
function AmbientLight( color, intensity ) {

	Light.call( this, color, intensity );

	this.type = 'AmbientLight';

	// An ambient light has no direction, so shadow casting is meaningless;
	// explicitly unset the flag inherited from Object3D via Light.
	this.castShadow = undefined;

}

// Prototype chain: AmbientLight -> Light; isAmbientLight is the cheap
// type tag used by the renderer instead of instanceof checks.
AmbientLight.prototype = Object.assign( Object.create( Light.prototype ), {

	constructor: AmbientLight,

	isAmbientLight: true

} );
export { AmbientLight };
| {
"pile_set_name": "Github"
} |
---
title: Updating the IP headers for coalesced segments
description: This section describes how to update the IP headers for coalesced segments
ms.assetid: 18F2944A-D5A7-41BB-885F-EC183A00F7CE
ms.date: 04/20/2017
ms.localizationpriority: medium
---
# Updating the IP Headers for Coalesced Segments
When finalizing a single coalescing unit (SCU), a receive segment coalescing (RSC)-capable miniport driver updates the fields in the IP headers as described in the following tables.
- [Updating IPv4 header fields for coalesced segments](#updating-ipv4-header-fields-for-coalesced-segments)
- [Updating IPv6 header fields for coalesced segments](#updating-ipv6-header-fields-for-coalesced-segments)
## Updating IPv4 header fields for coalesced segments
<table>
<colgroup>
<col width="50%" />
<col width="50%" />
</colgroup>
<thead>
<tr class="header">
<th align="left">Field</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr class="odd">
<td align="left"><p><strong>Version</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Header Length</strong></p></td>
<td align="left"><p>The length of a basic IPv4 header without any IP options.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Differentiated Services</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>ECN bits</strong></p></td>
<td align="left"><p>See Exception 8 in <a href="exception-conditions-that-terminate-coalescing.md" data-raw-source="[Exception Conditions that Terminate Coalescing](exception-conditions-that-terminate-coalescing.md)">Exception Conditions that Terminate Coalescing</a>. Datagrams should be coalesced if they all have the same values for the ECN bits.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Total Length</strong></p></td>
<td align="left"><p>The value of this field must be recomputed every time a new segment with non-zero TCP payload length is coalesced into an existing SCU. See <a href="exception-conditions-that-terminate-coalescing.md" data-raw-source="[Exception Conditions that Terminate Coalescing](exception-conditions-that-terminate-coalescing.md)">Exception Conditions that Terminate Coalescing</a> for special cases that arise from the value in this field.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Identification</strong></p></td>
<td align="left"><p>Must be set to the IP ID of the first coalesced segment.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Flags</strong></p></td>
<td align="left"><ul>
<li><p>Datagrams may be coalesced as long as they have the same value for the DF (Don’t Fragment) bit: either all set or all clear.</p></li>
<li><p>Segments with the MF (More Fragments) bit set must not be coalesced.</p></li>
</ul></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Fragment Offset</strong></p></td>
<td align="left"><p>Not applicable. Fragmented IP datagrams are not coalesced.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Time To Live</strong></p></td>
<td align="left"><p>Must be set to the minimum time to live (TTL) value of the coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Protocol</strong></p></td>
<td align="left"><p>Always set to 6, for TCP.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Header Checksum</strong></p></td>
<td align="left"><p>The value of this field must be recomputed by the miniport driver.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Source Address</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Destination Address</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
</tbody>
</table>
## Updating IPv6 header fields for coalesced segments
<table>
<colgroup>
<col width="50%" />
<col width="50%" />
</colgroup>
<thead>
<tr class="header">
<th align="left">Field</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr class="odd">
<td align="left"><p><strong>Version</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Traffic Class</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Flow Label</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Payload Length</strong></p></td>
<td align="left"><p>The value of this field must be recomputed whenever a new segment with nonzero TCP payload length is coalesced into an existing segment.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Next Header</strong></p></td>
<td align="left"><p>Always set to 6, for TCP.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Hop Limit</strong></p></td>
<td align="left"><p>Must be set to the minimum <strong>Hop Limit</strong> value of the coalesced segments.</p></td>
</tr>
<tr class="odd">
<td align="left"><p><strong>Source Address</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
<tr class="even">
<td align="left"><p><strong>Destination Address</strong></p></td>
<td align="left"><p>The value of this field must be the same for all coalesced segments.</p></td>
</tr>
</tbody>
</table>
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2000-2001 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
package javax.transaction.xa;
/**
* The XAException is thrown by the Resource Manager (RM) to inform the
* Transaction Manager of an error encountered by the involved transaction.
*
*/
/**
 * The XAException is thrown by the Resource Manager (RM) to inform the
 * Transaction Manager of an error encountered by the involved transaction.
 *
 * <p>All constant declarations use the conventional {@code public static
 * final} modifier order (JLS recommended); values are unchanged.
 */
public class XAException extends java.lang.Exception {

    /**
     * The error code with which to create the SystemException.
     *
     * @serial The error code for the exception
     */
    public int errorCode;

    /**
     * Create an XAException.
     */
    public XAException()
    {
        super();
    }

    /**
     * Create an XAException with a given string.
     *
     * @param s The <code>String</code> object containing the exception
     *          message.
     */
    public XAException(String s)
    {
        super(s);
    }

    /**
     * Create an XAException with a given error code.
     *
     * @param errcode The error code identifying the exception.
     */
    public XAException(int errcode)
    {
        super();
        errorCode = errcode;
    }

    /**
     * The inclusive lower bound of the rollback codes.
     */
    public static final int XA_RBBASE = 100;

    /**
     * Indicates that the rollback was caused by an unspecified reason.
     */
    public static final int XA_RBROLLBACK = XA_RBBASE;

    /**
     * Indicates that the rollback was caused by a communication failure.
     */
    public static final int XA_RBCOMMFAIL = XA_RBBASE + 1;

    /**
     * A deadlock was detected.
     */
    public static final int XA_RBDEADLOCK = XA_RBBASE + 2;

    /**
     * A condition that violates the integrity of the resource was detected.
     */
    public static final int XA_RBINTEGRITY = XA_RBBASE + 3;

    /**
     * The resource manager rolled back the transaction branch for a reason
     * not on this list.
     */
    public static final int XA_RBOTHER = XA_RBBASE + 4;

    /**
     * A protocol error occurred in the resource manager.
     */
    public static final int XA_RBPROTO = XA_RBBASE + 5;

    /**
     * A transaction branch took too long.
     */
    public static final int XA_RBTIMEOUT = XA_RBBASE + 6;

    /**
     * May retry the transaction branch.
     */
    public static final int XA_RBTRANSIENT = XA_RBBASE + 7;

    /**
     * The inclusive upper bound of the rollback error code.
     */
    public static final int XA_RBEND = XA_RBTRANSIENT;

    /**
     * Resumption must occur where the suspension occurred.
     */
    public static final int XA_NOMIGRATE = 9;

    /**
     * The transaction branch may have been heuristically completed.
     */
    public static final int XA_HEURHAZ = 8;

    /**
     * The transaction branch has been heuristically committed.
     */
    public static final int XA_HEURCOM = 7;

    /**
     * The transaction branch has been heuristically rolled back.
     */
    public static final int XA_HEURRB = 6;

    /**
     * The transaction branch has been heuristically committed and
     * rolled back.
     */
    public static final int XA_HEURMIX = 5;

    /**
     * Routine returned with no effect and may be reissued.
     */
    public static final int XA_RETRY = 4;

    /**
     * The transaction branch was read-only and has been committed.
     */
    public static final int XA_RDONLY = 3;

    /**
     * There is an asynchronous operation already outstanding.
     */
    public static final int XAER_ASYNC = -2;

    /**
     * A resource manager error has occurred in the transaction branch.
     */
    public static final int XAER_RMERR = -3;

    /**
     * The XID is not valid.
     */
    public static final int XAER_NOTA = -4;

    /**
     * Invalid arguments were given.
     */
    public static final int XAER_INVAL = -5;

    /**
     * Routine was invoked in an inproper context.
     */
    public static final int XAER_PROTO = -6;

    /**
     * Resource manager is unavailable.
     */
    public static final int XAER_RMFAIL = -7;

    /**
     * The XID already exists.
     */
    public static final int XAER_DUPID = -8;

    /**
     * The resource manager is doing work outside a global transaction.
     */
    public static final int XAER_OUTSIDE = -9;
}
| {
"pile_set_name": "Github"
} |
---
title: All Ledgers
clientData:
laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=ledgers&endpoint=all
replacement: https://developers.stellar.org/api/resources/ledgers/
---
This endpoint represents all [ledgers](../resources/ledger.md).
This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to get notifications as ledgers are closed by the Stellar network.
If called in streaming mode Horizon will start at the earliest known ledger unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream ledgers created since your request time.
## Request
```
GET /ledgers{?cursor,limit,order}
```
### Arguments
| name | notes | description | example |
| ---- | ----- | ----------- | ------- |
| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` |
| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` |
| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` |
### curl Example Request
```sh
# Retrieve the 200 latest ledgers, ordered chronologically
curl "https://horizon-testnet.stellar.org/ledgers?limit=200&order=desc"
```
### JavaScript Example Request
```javascript
server.ledgers()
.call()
.then(function (ledgerResult) {
// page 1
console.log(ledgerResult.records)
return ledgerResult.next()
})
.then(function (ledgerResult) {
// page 2
console.log(ledgerResult.records)
})
.catch(function(err) {
console.log(err)
})
```
### JavaScript Streaming Example
```javascript
var StellarSdk = require('stellar-sdk')
var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
var ledgerHandler = function (ledgerResponse) {
console.log(ledgerResponse);
};
var es = server.ledgers()
.cursor('now')
.stream({
onmessage: ledgerHandler
})
```
## Response
This endpoint responds with a list of ledgers. See [ledger resource](../resources/ledger.md) for reference.
### Example Response
```json
{
"_embedded": {
"records": [
{
"_links": {
"effects": {
"href": "/ledgers/1/effects/{?cursor,limit,order}",
"templated": true
},
"operations": {
"href": "/ledgers/1/operations/{?cursor,limit,order}",
"templated": true
},
"self": {
"href": "/ledgers/1"
},
"transactions": {
"href": "/ledgers/1/transactions/{?cursor,limit,order}",
"templated": true
}
},
"id": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1",
"paging_token": "4294967296",
"hash": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1",
"sequence": 1,
"transaction_count": 0,
"successful_transaction_count": 0,
"failed_transaction_count": 0,
"operation_count": 0,
"tx_set_operation_count": 0,
"closed_at": "1970-01-01T00:00:00Z",
"total_coins": "100000000000.0000000",
"fee_pool": "0.0000000",
"base_fee_in_stroops": 100,
"base_reserve_in_stroops": 100000000,
"max_tx_set_size": 50
},
{
"_links": {
"effects": {
"href": "/ledgers/2/effects/{?cursor,limit,order}",
"templated": true
},
"operations": {
"href": "/ledgers/2/operations/{?cursor,limit,order}",
"templated": true
},
"self": {
"href": "/ledgers/2"
},
"transactions": {
"href": "/ledgers/2/transactions/{?cursor,limit,order}",
"templated": true
}
},
"id": "e12e5809ab8c59d8256e691cb48a024dd43960bc15902d9661cd627962b2bc71",
"paging_token": "8589934592",
"hash": "e12e5809ab8c59d8256e691cb48a024dd43960bc15902d9661cd627962b2bc71",
"prev_hash": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1",
"sequence": 2,
"transaction_count": 0,
"successful_transaction_count": 0,
"failed_transaction_count": 0,
"operation_count": 0,
"closed_at": "2015-07-16T23:49:00Z",
"total_coins": "100000000000.0000000",
"fee_pool": "0.0000000",
"base_fee_in_stroops": 100,
"base_reserve_in_stroops": 100000000,
"max_tx_set_size": 100
}
]
},
"_links": {
"next": {
"href": "/ledgers?order=asc&limit=2&cursor=8589934592"
},
"prev": {
"href": "/ledgers?order=desc&limit=2&cursor=4294967296"
},
"self": {
"href": "/ledgers?order=asc&limit=2&cursor="
}
}
}
```
### Example Streaming Event
```json
{
"_links": {
"effects": {
"href": "/ledgers/69859/effects/{?cursor,limit,order}",
"templated": true
},
"operations": {
"href": "/ledgers/69859/operations/{?cursor,limit,order}",
"templated": true
},
"self": {
"href": "/ledgers/69859"
},
"transactions": {
"href": "/ledgers/69859/transactions/{?cursor,limit,order}",
"templated": true
}
},
"id": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118",
"paging_token": "300042120331264",
"hash": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118",
"prev_hash": "4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4",
"sequence": 69859,
"transaction_count": 0,
"successful_transaction_count": 0,
"failed_transaction_count": 0,
"operation_count": 0,
"closed_at": "2015-07-20T15:51:52Z",
"total_coins": "100000000000.0000000",
"fee_pool": "0.0025600",
"base_fee_in_stroops": 100,
"base_reserve_in_stroops": 100000000,
"max_tx_set_size": 50
}
```
## Errors
- The [standard errors](../errors.md#standard-errors).
| {
"pile_set_name": "Github"
} |
/*
* DOSBox WAV decoder API implementation
* -------------------------------------
* It makes use of the dr_wav library by David Reid (mackron@gmail.com)
* Source links:
* - dr_libs: https://github.com/mackron/dr_libs (source)
* - dr_wav: http://mackron.github.io/dr_wav.html (website)
*
* Copyright (C) 2020 The dosbox-staging team
* Copyright (C) 2018-2019 Kevin R. Croft <krcroft@gmail.com>
* Copyright (C) 2001-2017 Ryan C. Gordon <icculus@icculus.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#if HAVE_CONFIG_H
# include <config.h>
#endif
#include <math.h> /* llroundf */
#include "SDL_sound.h"
#define __SDL_SOUND_INTERNAL__
#include "SDL_sound_internal.h"
/* Map dr_wav's memory routines to SDL's */
#define DRWAV_FREE(p) SDL_free((p))
#define DRWAV_MALLOC(sz) SDL_malloc((sz))
#define DRWAV_REALLOC(p, sz) SDL_realloc((p), (sz))
#define DRWAV_ZERO_MEMORY(p, sz) SDL_memset((p), 0, (sz))
#define DRWAV_COPY_MEMORY(dst, src, sz) SDL_memcpy((dst), (src), (sz))
#define DR_WAV_NO_STDIO
#define DR_WAV_IMPLEMENTATION
#include "dr_wav.h"
/* dr_wav read callback: copy up to `bytesToRead` bytes from the sample's
 * SDL_RWops stream into `pBufferOut`.  Returns the number of bytes actually
 * delivered and raises the sample's EOF flag when the stream runs dry. */
static size_t wav_read(void* pUserData, void* pBufferOut, size_t bytesToRead)
{
    Sound_Sample *sample = (Sound_Sample *) pUserData;
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    SDL_RWops *stream = internal->rw;
    Uint8 *dst = (Uint8 *) pBufferOut;
    size_t total = 0;

    for (;;) {
        if (total >= bytesToRead)
            break; /* request satisfied */
        const size_t chunk = SDL_RWread(stream, dst, 1, bytesToRead - total);
        if (chunk == 0) {
            /* Stream exhausted: remember EOF so the caller stops decoding. */
            sample->flags |= SOUND_SAMPLEFLAG_EOF;
            break;
        }
        total += chunk;
        dst += chunk;
    }
    return total;
} /* wav_read */
/* dr_wav seek callback: translate dr_wav's seek origin to SDL's and move the
 * underlying SDL_RWops stream.  Returns DRWAV_TRUE on success. */
static drwav_bool32 wav_seek(void* pUserData, int offset, drwav_seek_origin origin)
{
    Sound_Sample *sample = (Sound_Sample *) pUserData;
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    int whence;

    if (origin == drwav_seek_origin_start)
        whence = RW_SEEK_SET;
    else
        whence = RW_SEEK_CUR;

    if (SDL_RWseek(internal->rw, offset, whence) == -1)
        return DRWAV_FALSE;
    return DRWAV_TRUE;
} /* wav_seek */
/* Decoder lifecycle hook: dr_wav needs no global setup, so this is trivial. */
static int WAV_init(void)
{
    return 1; /* always succeeds. */
} /* WAV_init */
/* Decoder lifecycle hook: nothing global to tear down for dr_wav. */
static void WAV_quit(void)
{
    /* it's a no-op. */
} /* WAV_quit */
/* Release the per-sample dr_wav decoder state, if any.  Safe to call more
 * than once: decoder_private is nulled after the first release. */
static void WAV_close(Sound_Sample *sample)
{
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    drwav *decoder = (drwav *) internal->decoder_private;

    if (decoder == NULL)
        return;

    (void) drwav_uninit(decoder);
    SDL_free(decoder);
    internal->decoder_private = NULL;
} /* WAV_close */
/* Probe/open callback: try to parse the sample's stream as a WAV/W64 file.
 * On success fills in the sample's actual format (rate/format/channels) and
 * total_time in milliseconds; on failure releases the decoder state.
 * Returns non-zero (DRWAV_TRUE) when the codec accepted the stream. */
static int WAV_open(Sound_Sample *sample, const char *ext)
{
    (void) ext; /* deliberately unused, but present for API compliance */
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;

    drwav *dr = (drwav *) SDL_malloc(sizeof(drwav));
    if (dr == NULL) {
        /* Fix: the allocation was previously handed to drwav_init_ex
         * unchecked, which would dereference a NULL pointer on OOM. */
        SNDDBG(("WAV: Out of memory.\n"));
        return 0;
    }
    internal->decoder_private = dr;

    /* drwav_init_ex returns a drwav_bool32 (was mistyped drwav_result). */
    const drwav_bool32 result = drwav_init_ex(dr, wav_read, wav_seek, NULL, sample, NULL, 0, NULL);
    if (result == DRWAV_TRUE) {
        SNDDBG(("WAV: Codec accepted the data stream.\n"));
        sample->flags = SOUND_SAMPLEFLAG_CANSEEK;
        sample->actual.rate = dr->sampleRate;
        sample->actual.format = AUDIO_S16SYS;
        sample->actual.channels = (Uint8)(dr->channels);

        const Uint64 frames = (Uint64) dr->totalPCMFrameCount;
        const Uint32 rate = (Uint32) dr->sampleRate;
        if (frames == 0 || rate == 0) {
            /* Unknown length (or malformed rate, which would divide by
             * zero below): report duration as unavailable. */
            internal->total_time = -1;
        }
        else {
            /* Fix: do the whole-seconds part in 64-bit arithmetic; the old
             * code truncated `frames` through a Sint32 cast and then mixed
             * it with the unsigned rate, corrupting long durations. */
            internal->total_time = (Sint32) ((frames / rate) * 1000
                                             + ((frames % rate) * 1000) / rate);
        } /* else */
    } /* if */
    else {
        SNDDBG(("WAV: Codec could not parse the data stream.\n"));
        WAV_close(sample);
    }
    return result;
} /* WAV_open */
/* Decode up to `desired_frames` PCM frames as signed 16-bit samples into
 * `buffer`.  Returns the number of frames actually produced. */
static Uint32 WAV_read(Sound_Sample *sample, void* buffer, Uint32 desired_frames)
{
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    drwav *decoder = (drwav *) internal->decoder_private;
    drwav_int16 *out = (drwav_int16 *) buffer;
    return (Uint32) drwav_read_pcm_frames_s16(decoder, desired_frames, out);
} /* WAV_read */
/* Seek the decoder back to the first PCM frame.  Returns non-zero on
 * success. */
static int WAV_rewind(Sound_Sample *sample)
{
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    drwav *decoder = (drwav *) internal->decoder_private;
    const drwav_bool32 ok = drwav_seek_to_pcm_frame(decoder, 0);
    return ok == DRWAV_TRUE;
} /* WAV_rewind */
/* Seek to the PCM frame closest to the requested millisecond position,
 * based on the sample's playback rate.  Returns non-zero on success. */
static int WAV_seek(Sound_Sample *sample, Uint32 ms)
{
    Sound_SampleInternal *internal = (Sound_SampleInternal *) sample->opaque;
    drwav *decoder = (drwav *) internal->decoder_private;

    /* frames = rate [frames/s] * ms / 1000, rounded to nearest. */
    const float frames_per_ms = ((float) sample->actual.rate) / 1000.0f;
    const drwav_uint64 target_frame = llroundf(frames_per_ms * ms);

    return drwav_seek_to_pcm_frame(decoder, target_frame) == DRWAV_TRUE;
} /* WAV_seek */
/* File extensions this decoder claims to handle; NULL-terminated. */
static const char *extensions_wav[] = { "WAV", "W64", NULL };

/* Decoder vtable exported to SDL_sound: metadata block first, then the
 * lifecycle/stream callbacks defined above. */
const Sound_DecoderFunctions __Sound_DecoderFunctions_WAV =
{
    {
        extensions_wav,
        "WAV Audio Codec",
        "The dosbox-staging team"
    },
    WAV_init, /* init() method */
    WAV_quit, /* quit() method */
    WAV_open, /* open() method */
    WAV_close, /* close() method */
    WAV_read, /* read() method */
    WAV_rewind, /* rewind() method */
    WAV_seek /* seek() method */
};
/* end of wav.c ... */
| {
"pile_set_name": "Github"
} |
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequest;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.exception.InvalidJsonRpcParameters;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.proof.GetProofResult;
import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.proof.WorldStateProof;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import java.util.Collections;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
/**
 * Unit tests for the {@code eth_getProof} JSON-RPC method.
 *
 * <p>All blockchain access goes through a mocked {@link BlockchainQueries};
 * {@link #generateWorldState()} wires up the mocks so that {@code address}
 * has a Merkle proof at block {@code blockNumber}.
 */
@RunWith(MockitoJUnitRunner.class)
public class EthGetProofTest {

  // Legacy JUnit 4 rule used by the error-case tests to assert that
  // method.response(...) throws with a specific message.
  @Rule public final ExpectedException thrown = ExpectedException.none();

  @Mock private BlockchainQueries blockchainQueries;

  // Instance under test; rebuilt for every test in setUp().
  private EthGetProof method;

  private final String JSON_RPC_VERSION = "2.0";
  private final String ETH_METHOD = "eth_getProof";

  // Account whose proof is requested in the happy-path test.
  private final Address address =
      Address.fromHexString("0x1234567890123456789012345678901234567890");
  // Single storage slot included in the proof request.
  private final UInt256 storageKey =
      UInt256.fromHexString("0x0000000000000000000000000000000000000000000000000000000000000001");
  private final long blockNumber = 1;

  @Before
  public void setUp() {
    method = new EthGetProof(blockchainQueries);
  }

  @Test
  public void returnsCorrectMethodName() {
    assertThat(method.getName()).isEqualTo(ETH_METHOD);
  }

  // Param index 0 (account address) missing -> InvalidJsonRpcParameters.
  @Test
  public void errorWhenNoAddressAccountSupplied() {
    final JsonRpcRequestContext request = requestWithParams(null, null, "latest");

    thrown.expect(InvalidJsonRpcParameters.class);
    thrown.expectMessage("Missing required json rpc parameter at index 0");

    method.response(request);
  }

  // Param index 1 (storage keys array) missing -> InvalidJsonRpcParameters.
  @Test
  public void errorWhenNoStorageKeysSupplied() {
    final JsonRpcRequestContext request = requestWithParams(address.toString(), null, "latest");

    thrown.expect(InvalidJsonRpcParameters.class);
    thrown.expectMessage("Missing required json rpc parameter at index 1");

    method.response(request);
  }

  // Param index 2 (block number) missing -> InvalidJsonRpcParameters.
  @Test
  public void errorWhenNoBlockNumberSupplied() {
    final JsonRpcRequestContext request = requestWithParams(address.toString(), new String[] {});

    thrown.expect(InvalidJsonRpcParameters.class);
    thrown.expectMessage("Missing required json rpc parameter at index 2");

    method.response(request);
  }

  // The world state exists but the queried address (zero address) has no
  // proof -> NO_ACCOUNT_FOUND error response.
  @Test
  public void errorWhenAccountNotFound() {

    generateWorldState();

    final JsonRpcErrorResponse expectedResponse =
        new JsonRpcErrorResponse(null, JsonRpcError.NO_ACCOUNT_FOUND);

    final JsonRpcRequestContext request =
        requestWithParams(
            Address.fromHexString("0x0000000000000000000000000000000000000000"),
            new String[] {storageKey.toString()},
            String.valueOf(blockNumber));

    final JsonRpcErrorResponse response = (JsonRpcErrorResponse) method.response(request);

    assertThat(response).isEqualToComparingFieldByField(expectedResponse);
  }

  // No world state available for the requested block -> WORLD_STATE_UNAVAILABLE.
  @Test
  public void errorWhenWorldStateUnavailable() {

    when(blockchainQueries.getWorldState(blockNumber)).thenReturn(Optional.empty());

    final JsonRpcErrorResponse expectedResponse =
        new JsonRpcErrorResponse(null, JsonRpcError.WORLD_STATE_UNAVAILABLE);

    final JsonRpcRequestContext request =
        requestWithParams(
            Address.fromHexString("0x0000000000000000000000000000000000000000"),
            new String[] {storageKey.toString()},
            String.valueOf(blockNumber));

    final JsonRpcErrorResponse response = (JsonRpcErrorResponse) method.response(request);

    assertThat(response).isEqualToComparingFieldByField(expectedResponse);
  }

  // Happy path: a valid address/storage-key/block request returns the
  // GetProofResult built from the mocked world-state proof.
  @Test
  public void getProof() {

    final GetProofResult expectedResponse = generateWorldState();

    final JsonRpcRequestContext request =
        requestWithParams(
            address.toString(), new String[] {storageKey.toString()}, String.valueOf(blockNumber));

    final JsonRpcSuccessResponse response = (JsonRpcSuccessResponse) method.response(request);

    assertThat(response.getResult()).isEqualToComparingFieldByFieldRecursively(expectedResponse);
  }

  // Helper: wrap positional params into a JSON-RPC request context.
  private JsonRpcRequestContext requestWithParams(final Object... params) {
    return new JsonRpcRequestContext(new JsonRpcRequest(JSON_RPC_VERSION, ETH_METHOD, params));
  }

  /**
   * Stubs the mocked {@link BlockchainQueries} with a world state whose root
   * contains a proof for {@code address} / {@code storageKey}, and returns
   * the {@link GetProofResult} the method under test is expected to produce.
   */
  @SuppressWarnings("unchecked")
  private GetProofResult generateWorldState() {

    final Wei balance = Wei.of(1);
    final Hash codeHash =
        Hash.fromHexString("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
    final long nonce = 1;
    final Hash rootHash =
        Hash.fromHexString("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b431");
    final Hash storageRoot =
        Hash.fromHexString("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");

    final WorldStateArchive worldStateArchive = mock(WorldStateArchive.class);
    when(blockchainQueries.getWorldStateArchive()).thenReturn(worldStateArchive);

    final StateTrieAccountValue stateTrieAccountValue = mock(StateTrieAccountValue.class);
    when(stateTrieAccountValue.getBalance()).thenReturn(balance);
    when(stateTrieAccountValue.getCodeHash()).thenReturn(codeHash);
    when(stateTrieAccountValue.getNonce()).thenReturn(nonce);
    when(stateTrieAccountValue.getStorageRoot()).thenReturn(storageRoot);

    final WorldStateProof worldStateProof = mock(WorldStateProof.class);
    when(worldStateProof.getAccountProof())
        .thenReturn(
            Collections.singletonList(
                Bytes.fromHexString(
                    "0x1111111111111111111111111111111111111111111111111111111111111111")));
    when(worldStateProof.getStateTrieAccountValue()).thenReturn(stateTrieAccountValue);
    when(worldStateProof.getStorageKeys()).thenReturn(Collections.singletonList(storageKey));
    when(worldStateProof.getStorageProof(storageKey))
        .thenReturn(
            Collections.singletonList(
                Bytes.fromHexString(
                    "0x2222222222222222222222222222222222222222222222222222222222222222")));
    when(worldStateProof.getStorageValue(storageKey)).thenReturn(UInt256.ZERO);

    when(worldStateArchive.getAccountProof(eq(rootHash), eq(address), anyList()))
        .thenReturn(Optional.of(worldStateProof));

    final MutableWorldState mutableWorldState = mock(MutableWorldState.class);
    when(mutableWorldState.rootHash()).thenReturn(rootHash);
    when(blockchainQueries.getWorldState(blockNumber)).thenReturn(Optional.of(mutableWorldState));

    return GetProofResult.buildGetProofResult(address, worldStateProof);
  }
}
| {
"pile_set_name": "Github"
} |
---
title: "Installing boot9strap (DSiWare Save Injection)"
---
{% include toc title="Table of Contents" %}
### Required Reading
Note that if you have any payload files other than `GodMode9.firm` in the `/luma/payloads/` folder on your SD card, holding (Start) on boot will display a "chainloader menu" where you will have to use the D-Pad and the (A) button to select "GodMode9" for these instructions.
To use the [magnet](https://wikipedia.org/wiki/Magnet_URI_scheme) links on this page, you will need a torrent client like [Deluge](http://dev.deluge-torrent.org/wiki/Download).
To extract the `.7z` files linked on this page, you will need a file archiver like [7-Zip](http://www.7-zip.org/) or [The Unarchiver](https://theunarchiver.com/).
If you do not use the correct `.firm` corresponding to the target 3DS, you will BRICK! Ensure you download and use the correct one!
{: .notice--danger}
### What You Need
* Two 3DS family devices
+ **The source 3DS**: the device running boot9strap *on the latest version*
+ **The target 3DS**: the device on stock firmware *on 11.9.0*
* Already own one of the following exploitable DSiWare games (a pirated copy of the game will **not** work) on **the source 3DS**
+ **Fieldrunners**
+ **Legends of Exidia**
+ **Guitar Rock Tour**
+ **The Legend of Zelda: Four Swords**
* The latest release of [3ds_dsiwarehax_installer](https://github.com/zoogie/3ds_dsiwarehax_installer/releases/latest)
* The latest release of [GodMode9](https://github.com/d0k3/GodMode9/releases/latest)
* The latest release of [b9sTool](https://github.com/zoogie/b9sTool/releases/latest)
* The latest release of [Luma3DS](https://github.com/AuroraWright/Luma3DS/releases/latest) *(the `.7z` file)*
* The latest release of [the Homebrew Launcher](https://github.com/fincs/new-hbmenu/releases/latest)
### Instructions
#### Section I - Prep Work
Use a [save manager](https://github.com/FlagBrew/Checkpoint/releases/latest) to backup any saves you care about on *the target 3DS* (it will be formatted!)
{: .notice--warning}
1. Copy `GodMode9.firm` from the GodMode9 `.zip` to the `/luma/payloads/` folder on **the source 3DS**'s SD card
1. Copy the `gm9` folder from the GodMode9 `.zip` to the root of **the source 3DS**'s SD card
1. Copy the relevant `public.sav` from the `/dsiware/<8-character-id>/` folder in the 3ds_dsiwarehax_installer `.zip` to the root of **the source 3DS**'s SD card
+ **Fieldrunners USA Region**: `4b464445`
+ **Fieldrunners EUR Region**: `4b464456`
+ **Legends of Exidia USA Region**: `4b4c4545`
+ **Legends of Exidia EUR Region**: `4b4c4556`
+ **Legends of Exidia JPN Region**: `4b4c454a`
+ **Guitar Rock Tour EUR Region**: `4b475256`
+ **Guitar Rock Tour USA Region**: `4b475245`
+ **The Legend of Zelda: Four Swords EUR Region**: `4b513956`
+ **The Legend of Zelda: Four Swords USA Region**: `4b513945`
1. Reinsert **the source 3DS**'s SD card
1. Boot **the source 3DS** while holding (Start) to launch GodMode9
1. Press (Home) to bring up the action menu
1. Select "Scripts..."
1. Select "GM9Megascript"
1. Select "Backup Options"
1. Select "SysNAND Backup"
1. Press (A) to confirm
+ This process will take some time
+ If you get an error, make sure that you have at least 1.3GB of free space on your SD card
1. Press (A) to continue
1. Press (B) to return to the main menu
1. Select "Exit"
1. Press (A) to relock write permissions if prompted
1. Hold (R) and press (B) at the same time to eject **the source 3DS**'s SD card
1. Put **the source 3DS**'s SD card into your computer
1. Put **the target 3DS**'s SD card into your computer
1. **Backup every file on both 3DS's SD cards to two separate folders on your computer (keep track of which is which)!**
1. Reinsert each SD card back into their corresponding 3DS
1. Press (Start) on **the source 3DS** to reboot
#### Section II - Installing the Save
1. Boot **the source 3DS** while holding (Start) to launch GodMode9
1. Navigate to `[0:] SDCARD`
1. Press (Y) on `public.sav` to copy it
1. Press (B) to get back to the main menu
1. Navigate to `SYSNAND TWLN` -> `title` -> `00030004`
1. Navigate to the folder for your game and region:
+ **Fieldrunners USA Region**: `4b464445`
+ **Fieldrunners EUR Region**: `4b464456`
+ **Legends of Exidia USA Region**: `4b4c4545`
+ **Legends of Exidia EUR Region**: `4b4c4556`
+ **Legends of Exidia JPN Region**: `4b4c454a`
+ **Guitar Rock Tour EUR Region**: `4b475256`
+ **Guitar Rock Tour USA Region**: `4b475245`
+ **The Legend of Zelda: Four Swords EUR Region**: `4b513956`
+ **The Legend of Zelda: Four Swords USA Region**: `4b513945`
1. Navigate to the `data` folder
1. Press (X) on the existing `public.sav` to delete it
1. Input the key combo given to unlock SysNAND (lvl1) writing
1. Press (A) to continue
1. Press (B) to decline relocking write permissions if prompted
1. Press (Y) to paste `public.sav`
1. Select "Copy path(s)"
1. Press (Start) to reboot **the source 3DS**
1. Launch your DSiWare game on **the source 3DS**
1. Test if the save is functional
+ **Fieldrunners**: Touch the 'Scores' button at the main menu
+ **Legends of Exidia**: After pressing (A) or (Start) at the two title screens, select the first save slot and press continue
+ **Guitar Rock Tour**: Scroll down and go to High-Scores -> Drums -> Easy
+ **The Legend of Zelda: Four Swords**: Just start the game
+ If your game has an error about `boot.nds` or a white screen, **then the exploit has been successful**
+ If your game behaves normally and does not give you this error, then you should stop and figure out what went wrong
+ If you get a black screen, [follow this troubleshooting guide](troubleshooting#dsi--ds-functionality-is-broken-after-completing-the-guide)
#### Section III - System Transfer
1. **Backup every file on both 3DS's SD cards to two separate folders on your computer (keep track of which is which)!**
1. Reinsert each SD card back into their corresponding 3DS
1. If **the target 3DS** has a Nintendo Network ID on it, you must format the device using System Settings:
+ Go to the last page of "Other Settings" and select "Format System Memory", then follow all instructions
1. Read the following:
+ Your CFW 3DS = the source 3DS = "Source System"
+ Your Stock 3DS = the target 3DS = "Target System"
+ **Move DSiWare titles if prompted!**
+ Do **NOT** delete the source system's SD card contents if prompted
+ Make sure neither device's battery dies during the transfer
+ **Old 3DS or Old 2DS (source) to New 3DS or New 2DS (target) only**: if asked which method you wish to use to transfer the SD card data:
+ **Do NOT** choose the "Low-Capacity microSD Card Transfer" or minimal option (option 2), it will only transfer tickets and likely will not transfer the DSiWare save.
+ Fast Method: If you have the ability to move the data from the SD card (source) to the microSD card (target), when prompted use the "PC-Based Transfer" option (option 3).
+ Slowest Method: If you don't have the ability to move the data on a PC use the **full** "Wireless Transfer" option (option 1).
1. Go to [this link](http://en-americas-support.nintendo.com/app/answers/detail/a_id/227/) and follow Nintendo's official instructions for System Transferring from one system to another while keeping in mind what you just read
#### Section IV - Restoring the source 3DS
1. On **the source 3DS**, complete initial setup
1. Do one of the following
+ Do the rest of the sections and then the full guide on **the target 3DS**, then wait one week *(do not restore your NAND backup before system transferring)*, then System Transfer from **the target 3DS** back to **the source 3DS** *(remember you cannot transfer back from a New 3DS or New 2DS to an Old 3DS or Old 2DS)*
+ Call Nintendo and tell them you no longer have access to the device that your NNID is linked to (which is **the target 3DS** in this case), and would like it linked to a different device (which is **the source 3DS** in this case)
+ You can also just [remove the NNID](godmode9-usage#removing-an-nnid-without-formatting-your-device) from **the source 3DS** if you'd prefer it remain on **the target 3DS**
1. Reboot **the source 3DS** while holding (Start) to launch GodMode9
1. If you are prompted to create an essential files backup, press (A) to do so, then press (A) to continue once it has completed
1. If you are prompted to fix the RTC date&time, press (A) to do so, then set the date and time, then press (A) to continue
+ Note that, if you had to fix the RTC date and time, you will have to fix the time in the System Settings as well after this guide
1. Navigate to `[0:] SDCARD` -> `gm9` -> `out`
1. Press (A) on `<date>_<serialnumber>_sysnand_###.bin` to select it, then select "NAND image options...", then select "Restore SysNAND (safe)"
1. Press (A) to unlock SysNAND overwriting, then input the key combo given
+ This will not overwrite your boot9strap installation
1. Input the key combo given to unlock SysNAND (lvl1) writing
+ This process will take some time
1. Once it has completed, press (A) to continue
1. Press (A) to relock write permissions if prompted
1. Press (Start) to reboot **the source 3DS**
#### Section V - Flashing the target 3DS's FIRM
1. Copy `boot.firm` from the Luma3DS `.7z` to the root of **the target 3DS**'s SD card
1. Copy `boot.nds` (B9STool) to the root of **the target 3DS**'s SD card
1. Copy `boot.3dsx` to the root of **the target 3DS**'s SD card
1. Launch your DSiWare game on **the target 3DS**
1. Launch b9sTool using your DSiWare game
+ **Fieldrunners**: Touch the 'Scores' button at the main menu
+ **Legends of Exidia**: After pressing (A) or (Start) at the two title screens, select the first save slot and press continue
+ **Guitar Rock Tour**: Scroll down and go to High-Scores -> Drums -> Easy
+ **The Legend of Zelda: Four Swords**: Just start the game
1. Select "Install boot9strap" and confirm
1. Exit b9sTool, then power off your device
#### Section VI - Configuring Luma3DS
1. Boot **the target 3DS** while holding (Select) to launch the Luma configuration menu
+ If you get a black screen, [follow this troubleshooting guide](troubleshooting#black-screen-on-sysnand-boot-after-installing-boot9strap)
1. Use the (A) button and the D-Pad to turn on the following:
+ **"Show NAND or user string in System Settings"**
1. Press (Start) to save and reboot
+ If you get an error, just continue the next page
___
### Continue to [Finalizing Setup](finalizing-setup)
{: .notice--primary}
| {
"pile_set_name": "Github"
} |
<?php
/*
* CnabPHP - Geração de arquivos de remessa e retorno em PHP
*
* LICENSE: The MIT License (MIT)
*
* Copyright (C) 2013 Ciatec.net
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included in all copies
* or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
namespace CnabPHP\resources\B033\retorno\L040;
use CnabPHP\resources\generico\retorno\L040\Generico0;
use CnabPHP\RetornoAbstract;
/**
 * Header record (registro tipo 0) parser for Banco Santander (033) CNAB
 * return files, layout 040.
 *
 * The $meta map below describes the fixed-width fields of the header line:
 * for each field, 'tamanho' is the width in characters, 'default' the value
 * used when absent, 'tipo' the conversion type and 'required' whether the
 * field must be present.  Parsing itself is done by the Generico0 parent.
 */
class Registro0 extends Generico0
{
    // Trailer record associated with this file.
    // NOTE(review): inserirDetalhe() below assigns ->trailler on the child
    // Registro1 objects, not on $this — confirm whether this property is
    // actually populated anywhere.
    public $trailler;

    // Fixed-width field layout of the CNAB240 file header (240 columns).
    protected $meta = array(
        // Bank code: fixed '033' (Santander).
        'codigo_banco' => array(
            'tamanho' => 3,
            'default' => '033',
            'tipo' => 'int',
            'required' => true
        ),
        // Lot code: '0000' identifies the file header.
        'codigo_lote' => array(
            'tamanho' => 4,
            'default' => '0000',
            'tipo' => 'int',
            'required' => true
        ),
        // Record type: 0 = file header.
        'tipo_registro' => array(
            'tamanho' => 1,
            'default' => '0',
            'tipo' => 'int',
            'required' => true
        ),
        // Filler / reserved blanks.
        'filler1' => array(
            'tamanho' => 8,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
        // Company inscription type (e.g. CPF/CNPJ discriminator).
        'tipo_inscricao' => array(
            'tamanho' => 1,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        // Company inscription number (CPF/CNPJ digits).
        'numero_inscricao' => array(
            'tamanho' => 15,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        // Branch (agência) number and its check digit.
        'agencia' => array(
            'tamanho' => 4,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        'agencia_dv' => array(
            'tamanho' => 1,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        // Account number and its check digit.
        'conta' => array(
            'tamanho' => 9,
            'default' => '0',
            'tipo' => 'int',
            'required' => true
        ),
        'conta_dv' => array(
            'tamanho' => 1,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        'filler2' => array(
            'tamanho' => 5,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
        // Beneficiary (cedente) code assigned by the bank.
        'codigo_beneficiario' => array(
            'tamanho' => 9,
            'default' => '0',
            'tipo' => 'int',
            'required' => true
        ),
        'filler3' => array(
            'tamanho' => 11,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
        // Company and bank display names.
        'nome_empresa' => array(
            'tamanho' => 30,
            'default' => '',
            'tipo' => 'alfa',
            'required' => true
        ),
        'nome_banco' => array(
            'tamanho' => 30,
            'default' => '',
            'tipo' => 'alfa',
            'required' => true
        ),
        'filler4' => array(
            'tamanho' => 10,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
        // File direction code: '2' = return (retorno) file.
        'codigo_remessa' => array(
            'tamanho' => 1,
            'default' => '2',
            'tipo' => 'int',
            'required' => true
        ),
        // File generation date (DDMMYYYY per CNAB convention — TODO confirm).
        'data_geracao' => array(
            'tamanho' => 8,
            'default' => '',
            'tipo' => 'date',
            'required' => true
        ),
        'filler5' => array(
            'tamanho' => 6,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
        // Sequential number of this file within the exchange.
        'numero_sequencial_arquivo' => array(
            'tamanho' => 6,
            'default' => '',
            'tipo' => 'int',
            'required' => true
        ),
        // Layout version: fixed '040'.
        'versao_layout' => array(
            'tamanho' => 3,
            'default' => '040',
            'tipo' => 'int',
            'required' => true
        ),
        'filler6' => array(
            'tamanho' => 74,
            'default' => ' ',
            'tipo' => 'alfa',
            'required' => true
        ),
    );

    /**
     * Parses the header line via the parent, advances the shared line
     * cursor past it and then consumes all detail lots in the file.
     *
     * @param string $linhaTxt raw fixed-width header line
     */
    public function __construct($linhaTxt)
    {
        parent::__construct($linhaTxt);
        // Move the shared cursor past the header just parsed.
        RetornoAbstract::$linesCounter++;
        $this->inserirDetalhe();
    }

    /**
     * Builds one Registro1 lot (plus its Registro5 trailer) per remaining
     * line block until the last 4 lines of the file (trailers) are reached.
     *
     * NOTE(review): the loop only terminates because the Registro1/Registro5
     * constructors advance RetornoAbstract::$linesCounter — confirm against
     * those classes before restructuring this loop.
     */
    public function inserirDetalhe()
    {
        while (RetornoAbstract::$linesCounter < (count(RetornoAbstract::$lines) - 4))
        {
            // Resolve the bank/layout-specific detail record class.
            $class = 'CnabPHP\resources\\B' . RetornoAbstract::$banco . '\retorno\\' . RetornoAbstract::$layout . '\Registro1';
            $lote = new $class(RetornoAbstract::$lines[RetornoAbstract::$linesCounter]);
            $class = 'CnabPHP\resources\\B' . RetornoAbstract::$banco . '\retorno\\' . RetornoAbstract::$layout . '\Registro5';
            $lote->trailler = new $class(RetornoAbstract::$lines[RetornoAbstract::$linesCounter]);
            $this->children[] = $lote;
        }
    }
}
| {
"pile_set_name": "Github"
} |
Required if the app uses the <a href="webview_tag">Webview Tag</a> to embed live content from the web in the packaged app.
| {
"pile_set_name": "Github"
} |
#pragma once
#include <Process/Focus/FocusDispatcher.hpp>
#include <Process/ProcessContext.hpp>
#include <score/tools/Bind.hpp>
#include <QGuiApplication>
#include <QPointF>
#include <QScreen>
#include <chrono>
/**
 * Routes low-level input events (press / move / release / escape) from an
 * Input_T source to a tool palette, tracking the active tool.
 *
 * When the tool changes mid-gesture, the current palette action is cancelled
 * and immediately restarted with the new tool at the last known point, so the
 * user does not have to release and press again.
 */
template <typename Tool_T, typename ToolPalette_T, typename Context_T, typename Input_T>
class ToolPaletteInputDispatcher : public QObject
{
public:
  ToolPaletteInputDispatcher(const Input_T& input, ToolPalette_T& palette, Context_T& context)
      : m_palette{palette}, m_context{context}, m_currentTool{palette.editionSettings().tool()}
  {
    // Derive the frame period (µs) from the primary screen's refresh rate;
    // falls back to the 16666 µs (60 Hz) member default when headless.
    auto screens = QGuiApplication::screens();
    if (!screens.empty())
    {
      m_frameTime = 1000000. / screens.front()->refreshRate();
    }

    // Forward tool changes and the four input signals to our handlers.
    using EditionSettings_T = std::remove_reference_t<decltype(palette.editionSettings())>;
    con(palette.editionSettings(),
        &EditionSettings_T::toolChanged,
        this,
        &ToolPaletteInputDispatcher::on_toolChanged);
    con(input, &Input_T::pressed, this, &ToolPaletteInputDispatcher::on_pressed);
    con(input, &Input_T::moved, this, &ToolPaletteInputDispatcher::on_moved);
    con(input, &Input_T::released, this, &ToolPaletteInputDispatcher::on_released);
    con(input, &Input_T::escPressed, this, &ToolPaletteInputDispatcher::on_cancel);
  }

  // Switch active tool; if a gesture is in progress, restart it under the
  // new tool at the current point so the interaction continues seamlessly.
  void on_toolChanged(Tool_T t)
  {
    m_palette.desactivate(m_currentTool);
    m_palette.activate(t);
    m_currentTool = t;
    if (m_running)
    {
      m_palette.on_cancel();
      m_prev = std::chrono::steady_clock::now();
      m_palette.on_pressed(m_currentPoint);
    }
  }

  // Begin a gesture: focus the owning presenter (if this context has one),
  // record the point/time, and forward the press to the palette.
  void on_pressed(QPointF p)
  {
    if constexpr (std::is_same_v<decltype(m_context.presenter), Process::LayerPresenter&>)
    {
      m_context.context.focusDispatcher.focus(&m_context.presenter);
    }
    m_currentPoint = p;
    m_prev = std::chrono::steady_clock::now();
    m_palette.on_pressed(p);
    m_running = true;
  }

  // Forward a move to the palette.
  // NOTE(review): the frame-time check only stores m_elapsedPoint, which is
  // never read, and on_moved(p) is called unconditionally — the throttle
  // appears to be incomplete or vestigial. Confirm intent before changing.
  void on_moved(QPointF p)
  {
    using namespace std::literals::chrono_literals;
    const auto t = std::chrono::steady_clock::now();
    if (t - m_prev < std::chrono::microseconds((int64_t)m_frameTime))
    {
      m_elapsedPoint = p;
    }
    m_currentPoint = p;
    m_palette.on_moved(p);
    m_prev = t;
  }

  // End the gesture normally at point p.
  void on_released(QPointF p)
  {
    m_running = false;
    m_currentPoint = p;
    m_palette.on_released(p);
  }

  // Abort the gesture (escape key): cancel without committing.
  void on_cancel()
  {
    m_running = false;
    m_palette.on_cancel();
  }

private:
  ToolPalette_T& m_palette;     // palette receiving the translated events
  Context_T& m_context;         // process/presenter context (for focus)
  QPointF m_currentPoint;       // last point seen from the input source
  Tool_T m_currentTool;         // tool currently activated on the palette
  std::chrono::steady_clock::time_point m_prev; // time of last press/move
  QPointF m_elapsedPoint;       // see NOTE(review) in on_moved — never read
  qreal m_frameTime{16666};     // In microseconds
  bool m_running = false;       // true while a press...release gesture is live
};
| {
"pile_set_name": "Github"
} |
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
//
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environment variable is set to true (case insensitive).
package ec2metadata
import (
"bytes"
"errors"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"

// disableServiceEnvVar is the environment variable that, when set to
// "true" (case insensitive), disables the EC2 metadata client entirely.
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"

// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
	*client.Client // embedded generic AWS service client
}
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
//
// Example:
// // Create a EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession)
//
// // Create a EC2Metadata client with additional configuration
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
	// Resolve the service-specific config from the session/provider,
	// then delegate to NewClient, which installs the metadata handlers.
	c := p.ClientConfig(ServiceName, cfgs...)
	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// NewClient returns a new EC2Metadata client. Should be used to create
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// If an unmodified HTTP client is provided from the stdlib default, or no client
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
		// If the http client is unmodified and this feature is not disabled
		// set custom timeouts for EC2Metadata requests.
		cfg.HTTPClient = &http.Client{
			// use a shorter timeout than default because the metadata
			// service is local if it is running, and to fail faster
			// if not running on an ec2 instance.
			Timeout: 5 * time.Second,
		}
	}

	svc := &EC2Metadata{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName: ServiceName,
				Endpoint:    endpoint,
				APIVersion:  "latest",
			},
			handlers,
		),
	}

	// Wire up the metadata-specific request pipeline: plain-text body
	// unmarshaling, error extraction, and endpoint validation (no
	// request-parameter validation is needed for this service).
	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
	svc.Handlers.Validate.Clear()
	svc.Handlers.Validate.PushBack(validateEndpointHandler)

	// Disable the EC2 Metadata service if the environment variable is set.
	// This short-circuits the service's functionality to always fail to
	// send requests.
	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
		svc.Handlers.Send.SwapNamed(request.NamedHandler{
			Name: corehandlers.SendHandler.Name,
			Fn: func(r *request.Request) {
				r.Error = awserr.New(
					request.CanceledErrorCode,
					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
					nil)
			},
		})
	}

	// Add additional options to the service config
	for _, option := range opts {
		option(svc.Client)
	}

	return svc
}
func httpClientZero(c *http.Client) bool {
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
}
// metadataOutput holds the raw response body of a metadata request.
type metadataOutput struct {
	Content string
}
// unmarshalHandler copies the raw HTTP response body into the request's
// *metadataOutput data. It is installed on the client's Unmarshal
// handler list by NewClient.
func unmarshalHandler(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	b := &bytes.Buffer{}
	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
		// ("response" typo fixed in the user-visible error message.)
		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
		return
	}

	if data, ok := r.Data.(*metadataOutput); ok {
		data.Content = b.String()
	}
}
// unmarshalError turns an error response into an EC2MetadataError,
// preserving the raw body text as the wrapped cause. It is installed on
// the client's UnmarshalError handler list by NewClient.
func unmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	b := &bytes.Buffer{}
	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
		// ("response" typo fixed in the user-visible error message.)
		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
		return
	}

	// Response body format is not consistent between metadata endpoints.
	// Grab the error message as a string and include that as the source error
	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
}
// validateEndpointHandler fails the request up front when the client
// was constructed without an endpoint to send requests to.
func validateEndpointHandler(r *request.Request) {
	if r.ClientInfo.Endpoint == "" {
		r.Error = aws.ErrMissingEndpoint
	}
}
| {
"pile_set_name": "Github"
} |
// Common/Vector.cpp
#include "StdAfx.h"
#include <string.h>
#include "Vector.h"
// Frees the raw byte buffer backing the vector. Items are stored as raw
// records (moved around with memmove), so no per-item destructors run.
CBaseRecordVector::~CBaseRecordVector()
{ delete []((unsigned char *)_items); }
// Removes all records; the allocated capacity is retained.
void CBaseRecordVector::Clear()
{ DeleteFrom(0); }
// Removes the last record.
// NOTE(review): assumes _size > 0; on an empty vector this calls
// Delete(-1) -- presumably TestIndexAndCorrectNum (declared in the
// header) rejects that; verify.
void CBaseRecordVector::DeleteBack()
{ Delete(_size - 1); }
// Removes every record from position index through the end.
void CBaseRecordVector::DeleteFrom(int index)
{ Delete(index, _size - index); }
// Ensure there is room to append one more record. When the vector is
// full, grow the backing store: by half once past 64 slots, otherwise
// in small fixed steps (8, then 4 for tiny vectors).
void CBaseRecordVector::ReserveOnePosition()
{
  if (_size != _capacity)
    return;
  const int grow = (_capacity > 64) ? (_capacity / 2)
                 : (_capacity > 8)  ? 8
                                    : 4;
  Reserve(_capacity + grow);
}
void CBaseRecordVector::Reserve(int newCapacity)
{
if(newCapacity <= _capacity)
return;
unsigned char *p = new unsigned char[newCapacity * _itemSize];
int numRecordsToMove = _capacity;
memmove(p, _items, _itemSize * numRecordsToMove);
delete [](unsigned char *)_items;
_items = p;
_capacity = newCapacity;
}
// Shifts the tail records [srcIndex, _size) so they begin at destIndex.
// memmove makes the overlapping ranges safe; the caller is responsible
// for fixing up _size afterwards (see InsertOneItem / Delete).
void CBaseRecordVector::MoveItems(int destIndex, int srcIndex)
{
  memmove(((unsigned char *)_items) + destIndex * _itemSize,
      ((unsigned char *)_items) + srcIndex * _itemSize,
      _itemSize * (_size - srcIndex));
}
// Opens a one-record gap at position index (growing storage first if
// needed) and bumps the size; the caller fills in the new slot's bytes.
// Appending (index == _size) is safe: MoveItems then copies zero bytes.
void CBaseRecordVector::InsertOneItem(int index)
{
  ReserveOnePosition();
  MoveItems(index + 1, index);
  _size++;
}
// Removes num records starting at index by shifting the tail down.
// TestIndexAndCorrectNum (declared in the header) presumably validates
// index and clamps num to the valid range -- confirm against Vector.h.
void CBaseRecordVector::Delete(int index, int num)
{
  TestIndexAndCorrectNum(index, num);
  if (num > 0)
  {
    MoveItems(index, index + num);
    _size -= num;
  }
}
| {
"pile_set_name": "Github"
} |
// Delegated hover highlight: track the single <td> currently under the
// mouse and paint it, ignoring mouseover noise from the cell's own
// descendants.

// <td> under the mouse right now (if any)
let currentElem = null;

table.onmouseover = function(event) {
  if (currentElem) {
    // before entering a new element, the mouse always leaves the previous one
    // if we didn't leave <td> yet, then we're still inside it, so can ignore the event
    return;
  }

  // only react to cells that actually belong to this table
  let target = event.target.closest('td');
  if (!target || !table.contains(target)) return;

  // yeah we're inside <td> now
  currentElem = target;
  target.style.background = 'pink';
};
// Clear the highlight only when the mouse truly leaves the current
// cell; transitions into the cell's own children are ignored.
table.onmouseout = function(event) {
  // if we're outside of any <td> now, then ignore the event
  if (!currentElem) return;

  // we're leaving the element -- where to? Maybe to a child element?
  let relatedTarget = event.relatedTarget;
  if (relatedTarget) { // possible: relatedTarget = null (e.g. mouse left the window)
    while (relatedTarget) {
      // go up the parent chain and check -- if we're still inside currentElem
      // then that's an internal transition -- ignore it
      if (relatedTarget == currentElem) return;
      relatedTarget = relatedTarget.parentNode;
    }
  }

  // we left the element. really.
  currentElem.style.background = '';
  currentElem = null;
};
| {
"pile_set_name": "Github"
} |
package com.faforever.client.preferences.ui;
import com.faforever.client.fx.Controller;
import com.faforever.client.fx.JavaFxUtil;
import com.faforever.client.fx.NodeListCell;
import javafx.scene.Node;
import javafx.scene.Parent;
import javafx.scene.control.Label;
import javafx.scene.control.ListCell;
import javafx.scene.control.ListView;
import javafx.scene.layout.Pane;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import java.util.function.Function;
/**
* A {@link javafx.scene.control.ListCell} containing a 'remove' button that removes the represented item from the
* parent {@link ListView}.
*/
@Component
@Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
public class RemovableListCellController extends ListCell<String> implements Controller<Pane> {

  // Public fields -- presumably injected by the FXML loader (fx:id
  // bindings); confirm against the matching .fxml file.
  public Pane removableCellRoot;
  public Label label;

  /**
   * Renders the cell: shows the removable-cell graphic with the item's
   * text, or clears the graphic for empty/placeholder cells. Must run
   * on the JavaFX application thread (asserted below).
   */
  @Override
  protected void updateItem(String item, boolean empty) {
    super.updateItem(item, empty);
    JavaFxUtil.assertApplicationThread();
    setText(null);
    if (empty || item == null) {
      setGraphic(null);
    } else {
      setGraphic(removableCellRoot);
      label.setText(item);
    }
  }

  /** Removes this cell's item from the backing ListView. */
  public void onRemoveButtonClicked() {
    getListView().getItems().remove(getItem());
  }

  @Override
  public Pane getRoot() {
    return removableCellRoot;
  }
}
| {
"pile_set_name": "Github"
} |
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/reduction_layer.hpp"
#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"
namespace caffe {
// Test fixture for ReductionLayer: builds a 2x3x4x5 bottom blob filled
// with uniform random values (fixed seed, deterministic) and provides
// helpers that check the layer's forward output and gradients for each
// reduction operation.
template <typename TypeParam>
class ReductionLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  ReductionLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
        blob_top_(new Blob<Dtype>()) {
    // fill the values
    Caffe::set_random_seed(1701);
    FillerParameter filler_param;
    UniformFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
  virtual ~ReductionLayerTest() {
    delete blob_bottom_;
    delete blob_top_;
  }

  // Runs a forward pass with the given op/coeff/axis and compares every
  // output value against a reference computed here on the CPU. The blob
  // is treated as num = count(0, axis) independent slices of
  // dim = count(axis) elements, each reduced to one scalar.
  void TestForward(ReductionParameter_ReductionOp op,
                   float coeff = 1, int axis = 0) {
    LayerParameter layer_param;
    ReductionParameter* reduction_param = layer_param.mutable_reduction_param();
    reduction_param->set_operation(op);
    // Only set non-default proto fields, so the default-value code path
    // is also exercised.
    if (coeff != 1.0) { reduction_param->set_coeff(coeff); }
    if (axis != 0) { reduction_param->set_axis(axis); }
    shared_ptr<ReductionLayer<Dtype> > layer(
        new ReductionLayer<Dtype>(layer_param));
    layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    const Dtype* in_data = this->blob_bottom_->cpu_data();
    const int num = this->blob_bottom_->count(0, axis);
    const int dim = this->blob_bottom_->count(axis);
    for (int n = 0; n < num; ++n) {
      Dtype expected_result = 0;
      for (int d = 0; d < dim; ++d) {
        switch (op) {
          case ReductionParameter_ReductionOp_SUM:
            expected_result += *in_data;
            break;
          case ReductionParameter_ReductionOp_MEAN:
            expected_result += *in_data / dim;
            break;
          case ReductionParameter_ReductionOp_ASUM:
            expected_result += fabs(*in_data);
            break;
          case ReductionParameter_ReductionOp_SUMSQ:
            expected_result += (*in_data) * (*in_data);
            break;
          default:
            LOG(FATAL) << "Unknown reduction op: "
                << ReductionParameter_ReductionOp_Name(op);
        }
        ++in_data;
      }
      expected_result *= coeff;
      const Dtype computed_result = this->blob_top_->cpu_data()[n];
      EXPECT_FLOAT_EQ(expected_result, computed_result)
          << "Incorrect result computed with op "
          << ReductionParameter_ReductionOp_Name(op) << ", coeff " << coeff;
    }
  }

  // Numerically checks all gradients of the layer configured with the
  // given op/coeff/axis.
  void TestGradient(ReductionParameter_ReductionOp op,
                    float coeff = 1, int axis = 0) {
    // Redundant local typedef (the class already declares Dtype);
    // kept as-is.
    typedef typename TypeParam::Dtype Dtype;
    LayerParameter layer_param;
    ReductionParameter* reduction_param = layer_param.mutable_reduction_param();
    reduction_param->set_operation(op);
    reduction_param->set_coeff(coeff);
    reduction_param->set_axis(axis);
    ReductionLayer<Dtype> layer(layer_param);
    GradientChecker<Dtype> checker(1e-2, 2e-3);
    checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
        this->blob_top_vec_);
  }

  Blob<Dtype>* const blob_bottom_;
  Blob<Dtype>* const blob_top_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};
TYPED_TEST_CASE(ReductionLayerTest, TestDtypesAndDevices);

// ---- Shape checks: the output rank depends on the reduction axis ----

TYPED_TEST(ReductionLayerTest, TestSetUp) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  shared_ptr<ReductionLayer<Dtype> > layer(
      new ReductionLayer<Dtype>(layer_param));
  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // axis 0 (default): everything reduced to a single scalar.
  ASSERT_EQ(this->blob_top_->num_axes(), 0);
}

TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis1) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  layer_param.mutable_reduction_param()->set_axis(1);
  shared_ptr<ReductionLayer<Dtype> > layer(
      new ReductionLayer<Dtype>(layer_param));
  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // axis 1: one output per item of the leading (num = 2) axis.
  ASSERT_EQ(this->blob_top_->num_axes(), 1);
  EXPECT_EQ(this->blob_top_->shape(0), 2);
}

TYPED_TEST(ReductionLayerTest, TestSetUpWithAxis2) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  layer_param.mutable_reduction_param()->set_axis(2);
  shared_ptr<ReductionLayer<Dtype> > layer(
      new ReductionLayer<Dtype>(layer_param));
  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // axis 2: the leading 2x3 axes are preserved.
  ASSERT_EQ(this->blob_top_->num_axes(), 2);
  EXPECT_EQ(this->blob_top_->shape(0), 2);
  EXPECT_EQ(this->blob_top_->shape(1), 3);
}

// ---- SUM: forward and gradient, with/without coeff and axis ----

TYPED_TEST(ReductionLayerTest, TestSum) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  this->TestForward(kOp);
}

TYPED_TEST(ReductionLayerTest, TestSumCoeff) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  const float kCoeff = 2.3;
  this->TestForward(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestForward(kOp, kCoeff, kAxis);
}

TYPED_TEST(ReductionLayerTest, TestSumGradient) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  this->TestGradient(kOp);
}

TYPED_TEST(ReductionLayerTest, TestSumCoeffGradient) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  const float kCoeff = 2.3;
  this->TestGradient(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestSumCoeffAxis1Gradient) {
  const ReductionParameter_ReductionOp kOp = ReductionParameter_ReductionOp_SUM;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestGradient(kOp, kCoeff, kAxis);
}

// ---- MEAN ----

TYPED_TEST(ReductionLayerTest, TestMean) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  this->TestForward(kOp);
}

TYPED_TEST(ReductionLayerTest, TestMeanCoeff) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  const float kCoeff = 2.3;
  this->TestForward(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestMeanCoeffAxis1) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestForward(kOp, kCoeff, kAxis);
}

TYPED_TEST(ReductionLayerTest, TestMeanGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  this->TestGradient(kOp);
}

TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  const float kCoeff = 2.3;
  this->TestGradient(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestMeanCoeffGradientAxis1) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_MEAN;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestGradient(kOp, kCoeff, kAxis);
}

// ---- ASUM (sum of absolute values) ----

TYPED_TEST(ReductionLayerTest, TestAbsSum) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  this->TestForward(kOp);
}

TYPED_TEST(ReductionLayerTest, TestAbsSumCoeff) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  const float kCoeff = 2.3;
  this->TestForward(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestForward(kOp, kCoeff, kAxis);
}

TYPED_TEST(ReductionLayerTest, TestAbsSumGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  this->TestGradient(kOp);
}

TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  const float kCoeff = 2.3;
  this->TestGradient(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestAbsSumCoeffAxis1Gradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_ASUM;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestGradient(kOp, kCoeff, kAxis);
}

// ---- SUMSQ (sum of squares) ----

TYPED_TEST(ReductionLayerTest, TestSumOfSquares) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  this->TestForward(kOp);
}

TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeff) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  const float kCoeff = 2.3;
  this->TestForward(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestForward(kOp, kCoeff, kAxis);
}

TYPED_TEST(ReductionLayerTest, TestSumOfSquaresGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  this->TestGradient(kOp);
}

TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffGradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  const float kCoeff = 2.3;
  this->TestGradient(kOp, kCoeff);
}

TYPED_TEST(ReductionLayerTest, TestSumOfSquaresCoeffAxis1Gradient) {
  const ReductionParameter_ReductionOp kOp =
      ReductionParameter_ReductionOp_SUMSQ;
  const float kCoeff = 2.3;
  const int kAxis = 1;
  this->TestGradient(kOp, kCoeff, kAxis);
}
} // namespace caffe
| {
"pile_set_name": "Github"
} |
package jd.plugins;
import java.io.File;
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.regex.Pattern;
import javax.swing.Icon;
import jd.controlling.linkcrawler.CrawledLink;
import jd.controlling.packagecontroller.AbstractPackageChildrenNode;
import jd.parser.Regex;
import org.appwork.utils.Files;
import org.appwork.utils.StringUtils;
import org.appwork.utils.os.CrossSystem;
import org.jdownloader.controlling.filter.CompiledFiletypeFilter;
import org.jdownloader.controlling.filter.CompiledFiletypeFilter.ExtensionsFilterInterface;
import org.jdownloader.gui.IconKey;
import org.jdownloader.gui.translate._GUI;
import org.jdownloader.images.NewTheme;
import org.jdownloader.logging.LogController;
public class LinkInfo {
private final int partNum;
private final Icon icon;
public Icon getIcon() {
return icon;
}
public int getPartNum() {
return partNum;
}
private final ExtensionsFilterInterface extension;
public ExtensionsFilterInterface getExtension() {
return extension;
}
private LinkInfo(final int partNum, final ExtensionsFilterInterface extension, Icon icon) {
this.partNum = partNum;
this.icon = icon;
this.extension = extension;
}
private static final HashMap<String, WeakReference<LinkInfo>> CACHE = new HashMap<String, WeakReference<LinkInfo>>();
public static LinkInfo getLinkInfo(final File file) {
if (file == null || !file.isFile()) {
return null;
}
final String fileName = file.getName();
final String fileNameExtension = Files.getExtension(fileName);
int num = -1;
try {
String partID = new Regex(fileName, "\\.r(\\d+)$").getMatch(0);
if (partID == null) {
partID = new Regex(fileName, "\\.pa?r?t?\\.?(\\d+).*?\\.rar$").getMatch(0);
}
if (partID != null) {
num = Integer.parseInt(partID);
}
} catch (Throwable e) {
e.printStackTrace();
}
final String mimeHint = null;
final String ID = fileNameExtension + "_" + num + "_" + mimeHint;
synchronized (CACHE) {
LinkInfo ret = null;
WeakReference<LinkInfo> linkInfo = CACHE.get(ID);
if (linkInfo == null || (ret = linkInfo.get()) == null) {
final ExtensionsFilterInterface hint = CompiledFiletypeFilter.getExtensionsFilterInterface(mimeHint);
final ExtensionsFilterInterface compiled = CompiledFiletypeFilter.getExtensionsFilterInterface(fileNameExtension);
final ExtensionsFilterInterface extension;
if (compiled == null || (hint != null && !hint.isSameExtensionGroup(compiled))) {
extension = new ExtensionsFilterInterface() {
final String extension;
final String desc;
final Pattern pattern;
{
if (fileNameExtension != null && fileNameExtension.matches("^[a-zA-Z0-9]{1,4}$")) {
extension = fileNameExtension;
desc = fileNameExtension;
pattern = Pattern.compile(Pattern.quote(fileNameExtension), Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
} else {
extension = "";
desc = _GUI.T.settings_linkgrabber_filter_others();
pattern = null;
}
}
@Override
public ExtensionsFilterInterface getSource() {
return hint;
}
@Override
public Pattern compiledAllPattern() {
if (hint != null) {
return hint.compiledAllPattern();
} else {
return null;
}
}
@Override
public String getDesc() {
if (hint != null) {
return hint.getDesc();
} else {
return desc;
}
}
@Override
public String getIconID() {
if (hint != null) {
return hint.getIconID();
} else {
return null;
}
}
@Override
public Pattern getPattern() {
if (hint != null) {
return hint.compiledAllPattern();
} else {
return pattern;
}
}
@Override
public String name() {
return extension;
}
@Override
public boolean isSameExtensionGroup(ExtensionsFilterInterface extension) {
if (hint != null) {
return hint.isSameExtensionGroup(extension);
} else {
return extension != null && extension.getIconID() == null && StringUtils.equals(extension.name(), name());
}
}
@Override
public ExtensionsFilterInterface[] listSameGroup() {
if (hint != null) {
return hint.listSameGroup();
} else {
return new ExtensionsFilterInterface[] { this };
}
}
};
} else {
extension = new ExtensionsFilterInterface() {
@Override
public Pattern compiledAllPattern() {
return compiled.compiledAllPattern();
}
@Override
public String getDesc() {
return compiled.getDesc();
}
@Override
public String getIconID() {
return compiled.getIconID();
}
@Override
public Pattern getPattern() {
return compiled.getPattern();
}
@Override
public ExtensionsFilterInterface getSource() {
return compiled;
}
@Override
public String name() {
return fileNameExtension;
}
@Override
public boolean isSameExtensionGroup(ExtensionsFilterInterface extension) {
return compiled.isSameExtensionGroup(extension);
}
@Override
public ExtensionsFilterInterface[] listSameGroup() {
return compiled.listSameGroup();
}
};
}
ret = new LinkInfo(num, extension, getIcon(fileName, extension));
CACHE.put(ID, new WeakReference<LinkInfo>(ret));
}
return ret;
}
}
public static LinkInfo getLinkInfo(AbstractPackageChildrenNode abstractChildrenNode) {
if (abstractChildrenNode != null) {
final String fileName;
final String mimeHint;
if (abstractChildrenNode instanceof DownloadLink) {
final DownloadLink link = (DownloadLink) abstractChildrenNode;
fileName = link.getView().getDisplayName();
mimeHint = link.getMimeHint();
} else if (abstractChildrenNode instanceof CrawledLink) {
final CrawledLink link = (CrawledLink) abstractChildrenNode;
fileName = link.getName();
final DownloadLink downloadLink = link.getDownloadLink();
if (downloadLink != null) {
mimeHint = downloadLink.getMimeHint();
} else {
mimeHint = null;
}
} else {
fileName = abstractChildrenNode.getName();
mimeHint = null;
}
final String fileNameExtension = Files.getExtension(fileName);
int num = -1;
try {
String partID = new Regex(fileName, "\\.r(\\d+)$").getMatch(0);
if (partID == null) {
partID = new Regex(fileName, "\\.pa?r?t?\\.?(\\d+).*?\\.rar$").getMatch(0);
}
if (partID != null) {
num = Integer.parseInt(partID);
}
} catch (Throwable e) {
e.printStackTrace();
}
final String ID = fileNameExtension + "_" + num + "_" + mimeHint;
synchronized (CACHE) {
LinkInfo ret = null;
WeakReference<LinkInfo> linkInfo = CACHE.get(ID);
if (linkInfo == null || (ret = linkInfo.get()) == null) {
final ExtensionsFilterInterface hint = CompiledFiletypeFilter.getExtensionsFilterInterface(mimeHint);
final ExtensionsFilterInterface compiled = CompiledFiletypeFilter.getExtensionsFilterInterface(fileNameExtension);
final ExtensionsFilterInterface extension;
if (compiled == null || (hint != null && !hint.isSameExtensionGroup(compiled))) {
extension = new ExtensionsFilterInterface() {
final String extension;
final String desc;
final Pattern pattern;
{
if (fileNameExtension != null && fileNameExtension.matches("^[a-zA-Z0-9]{1,4}$")) {
extension = fileNameExtension;
desc = fileNameExtension;
pattern = Pattern.compile(Pattern.quote(fileNameExtension), Pattern.DOTALL | Pattern.CASE_INSENSITIVE);
} else {
extension = "";
desc = _GUI.T.settings_linkgrabber_filter_others();
pattern = null;
}
}
@Override
public ExtensionsFilterInterface getSource() {
return hint;
}
@Override
public Pattern compiledAllPattern() {
if (hint != null) {
return hint.compiledAllPattern();
} else {
return null;
}
}
@Override
public String getDesc() {
if (hint != null) {
return hint.getDesc();
} else {
return desc;
}
}
@Override
public String getIconID() {
if (hint != null) {
return hint.getIconID();
} else {
return null;
}
}
@Override
public Pattern getPattern() {
if (hint != null) {
return hint.compiledAllPattern();
} else {
return pattern;
}
}
@Override
public String name() {
return extension;
}
@Override
public boolean isSameExtensionGroup(ExtensionsFilterInterface extension) {
if (hint != null) {
return hint.isSameExtensionGroup(extension);
} else {
return extension != null && extension.getIconID() == null && StringUtils.equals(extension.name(), name());
}
}
@Override
public ExtensionsFilterInterface[] listSameGroup() {
if (hint != null) {
return hint.listSameGroup();
} else {
return new ExtensionsFilterInterface[] { this };
}
}
};
} else {
extension = new ExtensionsFilterInterface() {
@Override
public Pattern compiledAllPattern() {
return compiled.compiledAllPattern();
}
@Override
public String getDesc() {
return compiled.getDesc();
}
@Override
public String getIconID() {
return compiled.getIconID();
}
@Override
public Pattern getPattern() {
return compiled.getPattern();
}
@Override
public ExtensionsFilterInterface getSource() {
return compiled;
}
@Override
public String name() {
return fileNameExtension;
}
@Override
public boolean isSameExtensionGroup(ExtensionsFilterInterface extension) {
return compiled.isSameExtensionGroup(extension);
}
@Override
public ExtensionsFilterInterface[] listSameGroup() {
return compiled.listSameGroup();
}
};
}
ret = new LinkInfo(num, extension, getIcon(fileName, extension));
CACHE.put(ID, new WeakReference<LinkInfo>(ret));
}
return ret;
}
}
return null;
}
public static Icon getIcon(final String name, final ExtensionsFilterInterface extension) {
Icon newIcon = null;
final String ext = Files.getExtension(name);
if (CrossSystem.isWindows() && ext != null) {
try {
newIcon = CrossSystem.getMime().getFileIcon(ext, 16, 16);
} catch (Throwable e) {
LogController.CL().log(e);
}
}
if (newIcon == null) {
String iconID = null;
if (extension != null && extension.getIconID() != null) {
iconID = extension.getIconID();
}
if (StringUtils.isEmpty(iconID)) {
iconID = IconKey.ICON_FILE;
}
newIcon = NewTheme.I().getIcon(iconID, 16);
}
return newIcon;
}
}
| {
"pile_set_name": "Github"
} |
@region $ Desert_1e {
north: Desert_3e.l;
south: Desert_1d.l;
east: Desert_2e.l;
west: Desert_1f.l;
region_orientation: FACE_WEST;
[
@sky { x:0; y:0; or:244; style:4; }
@flat { x:0; y:4; or:188;
8:0;
}
@tree { x:68; y:32; or:16; style:2; }
@garbage_can { x:116; y:144; or:228;
8:3;
9:0;
10:0;
}
@flat { x:0; y:4; or:188; style:1;
8:2;
}
]
}
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/stddef.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* the maximum delay we are willing to add (drop packets beyond that) */
#define TIME_HORIZON_NS (2000 * 1000 * 1000)
#define NS_PER_SEC 1000000000
#define ECN_HORIZON_NS 5000000
#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
/* Single-entry map (key is always 0 here): stores the departure
 * timestamp (ns) already assigned to the most recently scheduled
 * packet of the throttled flow. */
struct bpf_map_def SEC("maps") flow_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(uint32_t),   /* flow key */
	.value_size = sizeof(uint64_t), /* last scheduled tstamp, ns */
	.max_entries = 1,
};
/* Pace the flow to THROTTLE_RATE_BPS via earliest-departure-time
 * scheduling: each packet gets a departure timestamp one serialization
 * delay after the previous packet's. Packets that would be delayed past
 * TIME_HORIZON_NS are dropped; those past ECN_HORIZON_NS are CE-marked
 * first. Returns a TC_ACT_* verdict. */
static inline int throttle_flow(struct __sk_buff *skb)
{
	int key = 0;
	uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
	/* time needed to serialize skb->len bytes at the throttle rate */
	uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
			THROTTLE_RATE_BPS;
	uint64_t now = bpf_ktime_get_ns();
	uint64_t tstamp, next_tstamp = 0;

	/* earliest time this packet may depart, per the previous packet */
	if (last_tstamp)
		next_tstamp = *last_tstamp + delay_ns;

	/* never schedule into the past */
	tstamp = skb->tstamp;
	if (tstamp < now)
		tstamp = now;

	/* should we throttle? (no: flow is under rate, pass through) */
	if (next_tstamp <= tstamp) {
		if (bpf_map_update_elem(&flow_map, &key, &tstamp, BPF_ANY))
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}

	/* do not queue past the time horizon */
	if (next_tstamp - now >= TIME_HORIZON_NS)
		return TC_ACT_SHOT;

	/* set ecn bit, if needed */
	if (next_tstamp - now >= ECN_HORIZON_NS)
		bpf_skb_ecn_set_ce(skb);

	/* BPF_EXIST: the entry must already exist on this branch */
	if (bpf_map_update_elem(&flow_map, &key, &next_tstamp, BPF_EXIST))
		return TC_ACT_SHOT;

	skb->tstamp = next_tstamp;
	return TC_ACT_OK;
}
/* Throttle TCP traffic destined to port 9000; pass everything else.
 * tcp must point into the packet; bounds are re-checked here for the
 * verifier. */
static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
{
	void *data_end = (void *)(long)skb->data_end;

	/* drop malformed packets */
	if ((void *)(tcp + 1) > data_end)
		return TC_ACT_SHOT;

	if (tcp->dest == bpf_htons(9000))
		return throttle_flow(skb);

	return TC_ACT_OK;
}
/* Parse Ethernet + IPv4 headers (honoring a variable-length IHL) and
 * hand TCP packets to handle_tcp. Malformed packets are dropped. */
static inline int handle_ipv4(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct iphdr *iph;
	uint32_t ihl;

	/* drop malformed packets */
	if (data + sizeof(struct ethhdr) > data_end)
		return TC_ACT_SHOT;
	iph = (struct iphdr *)(data + sizeof(struct ethhdr));
	if ((void *)(iph + 1) > data_end)
		return TC_ACT_SHOT;
	/* IP header length in bytes, including any options */
	ihl = iph->ihl * 4;
	if (((void *)iph) + ihl > data_end)
		return TC_ACT_SHOT;

	if (iph->protocol == IPPROTO_TCP)
		return handle_tcp(skb, (struct tcphdr *)(((void *)iph) + ihl));

	return TC_ACT_OK;
}
/* TC classifier entry point: only IPv4 is inspected; all other
 * protocols pass through untouched. */
SEC("cls_test") int tc_prog(struct __sk_buff *skb)
{
	if (skb->protocol == bpf_htons(ETH_P_IP))
		return handle_ipv4(skb);

	return TC_ACT_OK;
}
char __license[] SEC("license") = "GPL";
| {
"pile_set_name": "Github"
} |
'''
Represents a single filter on a column.
'''
class DrawRequestColumnFilter:
    """Represents a single filter on a column.

    The original placed its docstrings as bare string statements *before*
    the methods they described, where the interpreter treats them as
    no-op expressions; they are now proper docstrings inside the class
    and methods.
    """

    def __init__(self, column_name, filter_text, operation):
        """Initialize the filter.

        Args:
            column_name: Name of the column the filter applies to.
            filter_text: Text the column's values are compared against.
            operation: Comparison operator; expected to be one of
                "=", "<=", ">=", "<", ">", or "!=".
        """
        self.name = column_name
        self.text = filter_text
        self.operation = operation

    def __repr__(self):
        return "ColFilter(name=%s, text=%s, op=%s)" % (self.name, self.text, self.operation)

    __str__ = __repr__
| {
"pile_set_name": "Github"
} |
# graceful-fs
graceful-fs functions as a drop-in replacement for the fs module,
making various improvements.
The improvements are meant to normalize behavior across different
platforms and environments, and to make filesystem access more
resilient to errors.
## Improvements over [fs module](http://api.nodejs.org/fs.html)
graceful-fs:
* Queues up `open` and `readdir` calls, and retries them once
something closes if there is an EMFILE error from too many file
descriptors.
* fixes `lchmod` for Node versions prior to 0.6.2.
* implements `fs.lutimes` if possible. Otherwise it becomes a noop.
* ignores `EINVAL` and `EPERM` errors in `chown`, `fchown` or
`lchown` if the user isn't root.
* makes `lchmod` and `lchown` become noops, if not available.
* retries reading a file if `read` results in EAGAIN error.
On Windows, it retries renaming a file for up to one second if `EACCES`
or `EPERM` error occurs, likely because antivirus software has locked
the directory.
## USAGE
```javascript
// use just like fs
var fs = require('graceful-fs')
// now go and do stuff with it...
fs.readFileSync('some-file-or-whatever')
```
| {
"pile_set_name": "Github"
} |
GL_SGIX_depth_texture
http://www.opengl.org/registry/specs/SGIX/depth_texture.txt
GL_SGIX_depth_texture
GL_DEPTH_COMPONENT16_SGIX 0x81A5
GL_DEPTH_COMPONENT24_SGIX 0x81A6
GL_DEPTH_COMPONENT32_SGIX 0x81A7
| {
"pile_set_name": "Github"
} |
{
"domain": "keyboard_remote",
"name": "Keyboard Remote",
"documentation": "https://www.home-assistant.io/integrations/keyboard_remote",
"requirements": ["evdev==1.1.2", "aionotify==0.2.0"],
"codeowners": ["@bendavid"]
}
| {
"pile_set_name": "Github"
} |
/*
CryptoJS v3.1.2
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
/**
* Cipher core components.
*/
// Guard: only install the cipher core once per CryptoJS instance.
CryptoJS.lib.Cipher || (function (undefined) {
    // Shortcuts
    var C = CryptoJS;
    var C_lib = C.lib;
    var Base = C_lib.Base;
    var WordArray = C_lib.WordArray;
    var BufferedBlockAlgorithm = C_lib.BufferedBlockAlgorithm;
    var C_enc = C.enc;
    var Utf8 = C_enc.Utf8;
    var Base64 = C_enc.Base64;
    var C_algo = C.algo;
    var EvpKDF = C_algo.EvpKDF;

    /**
     * Abstract base cipher template.
     *
     * @property {number} keySize This cipher's key size. Default: 4 (128 bits)
     * @property {number} ivSize This cipher's IV size. Default: 4 (128 bits)
     * @property {number} _ENC_XFORM_MODE A constant representing encryption mode.
     * @property {number} _DEC_XFORM_MODE A constant representing decryption mode.
     */
    var Cipher = C_lib.Cipher = BufferedBlockAlgorithm.extend({
        /**
         * Configuration options.
         *
         * @property {WordArray} iv The IV to use for this operation.
         */
        cfg: Base.extend(),

        /**
         * Creates this cipher in encryption mode.
         *
         * @param {WordArray} key The key.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {Cipher} A cipher instance.
         *
         * @static
         *
         * @example
         *
         *     var cipher = CryptoJS.algo.AES.createEncryptor(keyWordArray, { iv: ivWordArray });
         */
        createEncryptor: function (key, cfg) {
            return this.create(this._ENC_XFORM_MODE, key, cfg);
        },

        /**
         * Creates this cipher in decryption mode.
         *
         * @param {WordArray} key The key.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {Cipher} A cipher instance.
         *
         * @static
         *
         * @example
         *
         *     var cipher = CryptoJS.algo.AES.createDecryptor(keyWordArray, { iv: ivWordArray });
         */
        createDecryptor: function (key, cfg) {
            return this.create(this._DEC_XFORM_MODE, key, cfg);
        },

        /**
         * Initializes a newly created cipher.
         *
         * @param {number} xformMode Either the encryption or decryption transformation mode constant.
         * @param {WordArray} key The key.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @example
         *
         *     var cipher = CryptoJS.algo.AES.create(CryptoJS.algo.AES._ENC_XFORM_MODE, keyWordArray, { iv: ivWordArray });
         */
        init: function (xformMode, key, cfg) {
            // Apply config defaults (instance cfg shadows the prototype cfg)
            this.cfg = this.cfg.extend(cfg);

            // Store transform mode and key
            this._xformMode = xformMode;
            this._key = key;

            // Set initial values
            this.reset();
        },

        /**
         * Resets this cipher to its initial state.
         *
         * @example
         *
         *     cipher.reset();
         */
        reset: function () {
            // Reset data buffer inherited from BufferedBlockAlgorithm
            BufferedBlockAlgorithm.reset.call(this);

            // Perform concrete-cipher logic (implemented by subclasses)
            this._doReset();
        },

        /**
         * Adds data to be encrypted or decrypted.
         *
         * @param {WordArray|string} dataUpdate The data to encrypt or decrypt.
         *
         * @return {WordArray} The data after processing.
         *
         * @example
         *
         *     var encrypted = cipher.process('data');
         *     var encrypted = cipher.process(wordArray);
         */
        process: function (dataUpdate) {
            // Append new input to the internal buffer
            this._append(dataUpdate);

            // Process available (complete) blocks
            return this._process();
        },

        /**
         * Finalizes the encryption or decryption process.
         * Note that the finalize operation is effectively a destructive, read-once operation.
         *
         * @param {WordArray|string} dataUpdate The final data to encrypt or decrypt.
         *
         * @return {WordArray} The data after final processing.
         *
         * @example
         *
         *     var encrypted = cipher.finalize();
         *     var encrypted = cipher.finalize('data');
         *     var encrypted = cipher.finalize(wordArray);
         */
        finalize: function (dataUpdate) {
            // Final data update (argument is optional)
            if (dataUpdate) {
                this._append(dataUpdate);
            }

            // Perform concrete-cipher logic (padding/flush handled by subclasses)
            var finalProcessedData = this._doFinalize();

            return finalProcessedData;
        },

        // Sizes are expressed in 32-bit words: 128/32 = 4 words = 128 bits.
        keySize: 128/32,

        ivSize: 128/32,

        _ENC_XFORM_MODE: 1,

        _DEC_XFORM_MODE: 2,

        /**
         * Creates shortcut functions to a cipher's object interface.
         *
         * @param {Cipher} cipher The cipher to create a helper for.
         *
         * @return {Object} An object with encrypt and decrypt shortcut functions.
         *
         * @static
         *
         * @example
         *
         *     var AES = CryptoJS.lib.Cipher._createHelper(CryptoJS.algo.AES);
         */
        _createHelper: (function () {
            // A string key is treated as a password (PasswordBasedCipher);
            // a WordArray key is used directly (SerializableCipher).
            function selectCipherStrategy(key) {
                if (typeof key == 'string') {
                    return PasswordBasedCipher;
                } else {
                    return SerializableCipher;
                }
            }

            return function (cipher) {
                return {
                    encrypt: function (message, key, cfg) {
                        return selectCipherStrategy(key).encrypt(cipher, message, key, cfg);
                    },

                    decrypt: function (ciphertext, key, cfg) {
                        return selectCipherStrategy(key).decrypt(cipher, ciphertext, key, cfg);
                    }
                };
            };
        }())
    });
/**
* Abstract base stream cipher template.
*
* @property {number} blockSize The number of 32-bit words this cipher operates on. Default: 1 (32 bits)
*/
var StreamCipher = C_lib.StreamCipher = Cipher.extend({
_doFinalize: function () {
// Process partial blocks
var finalProcessedBlocks = this._process(!!'flush');
return finalProcessedBlocks;
},
blockSize: 1
});
/**
* Mode namespace.
*/
var C_mode = C.mode = {};
/**
* Abstract base block cipher mode template.
*/
var BlockCipherMode = C_lib.BlockCipherMode = Base.extend({
/**
* Creates this mode for encryption.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @static
*
* @example
*
* var mode = CryptoJS.mode.CBC.createEncryptor(cipher, iv.words);
*/
createEncryptor: function (cipher, iv) {
return this.Encryptor.create(cipher, iv);
},
/**
* Creates this mode for decryption.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @static
*
* @example
*
* var mode = CryptoJS.mode.CBC.createDecryptor(cipher, iv.words);
*/
createDecryptor: function (cipher, iv) {
return this.Decryptor.create(cipher, iv);
},
/**
* Initializes a newly created mode.
*
* @param {Cipher} cipher A block cipher instance.
* @param {Array} iv The IV words.
*
* @example
*
* var mode = CryptoJS.mode.CBC.Encryptor.create(cipher, iv.words);
*/
init: function (cipher, iv) {
this._cipher = cipher;
this._iv = iv;
}
});
/**
* Cipher Block Chaining mode.
*/
var CBC = C_mode.CBC = (function () {
/**
* Abstract base CBC mode.
*/
var CBC = BlockCipherMode.extend();
/**
* CBC encryptor.
*/
CBC.Encryptor = CBC.extend({
/**
* Processes the data block at offset.
*
* @param {Array} words The data words to operate on.
* @param {number} offset The offset where the block starts.
*
* @example
*
* mode.processBlock(data.words, offset);
*/
processBlock: function (words, offset) {
// Shortcuts
var cipher = this._cipher;
var blockSize = cipher.blockSize;
// XOR and encrypt
xorBlock.call(this, words, offset, blockSize);
cipher.encryptBlock(words, offset);
// Remember this block to use with next block
this._prevBlock = words.slice(offset, offset + blockSize);
}
});
/**
* CBC decryptor.
*/
CBC.Decryptor = CBC.extend({
/**
* Processes the data block at offset.
*
* @param {Array} words The data words to operate on.
* @param {number} offset The offset where the block starts.
*
* @example
*
* mode.processBlock(data.words, offset);
*/
processBlock: function (words, offset) {
// Shortcuts
var cipher = this._cipher;
var blockSize = cipher.blockSize;
// Remember this block to use with next block
var thisBlock = words.slice(offset, offset + blockSize);
// Decrypt and XOR
cipher.decryptBlock(words, offset);
xorBlock.call(this, words, offset, blockSize);
// This block becomes the previous block
this._prevBlock = thisBlock;
}
});
function xorBlock(words, offset, blockSize) {
// Shortcut
var iv = this._iv;
// Choose mixing block
if (iv) {
var block = iv;
// Remove IV for subsequent blocks
this._iv = undefined;
} else {
var block = this._prevBlock;
}
// XOR blocks
for (var i = 0; i < blockSize; i++) {
words[offset + i] ^= block[i];
}
}
return CBC;
}());
/**
* Padding namespace.
*/
var C_pad = C.pad = {};
/**
* PKCS #5/7 padding strategy.
*/
var Pkcs7 = C_pad.Pkcs7 = {
/**
* Pads data using the algorithm defined in PKCS #5/7.
*
* @param {WordArray} data The data to pad.
* @param {number} blockSize The multiple that the data should be padded to.
*
* @static
*
* @example
*
* CryptoJS.pad.Pkcs7.pad(wordArray, 4);
*/
pad: function (data, blockSize) {
// Shortcut
var blockSizeBytes = blockSize * 4;
// Count padding bytes
var nPaddingBytes = blockSizeBytes - data.sigBytes % blockSizeBytes;
// Create padding word
var paddingWord = (nPaddingBytes << 24) | (nPaddingBytes << 16) | (nPaddingBytes << 8) | nPaddingBytes;
// Create padding
var paddingWords = [];
for (var i = 0; i < nPaddingBytes; i += 4) {
paddingWords.push(paddingWord);
}
var padding = WordArray.create(paddingWords, nPaddingBytes);
// Add padding
data.concat(padding);
},
/**
* Unpads data that had been padded using the algorithm defined in PKCS #5/7.
*
* @param {WordArray} data The data to unpad.
*
* @static
*
* @example
*
* CryptoJS.pad.Pkcs7.unpad(wordArray);
*/
unpad: function (data) {
// Get number of padding bytes from last byte
var nPaddingBytes = data.words[(data.sigBytes - 1) >>> 2] & 0xff;
// Remove padding
data.sigBytes -= nPaddingBytes;
}
};
/**
* Abstract base block cipher template.
*
* @property {number} blockSize The number of 32-bit words this cipher operates on. Default: 4 (128 bits)
*/
var BlockCipher = C_lib.BlockCipher = Cipher.extend({
/**
* Configuration options.
*
* @property {Mode} mode The block mode to use. Default: CBC
* @property {Padding} padding The padding strategy to use. Default: Pkcs7
*/
cfg: Cipher.cfg.extend({
mode: CBC,
padding: Pkcs7
}),
reset: function () {
// Reset cipher
Cipher.reset.call(this);
// Shortcuts
var cfg = this.cfg;
var iv = cfg.iv;
var mode = cfg.mode;
// Reset block mode
if (this._xformMode == this._ENC_XFORM_MODE) {
var modeCreator = mode.createEncryptor;
} else /* if (this._xformMode == this._DEC_XFORM_MODE) */ {
var modeCreator = mode.createDecryptor;
// Keep at least one block in the buffer for unpadding
this._minBufferSize = 1;
}
this._mode = modeCreator.call(mode, this, iv && iv.words);
},
_doProcessBlock: function (words, offset) {
this._mode.processBlock(words, offset);
},
_doFinalize: function () {
// Shortcut
var padding = this.cfg.padding;
// Finalize
if (this._xformMode == this._ENC_XFORM_MODE) {
// Pad data
padding.pad(this._data, this.blockSize);
// Process final blocks
var finalProcessedBlocks = this._process(!!'flush');
} else /* if (this._xformMode == this._DEC_XFORM_MODE) */ {
// Process final blocks
var finalProcessedBlocks = this._process(!!'flush');
// Unpad data
padding.unpad(finalProcessedBlocks);
}
return finalProcessedBlocks;
},
blockSize: 128/32
});
/**
* A collection of cipher parameters.
*
* @property {WordArray} ciphertext The raw ciphertext.
* @property {WordArray} key The key to this ciphertext.
* @property {WordArray} iv The IV used in the ciphering operation.
* @property {WordArray} salt The salt used with a key derivation function.
* @property {Cipher} algorithm The cipher algorithm.
* @property {Mode} mode The block mode used in the ciphering operation.
* @property {Padding} padding The padding scheme used in the ciphering operation.
* @property {number} blockSize The block size of the cipher.
* @property {Format} formatter The default formatting strategy to convert this cipher params object to a string.
*/
var CipherParams = C_lib.CipherParams = Base.extend({
/**
* Initializes a newly created cipher params object.
*
* @param {Object} cipherParams An object with any of the possible cipher parameters.
*
* @example
*
* var cipherParams = CryptoJS.lib.CipherParams.create({
* ciphertext: ciphertextWordArray,
* key: keyWordArray,
* iv: ivWordArray,
* salt: saltWordArray,
* algorithm: CryptoJS.algo.AES,
* mode: CryptoJS.mode.CBC,
* padding: CryptoJS.pad.PKCS7,
* blockSize: 4,
* formatter: CryptoJS.format.OpenSSL
* });
*/
init: function (cipherParams) {
this.mixIn(cipherParams);
},
/**
* Converts this cipher params object to a string.
*
* @param {Format} formatter (Optional) The formatting strategy to use.
*
* @return {string} The stringified cipher params.
*
* @throws Error If neither the formatter nor the default formatter is set.
*
* @example
*
* var string = cipherParams + '';
* var string = cipherParams.toString();
* var string = cipherParams.toString(CryptoJS.format.OpenSSL);
*/
toString: function (formatter) {
return (formatter || this.formatter).stringify(this);
}
});
/**
* Format namespace.
*/
var C_format = C.format = {};
/**
* OpenSSL formatting strategy.
*/
var OpenSSLFormatter = C_format.OpenSSL = {
/**
* Converts a cipher params object to an OpenSSL-compatible string.
*
* @param {CipherParams} cipherParams The cipher params object.
*
* @return {string} The OpenSSL-compatible string.
*
* @static
*
* @example
*
* var openSSLString = CryptoJS.format.OpenSSL.stringify(cipherParams);
*/
stringify: function (cipherParams) {
// Shortcuts
var ciphertext = cipherParams.ciphertext;
var salt = cipherParams.salt;
// Format
if (salt) {
var wordArray = WordArray.create([0x53616c74, 0x65645f5f]).concat(salt).concat(ciphertext);
} else {
var wordArray = ciphertext;
}
return wordArray.toString(Base64);
},
/**
* Converts an OpenSSL-compatible string to a cipher params object.
*
* @param {string} openSSLStr The OpenSSL-compatible string.
*
* @return {CipherParams} The cipher params object.
*
* @static
*
* @example
*
* var cipherParams = CryptoJS.format.OpenSSL.parse(openSSLString);
*/
parse: function (openSSLStr) {
// Parse base64
var ciphertext = Base64.parse(openSSLStr);
// Shortcut
var ciphertextWords = ciphertext.words;
// Test for salt
if (ciphertextWords[0] == 0x53616c74 && ciphertextWords[1] == 0x65645f5f) {
// Extract salt
var salt = WordArray.create(ciphertextWords.slice(2, 4));
// Remove salt from ciphertext
ciphertextWords.splice(0, 4);
ciphertext.sigBytes -= 16;
}
return CipherParams.create({ ciphertext: ciphertext, salt: salt });
}
};
    /**
     * A cipher wrapper that returns ciphertext as a serializable cipher params object.
     */
    var SerializableCipher = C_lib.SerializableCipher = Base.extend({
        /**
         * Configuration options.
         *
         * @property {Formatter} format The formatting strategy to convert cipher param objects to and from a string. Default: OpenSSL
         */
        cfg: Base.extend({
            format: OpenSSLFormatter
        }),

        /**
         * Encrypts a message.
         *
         * @param {Cipher} cipher The cipher algorithm to use.
         * @param {WordArray|string} message The message to encrypt.
         * @param {WordArray} key The key.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {CipherParams} A cipher params object.
         *
         * @static
         *
         * @example
         *
         *     var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key);
         *     var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key, { iv: iv });
         *     var ciphertextParams = CryptoJS.lib.SerializableCipher.encrypt(CryptoJS.algo.AES, message, key, { iv: iv, format: CryptoJS.format.OpenSSL });
         */
        encrypt: function (cipher, message, key, cfg) {
            // Apply config defaults
            cfg = this.cfg.extend(cfg);

            // Encrypt
            var encryptor = cipher.createEncryptor(key, cfg);
            var ciphertext = encryptor.finalize(message);

            // Shortcut: the encryptor's cfg carries the effective iv/mode/padding
            var cipherCfg = encryptor.cfg;

            // Create and return serializable cipher params
            return CipherParams.create({
                ciphertext: ciphertext,
                key: key,
                iv: cipherCfg.iv,
                algorithm: cipher,
                mode: cipherCfg.mode,
                padding: cipherCfg.padding,
                blockSize: cipher.blockSize,
                formatter: cfg.format
            });
        },

        /**
         * Decrypts serialized ciphertext.
         *
         * @param {Cipher} cipher The cipher algorithm to use.
         * @param {CipherParams|string} ciphertext The ciphertext to decrypt.
         * @param {WordArray} key The key.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {WordArray} The plaintext.
         *
         * @static
         *
         * @example
         *
         *     var plaintext = CryptoJS.lib.SerializableCipher.decrypt(CryptoJS.algo.AES, formattedCiphertext, key, { iv: iv, format: CryptoJS.format.OpenSSL });
         *     var plaintext = CryptoJS.lib.SerializableCipher.decrypt(CryptoJS.algo.AES, ciphertextParams, key, { iv: iv, format: CryptoJS.format.OpenSSL });
         */
        decrypt: function (cipher, ciphertext, key, cfg) {
            // Apply config defaults
            cfg = this.cfg.extend(cfg);

            // Convert string to CipherParams
            ciphertext = this._parse(ciphertext, cfg.format);

            // Decrypt
            var plaintext = cipher.createDecryptor(key, cfg).finalize(ciphertext.ciphertext);

            return plaintext;
        },

        /**
         * Converts serialized ciphertext to CipherParams,
         * else assumed CipherParams already and returns ciphertext unchanged.
         *
         * @param {CipherParams|string} ciphertext The ciphertext.
         * @param {Formatter} format The formatting strategy to use to parse serialized ciphertext.
         *
         * @return {CipherParams} The unserialized ciphertext.
         *
         * @static
         *
         * @example
         *
         *     var ciphertextParams = CryptoJS.lib.SerializableCipher._parse(ciphertextStringOrParams, format);
         */
        _parse: function (ciphertext, format) {
            if (typeof ciphertext == 'string') {
                return format.parse(ciphertext, this);
            } else {
                return ciphertext;
            }
        }
    });

    /**
     * Key derivation function namespace.
     */
    var C_kdf = C.kdf = {};

    /**
     * OpenSSL key derivation function.
     */
    var OpenSSLKdf = C_kdf.OpenSSL = {
        /**
         * Derives a key and IV from a password.
         *
         * @param {string} password The password to derive from.
         * @param {number} keySize The size in words of the key to generate.
         * @param {number} ivSize The size in words of the IV to generate.
         * @param {WordArray|string} salt (Optional) A 64-bit salt to use. If omitted, a salt will be generated randomly.
         *
         * @return {CipherParams} A cipher params object with the key, IV, and salt.
         *
         * @static
         *
         * @example
         *
         *     var derivedParams = CryptoJS.kdf.OpenSSL.execute('Password', 256/32, 128/32);
         *     var derivedParams = CryptoJS.kdf.OpenSSL.execute('Password', 256/32, 128/32, 'saltsalt');
         */
        execute: function (password, keySize, ivSize, salt) {
            // Generate random salt (64 bits = 8 bytes)
            if (!salt) {
                salt = WordArray.random(64/8);
            }

            // Derive key and IV in one pass (key material is keySize + ivSize words)
            var key = EvpKDF.create({ keySize: keySize + ivSize }).compute(password, salt);

            // Separate key and IV: the tail of the derived material becomes the IV
            var iv = WordArray.create(key.words.slice(keySize), ivSize * 4);
            key.sigBytes = keySize * 4;

            // Return params
            return CipherParams.create({ key: key, iv: iv, salt: salt });
        }
    };

    /**
     * A serializable cipher wrapper that derives the key from a password,
     * and returns ciphertext as a serializable cipher params object.
     */
    var PasswordBasedCipher = C_lib.PasswordBasedCipher = SerializableCipher.extend({
        /**
         * Configuration options.
         *
         * @property {KDF} kdf The key derivation function to use to generate a key and IV from a password. Default: OpenSSL
         */
        cfg: SerializableCipher.cfg.extend({
            kdf: OpenSSLKdf
        }),

        /**
         * Encrypts a message using a password.
         *
         * @param {Cipher} cipher The cipher algorithm to use.
         * @param {WordArray|string} message The message to encrypt.
         * @param {string} password The password.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {CipherParams} A cipher params object.
         *
         * @static
         *
         * @example
         *
         *     var ciphertextParams = CryptoJS.lib.PasswordBasedCipher.encrypt(CryptoJS.algo.AES, message, 'password');
         *     var ciphertextParams = CryptoJS.lib.PasswordBasedCipher.encrypt(CryptoJS.algo.AES, message, 'password', { format: CryptoJS.format.OpenSSL });
         */
        encrypt: function (cipher, message, password, cfg) {
            // Apply config defaults
            cfg = this.cfg.extend(cfg);

            // Derive key and other params (a fresh random salt is generated here)
            var derivedParams = cfg.kdf.execute(password, cipher.keySize, cipher.ivSize);

            // Add IV to config
            cfg.iv = derivedParams.iv;

            // Encrypt with the derived key
            var ciphertext = SerializableCipher.encrypt.call(this, cipher, message, derivedParams.key, cfg);

            // Mix in derived params so the salt travels with the ciphertext
            ciphertext.mixIn(derivedParams);

            return ciphertext;
        },

        /**
         * Decrypts serialized ciphertext using a password.
         *
         * @param {Cipher} cipher The cipher algorithm to use.
         * @param {CipherParams|string} ciphertext The ciphertext to decrypt.
         * @param {string} password The password.
         * @param {Object} cfg (Optional) The configuration options to use for this operation.
         *
         * @return {WordArray} The plaintext.
         *
         * @static
         *
         * @example
         *
         *     var plaintext = CryptoJS.lib.PasswordBasedCipher.decrypt(CryptoJS.algo.AES, formattedCiphertext, 'password', { format: CryptoJS.format.OpenSSL });
         *     var plaintext = CryptoJS.lib.PasswordBasedCipher.decrypt(CryptoJS.algo.AES, ciphertextParams, 'password', { format: CryptoJS.format.OpenSSL });
         */
        decrypt: function (cipher, ciphertext, password, cfg) {
            // Apply config defaults
            cfg = this.cfg.extend(cfg);

            // Convert string to CipherParams
            ciphertext = this._parse(ciphertext, cfg.format);

            // Derive key and other params using the salt carried in the ciphertext
            var derivedParams = cfg.kdf.execute(password, cipher.keySize, cipher.ivSize, ciphertext.salt);

            // Add IV to config
            cfg.iv = derivedParams.iv;

            // Decrypt
            var plaintext = SerializableCipher.decrypt.call(this, cipher, ciphertext, derivedParams.key, cfg);

            return plaintext;
        }
    });
}());
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* Service definition for Reseller (v1).
*
* <p>
* Lets you create and manage your customers and their subscriptions.
* </p>
*
* <p>
* For more information about this service, see the API
* <a href="https://developers.google.com/google-apps/reseller/" target="_blank">Documentation</a>
* </p>
*
* @author Google, Inc.
*/
class Google_Service_Reseller extends Google_Service
{
  /** Manage users on your domain. */
  const APPS_ORDER = "https://www.googleapis.com/auth/apps.order";
  /** Manage users on your domain. */
  const APPS_ORDER_READONLY = "https://www.googleapis.com/auth/apps.order.readonly";

  public $customers;
  public $subscriptions;

  /**
   * Constructs the internal representation of the Reseller service.
   *
   * The method/parameter maps below mirror the Reseller v1 REST discovery
   * document; their exact structure is consumed by Google_Service_Resource.
   *
   * @param Google_Client $client
   */
  public function __construct(Google_Client $client)
  {
    parent::__construct($client);
    $this->servicePath = 'apps/reseller/v1/';
    $this->version = 'v1';
    $this->serviceName = 'reseller';

    $this->customers = new Google_Service_Reseller_Customers_Resource(
        $this,
        $this->serviceName,
        'customers',
        array(
          'methods' => array(
            'get' => array(
              'path' => 'customers/{customerId}',
              'httpMethod' => 'GET',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'insert' => array(
              'path' => 'customers',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerAuthToken' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
              ),
            ),'patch' => array(
              'path' => 'customers/{customerId}',
              'httpMethod' => 'PATCH',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'update' => array(
              'path' => 'customers/{customerId}',
              'httpMethod' => 'PUT',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),
          )
        )
    );
    $this->subscriptions = new Google_Service_Reseller_Subscriptions_Resource(
        $this,
        $this->serviceName,
        'subscriptions',
        array(
          'methods' => array(
            'changePlan' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}/changePlan',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'changeRenewalSettings' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}/changeRenewalSettings',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'changeSeats' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}/changeSeats',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'delete' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}',
              'httpMethod' => 'DELETE',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'deletionType' => array(
                  'location' => 'query',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'get' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}',
              'httpMethod' => 'GET',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),'insert' => array(
              'path' => 'customers/{customerId}/subscriptions',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'customerAuthToken' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
              ),
            ),'list' => array(
              'path' => 'subscriptions',
              'httpMethod' => 'GET',
              'parameters' => array(
                'customerAuthToken' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
                'pageToken' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
                'customerId' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
                'maxResults' => array(
                  'location' => 'query',
                  'type' => 'integer',
                ),
                'customerNamePrefix' => array(
                  'location' => 'query',
                  'type' => 'string',
                ),
              ),
            ),'startPaidService' => array(
              'path' => 'customers/{customerId}/subscriptions/{subscriptionId}/startPaidService',
              'httpMethod' => 'POST',
              'parameters' => array(
                'customerId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
                'subscriptionId' => array(
                  'location' => 'path',
                  'type' => 'string',
                  'required' => true,
                ),
              ),
            ),
          )
        )
    );
  }
}
/**
 * The "customers" collection of methods.
 * Typical usage is:
 *  <code>
 *   $resellerService = new Google_Service_Reseller(...);
 *   $customers = $resellerService->customers;
 *  </code>
 */
class Google_Service_Reseller_Customers_Resource extends Google_Service_Resource
{
  /**
   * Gets a customer resource if one exists and is owned by the reseller.
   * (customers.get)
   *
   * @param string $customerId Id of the Customer
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Customer
   */
  public function get($customerId, $optParams = array())
  {
    $request = array_merge(array('customerId' => $customerId), $optParams);
    return $this->call('get', array($request), "Google_Service_Reseller_Customer");
  }
  /**
   * Creates a customer resource if one does not already exist. (customers.insert)
   *
   * @param Google_Customer $postBody
   * @param array $optParams Optional parameters.
   *
   * @opt_param string customerAuthToken
   * An auth token needed for inserting a customer for which domain already exists. Can be generated
   * at https://www.google.com/a/cpanel//TransferToken. Optional.
   * @return Google_Service_Reseller_Customer
   */
  public function insert(Google_Service_Reseller_Customer $postBody, $optParams = array())
  {
    $request = array_merge(array('postBody' => $postBody), $optParams);
    return $this->call('insert', array($request), "Google_Service_Reseller_Customer");
  }
  /**
   * Updates a customer resource if one exists and is owned by the reseller.
   * This method supports patch semantics. (customers.patch)
   *
   * @param string $customerId Id of the Customer
   * @param Google_Customer $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Customer
   */
  public function patch($customerId, Google_Service_Reseller_Customer $postBody, $optParams = array())
  {
    $request = array_merge(
        array('customerId' => $customerId, 'postBody' => $postBody),
        $optParams
    );
    return $this->call('patch', array($request), "Google_Service_Reseller_Customer");
  }
  /**
   * Updates a customer resource if one exists and is owned by the reseller.
   * (customers.update)
   *
   * @param string $customerId Id of the Customer
   * @param Google_Customer $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Customer
   */
  public function update($customerId, Google_Service_Reseller_Customer $postBody, $optParams = array())
  {
    $request = array_merge(
        array('customerId' => $customerId, 'postBody' => $postBody),
        $optParams
    );
    return $this->call('update', array($request), "Google_Service_Reseller_Customer");
  }
}
/**
 * The "subscriptions" collection of methods.
 * Typical usage is:
 * <code>
 *  $resellerService = new Google_Service_Reseller(...);
 *  $subscriptions = $resellerService->subscriptions;
 * </code>
 */
class Google_Service_Reseller_Subscriptions_Resource extends Google_Service_Resource
{
  /**
   * Changes the plan of a subscription (subscriptions.changePlan)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param Google_Service_Reseller_ChangePlanRequest $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Subscription
   */
  public function changePlan($customerId, $subscriptionId, Google_Service_Reseller_ChangePlanRequest $postBody, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('changePlan', array($params), "Google_Service_Reseller_Subscription");
  }

  /**
   * Changes the renewal settings of a subscription
   * (subscriptions.changeRenewalSettings)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param Google_Service_Reseller_RenewalSettings $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Subscription
   */
  public function changeRenewalSettings($customerId, $subscriptionId, Google_Service_Reseller_RenewalSettings $postBody, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('changeRenewalSettings', array($params), "Google_Service_Reseller_Subscription");
  }

  /**
   * Changes the seats configuration of a subscription (subscriptions.changeSeats)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param Google_Service_Reseller_Seats $postBody
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Subscription
   */
  public function changeSeats($customerId, $subscriptionId, Google_Service_Reseller_Seats $postBody, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('changeSeats', array($params), "Google_Service_Reseller_Subscription");
  }

  /**
   * Cancels/Downgrades a subscription. (subscriptions.delete)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param string $deletionType
   * Whether the subscription is to be fully cancelled or downgraded
   * @param array $optParams Optional parameters.
   */
  public function delete($customerId, $subscriptionId, $deletionType, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId, 'deletionType' => $deletionType);
    $params = array_merge($params, $optParams);
    return $this->call('delete', array($params));
  }

  /**
   * Gets a subscription of the customer. (subscriptions.get)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Subscription
   */
  public function get($customerId, $subscriptionId, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId);
    $params = array_merge($params, $optParams);
    return $this->call('get', array($params), "Google_Service_Reseller_Subscription");
  }

  /**
   * Creates/Transfers a subscription for the customer. (subscriptions.insert)
   *
   * @param string $customerId
   * Id of the Customer
   * @param Google_Service_Reseller_Subscription $postBody
   * @param array $optParams Optional parameters.
   *
   * @opt_param string customerAuthToken
   * An auth token needed for transferring a subscription. Can be generated at
   * https://www.google.com/a/cpanel/customer-domain/TransferToken. Optional.
   * @return Google_Service_Reseller_Subscription
   */
  public function insert($customerId, Google_Service_Reseller_Subscription $postBody, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('insert', array($params), "Google_Service_Reseller_Subscription");
  }

  /**
   * Lists subscriptions of a reseller, optionally filtered by a customer name
   * prefix. (subscriptions.listSubscriptions)
   *
   * @param array $optParams Optional parameters.
   *
   * @opt_param string customerAuthToken
   * An auth token needed if the customer is not a resold customer of this reseller. Can be generated
   * at https://www.google.com/a/cpanel/customer-domain/TransferToken. Optional.
   * @opt_param string pageToken
   * Token to specify next page in the list
   * @opt_param string customerId
   * Id of the Customer
   * @opt_param string maxResults
   * Maximum number of results to return
   * @opt_param string customerNamePrefix
   * Prefix of the customer's domain name by which the subscriptions should be filtered. Optional
   * @return Google_Service_Reseller_Subscriptions
   */
  public function listSubscriptions($optParams = array())
  {
    // Named listSubscriptions because "list" is a reserved word in PHP;
    // the wire method invoked is still 'list'.
    $params = array();
    $params = array_merge($params, $optParams);
    return $this->call('list', array($params), "Google_Service_Reseller_Subscriptions");
  }

  /**
   * Starts paid service of a trial subscription (subscriptions.startPaidService)
   *
   * @param string $customerId
   * Id of the Customer
   * @param string $subscriptionId
   * Id of the subscription, which is unique for a customer
   * @param array $optParams Optional parameters.
   * @return Google_Service_Reseller_Subscription
   */
  public function startPaidService($customerId, $subscriptionId, $optParams = array())
  {
    $params = array('customerId' => $customerId, 'subscriptionId' => $subscriptionId);
    $params = array_merge($params, $optParams);
    return $this->call('startPaidService', array($params), "Google_Service_Reseller_Subscription");
  }
}
/**
 * JSON template for a customer's postal address (Reseller API).
 * Plain data holder: one public property per wire field, each with a
 * matching getter/setter pair. Serialization is performed by the
 * Google_Model base class (not visible in this file).
 */
class Google_Service_Reseller_Address extends Google_Model
{
  public $addressLine1;
  public $addressLine2;
  public $addressLine3;
  public $contactName;
  public $countryCode;
  public $kind;
  public $locality;
  public $organizationName;
  public $postalCode;
  public $region;

  public function setAddressLine1($addressLine1)
  {
    $this->addressLine1 = $addressLine1;
  }
  public function getAddressLine1()
  {
    return $this->addressLine1;
  }
  public function setAddressLine2($addressLine2)
  {
    $this->addressLine2 = $addressLine2;
  }
  public function getAddressLine2()
  {
    return $this->addressLine2;
  }
  public function setAddressLine3($addressLine3)
  {
    $this->addressLine3 = $addressLine3;
  }
  public function getAddressLine3()
  {
    return $this->addressLine3;
  }
  public function setContactName($contactName)
  {
    $this->contactName = $contactName;
  }
  public function getContactName()
  {
    return $this->contactName;
  }
  public function setCountryCode($countryCode)
  {
    $this->countryCode = $countryCode;
  }
  public function getCountryCode()
  {
    return $this->countryCode;
  }
  public function setKind($kind)
  {
    $this->kind = $kind;
  }
  public function getKind()
  {
    return $this->kind;
  }
  public function setLocality($locality)
  {
    $this->locality = $locality;
  }
  public function getLocality()
  {
    return $this->locality;
  }
  public function setOrganizationName($organizationName)
  {
    $this->organizationName = $organizationName;
  }
  public function getOrganizationName()
  {
    return $this->organizationName;
  }
  public function setPostalCode($postalCode)
  {
    $this->postalCode = $postalCode;
  }
  public function getPostalCode()
  {
    return $this->postalCode;
  }
  public function setRegion($region)
  {
    $this->region = $region;
  }
  public function getRegion()
  {
    return $this->region;
  }
}
/**
 * JSON template for the subscriptions.changePlan request body.
 */
class Google_Service_Reseller_ChangePlanRequest extends Google_Model
{
  public $kind;
  public $planName;
  public $purchaseOrderId;
  // (De)serialization metadata for the nested `seats` object. The value
  // itself is not declared as a property here; setSeats() assigns
  // $this->seats dynamically — presumably handled by the Google_Model
  // base class. TODO(review): confirm against the base class.
  protected $seatsType = 'Google_Service_Reseller_Seats';
  protected $seatsDataType = '';

  public function setKind($kind)
  {
    $this->kind = $kind;
  }
  public function getKind()
  {
    return $this->kind;
  }
  public function setPlanName($planName)
  {
    $this->planName = $planName;
  }
  public function getPlanName()
  {
    return $this->planName;
  }
  public function setPurchaseOrderId($purchaseOrderId)
  {
    $this->purchaseOrderId = $purchaseOrderId;
  }
  public function getPurchaseOrderId()
  {
    return $this->purchaseOrderId;
  }
  public function setSeats(Google_Service_Reseller_Seats $seats)
  {
    $this->seats = $seats;
  }
  public function getSeats()
  {
    return $this->seats;
  }
}
/**
 * JSON template for a resold customer (Reseller API).
 */
class Google_Service_Reseller_Customer extends Google_Model
{
  public $alternateEmail;
  public $customerDomain;
  public $customerId;
  public $kind;
  public $phoneNumber;
  // (De)serialization metadata for the nested postal address; the value is
  // assigned to the dynamic $this->postalAddress property by setPostalAddress()
  // — presumably materialized by the Google_Model base class. TODO confirm.
  protected $postalAddressType = 'Google_Service_Reseller_Address';
  protected $postalAddressDataType = '';
  public $resourceUiUrl;

  public function setAlternateEmail($alternateEmail)
  {
    $this->alternateEmail = $alternateEmail;
  }
  public function getAlternateEmail()
  {
    return $this->alternateEmail;
  }
  public function setCustomerDomain($customerDomain)
  {
    $this->customerDomain = $customerDomain;
  }
  public function getCustomerDomain()
  {
    return $this->customerDomain;
  }
  public function setCustomerId($customerId)
  {
    $this->customerId = $customerId;
  }
  public function getCustomerId()
  {
    return $this->customerId;
  }
  public function setKind($kind)
  {
    $this->kind = $kind;
  }
  public function getKind()
  {
    return $this->kind;
  }
  public function setPhoneNumber($phoneNumber)
  {
    $this->phoneNumber = $phoneNumber;
  }
  public function getPhoneNumber()
  {
    return $this->phoneNumber;
  }
  public function setPostalAddress(Google_Service_Reseller_Address $postalAddress)
  {
    $this->postalAddress = $postalAddress;
  }
  public function getPostalAddress()
  {
    return $this->postalAddress;
  }
  public function setResourceUiUrl($resourceUiUrl)
  {
    $this->resourceUiUrl = $resourceUiUrl;
  }
  public function getResourceUiUrl()
  {
    return $this->resourceUiUrl;
  }
}
/**
 * JSON template for a subscription's renewal settings.
 * Simple data holder with accessor pairs; (de)serialization is handled by
 * the Google_Model base class.
 */
class Google_Service_Reseller_RenewalSettings extends Google_Model
{
  public $kind;
  public $renewalType;

  /** @return string */
  public function getKind()
  {
    return $this->kind;
  }

  /** @param string $kind */
  public function setKind($kind)
  {
    $this->kind = $kind;
  }

  /** @return string */
  public function getRenewalType()
  {
    return $this->renewalType;
  }

  /** @param string $renewalType */
  public function setRenewalType($renewalType)
  {
    $this->renewalType = $renewalType;
  }
}
/**
 * JSON template for a subscription's seat configuration.
 * Simple data holder with accessor pairs; (de)serialization is handled by
 * the Google_Model base class.
 */
class Google_Service_Reseller_Seats extends Google_Model
{
  public $kind;
  public $maximumNumberOfSeats;
  public $numberOfSeats;

  /** @return string */
  public function getKind()
  {
    return $this->kind;
  }

  /** @param string $kind */
  public function setKind($kind)
  {
    $this->kind = $kind;
  }

  /** @return int|string */
  public function getMaximumNumberOfSeats()
  {
    return $this->maximumNumberOfSeats;
  }

  /** @param int|string $maximumNumberOfSeats */
  public function setMaximumNumberOfSeats($maximumNumberOfSeats)
  {
    $this->maximumNumberOfSeats = $maximumNumberOfSeats;
  }

  /** @return int|string */
  public function getNumberOfSeats()
  {
    return $this->numberOfSeats;
  }

  /** @param int|string $numberOfSeats */
  public function setNumberOfSeats($numberOfSeats)
  {
    $this->numberOfSeats = $numberOfSeats;
  }
}
/**
 * JSON template for a subscription resource (Reseller API).
 *
 * The protected `*Type`/`*DataType` property pairs below are (de)serialization
 * metadata for nested objects (plan, renewalSettings, seats, transferInfo,
 * trialSettings). The nested values themselves are assigned to dynamic
 * properties by the corresponding setters — presumably materialized by the
 * Google_Model base class; confirm against the base class.
 */
class Google_Service_Reseller_Subscription extends Google_Model
{
  public $creationTime;
  public $customerId;
  public $kind;
  protected $planType = 'Google_Service_Reseller_SubscriptionPlan';
  protected $planDataType = '';
  public $purchaseOrderId;
  protected $renewalSettingsType = 'Google_Service_Reseller_RenewalSettings';
  protected $renewalSettingsDataType = '';
  public $resourceUiUrl;
  protected $seatsType = 'Google_Service_Reseller_Seats';
  protected $seatsDataType = '';
  public $skuId;
  public $status;
  public $subscriptionId;
  protected $transferInfoType = 'Google_Service_Reseller_SubscriptionTransferInfo';
  protected $transferInfoDataType = '';
  protected $trialSettingsType = 'Google_Service_Reseller_SubscriptionTrialSettings';
  protected $trialSettingsDataType = '';

  public function setCreationTime($creationTime)
  {
    $this->creationTime = $creationTime;
  }
  public function getCreationTime()
  {
    return $this->creationTime;
  }
  public function setCustomerId($customerId)
  {
    $this->customerId = $customerId;
  }
  public function getCustomerId()
  {
    return $this->customerId;
  }
  public function setKind($kind)
  {
    $this->kind = $kind;
  }
  public function getKind()
  {
    return $this->kind;
  }
  public function setPlan(Google_Service_Reseller_SubscriptionPlan $plan)
  {
    $this->plan = $plan;
  }
  public function getPlan()
  {
    return $this->plan;
  }
  public function setPurchaseOrderId($purchaseOrderId)
  {
    $this->purchaseOrderId = $purchaseOrderId;
  }
  public function getPurchaseOrderId()
  {
    return $this->purchaseOrderId;
  }
  public function setRenewalSettings(Google_Service_Reseller_RenewalSettings $renewalSettings)
  {
    $this->renewalSettings = $renewalSettings;
  }
  public function getRenewalSettings()
  {
    return $this->renewalSettings;
  }
  public function setResourceUiUrl($resourceUiUrl)
  {
    $this->resourceUiUrl = $resourceUiUrl;
  }
  public function getResourceUiUrl()
  {
    return $this->resourceUiUrl;
  }
  public function setSeats(Google_Service_Reseller_Seats $seats)
  {
    $this->seats = $seats;
  }
  public function getSeats()
  {
    return $this->seats;
  }
  public function setSkuId($skuId)
  {
    $this->skuId = $skuId;
  }
  public function getSkuId()
  {
    return $this->skuId;
  }
  public function setStatus($status)
  {
    $this->status = $status;
  }
  public function getStatus()
  {
    return $this->status;
  }
  public function setSubscriptionId($subscriptionId)
  {
    $this->subscriptionId = $subscriptionId;
  }
  public function getSubscriptionId()
  {
    return $this->subscriptionId;
  }
  public function setTransferInfo(Google_Service_Reseller_SubscriptionTransferInfo $transferInfo)
  {
    $this->transferInfo = $transferInfo;
  }
  public function getTransferInfo()
  {
    return $this->transferInfo;
  }
  public function setTrialSettings(Google_Service_Reseller_SubscriptionTrialSettings $trialSettings)
  {
    $this->trialSettings = $trialSettings;
  }
  public function getTrialSettings()
  {
    return $this->trialSettings;
  }
}
/**
 * JSON template for a subscription's plan information.
 */
class Google_Service_Reseller_SubscriptionPlan extends Google_Model
{
  // (De)serialization metadata for the nested commitment interval; the value
  // is assigned to the dynamic $this->commitmentInterval property by its
  // setter — presumably handled by the Google_Model base class.
  protected $commitmentIntervalType = 'Google_Service_Reseller_SubscriptionPlanCommitmentInterval';
  protected $commitmentIntervalDataType = '';
  public $isCommitmentPlan;
  public $planName;

  public function setCommitmentInterval(Google_Service_Reseller_SubscriptionPlanCommitmentInterval $commitmentInterval)
  {
    $this->commitmentInterval = $commitmentInterval;
  }
  public function getCommitmentInterval()
  {
    return $this->commitmentInterval;
  }
  public function setIsCommitmentPlan($isCommitmentPlan)
  {
    $this->isCommitmentPlan = $isCommitmentPlan;
  }
  public function getIsCommitmentPlan()
  {
    return $this->isCommitmentPlan;
  }
  public function setPlanName($planName)
  {
    $this->planName = $planName;
  }
  public function getPlanName()
  {
    return $this->planName;
  }
}
/**
 * JSON template for a commitment plan's interval (start/end timestamps).
 * Simple data holder with accessor pairs; (de)serialization is handled by
 * the Google_Model base class.
 */
class Google_Service_Reseller_SubscriptionPlanCommitmentInterval extends Google_Model
{
  public $endTime;
  public $startTime;

  /** @return string */
  public function getEndTime()
  {
    return $this->endTime;
  }

  /** @param string $endTime */
  public function setEndTime($endTime)
  {
    $this->endTime = $endTime;
  }

  /** @return string */
  public function getStartTime()
  {
    return $this->startTime;
  }

  /** @param string $startTime */
  public function setStartTime($startTime)
  {
    $this->startTime = $startTime;
  }
}
/**
 * JSON template for a subscription's transfer-related information.
 * Simple data holder with accessor pairs; (de)serialization is handled by
 * the Google_Model base class.
 */
class Google_Service_Reseller_SubscriptionTransferInfo extends Google_Model
{
  public $minimumTransferableSeats;
  public $transferabilityExpirationTime;

  /** @return int|string */
  public function getMinimumTransferableSeats()
  {
    return $this->minimumTransferableSeats;
  }

  /** @param int|string $minimumTransferableSeats */
  public function setMinimumTransferableSeats($minimumTransferableSeats)
  {
    $this->minimumTransferableSeats = $minimumTransferableSeats;
  }

  /** @return string */
  public function getTransferabilityExpirationTime()
  {
    return $this->transferabilityExpirationTime;
  }

  /** @param string $transferabilityExpirationTime */
  public function setTransferabilityExpirationTime($transferabilityExpirationTime)
  {
    $this->transferabilityExpirationTime = $transferabilityExpirationTime;
  }
}
/**
 * JSON template for a subscription's trial settings.
 * Simple data holder with accessor pairs; (de)serialization is handled by
 * the Google_Model base class.
 */
class Google_Service_Reseller_SubscriptionTrialSettings extends Google_Model
{
  public $isInTrial;
  public $trialEndTime;

  /** @return bool */
  public function getIsInTrial()
  {
    return $this->isInTrial;
  }

  /** @param bool $isInTrial */
  public function setIsInTrial($isInTrial)
  {
    $this->isInTrial = $isInTrial;
  }

  /** @return string */
  public function getTrialEndTime()
  {
    return $this->trialEndTime;
  }

  /** @param string $trialEndTime */
  public function setTrialEndTime($trialEndTime)
  {
    $this->trialEndTime = $trialEndTime;
  }
}
/**
 * JSON template for a page of subscription resources.
 * Extends Google_Collection: the `subscriptionsType`/`subscriptionsDataType`
 * pair declares that the `subscriptions` field is an array of
 * Google_Service_Reseller_Subscription, materialized by the base class.
 */
class Google_Service_Reseller_Subscriptions extends Google_Collection
{
  public $kind;
  public $nextPageToken;
  protected $subscriptionsType = 'Google_Service_Reseller_Subscription';
  protected $subscriptionsDataType = 'array';

  public function setKind($kind)
  {
    $this->kind = $kind;
  }
  public function getKind()
  {
    return $this->kind;
  }
  public function setNextPageToken($nextPageToken)
  {
    $this->nextPageToken = $nextPageToken;
  }
  // Pass the token returned here as the `pageToken` optional parameter of
  // subscriptions.list to fetch the next page.
  public function getNextPageToken()
  {
    return $this->nextPageToken;
  }
  public function setSubscriptions($subscriptions)
  {
    $this->subscriptions = $subscriptions;
  }
  public function getSubscriptions()
  {
    return $this->subscriptions;
  }
}
| {
"pile_set_name": "Github"
} |
// Appears to be a compiler test exercising singleton (path-dependent) types
// selected through different stable prefixes — TODO confirm against the
// originating test suite. Restructuring would defeat its purpose.
class S { val q = "" }
class B extends S {
  // Singleton type of `q` selected via the superclass prefix (B.super).
  val x1: B.super.q.type = q
  // Singleton type of `q` selected via the this-prefix (B.this).
  val x2: B.this.q.type = q
}
| {
"pile_set_name": "Github"
} |
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: kernel/sched/core: add migrate_disable()
[bristot@redhat.com: rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration
Link: https://lkml.kernel.org/r/e981d271cbeca975bca710e2fbcc6078c09741b0.1498482127.git.bristot@redhat.com
]
[swood@redhat.com: fixups and optimisations
Link: https://lkml.kernel.org/r/20190727055638.20443-1-swood@redhat.com
Link: https://lkml.kernel.org/r/20191012065214.28109-1-swood@redhat.com
]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/preempt.h | 32 +++++++
include/linux/sched.h | 35 ++++++++
include/linux/smp.h | 3
init/init_task.c | 4
kernel/cpu.c | 42 ++++++++++
kernel/locking/rtmutex.c | 12 ++
kernel/locking/rwlock-rt.c | 18 +++-
kernel/rcu/tree_plugin.h | 6 +
kernel/sched/core.c | 181 ++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 4
kernel/sched/sched.h | 4
lib/smp_processor_id.c | 5 +
12 files changed, 336 insertions(+), 10 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -201,6 +201,31 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
+
+#else
+#define migrate_disable() preempt_disable()
+#define migrate_enable() preempt_enable()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
@@ -270,6 +295,13 @@ do { \
#define preempt_check_resched_rt() barrier()
#define preemptible() 0
+#define migrate_disable() barrier()
+#define migrate_enable() barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+ return 0;
+}
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -233,6 +233,8 @@ extern void io_schedule_finish(int token
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+int cpu_nr_pinned(int cpu);
+
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
@@ -705,6 +707,20 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+ int migrate_disable;
+ bool migrate_disable_scheduled;
+# ifdef CONFIG_SCHED_DEBUG
+ int pinned_on_cpu;
+# endif
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable;
+# endif
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ int sleeping_lock;
+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1865,6 +1881,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
+#ifdef CONFIG_PREEMPT_RT
+static inline void sleeping_lock_inc(void)
+{
+ current->sleeping_lock++;
+}
+
+static inline void sleeping_lock_dec(void)
+{
+ current->sleeping_lock--;
+}
+
+#else
+
+static inline void sleeping_lock_inc(void) { }
+static inline void sleeping_lock_dec(void) { }
+#endif
+
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
@@ -2056,4 +2089,6 @@ int sched_trace_rq_cpu(struct rq *rq);
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
+extern struct task_struct *takedown_cpu_task;
+
#endif
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -221,6 +221,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); __smp_processor_id(); })
#define put_cpu() preempt_enable()
+#define get_cpu_light() ({ migrate_disable(); __smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -73,6 +73,10 @@ struct task_struct init_task
.cpus_ptr = &init_task.cpus_mask,
.cpus_mask = CPU_MASK_ALL,
.nr_cpus_allowed= NR_CPUS,
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT) && \
+ defined(CONFIG_SCHED_DEBUG)
+ .pinned_on_cpu = -1,
+#endif
.mm = NULL,
.active_mm = &init_mm,
.restart_block = {
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -848,6 +848,15 @@ static int take_cpu_down(void *_param)
int err, cpu = smp_processor_id();
int ret;
+#ifdef CONFIG_PREEMPT_RT
+ /*
+ * If any tasks disabled migration before we got here,
+ * go back and sleep again.
+ */
+ if (cpu_nr_pinned(cpu))
+ return -EAGAIN;
+#endif
+
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
@@ -877,6 +886,8 @@ static int take_cpu_down(void *_param)
return 0;
}
+struct task_struct *takedown_cpu_task;
+
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -891,11 +902,39 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
+#ifdef CONFIG_PREEMPT_RT
+ WARN_ON_ONCE(takedown_cpu_task);
+ takedown_cpu_task = current;
+
+again:
+ /*
+ * If a task pins this CPU after we pass this check, take_cpu_down
+ * will return -EAGAIN.
+ */
+ for (;;) {
+ int nr_pinned;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ nr_pinned = cpu_nr_pinned(cpu);
+ if (nr_pinned == 0)
+ break;
+ schedule();
+ }
+ set_current_state(TASK_RUNNING);
+#endif
+
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+#ifdef CONFIG_PREEMPT_RT
+ if (err == -EAGAIN)
+ goto again;
+#endif
if (err) {
+#ifdef CONFIG_PREEMPT_RT
+ takedown_cpu_task = NULL;
+#endif
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
@@ -914,6 +953,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+#ifdef CONFIG_PREEMPT_RT
+ takedown_cpu_task = NULL;
+#endif
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1140,6 +1140,7 @@ void __sched rt_spin_lock_slowunlock(str
void __lockfunc rt_spin_lock(spinlock_t *lock)
{
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1154,6 +1155,7 @@ void __lockfunc __rt_spin_lock(struct rt
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1167,6 +1169,7 @@ void __lockfunc rt_spin_unlock(spinlock_
spin_release(&lock->dep_map, 1, _RET_IP_);
rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_spin_unlock);
@@ -1192,12 +1195,15 @@ int __lockfunc rt_spin_trylock(spinlock_
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = __rt_mutex_trylock(&lock->lock);
- if (ret)
+ if (ret) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_spin_trylock);
@@ -1209,6 +1215,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
local_bh_disable();
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
} else
@@ -1224,6 +1231,7 @@ int __lockfunc rt_spin_trylock_irqsave(s
*flags = 0;
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
+ sleeping_lock_inc();
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
@@ -305,12 +305,15 @@ int __lockfunc rt_read_trylock(rwlock_t
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = do_read_rt_trylock(rwlock);
- if (ret)
+ if (ret) {
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_read_trylock);
@@ -319,18 +322,22 @@ int __lockfunc rt_write_trylock(rwlock_t
{
int ret;
+ sleeping_lock_inc();
migrate_disable();
ret = do_write_rt_trylock(rwlock);
- if (ret)
+ if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- else
+ } else {
migrate_enable();
+ sleeping_lock_dec();
+ }
return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
+ sleeping_lock_inc();
migrate_disable();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
do_read_rt_lock(rwlock);
@@ -339,6 +346,7 @@ EXPORT_SYMBOL(rt_read_lock);
void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
+ sleeping_lock_inc();
migrate_disable();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
do_write_rt_lock(rwlock);
@@ -350,6 +358,7 @@ void __lockfunc rt_read_unlock(rwlock_t
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_read_rt_unlock(rwlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_read_unlock);
@@ -358,6 +367,7 @@ void __lockfunc rt_write_unlock(rwlock_t
rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
do_write_rt_unlock(rwlock);
migrate_enable();
+ sleeping_lock_dec();
}
EXPORT_SYMBOL(rt_write_unlock);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -287,10 +287,14 @@ void rcu_note_context_switch(bool preemp
struct task_struct *t = current;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
+ int sleeping_l = 0;
trace_rcu_utilization(TPS("Start context switch"));
lockdep_assert_irqs_disabled();
- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
+#if defined(CONFIG_PREEMPT_RT)
+ sleeping_l = t->sleeping_lock;
+#endif
+ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1488,7 +1488,7 @@ static inline bool is_cpu_allowed(struct
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
- if (is_per_cpu_kthread(p))
+ if (is_per_cpu_kthread(p) || __migrate_disabled(p))
return cpu_online(cpu);
return cpu_active(cpu);
@@ -1612,9 +1612,18 @@ static int migration_cpu_stop(void *data
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
cpumask_copy(&p->cpus_mask, new_mask);
- p->nr_cpus_allowed = cpumask_weight(new_mask);
+ if (p->cpus_ptr == &p->cpus_mask)
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+int __migrate_disabled(struct task_struct *p)
+{
+ return p->migrate_disable;
+}
+EXPORT_SYMBOL_GPL(__migrate_disabled);
+#endif
+
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
@@ -1703,7 +1712,8 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) ||
+ p->cpus_ptr != &p->cpus_mask)
goto out;
if (task_running(rq, p) || p->state == TASK_WAKING) {
@@ -4032,6 +4042,8 @@ pick_next_task(struct rq *rq, struct tas
BUG();
}
+static void migrate_disabled_sched(struct task_struct *p);
+
/*
* __schedule() is the main scheduler function.
*
@@ -4102,6 +4114,9 @@ static void __sched notrace __schedule(b
rq_lock(rq, &rf);
smp_mb__after_spinlock();
+ if (__migrate_disabled(prev))
+ migrate_disabled_sched(prev);
+
/* Promote REQ to ACT */
rq->clock_update_flags <<= 1;
update_rq_clock(rq);
@@ -6344,6 +6359,7 @@ static void migrate_tasks(struct rq *dea
break;
next = __pick_migrate_task(rq);
+ WARN_ON_ONCE(__migrate_disabled(next));
/*
* Rules for changing task_struct::cpus_mask are holding
@@ -8046,3 +8062,162 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+
+static inline void
+update_nr_migratory(struct task_struct *p, long delta)
+{
+ if (unlikely((p->sched_class == &rt_sched_class ||
+ p->sched_class == &dl_sched_class) &&
+ p->nr_cpus_allowed > 1)) {
+ if (p->sched_class == &rt_sched_class)
+ task_rq(p)->rt.rt_nr_migratory += delta;
+ else
+ task_rq(p)->dl.dl_nr_migratory += delta;
+ }
+}
+
+static inline void
+migrate_disable_update_cpus_allowed(struct task_struct *p)
+{
+ p->cpus_ptr = cpumask_of(smp_processor_id());
+ update_nr_migratory(p, -1);
+ p->nr_cpus_allowed = 1;
+}
+
+static inline void
+migrate_enable_update_cpus_allowed(struct task_struct *p)
+{
+ struct rq *rq;
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ p->cpus_ptr = &p->cpus_mask;
+ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+ update_nr_migratory(p, 1);
+ task_rq_unlock(rq, p, &rf);
+}
+
+void migrate_disable(void)
+{
+ preempt_disable();
+
+ if (++current->migrate_disable == 1) {
+ this_rq()->nr_pinned++;
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(current->pinned_on_cpu >= 0);
+ current->pinned_on_cpu = smp_processor_id();
+#endif
+ }
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+static void migrate_disabled_sched(struct task_struct *p)
+{
+ if (p->migrate_disable_scheduled)
+ return;
+
+ migrate_disable_update_cpus_allowed(p);
+ p->migrate_disable_scheduled = 1;
+}
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ struct rq *rq = this_rq();
+ int cpu = task_cpu(p);
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ if (p->migrate_disable > 1) {
+ p->migrate_disable--;
+ return;
+ }
+
+ preempt_disable();
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(current->pinned_on_cpu != cpu);
+ current->pinned_on_cpu = -1;
+#endif
+
+ WARN_ON_ONCE(rq->nr_pinned < 1);
+
+ p->migrate_disable = 0;
+ rq->nr_pinned--;
+ if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) &&
+ takedown_cpu_task)
+ wake_up_process(takedown_cpu_task);
+
+ if (!p->migrate_disable_scheduled)
+ goto out;
+
+ p->migrate_disable_scheduled = 0;
+
+ migrate_enable_update_cpus_allowed(p);
+
+ WARN_ON(smp_processor_id() != cpu);
+ if (!is_cpu_allowed(p, cpu)) {
+ struct migration_arg arg = { p };
+ struct rq_flags rf;
+
+ rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+ arg.dest_cpu = select_fallback_rq(cpu, p);
+ task_rq_unlock(rq, p, &rf);
+
+ preempt_enable();
+
+ sleeping_lock_inc();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ sleeping_lock_dec();
+ return;
+
+ }
+
+out:
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+
+int cpu_nr_pinned(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->nr_pinned;
+}
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+static void migrate_disabled_sched(struct task_struct *p)
+{
+}
+
+void migrate_disable(void)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ current->migrate_disable++;
+#endif
+ barrier();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ struct task_struct *p = current;
+
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+ p->migrate_disable--;
+#endif
+ barrier();
+}
+EXPORT_SYMBOL(migrate_enable);
+
+#else
+static void migrate_disabled_sched(struct task_struct *p)
+{
+}
+
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -958,6 +958,10 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+ P(migrate_disable);
+#endif
+ P(nr_cpus_allowed);
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -999,6 +999,10 @@ struct rq {
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
#endif
+
+#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
+ int nr_pinned;
+#endif
};
#ifdef CONFIG_FAIR_GROUP_SCHED
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,6 +23,11 @@ unsigned int check_preemption_disabled(c
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
+#if defined(CONFIG_PREEMPT_RT) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG))
+ if (current->migrate_disable)
+ goto out;
+#endif
+
if (current->nr_cpus_allowed == 1)
goto out;
| {
"pile_set_name": "Github"
} |
<!--
~ Copyright (c) 2018 Wolfram Research, Inc. All rights reserved.
~ Redistribution or use of this work in any form, with or without modification,
~ requires written permission from the copyright holder.
-->
<h3><a href="http://reference.wolfram.com/mathematica/ref/TranslationTransform.html">TranslationTransform</a></h3>
<ul>
<li>TranslationTransform[<em>v</em>] gives a TransformationFunction that represents translation of points by a
vector <em>v</em>.
</ul><p><b>Attributes:</b> Protected, ReadProtected</p><p><b>Symbol has no options.</b></p> | {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Jun 9 2015 22:53:21).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2014 by Steve Nygard.
//
#import "NSObject-Protocol.h"
// Forward declarations for the types used in the protocol below.
@class AVAsset, AVPlayerView, NSArray, NSSharingService, NSString, NSURL;
// Delegate protocol recovered via class-dump from AVKit. Semantics below are
// inferred from selector names only — confirm against runtime behavior.
// All methods are optional.
@protocol AVSharingServicesDelegate <NSObject>
@optional
// Presumably: whether the given sharing service may share the composition.
- (BOOL)playerView:(AVPlayerView *)arg1 canShareCompositionWithService:(NSSharingService *)arg2;
// Presumably: title to attach to the asset being shared.
- (NSString *)titleOfAssetToShareForPlayerView:(AVPlayerView *)arg1;
// Presumably: file URL of the rendered asset to share.
- (NSURL *)fileURLForRenderedAssetToShareForPlayerView:(AVPlayerView *)arg1;
// Presumably: asset to share; arg2/arg3 look like out-parameters for the
// video composition and audio mix — TODO confirm.
- (AVAsset *)assetToShareForPlayerView:(AVPlayerView *)arg1 videoComposition:(id *)arg2 audioMix:(id *)arg3;
// Presumably: sharing services to propose for the given file URL.
- (NSArray *)proposedServicesForPlayerView:(AVPlayerView *)arg1 fileURLForItemToShare:(NSURL *)arg2;
@end
| {
"pile_set_name": "Github"
} |
package org.aisen.weibo.sina.sinasdk.bean;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
/**
 * Serializable container holding a list of {@code WeiBoUser} entries,
 * as returned by the Sina Weibo SDK.
 */
public class Users implements Serializable {

    private static final long serialVersionUID = 7267744057748550572L;

    // Backing list; never null when built via the no-arg constructor.
    private List<WeiBoUser> users;

    /** Creates an instance backed by an empty, mutable list. */
    public Users() {
        this.users = new ArrayList<WeiBoUser>();
    }

    /** Wraps the given list directly (it is not copied). */
    public Users(List<WeiBoUser> users) {
        this.users = users;
    }

    /** @return the backing user list */
    public List<WeiBoUser> getUsers() {
        return this.users;
    }

    /** Replaces the backing user list. */
    public void setUsers(List<WeiBoUser> users) {
        this.users = users;
    }
}
| {
"pile_set_name": "Github"
} |
// (C) Copyright Tobias Schwinger
//
// Use modification and distribution are subject to the boost Software License,
// Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt).
//------------------------------------------------------------------------------
#ifndef BOOST_FT_DETAIL_CLASSIFIER_HPP_INCLUDED
#define BOOST_FT_DETAIL_CLASSIFIER_HPP_INCLUDED
#include <boost/type.hpp>
#include <boost/config.hpp>
#include <boost/type_traits/is_reference.hpp>
#include <boost/type_traits/add_reference.hpp>
#include <boost/function_types/config/config.hpp>
#include <boost/function_types/property_tags.hpp>
namespace boost { namespace function_types { namespace detail {
// Forward declaration; defined at the bottom of this header.
template<typename T> struct classifier;
// Maps an array size S to a reference-to-char-array type; the size of such
// an array is used below to smuggle an integer out of overload resolution.
template<std::size_t S> struct char_array { typedef char (&type)[S]; };
// Encodes (tag flags, calling-convention id, arity) into a distinct
// char-array reference type via encode_charr_impl (declared elsewhere).
template<bits_t Flags, bits_t CCID, std::size_t Arity> struct encode_charr
{
typedef typename char_array<
::boost::function_types::detail::encode_charr_impl<Flags,CCID,Arity>::value
>::type type;
};
#if defined(BOOST_MSVC) || (defined(__BORLANDC__) && !defined(BOOST_DISABLE_WIN32))
# define BOOST_FT_DECL __cdecl
#else
# define BOOST_FT_DECL /**/
#endif
// Fallback overload: matches anything not matched by the generated
// overloads, yielding sizeof == 1 (i.e. value 0 after the -1 below).
char BOOST_FT_DECL classifier_impl(...);
#define BOOST_FT_variations BOOST_FT_function|BOOST_FT_pointer|\
BOOST_FT_member_pointer
#define BOOST_FT_type_function(cc,name) BOOST_FT_SYNTAX( \
R BOOST_PP_EMPTY,BOOST_PP_LPAREN,cc,* BOOST_PP_EMPTY,name,BOOST_PP_RPAREN)
#define BOOST_FT_type_function_pointer(cc,name) BOOST_FT_SYNTAX( \
R BOOST_PP_EMPTY,BOOST_PP_LPAREN,cc,** BOOST_PP_EMPTY,name,BOOST_PP_RPAREN)
#define BOOST_FT_type_member_function_pointer(cc,name) BOOST_FT_SYNTAX( \
R BOOST_PP_EMPTY,BOOST_PP_LPAREN,cc,T0::** BOOST_PP_EMPTY,name,BOOST_PP_RPAREN)
// Including pp_loop.hpp expands one classifier_impl overload per supported
// function/pointer/member-pointer variation (see BOOST_FT_variations above).
#define BOOST_FT_al_path boost/function_types/detail/classifier_impl
#include <boost/function_types/detail/pp_loop.hpp>
// Classifies T by which classifier_impl overload &tester selects; the
// sizeof of the returned char-array reference encodes the result.
template<typename T> struct classifier_bits
{
static typename boost::add_reference<T>::type tester;
BOOST_STATIC_CONSTANT(bits_t,value = (bits_t)sizeof(
boost::function_types::detail::classifier_impl(& tester)
)-1);
};
// Public facade: decodes the encoded value into tag bits and arity.
template<typename T> struct classifier
{
typedef detail::constant<
::boost::function_types::detail::decode_bits<
::boost::function_types::detail::classifier_bits<T>::value
>::tag_bits >
bits;
typedef detail::full_mask mask;
typedef detail::constant<
::boost::function_types::detail::decode_bits<
::boost::function_types::detail::classifier_bits<T>::value
>::arity >
function_arity;
};
} } } // namespace ::boost::function_types::detail
#endif
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Real AJAX Multiple Upload</title>
<meta name="keywords" content="ajax,upload,ajax upload, html5 upload" />
<meta name="description" content="Ajax uploader" />
<meta name="author" content="AlbanX" />
<script src="jslibs/shCore.js" type="text/javascript"></script>
<script src="jslibs/shBrushJScript.js" type="text/javascript" ></script>
<script src="jslibs/shBrushXml.js" type="text/javascript" ></script>
<!-- SET UP AXUPLOADER -->
<script src="jslibs/jquery.js" type="text/javascript"></script>
<script src="jslibs/ajaxupload.js" type="text/javascript"></script>
<link rel="stylesheet" href="css/classicTheme/style.css" type="text/css" media="all" />
<!-- /SET UP AXUPLOADER -->
<link rel="stylesheet" href="css/body.css" type="text/css" media="all" />
<link rel="stylesheet" href="css/shCore.css" type="text/css" media="all" />
<link rel="stylesheet" href="css/shThemeEclipse.css" type="text/css" media="all" />
<link rel="stylesheet" href="css/shCoreDefault.css" type="text/css"/>
<script type="text/javascript">
SyntaxHighlighter.all({toolbar:false});
</script>
</head>
<body><h1>Real Ajax Multi Uploader</h1>
<h2>Form Integration, double click on file name to edit</h2>
<table class="options">
<thead>
<tr>
<th>Example with form integration and file rename</th>
<th>Setup code</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<p>Upload all files, or submit the form. After the files are uploaded, the form will be submitted automatically. An array
with the uploaded file names will be added to the form. A file can be renamed by double-clicking on its name.</p>
<div id="demo1" style="width:500px"></div>
<form action="test.php" method="post" id="THEFORM">
<fieldset>
<legend>A Normal Form</legend>
<input type="text" value="Form field" name="test_input" />
<input type="text" value="Form field 2" name="test_input2" />
<input type="submit" value="Submit" />
</fieldset>
</form>
<script type="text/javascript">
$('#demo1').ajaxupload({
url:'upload.php',
remotePath:'uploaded/',
editFilename:true,
form:'#THEFORM'
});
</script>
</td>
<td>
<pre class="brush: xml">
<div id="demo1"></div>
<form action="test.php" method="post" id="THEFORM">
<fieldset>
<legend>A Normal Form</legend>
<input type="text" value="Form field" name="test_input" />
<input type="text" value="Form field 2" name="test_input2" />
<input type="submit" value="Submit" />
</fieldset>
</form>
</pre>
<pre class="brush: javascript">
$('#demo1').ajaxupload({
url:'upload.php',
remotePath:'uploaded/',
editFilename:true,
form:'#THEFORM'
});
</pre>
</td>
</tr>
</tbody>
</table>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Shape of an error payload returned by the REST backend.
 * Field meanings are inferred from the names — confirm against the API.
 */
export class RestError {
// Presumably the HTTP status code of the failed request.
status: number;
// Human-readable error message.
message: string;
// Presumably a short error identifier/category.
error: string;
}
| {
"pile_set_name": "Github"
} |
// Boost Lambda Library -- switch.hpp -----------------------------------
//
// Copyright (C) 2000 Gary Powell (powellg@amazon.com)
// Copyright (C) 1999, 2000 Jaakko Jarvi (jaakko.jarvi@cs.utu.fi)
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.lslboost.org/LICENSE_1_0.txt)
//
// For more information, see www.lslboost.org
// --------------------------------------------------------------------------
#if !defined(BOOST_LAMBDA_SWITCH_HPP)
#define BOOST_LAMBDA_SWITCH_HPP
#include "lslboost/lambda/core.hpp"
#include "lslboost/lambda/detail/control_constructs_common.hpp"
#include "lslboost/preprocessor/enum_shifted_params.hpp"
#include "lslboost/preprocessor/repeat_2nd.hpp"
#include "lslboost/preprocessor/tuple.hpp"
namespace lslboost {
namespace lambda {
// Switch actions
// Tag type carrying the number of switch parts (condition + cases) and the
// label type (case_label<N> / default_label) of each case, in order.
template <int N, class Switch1 = null_type, class Switch2 = null_type,
class Switch3 = null_type, class Switch4 = null_type,
class Switch5 = null_type, class Switch6 = null_type,
class Switch7 = null_type, class Switch8 = null_type,
class Switch9 = null_type>
struct switch_action {};
namespace detail {
// templates to represent special lambda functors for the cases in
// switch statements
// Compile-time encoding of "case Value:".
template <int Value> struct case_label {};
// Compile-time encoding of "default:".
struct default_label {};
template<class Type> struct switch_case_tag {};
// a normal case is represented as:
// tagged_lambda_functor<switch_case_tag<case_label<N> > >, LambdaFunctor>
// the default case as:
// tagged_lambda_functor<switch_case_tag<default_label> >, LambdaFunctor>
} // end detail
/// create switch_case_tag tagged_lambda_functors
// case_statement<N>(body): tags the body lambda functor with case label N so
// that switch_statement() can recover the label at compile time.
template <int CaseValue, class Arg>
inline const
tagged_lambda_functor<
detail::switch_case_tag<detail::case_label<CaseValue> >,
lambda_functor<Arg>
>
case_statement(const lambda_functor<Arg>& a) {
return
tagged_lambda_functor<
detail::switch_case_tag<detail::case_label<CaseValue> >,
lambda_functor<Arg>
>(a);
}
// No case body case.
// case_statement<N>(): a case with an empty (do-nothing) body.
template <int CaseValue>
inline const
tagged_lambda_functor<
detail::switch_case_tag<detail::case_label<CaseValue> >,
lambda_functor<
lambda_functor_base<
do_nothing_action,
null_type
>
>
>
case_statement() {
return
tagged_lambda_functor<
detail::switch_case_tag<detail::case_label<CaseValue> >,
lambda_functor<
lambda_functor_base<
do_nothing_action,
null_type
>
>
> () ;
}
// default label
// default_statement(body): like case_statement, but tagged as "default:".
template <class Arg>
inline const
tagged_lambda_functor<
detail::switch_case_tag<detail::default_label>,
lambda_functor<Arg>
>
default_statement(const lambda_functor<Arg>& a) {
return
tagged_lambda_functor<
detail::switch_case_tag<detail::default_label>,
lambda_functor<Arg>
>(a);
}
// default label, no case body case.
// Note: the tagged_lambda_functor return value is implicitly constructed
// from the plain lambda_functor_base returned below.
inline const
tagged_lambda_functor<
detail::switch_case_tag<detail::default_label>,
lambda_functor<
lambda_functor_base<
do_nothing_action,
null_type
>
>
>
default_statement() {
return
lambda_functor_base<
do_nothing_action,
null_type
> () ;
}
// Specializations for lambda_functor_base of case_statement -----------------
// 0 case type:
// useless (just the condition part) but provided for completeness.
template<class Args>
class
lambda_functor_base<
switch_action<1>,
Args
>
{
public:
Args args;
template <class SigArgs> struct sig { typedef void type; };
public:
explicit lambda_functor_base(const Args& a) : args(a) {}
template<class RET, CALL_TEMPLATE_ARGS>
RET call(CALL_FORMAL_ARGS) const {
// Evaluates the stored expression; there are no cases to dispatch to.
// NOTE(review): this reads tuple element 1, yet the generated variants
// store the condition at element 0 — confirm against upstream Boost.
detail::select(::lslboost::tuples::get<1>(args), CALL_ACTUAL_ARGS);
}
};
// 1 case type:
// template<class Args, int Case1>
// class
// lambda_functor_base<
// action<
// 2,
// return_void_action<switch_action<detail::case_label<Case1> > >
// >,
// Args
// >
// {
// Args args;
// public:
// explicit lambda_functor_base(const Args& a) : args(a) {}
// template<class RET, class A, class B, class C>
// RET call(A& a, B& b, C& c) const {
// switch( detail::select(::lslboost::tuples::get<0>(args), a, b, c) )
// {
// case Case1:
// detail::select(::lslboost::tuples::get<1>(args), a, b, c);
// break;
// }
// }
// };
// switch with default being the sole label - doesn't make much sense but
// it is there for completeness
// template<class Args>
// class
// lambda_functor_base<
// action<
// 2,
// return_void_action<switch_action<detail::default_label> >
// >,
// Args
// >
// {
// Args args;
// public:
// explicit lambda_functor_base(const Args& a) : args(a) {}
//
// template<class RET, class A, class B, class C>
// RET call(A& a, B& b, C& c) const {
// switch( detail::select(::lslboost::tuples::get<0>(args), a, b, c) )
// {
// default:
// detail::select(::lslboost::tuples::get<1>(args), a, b, c);
// break;
// }
// }
// };
// // 2 case type:
// The different specializations are generated with Vesa Karvonen's
// preprocessor library.
// This is just a comment to show what the generated classes look like
// template<class Args, int Case1, int Case2>
// class
// lambda_functor_base<
// action<3,
// return_void_action<
// switch_action<
// detail::case_label<Case1>,
// detail::case_label<Case2>
// >
// >
// >,
// Args
// >
// {
// Args args;
// public:
// explicit lambda_functor_base(const Args& a) : args(a) {}
// template<class RET, class A, class B, class C>
// RET call(A& a, B& b, C& c) const {
// switch( detail::select(::lslboost::tuples::get<0>(args), a, b, c) )
// {
// case Case1:
// detail::select(::lslboost::tuples::get<1>(args), a, b, c);
// break;
// case Case2:
// detail::select(::lslboost::tuples::get<2>(args), a, b, c);
// break;
// }
// }
// };
// template<class Args, int Case1>
// class
// lambda_functor_base<
// action<3,
// return_void_action<
// switch_action<
// detail::case_label<Case1>,
// detail::default_label
// >
// >
// >,
// Args
// >
// {
// Args args;
// public:
// explicit lambda_functor_base(const Args& a) : args(a) {}
// template<class RET, class A, class B, class C>
// RET call(A& a, B& b, C& c) const {
// switch( detail::select(::lslboost::tuples::get<0>(args), a, b, c) )
// {
// case Case1:
// detail::select(::lslboost::tuples::get<1>(args), a, b, c);
// break;
// default:
// detail::select(::lslboost::tuples::get<2>(args), a, b, c);
// break;
// }
// }
// };
// -------------------------
// Some helper preprocessor macros ---------------------------------
// BOOST_LAMBDA_A_I_LIST(N, X) is a list of form X0, X1, ..., XN
// BOOST_LAMBDA_A_I_B_LIST(N, X, Y) is a list of form X0 Y, X1 Y, ..., XN Y
// Emits "Ai" with a leading comma for every element but the first.
#define BOOST_LAMBDA_A_I(z, i, A) \
BOOST_PP_COMMA_IF(i) BOOST_PP_CAT(A,i)
// Same, but appends the second tuple element after each "Ai".
#define BOOST_LAMBDA_A_I_B(z, i, T) \
BOOST_PP_COMMA_IF(i) BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM(2,0,T),i) BOOST_PP_TUPLE_ELEM(2,1,T)
#define BOOST_LAMBDA_A_I_LIST(i, A) \
BOOST_PP_REPEAT(i,BOOST_LAMBDA_A_I, A)
#define BOOST_LAMBDA_A_I_B_LIST(i, A, B) \
BOOST_PP_REPEAT(i,BOOST_LAMBDA_A_I_B, (A,B))
// Switch related macros -------------------------------------------
// Expands to one "case CaseN: <run body N+1>; break;" arm; body N+1 because
// tuple element 0 holds the switch condition.
#define BOOST_LAMBDA_SWITCH_CASE_BLOCK(z, N, A) \
case Case##N: \
detail::select(::lslboost::tuples::get<BOOST_PP_INC(N)>(args), CALL_ACTUAL_ARGS); \
break;
#define BOOST_LAMBDA_SWITCH_CASE_BLOCK_LIST(N) \
BOOST_PP_REPEAT(N, BOOST_LAMBDA_SWITCH_CASE_BLOCK, FOO)
// 2 case type:
// Generates the lambda_functor_base specialization for a switch with N
// numbered cases and no default arm.
#define BOOST_LAMBDA_SWITCH_NO_DEFAULT_CASE(N) \
template<class Args, BOOST_LAMBDA_A_I_LIST(N, int Case)> \
class \
lambda_functor_base< \
switch_action<BOOST_PP_INC(N), \
BOOST_LAMBDA_A_I_B_LIST(N, detail::case_label<Case,>) \
>, \
Args \
> \
{ \
public: \
Args args; \
template <class SigArgs> struct sig { typedef void type; }; \
public: \
explicit lambda_functor_base(const Args& a) : args(a) {} \
\
template<class RET, CALL_TEMPLATE_ARGS> \
RET call(CALL_FORMAL_ARGS) const { \
switch( detail::select(::lslboost::tuples::get<0>(args), CALL_ACTUAL_ARGS) ) \
{ \
BOOST_LAMBDA_SWITCH_CASE_BLOCK_LIST(N) \
} \
} \
};
// Generates the specialization for N-1 numbered cases plus a trailing
// "default:" arm (stored as the last tuple element).
#define BOOST_LAMBDA_SWITCH_WITH_DEFAULT_CASE(N) \
template< \
class Args BOOST_PP_COMMA_IF(BOOST_PP_DEC(N)) \
BOOST_LAMBDA_A_I_LIST(BOOST_PP_DEC(N), int Case) \
> \
class \
lambda_functor_base< \
switch_action<BOOST_PP_INC(N), \
BOOST_LAMBDA_A_I_B_LIST(BOOST_PP_DEC(N), \
detail::case_label<Case, >) \
BOOST_PP_COMMA_IF(BOOST_PP_DEC(N)) \
detail::default_label \
>, \
Args \
> \
{ \
public: \
Args args; \
template <class SigArgs> struct sig { typedef void type; }; \
public: \
explicit lambda_functor_base(const Args& a) : args(a) {} \
\
template<class RET, CALL_TEMPLATE_ARGS> \
RET call(CALL_FORMAL_ARGS) const { \
switch( detail::select(::lslboost::tuples::get<0>(args), CALL_ACTUAL_ARGS) ) \
{ \
BOOST_LAMBDA_SWITCH_CASE_BLOCK_LIST(BOOST_PP_DEC(N)) \
default: \
detail::select(::lslboost::tuples::get<N>(args), CALL_ACTUAL_ARGS); \
break; \
} \
} \
};
// switch_statement bind functions -------------------------------------
// The zero argument case, for completeness sake
inline const
lambda_functor<
lambda_functor_base<
do_nothing_action,
null_type
>
>
switch_statement() {
return
lambda_functor_base<
do_nothing_action,
null_type
>
();
}
// 1 argument case, this is useless as well, just the condition part
template <class TestArg>
inline const
lambda_functor<
lambda_functor_base<
switch_action<1>,
tuple<lambda_functor<TestArg> >
>
>
switch_statement(const lambda_functor<TestArg>& a1) {
return
lambda_functor_base<
switch_action<1>,
tuple< lambda_functor<TestArg> >
>
( tuple<lambda_functor<TestArg> >(a1));
}
// Expands to one tagged case parameter "const tagged_lambda_functor<...>& aN"
// in the switch_statement parameter list below.
#define HELPER(z, N, FOO) \
BOOST_PP_COMMA_IF(N) \
BOOST_PP_CAT( \
const tagged_lambda_functor<detail::switch_case_tag<TagData, \
N>) \
BOOST_PP_COMMA() Arg##N>& a##N
#define HELPER_LIST(N) BOOST_PP_REPEAT(N, HELPER, FOO)
// Generates the N-case switch_statement() overload: bundles the condition
// and the tagged case bodies into the matching lambda_functor_base.
#define BOOST_LAMBDA_SWITCH_STATEMENT(N) \
template <class TestArg, \
BOOST_LAMBDA_A_I_LIST(N, class TagData), \
BOOST_LAMBDA_A_I_LIST(N, class Arg)> \
inline const \
lambda_functor< \
lambda_functor_base< \
switch_action<BOOST_PP_INC(N), \
BOOST_LAMBDA_A_I_LIST(N, TagData) \
>, \
tuple<lambda_functor<TestArg>, BOOST_LAMBDA_A_I_LIST(N, Arg)> \
> \
> \
switch_statement( \
const lambda_functor<TestArg>& ta, \
HELPER_LIST(N) \
) \
{ \
return \
lambda_functor_base< \
switch_action<BOOST_PP_INC(N), \
BOOST_LAMBDA_A_I_LIST(N, TagData) \
>, \
tuple<lambda_functor<TestArg>, BOOST_LAMBDA_A_I_LIST(N, Arg)> \
> \
( tuple<lambda_functor<TestArg>, BOOST_LAMBDA_A_I_LIST(N, Arg)> \
(ta, BOOST_LAMBDA_A_I_LIST(N, a) )); \
}
// Here's the actual generation
#define BOOST_LAMBDA_SWITCH(N) \
BOOST_LAMBDA_SWITCH_NO_DEFAULT_CASE(N) \
BOOST_LAMBDA_SWITCH_WITH_DEFAULT_CASE(N)
// Use this to avoid case 0, these macros work only from case 1 upwards
#define BOOST_LAMBDA_SWITCH_HELPER(z, N, A) \
BOOST_LAMBDA_SWITCH( BOOST_PP_INC(N) )
// Use this to avoid cases 0 and 1, these macros work only from case 2 upwards
#define BOOST_LAMBDA_SWITCH_STATEMENT_HELPER(z, N, A) \
BOOST_LAMBDA_SWITCH_STATEMENT(BOOST_PP_INC(N))
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4065)
#endif
// up to 9 cases supported (counting default:)
BOOST_PP_REPEAT_2ND(9,BOOST_LAMBDA_SWITCH_HELPER,FOO)
BOOST_PP_REPEAT_2ND(9,BOOST_LAMBDA_SWITCH_STATEMENT_HELPER,FOO)
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
} // namespace lambda
} // namespace lslboost
// Clean up the file-local helper macros.
#undef HELPER
#undef HELPER_LIST
#undef BOOST_LAMBDA_SWITCH_HELPER
#undef BOOST_LAMBDA_SWITCH
#undef BOOST_LAMBDA_SWITCH_NO_DEFAULT_CASE
#undef BOOST_LAMBDA_SWITCH_WITH_DEFAULT_CASE
#undef BOOST_LAMBDA_SWITCH_CASE_BLOCK
#undef BOOST_LAMBDA_SWITCH_CASE_BLOCK_LIST
#undef BOOST_LAMBDA_SWITCH_STATEMENT
#undef BOOST_LAMBDA_SWITCH_STATEMENT_HELPER
#endif
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Print usage information for this script.
help ()
{
	# Quote "$0" so paths containing spaces don't word-split.
	base=$(basename "$0")
	echo -e "Usage: $base command"
	echo "Available commands:"
	echo -e "\texport <file.conf>: export and lock down the AUTOPR values from the PR service into a file for release."
	echo -e "\timport <file.conf>: import the AUTOPR values from the exported file into the PR service."
}
# Remove bitbake's parse-cache directory (the CACHE variable from the
# active configuration), when one is configured.
clean_cache()
{
	cache_dir=$(bitbake -e | grep ^CACHE= | cut -f2 -d\")
	if [ -n "${cache_dir}" ]; then
		rm -rf ${cache_dir}
	fi
}
# Export the AUTOPR values from the PR service into $1.
# Returns 0 and prints a success message when the dump file was produced,
# 1 otherwise.
do_export ()
{
file=$1
# No target file given: show usage and abort.
[ "x${file}" == "x" ] && help && exit 1
rm -f ${file}
clean_cache
# Parse with the export config so the PR service dumps its database.
bitbake -R conf/prexport.conf -p
# Location bitbake wrote the dump to.
s=`bitbake -R conf/prexport.conf -e | grep ^PRSERV_DUMPFILE= | cut -f2 -d\"`
if [ "x${s}" != "x" ];
then
[ -e $s ] && mv -f $s $file && echo "Exporting to file $file succeeded!"
return 0
fi
echo "Exporting to file $file failed!"
return 1
}
# Import AUTOPR values from the exported file $1 into the PR service.
# Returns bitbake's exit status.
do_import ()
{
file=$1
# No input file given: show usage and abort.
[ "x${file}" == "x" ] && help && exit 1
clean_cache
# Parse with the import config plus the exported values as extra config.
bitbake -R conf/primport.conf -R $file -p
ret=$?
[ $ret -eq 0 ] && echo "Importing from file $file succeeded!" || echo "Importing from file $file failed!"
return $ret
}
# Migrate legacy LOCALCOUNT values to PR-service AUTOINC entries:
# first export LOCALCOUNTs to a dump file, then import that file.
do_migrate_localcount ()
{
# Where the migration config tells bitbake to dump LOCALCOUNT values.
df=`bitbake -R conf/migrate_localcount.conf -e | \
grep ^LOCALCOUNT_DUMPFILE= | cut -f2 -d\"`
if [ "x${df}" == "x" ];
then
echo "LOCALCOUNT_DUMPFILE is not defined!"
return 1
fi
# Start from a clean dump and a clean parse cache.
rm -rf $df
clean_cache
echo "Exporting LOCALCOUNT to AUTOINCs..."
bitbake -R conf/migrate_localcount.conf -p
[ ! $? -eq 0 ] && echo "Exporting to file $df failed!" && exit 1
if [ -e $df ];
then
echo "Exporting to file $df succeeded!"
else
echo "Exporting to file $df failed!"
exit 1
fi
echo "Importing generated AUTOINC entries..."
[ -e $df ] && do_import $df
if [ ! $? -eq 0 ]
then
echo "Migration from LOCALCOUNT to AUTOINCs failed!"
return 1
fi
echo "Migration from LOCALCOUNT to AUTOINCs succeeded!"
return 0
}
# --- command dispatch ------------------------------------------------------
# No arguments at all: show usage.
[ $# -eq 0 ] && help && exit 1
# Validate that the second argument names a .conf/.inc file.
# NOTE(review): this check runs for every command, so "migrate_localcount"
# (which takes no file) can only be invoked with a dummy second argument —
# looks unintended; confirm before changing.
case $2 in
*.conf|*.inc)
;;
*)
echo ERROR: $2 must end with .conf or .inc!
exit 1
;;
esac
# Dispatch to the requested sub-command.
case $1 in
export)
do_export $2
;;
import)
do_import $2
;;
migrate_localcount)
do_migrate_localcount
;;
*)
help
exit 1
;;
esac
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2010 Ryszard Wiśniewski <brut.alll@gmail.com>
* Copyright (C) 2010 Connor Tumbleson <connor.tumbleson@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package brut.util;
import brut.common.BrutException;
import java.io.*;
import java.util.Arrays;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import org.apache.commons.io.IOUtils;
/**
* @author Ryszard Wiśniewski <brut.alll@gmail.com>
*/
public class OS {
    private static final Logger LOGGER = Logger.getLogger("");

    /**
     * Recursively deletes the directory tree rooted at {@code dir}.
     * Does nothing when {@code dir} does not exist.
     */
    public static void rmdir(File dir) throws BrutException {
        if (! dir.exists()) {
            return;
        }
        File[] files = dir.listFiles();
        // listFiles() returns null on I/O error or when dir is not a
        // directory; the original code would NPE in that case.
        if (files != null) {
            for (File file : files) {
                if (file.isDirectory()) {
                    rmdir(file);
                } else {
                    file.delete();
                }
            }
        }
        dir.delete();
    }

    /** Deletes a single file (best-effort; missing files are ignored). */
    public static void rmfile(String file) throws BrutException {
        File del = new File(file);
        del.delete();
    }

    /** Convenience overload of {@link #rmdir(File)} taking a path string. */
    public static void rmdir(String dir) throws BrutException {
        rmdir(new File(dir));
    }

    /**
     * Recursively copies the contents of {@code src} into {@code dest},
     * creating {@code dest} (and parents) as needed.
     *
     * @throws BrutException if any file cannot be copied
     */
    public static void cpdir(File src, File dest) throws BrutException {
        dest.mkdirs();
        File[] files = src.listFiles();
        if (files == null) {
            // src is not a readable directory; nothing to copy.
            return;
        }
        for (File file : files) {
            File destFile = new File(dest.getPath() + File.separatorChar
                + file.getName());
            if (file.isDirectory()) {
                cpdir(file, destFile);
                continue;
            }
            // try-with-resources guarantees both streams are closed even if
            // IOUtils.copy throws; the original leaked them on failure.
            try (InputStream in = new FileInputStream(file);
                 OutputStream out = new FileOutputStream(destFile)) {
                IOUtils.copy(in, out);
            } catch (IOException ex) {
                throw new BrutException("Could not copy file: " + file, ex);
            }
        }
    }

    /** Convenience overload of {@link #cpdir(File, File)} taking paths. */
    public static void cpdir(String src, String dest) throws BrutException {
        cpdir(new File(src), new File(dest));
    }

    /**
     * Runs the given command, forwarding its stdout/stderr to the logger.
     *
     * @throws BrutException if the command cannot be started, is
     *         interrupted, or exits with a non-zero status
     */
    public static void exec(String[] cmd) throws BrutException {
        try {
            ProcessBuilder builder = new ProcessBuilder(cmd);
            Process ps = builder.start();
            new StreamForwarder(ps.getErrorStream(), "ERROR").start();
            new StreamForwarder(ps.getInputStream(), "OUTPUT").start();
            int exitValue = ps.waitFor();
            if (exitValue != 0) {
                throw new BrutException("could not exec (exit code = " + exitValue + "): " + Arrays.toString(cmd));
            }
        } catch (IOException ex) {
            throw new BrutException("could not exec: " + Arrays.toString(cmd), ex);
        } catch (InterruptedException ex) {
            throw new BrutException("could not exec : " + Arrays.toString(cmd), ex);
        }
    }

    /**
     * Runs the given command and returns its combined stdout/stderr output,
     * or {@code null} if the command could not be run or was interrupted.
     */
    public static String execAndReturn(String[] cmd) {
        ExecutorService executor = Executors.newCachedThreadPool();
        try {
            ProcessBuilder builder = new ProcessBuilder(cmd);
            builder.redirectErrorStream(true);
            Process process = builder.start();
            StreamCollector collector = new StreamCollector(process.getInputStream());
            executor.execute(collector);
            process.waitFor();
            // Request shutdown so awaitTermination can observe termination;
            // the original skipped this and always blocked the full 15s.
            executor.shutdown();
            if (! executor.awaitTermination(15, TimeUnit.SECONDS)) {
                executor.shutdownNow();
                if (! executor.awaitTermination(5, TimeUnit.SECONDS)) {
                    System.err.println("Stream collector did not terminate.");
                }
            }
            return collector.get();
        } catch (IOException | InterruptedException e) {
            return null;
        } finally {
            // Prevent a thread leak when an exception skipped the normal
            // shutdown path; harmless if already terminated.
            executor.shutdownNow();
        }
    }

    /**
     * Creates a fresh temporary directory.
     *
     * @throws BrutException if the directory cannot be created
     */
    public static File createTempDirectory() throws BrutException {
        try {
            File tmp = File.createTempFile("BRUT", null);
            tmp.deleteOnExit();
            // createTempFile makes a file; replace it with a directory of
            // the same unique name.
            if (!tmp.delete()) {
                throw new BrutException("Could not delete tmp file: " + tmp.getAbsolutePath());
            }
            if (!tmp.mkdir()) {
                throw new BrutException("Could not create tmp dir: " + tmp.getAbsolutePath());
            }
            return tmp;
        } catch (IOException ex) {
            throw new BrutException("Could not create tmp dir", ex);
        }
    }

    /** Forwards each line of a process stream to the logger as it arrives. */
    static class StreamForwarder extends Thread {

        StreamForwarder(InputStream is, String type) {
            mIn = is;
            mType = type;
        }

        @Override
        public void run() {
            // try-with-resources closes the reader (and stream) on exit.
            try (BufferedReader br = new BufferedReader(new InputStreamReader(mIn))) {
                String line;
                while ((line = br.readLine()) != null) {
                    if (mType.equals("OUTPUT")) {
                        LOGGER.info(line);
                    } else {
                        LOGGER.warning(line);
                    }
                }
            } catch (IOException ex) {
                ex.printStackTrace();
            }
        }

        private final InputStream mIn;
        private final String mType;
    }

    /** Accumulates a process stream into a buffer for later retrieval. */
    static class StreamCollector implements Runnable {
        // StringBuffer (synchronized) because get() may race with run().
        private final StringBuffer buffer = new StringBuffer();
        private final InputStream inputStream;

        public StreamCollector(InputStream inputStream) {
            super();
            this.inputStream = inputStream;
        }

        @Override
        public void run() {
            String line;
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream))) {
                while ((line = reader.readLine()) != null) {
                    buffer.append(line).append('\n');
                }
            } catch (IOException ignored) {}
        }

        /** @return everything collected so far, newline-terminated per line */
        public String get() {
            return buffer.toString();
        }
    }
}
| {
"pile_set_name": "Github"
} |
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
*/
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sun.org.apache.xpath.internal.operations;
import com.sun.org.apache.xml.internal.dtm.DTMManager;
import com.sun.org.apache.xpath.internal.Expression;
import com.sun.org.apache.xpath.internal.XPathContext;
import com.sun.org.apache.xpath.internal.objects.XNodeSet;
import com.sun.org.apache.xpath.internal.objects.XObject;
/**
* This is a "smart" variable reference that is used in situations where
* an absolute path is optimized into a variable reference, but may
* be used in some situations where the document context may have changed.
* For instance, in select="document(doc/@href)//name[//salary > 7250]", the
* root in the predicate will be different for each node in the set. While
* this is easy to detect statically in this case, in other cases static
* detection would be very hard or impossible. So, this class does a dynamic check
* to make sure the document context of the referenced variable is the same as
* the current document context, and, if it is not, execute the referenced variable's
* expression with the current context instead.
*/
public class VariableSafeAbsRef extends Variable
{
static final long serialVersionUID = -9174661990819967452L;
/**
* Dereference the variable, and return the reference value. Note that lazy
* evaluation will occur. If a variable within scope is not found, a warning
* will be sent to the error listener, and an empty nodeset will be returned.
*
*
* @param xctxt The runtime execution context.
*
* @return The evaluated variable, or an empty nodeset if not found.
*
* @throws javax.xml.transform.TransformerException
*/
public XObject execute(XPathContext xctxt, boolean destructiveOK)
throws javax.xml.transform.TransformerException
{
// Evaluate the underlying variable reference first.
XNodeSet xns = (XNodeSet)super.execute(xctxt, destructiveOK);
DTMManager dtmMgr = xctxt.getDTMManager();
int context = xctxt.getContextNode();
// If the cached node-set was built against a different document than the
// current context node's document, re-run the variable's expression
// against the current context instead of reusing the stale result.
if(dtmMgr.getDTM(xns.getRoot()).getDocument() !=
dtmMgr.getDTM(context).getDocument())
{
Expression expr = (Expression)xns.getContainedIter();
xns = (XNodeSet)expr.asIterator(xctxt, context);
}
return xns;
}
}
| {
"pile_set_name": "Github"
} |
// AFHTTPSessionManager.h
// Copyright (c) 2011–2016 Alamofire Software Foundation ( http://alamofire.org/ )
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#import <Foundation/Foundation.h>
#if !TARGET_OS_WATCH
#import <SystemConfiguration/SystemConfiguration.h>
#endif
#import <TargetConditionals.h>
#if TARGET_OS_IOS || TARGET_OS_WATCH || TARGET_OS_TV
#import <MobileCoreServices/MobileCoreServices.h>
#else
#import <CoreServices/CoreServices.h>
#endif
#import "AFURLSessionManager.h"
/**
`AFHTTPSessionManager` is a subclass of `AFURLSessionManager` with convenience methods for making HTTP requests. When a `baseURL` is provided, requests made with the `GET` / `POST` / et al. convenience methods can be made with relative paths.
## Subclassing Notes
Developers targeting iOS 7 or Mac OS X 10.9 or later that deal extensively with a web service are encouraged to subclass `AFHTTPSessionManager`, providing a class method that returns a shared singleton object on which authentication and other configuration can be shared across the application.
For developers targeting iOS 6 or Mac OS X 10.8 or earlier, `AFHTTPRequestOperationManager` may be used to similar effect.
## Methods to Override
To change the behavior of all data task operation construction, which is also used in the `GET` / `POST` / et al. convenience methods, override `dataTaskWithRequest:uploadProgress:downloadProgress:completionHandler:`.
## Serialization
Requests created by an HTTP client will contain default headers and encode parameters according to the `requestSerializer` property, which is an object conforming to `<AFURLRequestSerialization>`.
Responses received from the server are automatically validated and serialized by the `responseSerializers` property, which is an object conforming to `<AFURLResponseSerialization>`
## URL Construction Using Relative Paths
For HTTP convenience methods, the request serializer constructs URLs from the path relative to the `-baseURL`, using `NSURL +URLWithString:relativeToURL:`, when provided. If `baseURL` is `nil`, `path` needs to resolve to a valid `NSURL` object using `NSURL +URLWithString:`.
Below are a few examples of how `baseURL` and relative paths interact:
NSURL *baseURL = [NSURL URLWithString:@"http://example.com/v1/"];
[NSURL URLWithString:@"foo" relativeToURL:baseURL]; // http://example.com/v1/foo
[NSURL URLWithString:@"foo?bar=baz" relativeToURL:baseURL]; // http://example.com/v1/foo?bar=baz
[NSURL URLWithString:@"/foo" relativeToURL:baseURL]; // http://example.com/foo
[NSURL URLWithString:@"foo/" relativeToURL:baseURL]; // http://example.com/v1/foo
[NSURL URLWithString:@"/foo/" relativeToURL:baseURL]; // http://example.com/foo/
[NSURL URLWithString:@"http://example2.com/" relativeToURL:baseURL]; // http://example2.com/
Also important to note is that a trailing slash will be added to any `baseURL` without one. This would otherwise cause unexpected behavior when constructing URLs using paths without a leading slash.
@warning Managers for background sessions must be owned for the duration of their use. This can be accomplished by creating an application-wide or shared singleton instance.
*/
NS_ASSUME_NONNULL_BEGIN
@interface AFHTTPSessionManager : AFURLSessionManager <NSSecureCoding, NSCopying>

/**
 The URL used to construct requests from relative paths in methods like `requestWithMethod:URLString:parameters:`, and the `GET` / `POST` / et al. convenience methods.
 */
@property (readonly, nonatomic, strong, nullable) NSURL *baseURL;

/**
 Requests created with `requestWithMethod:URLString:parameters:` & `multipartFormRequestWithMethod:URLString:parameters:constructingBodyWithBlock:` are constructed with a set of default headers using a parameter serialization specified by this property. By default, this is set to an instance of `AFHTTPRequestSerializer`, which serializes query string parameters for `GET`, `HEAD`, and `DELETE` requests, or otherwise URL-form-encodes HTTP message bodies.

 @warning `requestSerializer` must not be `nil`.
 */
@property (nonatomic, strong) AFHTTPRequestSerializer <AFURLRequestSerialization> * requestSerializer;

/**
 Responses sent from the server in data tasks created with `dataTaskWithRequest:success:failure:` and run using the `GET` / `POST` / et al. convenience methods are automatically validated and serialized by the response serializer. By default, this property is set to an instance of `AFJSONResponseSerializer`.

 @warning `responseSerializer` must not be `nil`.
 */
@property (nonatomic, strong) AFHTTPResponseSerializer <AFURLResponseSerialization> * responseSerializer;

///-------------------------------
/// @name Managing Security Policy
///-------------------------------

/**
 The security policy used by created session to evaluate server trust for secure connections. `AFURLSessionManager` uses the `defaultPolicy` unless otherwise specified. A security policy configured with `AFSSLPinningModePublicKey` or `AFSSLPinningModeCertificate` can only be applied on a session manager initialized with a secure base URL (i.e. https). Applying a security policy with pinning enabled on an insecure session manager throws an `Invalid Security Policy` exception.
 */
@property (nonatomic, strong) AFSecurityPolicy *securityPolicy;

///---------------------
/// @name Initialization
///---------------------

/**
 Creates and returns an `AFHTTPSessionManager` object.
 */
+ (instancetype)manager;

/**
 Initializes an `AFHTTPSessionManager` object with the specified base URL.

 @param url The base URL for the HTTP client.

 @return The newly-initialized HTTP client
 */
- (instancetype)initWithBaseURL:(nullable NSURL *)url;

/**
 Initializes an `AFHTTPSessionManager` object with the specified base URL.

 This is the designated initializer.

 @param url The base URL for the HTTP client.
 @param configuration The configuration used to create the managed session.

 @return The newly-initialized HTTP client
 */
- (instancetype)initWithBaseURL:(nullable NSURL *)url
           sessionConfiguration:(nullable NSURLSessionConfiguration *)configuration NS_DESIGNATED_INITIALIZER;

///---------------------------
/// @name Making HTTP Requests
///---------------------------

/**
 Creates and runs an `NSURLSessionDataTask` with a `GET` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)GET:(NSString *)URLString
                            parameters:(nullable id)parameters
                               success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                               failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure DEPRECATED_ATTRIBUTE;

/**
 Creates and runs an `NSURLSessionDataTask` with a `GET` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param downloadProgress A block object to be executed when the download progress is updated. Note this block is called on the session queue, not the main queue.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:uploadProgress:downloadProgress:completionHandler:
 */
- (nullable NSURLSessionDataTask *)GET:(NSString *)URLString
                            parameters:(nullable id)parameters
                              progress:(nullable void (^)(NSProgress *downloadProgress))downloadProgress
                               success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                               failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a `HEAD` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes a single argument: the data task.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)HEAD:(NSString *)URLString
                             parameters:(nullable id)parameters
                                success:(nullable void (^)(NSURLSessionDataTask *task))success
                                failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a `POST` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)POST:(NSString *)URLString
                             parameters:(nullable id)parameters
                                success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure DEPRECATED_ATTRIBUTE;

/**
 Creates and runs an `NSURLSessionDataTask` with a `POST` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param uploadProgress A block object to be executed when the upload progress is updated. Note this block is called on the session queue, not the main queue.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:uploadProgress:downloadProgress:completionHandler:
 */
- (nullable NSURLSessionDataTask *)POST:(NSString *)URLString
                             parameters:(nullable id)parameters
                               progress:(nullable void (^)(NSProgress *uploadProgress))uploadProgress
                                success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a multipart `POST` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param block A block that takes a single argument and appends data to the HTTP body. The block argument is an object adopting the `AFMultipartFormData` protocol.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)POST:(NSString *)URLString
                             parameters:(nullable id)parameters
              constructingBodyWithBlock:(nullable void (^)(id <AFMultipartFormData> formData))block
                                success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure DEPRECATED_ATTRIBUTE;

/**
 Creates and runs an `NSURLSessionDataTask` with a multipart `POST` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param block A block that takes a single argument and appends data to the HTTP body. The block argument is an object adopting the `AFMultipartFormData` protocol.
 @param uploadProgress A block object to be executed when the upload progress is updated. Note this block is called on the session queue, not the main queue.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:uploadProgress:downloadProgress:completionHandler:
 */
- (nullable NSURLSessionDataTask *)POST:(NSString *)URLString
                             parameters:(nullable id)parameters
              constructingBodyWithBlock:(nullable void (^)(id <AFMultipartFormData> formData))block
                               progress:(nullable void (^)(NSProgress *uploadProgress))uploadProgress
                                success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a `PUT` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)PUT:(NSString *)URLString
                            parameters:(nullable id)parameters
                               success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                               failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a `PATCH` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)PATCH:(NSString *)URLString
                              parameters:(nullable id)parameters
                                 success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                 failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

/**
 Creates and runs an `NSURLSessionDataTask` with a `DELETE` request.

 @param URLString The URL string used to create the request URL.
 @param parameters The parameters to be encoded according to the client request serializer.
 @param success A block object to be executed when the task finishes successfully. This block has no return value and takes two arguments: the data task, and the response object created by the client response serializer.
 @param failure A block object to be executed when the task finishes unsuccessfully, or that finishes successfully, but encountered an error while parsing the response data. This block has no return value and takes two arguments: the data task and the error describing the network or parsing error that occurred.

 @see -dataTaskWithRequest:completionHandler:
 */
- (nullable NSURLSessionDataTask *)DELETE:(NSString *)URLString
                               parameters:(nullable id)parameters
                                  success:(nullable void (^)(NSURLSessionDataTask *task, id _Nullable responseObject))success
                                  failure:(nullable void (^)(NSURLSessionDataTask * _Nullable task, NSError *error))failure;

@end
NS_ASSUME_NONNULL_END
| {
"pile_set_name": "Github"
} |
/* Scicos
*
* Copyright (C) INRIA - METALAU Project <scicos@inria.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* See the file ./license.txt
*/
/*--------------------------------------------------------------------------*/
#include "dynlib_scicos_blocks.h"
/*
 * Scicos summation block: writes the elementwise sum of all input ports
 * to the single output port.  All input ports and the output port are
 * assumed to have the same size (insz[0] == .. == insz[*nin-1] == outsz[0]).
 * Only the output buffer is modified; the simulator-supplied state and
 * parameter arrays are left untouched.
 */
SCICOS_BLOCKS_IMPEXP void plusblk(int *flag, int *nevprt, double *t, double xd[], double x[],
                                  int *nx, double z[], int *nz, double tvec[], int *ntvec, double rpar[], int *nrpar,
                                  int ipar[], int *nipar, double *inptr[], int insz[], int *nin, double *outptr[],
                                  int outsz[], int *nout)
{
    int elem = 0;
    int size = outsz[0];
    double *out = (double *)outptr[0];

    for (elem = 0; elem < size; elem++)
    {
        int port = 0;

        /* Accumulate directly into the output element, starting from zero. */
        out[elem] = 0.0;
        for (port = 0; port < *nin; port++)
        {
            double *in = (double *)inptr[port];
            out[elem] += in[elem];
        }
    }
}
/*--------------------------------------------------------------------------*/
| {
"pile_set_name": "Github"
} |
/*
* NASA Docket No. GSC-18,370-1, and identified as "Operating System Abstraction Layer"
*
* Copyright (c) 2019 United States Government as represented by
* the Administrator of the National Aeronautics and Space Administration.
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* \file os-impl-posix-dl-symtab.c
* \author joseph.p.hickey@nasa.gov
*
* This file contains a module loader implementation for systems
* that implement a POSIX-style dynamic module loader. This includes
* RTEMS even if built without its native POSIX API.
*/
/****************************************************************************************
INCLUDE FILES
***************************************************************************************/
/*
* Inclusions Defined by OSAL layer.
*
* typically this must include dlfcn.h and whatever else is required
* to get the prototypes of these functions:
*
* dlsym()
* dlerror()
*
* In POSIX these functions are grouped with the loader (dl) library
*/
#include <string.h>
#include <stdlib.h>
#include "os-impl-loader.h"
#include "os-shared-module.h"
/****************************************************************************************
DEFINES
***************************************************************************************/
/*
* Determine what to pass in for the first parameter of dlsym()
*
* If the "os-impl-loader.h" header already defined this, then use that.
*
* Otherwise, check if the C library provides an "RTLD_DEFAULT" symbol -
* This symbol is not POSIX standard but many implementations do provide it.
*
* Lastly, if nothing else works, use NULL. This is technically undefined
* behavior per POSIX, but most implementations do seem to interpret this
* as referring to the complete process (base executable + all loaded modules).
*/
#ifndef OSAL_DLSYM_DEFAULT_HANDLE
#ifdef RTLD_DEFAULT
#define OSAL_DLSYM_DEFAULT_HANDLE RTLD_DEFAULT
#else
#define OSAL_DLSYM_DEFAULT_HANDLE NULL
#endif
#endif
/****************************************************************************************
Symbol table API
***************************************************************************************/
/*----------------------------------------------------------------
 *
 * Function: OS_SymbolLookup_Impl
 *
 * Purpose: Implemented per internal OSAL API
 *          Resolves SymbolName via dlsym() against the default handle
 *          and stores its address in *SymbolAddress on success.
 *
 *-----------------------------------------------------------------*/
int32 OS_SymbolLookup_Impl( cpuaddr *SymbolAddress, const char *SymbolName )
{
    int32       return_code = OS_ERROR;
    const char *error_text; /* result of dlerror() after the lookup */
    void       *symbol_addr;

    /* Clear any error state left over from a previous dl call. */
    dlerror();

    symbol_addr = dlsym(OSAL_DLSYM_DEFAULT_HANDLE, SymbolName);
    error_text  = dlerror();

    /*
     * Per POSIX, dlerror() returning non-NULL after dlsym() indicates the
     * symbol does not exist; this distinguishes valid symbols whose address
     * happens to be 0/NULL from missing symbols.  Not every C library
     * implements that detail, and since this API is used to look up
     * functions (which are never NULL when valid), a NULL address is
     * treated as an error regardless of what dlerror() reported.
     */
    if (error_text == NULL && symbol_addr != NULL)
    {
        *SymbolAddress = (cpuaddr)symbol_addr;
        return_code    = OS_SUCCESS;
    }

    return return_code;
} /* end OS_SymbolLookup_Impl */
/*----------------------------------------------------------------
 *
 * Function: OS_SymbolTableDump_Impl
 *
 * Purpose: Implemented per internal OSAL API
 *          See prototype for argument/return detail
 *
 * Note: POSIX DL does not provide symbol-table iteration.
 *
 *-----------------------------------------------------------------*/
int32 OS_SymbolTableDump_Impl ( const char *filename, uint32 SizeLimit )
{
    /*
     * The strict POSIX dl API offers no way to iterate over the symbol
     * table, so there is nothing portable to dump here.  Some C libraries
     * provide an extension for this; if the feature ever becomes a
     * requirement on those platforms, an implementation could be added
     * there.  Until then this simply reports that the capability is not
     * implemented.
     */
    return OS_ERR_NOT_IMPLEMENTED;
} /* end OS_SymbolTableDump_Impl */
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "group:SenderSynth.xcodeproj">
</FileRef>
<FileRef
location = "group:Pods/Pods.xcodeproj">
</FileRef>
</Workspace>
| {
"pile_set_name": "Github"
} |
<table>
<tr>
<td>
blabla
</td>
</tr>
</table>
| {
"pile_set_name": "Github"
} |
include/master-slave.inc
[connection master]
drop database if exists mysqltest1;
drop database if exists mysqltest2;
create database mysqltest1;
create database mysqltest2;
use mysqltest1;
create table t1 (a int);
insert into t1 values(1);
connection slave;
select * from mysqltest1.t1;
ERROR 42S02: Table 'mysqltest1.t1' doesn't exist
connection master;
use mysqltest2;
create table t1 (a int);
insert into t1 values(1);
connection slave;
select * from mysqltest2.t1;
a
1
connection master;
drop database mysqltest1;
drop database mysqltest2;
connection slave;
include/rpl_end.inc
| {
"pile_set_name": "Github"
} |
// Grunt task configuration (presumably for a directory-creation task such as
// grunt-mkdir — TODO confirm against the Gruntfile that loads this target).
// The `init` target creates the source and demo directories whose paths come
// from the `config` section of package.json via Grunt template expansion.
module.exports = {
  init: {
    options: {
      create: [ "<%= pkg.config.src %>", "<%= pkg.config.demo %>" ]
    }
  }
};
| {
"pile_set_name": "Github"
} |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import click
from indico.cli.core import cli_group
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.attachments import Attachment, AttachmentFolder
from indico.modules.attachments.models.principals import AttachmentFolderPrincipal, AttachmentPrincipal
from indico.modules.events.contributions import Contribution
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.models.roles import EventRole
from indico.modules.events.sessions import Session
from indico.modules.events.sessions.models.principals import SessionPrincipal
# Suppress click's warning about `unicode_literals` being in effect
# (this module imports it above for Python 2 compatibility).
click.disable_unicode_literals_warning = True
@cli_group()
def cli():
    # Empty click group acting as the container for the maintenance
    # subcommands registered below (no docstring on purpose: click would
    # surface it as CLI help text).
    pass
def _fix_role_principals(principals, get_event):
    """Re-point broken ACL entries at a role belonging to the correct event.

    For each principal, look for a role in the proper event with the same
    code; reuse it if found, otherwise clone the misattached role into the
    proper event.
    """
    copied_attrs = get_simple_column_attrs(EventRole) | {'members'}
    for principal in principals:
        click.echo('Fixing {}'.format(principal))
        correct_event = get_event(principal)
        candidates = [role for role in correct_event.roles
                      if role.code == principal.event_role.code]
        if candidates:
            replacement = candidates[0]
            click.echo(' using existing role {}'.format(replacement))
        else:
            # No matching role in the correct event yet - clone the old one.
            replacement = EventRole(event=correct_event)
            replacement.populate_from_attrs(principal.event_role, copied_attrs)
        principal.event_role = replacement
    db.session.flush()
@cli.command()
def fix_event_role_acls():
    """Fixes ACLs referencing event roles from other events.

    This happened due to a bug prior to 2.2.3 when cloning an event
    which had event roles in its ACL.
    """
    fixed_something = False
    # Event ACL entries whose event role belongs to a different event.
    broken = (EventPrincipal.query
              .join(EventRole, EventRole.id == EventPrincipal.event_role_id)
              .filter(EventPrincipal.type == PrincipalType.event_role, EventPrincipal.event_id != EventRole.event_id)
              .all())
    _fix_role_principals(broken, lambda p: p.event)
    fixed_something = fixed_something or bool(broken)
    # Session ACL entries whose role is from another event (event resolved
    # through the session's parent event).
    broken = (SessionPrincipal.query
              .join(Session, Session.id == SessionPrincipal.session_id)
              .join(EventRole, EventRole.id == SessionPrincipal.event_role_id)
              .filter(SessionPrincipal.type == PrincipalType.event_role, Session.event_id != EventRole.event_id)
              .all())
    _fix_role_principals(broken, lambda p: p.session.event)
    fixed_something = fixed_something or bool(broken)
    # Contribution ACL entries whose role is from another event.
    broken = (ContributionPrincipal.query
              .join(Contribution, Contribution.id == ContributionPrincipal.contribution_id)
              .join(EventRole, EventRole.id == ContributionPrincipal.event_role_id)
              .filter(ContributionPrincipal.type == PrincipalType.event_role,
                      Contribution.event_id != EventRole.event_id)
              .all())
    _fix_role_principals(broken, lambda p: p.contribution.event)
    fixed_something = fixed_something or bool(broken)
    # Attachment-folder ACL entries whose role is from another event.
    broken = (AttachmentFolderPrincipal.query
              .join(AttachmentFolder, AttachmentFolder.id == AttachmentFolderPrincipal.folder_id)
              .join(EventRole, EventRole.id == AttachmentFolderPrincipal.event_role_id)
              .filter(AttachmentFolderPrincipal.type == PrincipalType.event_role,
                      AttachmentFolder.event_id != EventRole.event_id)
              .all())
    _fix_role_principals(broken, lambda p: p.folder.event)
    fixed_something = fixed_something or bool(broken)
    # Attachment ACL entries (event resolved through attachment -> folder).
    broken = (AttachmentPrincipal.query
              .join(Attachment, Attachment.id == AttachmentPrincipal.attachment_id)
              .join(AttachmentFolder, AttachmentFolder.id == Attachment.folder_id)
              .join(EventRole, EventRole.id == AttachmentPrincipal.event_role_id)
              .filter(AttachmentPrincipal.type == PrincipalType.event_role,
                      AttachmentFolder.event_id != EventRole.event_id)
              .all())
    _fix_role_principals(broken, lambda p: p.attachment.folder.event)
    fixed_something = fixed_something or bool(broken)
    if not fixed_something:
        click.secho('Nothing to fix :)', fg='green')
        return
    # Nothing is persisted until the operator explicitly confirms.
    click.confirm(click.style('Do you want to commit the fixes shown above?', fg='white', bold=True),
                  default=True, abort=True)
    db.session.commit()
    click.secho('Success!', fg='green')
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2016 United States Government as represented by the Administrator of the
* National Aeronautics and Space Administration. All Rights Reserved.
*/
package gov.nasa.worldwindx;
import android.content.Intent;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.view.MenuItem;
/**
 * Activity that hosts the {@link CodeFragment} source-code viewer and offers
 * an action-bar Up button returning to {@link MainActivity}.
 */
public class CodeActivity extends AppCompatActivity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_code);

        // Enable the Up affordance when an action bar is available.
        ActionBar bar = getSupportActionBar();
        if (bar != null) {
            bar.setDisplayHomeAsUpEnabled(true);
        }

        // Only attach the fragment on first creation; on a configuration
        // change the framework restores the previously added instance.
        if (savedInstanceState == null) {
            Bundle args = getIntent().getBundleExtra("arguments");
            Fragment viewer = new CodeFragment();
            viewer.setArguments(args);
            getSupportFragmentManager()
                .beginTransaction()
                .replace(R.id.code_container, viewer)
                .commit();
        }
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        if (item.getItemId() == android.R.id.home) {
            // Up button pressed: navigate back to the globe view.
            navigateUpTo(new Intent(this, MainActivity.class));
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<!-- IE Standards Mode -->
<meta content="IE=edge" http-equiv="X-UA-Compatible"></meta>
<!-- Favicon -->
<link href="../images/vl_blue.ico" type="image/x-icon" rel="icon"></link>
<link href="../images/vl_blue.ico" type="image/x-icon" rel="shortcut icon"></link>
<!-- Page title -->
<title>VLFeat - Documentation > C API</title>
<!-- Stylesheets -->
<link href="../vlfeat.css" type="text/css" rel="stylesheet"></link>
<link href="../pygmentize.css" type="text/css" rel="stylesheet"></link>
<style xml:space="preserve">
/* fixes a conflict between Pygmentize and MathJax */
.MathJax .mo, .MathJax .mi {color: inherit ! important}
</style>
<link rel="stylesheet" type="text/css" href="doxygen.css"></link>
<!-- Scripts-->
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<!-- MathJax -->
<script xml:space="preserve" type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ['$','$'], ['\\(','\\)'] ],
processEscapes: true,
},
TeX: {
Macros: {
balpha: '\\boldsymbol{\\alpha}',
bc: '\\mathbf{c}',
be: '\\mathbf{e}',
bg: '\\mathbf{g}',
bq: '\\mathbf{q}',
bu: '\\mathbf{u}',
bv: '\\mathbf{v}',
bw: '\\mathbf{w}',
bx: '\\mathbf{x}',
by: '\\mathbf{y}',
bz: '\\mathbf{z}',
bsigma: '\\mathbf{\\sigma}',
sign: '\\operatorname{sign}',
diag: '\\operatorname{diag}',
real: '\\mathbb{R}',
},
equationNumbers: { autoNumber: 'AMS' }
}
});
</script>
<script src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML" xml:space="preserve" type="text/javascript"></script>
<!-- Google Custom Search -->
<script xml:space="preserve">
(function() {
  var cx = '003215582122030917471:oq23albfeam';
  // Inject the Google Custom Search Engine loader script asynchronously.
  var gcse = document.createElement('script'); gcse.type = 'text/javascript'; gcse.async = true;
  // FIX: location.protocol includes the trailing colon (e.g. "https:"),
  // so the original comparison against 'https' was always false and the
  // CSE script was requested over plain http even on secure pages
  // (mixed-content). Compare against 'https:' as the GA snippet below does.
  gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') +
      '//www.google.com/cse/cse.js?cx=' + cx;
  var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(gcse, s);
})();
</script>
<!-- Google Analytics -->
<script xml:space="preserve" type="text/javascript">
// Classic asynchronous Google Analytics (ga.js) bootstrap: queue the
// account/pageview commands, then inject the tracker script.
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-4936091-2']);
_gaq.push(['_trackPageview']);
(function() {
  var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
  // Note: location.protocol ends in ':', hence the 'https:' comparison.
  ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
  var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<!-- Body Start -->
<body>
<div id="header-section">
<div id="header">
<!-- Google CSE Search Box -->
<div class="searchbox">
<gcse:searchbox-only autoCompleteMaxCompletions="5" autoCompleteMatchType="any" resultsUrl="http://www.vlfeat.org/search.html"></gcse:searchbox-only>
</div>
<h1 id="id-16"><a shape="rect" href="../index.html" class="plain"><span id="vlfeat">VLFeat</span><span id="dotorg">.org</span></a></h1>
</div>
<div id="sidebar"> <!-- Navigation Start -->
<ul>
<li><a href="../index.html">Home</a>
<ul>
<li><a href="../about.html">About</a>
</li>
<li><a href="../license.html">License</a>
</li>
</ul></li>
<li><a href="../download.html">Download</a>
<ul>
<li><a href="../install-matlab.html">Using from MATLAB</a>
</li>
<li><a href="../install-octave.html">Using from Octave</a>
</li>
<li><a href="../install-shell.html">Using from the command line</a>
</li>
<li><a href="../install-c.html">Using from C</a>
<ul>
<li><a href="../xcode.html">Xcode</a>
</li>
<li><a href="../vsexpress.html">Visual C++</a>
</li>
<li><a href="../gcc.html">g++</a>
</li>
</ul></li>
<li><a href="../compiling.html">Compiling</a>
<ul>
<li><a href="../compiling-unix.html">Compiling on UNIX-like platforms</a>
</li>
<li><a href="../compiling-windows.html">Compiling on Windows</a>
</li>
</ul></li>
</ul></li>
<li><a href="../overview/tut.html">Tutorials</a>
<ul>
<li><a href="../overview/frame.html">Local feature frames</a>
</li>
<li><a href="../overview/covdet.html">Covariant feature detectors</a>
</li>
<li><a href="../overview/hog.html">HOG features</a>
</li>
<li><a href="../overview/sift.html">SIFT detector and descriptor</a>
</li>
<li><a href="../overview/dsift.html">Dense SIFT</a>
</li>
<li><a href="../overview/liop.html">LIOP local descriptor</a>
</li>
<li><a href="../overview/mser.html">MSER feature detector</a>
</li>
<li><a href="../overview/imdisttf.html">Distance transform</a>
</li>
<li><a href="../overview/encodings.html">Fisher Vector and VLAD</a>
</li>
<li><a href="../overview/gmm.html">Gaussian Mixture Models</a>
</li>
<li><a href="../overview/kmeans.html">K-means clustering</a>
</li>
<li><a href="../overview/aib.html">Agglomerative Information Bottleneck</a>
</li>
<li><a href="../overview/quickshift.html">Quick shift superpixels</a>
</li>
<li><a href="../overview/slic.html">SLIC superpixels</a>
</li>
<li><a href="../overview/svm.html#tut.svm">Support Vector Machines (SVMs)</a>
</li>
<li><a href="../overview/kdtree.html">KD-trees and forests</a>
</li>
<li><a href="../overview/plots-rank.html">Plotting AP and ROC curves</a>
</li>
<li><a href="../overview/utils.html">Miscellaneous utilities</a>
</li>
<li><a href="../overview/ikm.html">Integer K-means</a>
</li>
<li><a href="../overview/hikm.html">Hierarchical integer k-means</a>
</li>
</ul></li>
<li><a href="../applications/apps.html">Applications</a>
</li>
<li class='active'><a href="../doc.html">Documentation</a>
<ul>
<li><a href="../matlab/matlab.html">MATLAB API</a>
</li>
<li class='active activeLeaf'><a href="index.html">C API</a>
</li>
<li><a href="../man/man.html">Man pages</a>
</li>
</ul></li>
</ul>
</div> <!-- sidebar -->
</div>
<div id="headbanner-section">
<div id="headbanner">
<span class='page'><a href="../doc.html">Documentation</a></span><span class='separator'>></span><span class='page'><a href="index.html">C API</a></span>
</div>
</div>
<div id="content-section">
<div id="content-wrapper">
<div id="content">
<!-- <pagestyle href="%pathto:root;api/tabs.css"/> -->
<div class="doxygen">
<div id="top">
<div id="top">
<!-- Generated by Doxygen 1.8.7 -->
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="pages.html"><span>Related Pages</span></a></li>
<li><a href="annotated.html"><span>Data Structures</span></a></li>
<li class="current"><a href="files.html"><span>Files</span></a></li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="files.html"><span>File List</span></a></li>
<li><a href="globals.html"><span>Globals</span></a></li>
</ul>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="dir_8dd540b382164c224dfa1eb6e8a3add6.html">vl</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="headertitle">
<div class="title">mathop_sse2.c File Reference</div> </div>
</div><!--header-->
<div class="contents">
<p>mathop for SSE2 - Definition
<a href="#details">More...</a></p>
<div class="textblock"><code>#include "<a class="el" href="mathop__sse2_8h.html">mathop_sse2.h</a>"</code><br />
<code>#include "<a class="el" href="mathop__sse2_8c.html">mathop_sse2.c</a>"</code><br />
<code>#include <emmintrin.h></code><br />
<code>#include "<a class="el" href="mathop_8h.html">mathop.h</a>"</code><br />
<code>#include "<a class="el" href="generic_8h.html">generic.h</a>"</code><br />
<code>#include "float.th"</code><br />
</div><a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><dl class="section author"><dt>Author</dt><dd>Andrea Vedaldi, David Novotny </dd></dl>
</div></div><!-- contents -->
<!-- Doc Here -->
</div>
</div>
<div class="clear"> </div>
</div>
</div> <!-- content-section -->
<div id="footer-section">
<div id="footer">
© 2007-13 The authors of VLFeat
</div> <!-- footer -->
</div> <!-- footer section -->
</body>
<!-- Body ends -->
</html>
| {
"pile_set_name": "Github"
} |
/***************************************************************************
File : FindDialog.cpp
Project : QtiPlot
--------------------------------------------------------------------
Copyright : (C) 2006 by Ion Vasilief, Tilman Hoener zu
Siederdissen
Email (use @ for *) : ion_vasilief*yahoo.fr, thzs*gmx.net
Description : Find dialog
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the Free Software *
* Foundation, Inc., 51 Franklin Street, Fifth Floor, *
* Boston, MA 02110-1301 USA *
* *
***************************************************************************/
#include "FindDialog.h"
#include "ApplicationWindow.h"
#include "Folder.h"
#include <QCheckBox>
#include <QComboBox>
#include <QFrame>
#include <QGridLayout>
#include <QGroupBox>
#include <QLabel>
#include <QPushButton>
#include <QRegExp>
#include <QVBoxLayout>
/**
 * Construct the Find dialog: a start-path display, an editable search-text
 * combo box with history, "Search in" target checkboxes, matching options,
 * and the Find / Update Start Path / Close buttons.
 */
FindDialog::FindDialog(QWidget *parent, const Qt::WFlags &fl)
    : QDialog(parent, fl) {
  setWindowTitle(tr("MantidPlot") + " - " + tr("Find"));
  setSizeGripEnabled(true);

  // Both the start-path label and the search combo stretch horizontally only.
  QSizePolicy expandFixed(QSizePolicy::Expanding, QSizePolicy::Fixed);

  // Top grid: start path on the first row, search text on the second.
  QGridLayout *gridTop = new QGridLayout();
  gridTop->addWidget(new QLabel(tr("Start From")), 0, 0);
  labelStart = new QLabel();
  labelStart->setFrameStyle(QFrame::Panel | QFrame::Sunken);
  labelStart->setSizePolicy(expandFixed);
  gridTop->addWidget(labelStart, 0, 1, 1, 4);

  gridTop->addWidget(new QLabel(tr("Find")), 1, 0);
  boxFind = new QComboBox();
  boxFind->setEditable(true);
  boxFind->setDuplicatesEnabled(false);
  boxFind->setInsertPolicy(QComboBox::InsertAtTop);
  boxFind->setAutoCompletion(true);
  boxFind->setMaxCount(10);
  boxFind->setMaxVisibleItems(10);
  boxFind->setSizePolicy(expandFixed);
  gridTop->addWidget(boxFind, 1, 1, 1, 4);

  // "Search in" group: which properties the search text is matched against.
  QGroupBox *searchInBox = new QGroupBox(tr("Search in"));
  QVBoxLayout *searchInLayout = new QVBoxLayout(searchInBox);
  boxWindowNames = new QCheckBox(tr("&Window Names"));
  boxWindowNames->setChecked(true);
  searchInLayout->addWidget(boxWindowNames);
  boxWindowLabels = new QCheckBox(tr("Window &Labels"));
  boxWindowLabels->setChecked(false);
  searchInLayout->addWidget(boxWindowLabels);
  boxFolderNames = new QCheckBox(tr("Folder &Names"));
  boxFolderNames->setChecked(false);
  searchInLayout->addWidget(boxFolderNames);

  // Bottom grid: group box on the left, options in the middle column,
  // action buttons in the right column.
  QGridLayout *gridBottom = new QGridLayout();
  gridBottom->addWidget(searchInBox, 0, 0, 3, 1);
  boxCaseSensitive = new QCheckBox(tr("Case &Sensitive"));
  boxCaseSensitive->setChecked(false);
  gridBottom->addWidget(boxCaseSensitive, 0, 1);
  boxPartialMatch = new QCheckBox(tr("&Partial Match Allowed"));
  boxPartialMatch->setChecked(true);
  gridBottom->addWidget(boxPartialMatch, 1, 1);
  boxSubfolders = new QCheckBox(tr("&Include Subfolders"));
  boxSubfolders->setChecked(true);
  gridBottom->addWidget(boxSubfolders, 2, 1);
  buttonFind = new QPushButton(tr("&Find"));
  buttonFind->setDefault(true);
  gridBottom->addWidget(buttonFind, 0, 2);
  buttonReset = new QPushButton(tr("&Update Start Path"));
  gridBottom->addWidget(buttonReset, 1, 2);
  buttonCancel = new QPushButton(tr("&Close"));
  gridBottom->addWidget(buttonCancel, 2, 2);

  QVBoxLayout *outerLayout = new QVBoxLayout(this);
  outerLayout->addLayout(gridTop);
  outerLayout->addLayout(gridBottom);

  // Show the folder the search will start from.
  setStartPath();

  // signals and slots connections
  connect(buttonFind, SIGNAL(clicked()), this, SLOT(accept()));
  connect(buttonReset, SIGNAL(clicked()), this, SLOT(setStartPath()));
  connect(buttonCancel, SIGNAL(clicked()), this, SLOT(reject()));
}
/**
 * Refresh the displayed start path from the application's current folder.
 * @throws std::logic_error if the dialog's parent is not an ApplicationWindow.
 */
void FindDialog::setStartPath() {
  ApplicationWindow *app = dynamic_cast<ApplicationWindow *>(this->parent());
  if (app) {
    labelStart->setText(app->currentFolder()->path());
    return;
  }
  throw std::logic_error(
      "Parent of FindDialog is not ApplicationWindow as expected.");
}
void FindDialog::accept() {
ApplicationWindow *app = dynamic_cast<ApplicationWindow *>(this->parent());
if (!app) {
throw std::logic_error(
"Parent of FindDialog is not ApplicationWindow as expected.");
}
app->find(boxFind->currentText(), boxWindowNames->isChecked(),
boxWindowLabels->isChecked(), boxFolderNames->isChecked(),
boxCaseSensitive->isChecked(), boxPartialMatch->isChecked(),
boxSubfolders->isChecked());
// add the combo box's current text to the list when the find button is
// pressed
QString text = boxFind->currentText();
if (!text.isEmpty()) {
if (boxFind->findText(text) == -1) // no duplicates
{
boxFind->insertItem(0, text);
boxFind->setCurrentIndex(0);
}
}
}
// Trivial destructor: no manual cleanup here — the child widgets are
// presumably deleted via Qt's parent-child ownership of the dialog.
FindDialog::~FindDialog() {}
| {
"pile_set_name": "Github"
} |
#
# ***************************************************************************
# Copyright (c) 2010 Qcadoo Limited
# Project: Qcadoo MES
# Version: 1.4
#
# This file is part of Qcadoo.
#
# Qcadoo is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ***************************************************************************
#
### ROLE
qcadooSecurity.role.ROLE_MATERIAL_FLOW.description = Dostęp do magazynów, stanów miejsc składowania, rezerwacji stanów, miejsc składowania, zasobów.
### MENU
materialFlow.menu.materialFlow = Magazyn
materialFlow.menu.materialFlow.description = zarządzaj stanami magazynowymi
materialFlow.menu.materialFlow.locations = Magazyny
materialFlow.menu.materialFlow.locations.description = Magazyny przetrzymujące materiały
### RIBBON
basic.generalParameters.window.ribbon.parameters.dashboardParameters = Dashboard
materialFlow.location.ribbon.message.productIsExternal = Produkt jest powiązany z systemem zewnętrznym
materialFlow.location.ribbon.message.locationIsExternal = Lokalizacja jest powiązana z systemem zewnętrznym
materialFlow.location.ribbon.message.companyIsExternal = Firma jest powiązana z systemem zewnętrznym
### MODEL
basic.parameter.showChartOnDashboard.label = Pokaż wykres dziennej produkcji
basic.parameter.whatToShowOnDashboard.label = Widoczne
basic.parameter.whatToShowOnDashboard.value.01orders = zlecenia produkcyjne
basic.parameter.whatToShowOnDashboard.value.02operationalTasks = zadania operacyjne
basic.parameter.dashboardOperation.label = Operacja
basic.parameter.dashboardOperation.label.focus = Wybierz operację
basic.parameter.dashboardComponentsLocation.label = Magazyn pobrania surowców
basic.parameter.dashboardComponentsLocation.label.focus = Wybierz magazyn
basic.parameter.dashboardProductsInputLocation.label = Magazyn przyjmowania wyrobów
basic.parameter.dashboardProductsInputLocation.label.focus = Wybierz magazyn
materialFlow.location.number.label = Numer
materialFlow.location.name.label = Nazwa
materialFlow.location.externalNumber.label = ID w systemie zewnętrznym
materialFlow.materialFlow.number.label = Numer
materialFlow.materialFlow.location.label = Magazyn
materialFlow.materialFlow.product.label = Produkt
materialFlow.materialFlow.quantity.label = Ilość
### VIEW
materialFlow.dashboardParameters.window.main.dashboardParameters.headerEdit = Dashboard
materialFlow.dashboardParameters.window.main.dashboardParameters.showChartOnDashboard.description = Zaznacz parametr jeśli chcesz śledzić dzienną produkcję na wykresie umieszczonym na dashboardzie.
materialFlow.dashboardParameters.window.main.dashboardParameters.whatToShowOnDashboard.description = Wskaż co chciałbyś widzieć na stronie głównej qcadoo.
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardButtonsLabel = Parametry dodawania zlecenia i zadań
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardOperation.description = Wskaż operację, którą uzupełnimy w technologii utworzonej do zlecenia produkcyjnego dodawanego z dashboardu.
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardOperation.lookup.window.grid.header = Operacje
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardComponentsLocation.description = Podaj z jakiego magazynu chcesz pobierać surowce do produkcji w ramach zlecenia produkcyjnego dodawanego z dashboardu.
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardComponentsLocation.lookup.window.grid.header = Magazyny
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardProductsInputLocation.description = Podaj na jaki magazyn chcesz przyjmować wyroby gotowe wytworzone zleceniem dodanym z dashboardu.
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardProductsInputLocation.lookup.window.grid.header = Magazyny
materialFlow.dashboardParameters.window.main.dashboardParameters.dashboardButtons.header = Przyciski szybkiego wybierania
materialFlow.dashboardButtonDetails.window.mainTab.dashboardButton.headerEdit = Przycisk
materialFlow.dashboardButtonDetails.window.mainTab.dashboardButton.identifier.label = Funkcja
materialFlow.dashboardButtonDetails.window.mainTab.dashboardButton.item.label = Widok
materialFlow.dashboardButtonDetails.window.mainTab.dashboardButton.item.lookup.window.grid.header = Widoki
materialFlow.locationDetails.window.integrationTab.tabLabel = Integracja
materialFlow.locationDetails.window.integrationTab.externalNumber.label = ID w systemie zewnętrznym
materialFlow.locationsList.window.mainTab.locationsGrid.header = Lista magazynów
materialFlow.locationsList.window.mainTab.locationsGrid.column.name = Nazwa
materialFlow.locationsList.window.mainTab.locationsGrid.column.number = Numer
materialFlow.locationsList.window.mainTab.locationsGrid.column.type = Typ
materialFlow.materialFlow.window.mainTab.materialFlow.header = Przepływ materiałów
materialFlow.materialFlow.window.mainTab.materialFlow.perPage = Ilość na stronę:
materialFlow.materialFlowList.window.mainTab.materialFlow.column.number = Numer
materialFlow.materialFlowList.window.mainTab.materialFlow.column.name = Nazwa
materialFlow.materialFlowList.window.mainTab.grid.header = Lista przepływów materiałów
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.headerNew = Nowy przepływ materiałów
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.headerEdit = Edytuj przepływ materiałów
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.number.label = Numer
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.name.label = Nazwa
materialFlow.materialFlowDetails.window.ribbon.navigation.back = Powrót
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.confirmDeleteMessage = Czy chcesz usunąć ten obiekt?
materialFlow.materialFlowDetails.window.mainTab.materialFlow.confirmDeleteMessage = Czy chcesz usunąć ten obiekt?
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.saveMessage = Obiekt został zapisany do bazy danych.
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.deleteMessage = Obiekt został usunięty
materialFlow.materialFlowDetails.window.mainTab.materialFlowDetails.deleteFailedMessage = Nie można usunąć obiektu
materialFlow.materialFlowDetail.window.mainTab.form.location.lookup.window.grid.header = Magazyny
materialFlow.materialFlowDetail.window.mainTab.form.headerNew = Dodaj magazyn
materialFlow.materialFlowDetail.window.mainTab.form.headerEdit = Magazyn
materialFlow.materialFlowDetail.window.mainTab.form.location.label.focus = Wybierz magazyn
materialFlow.materialFlowDetail.window.mainTab.form.product.label.focus = Wybierz produkt
materialFlow.materialFlowDetail.window.mainTab.form.product.lookup.window.grid.header = Produkty
materialFlow.locationDetails.window.mainTab.form.headerNew = Dodaj Magazyn
materialFlow.locationDetails.window.mainTab.form.headerEdit = Magazyn
materialFlow.locationDetails.window.mainTab.form.type.label.focus = Wybierz typ
### INFO
materialFlow.location.delete.withExternalNumber = Nie można usunąć magazynu zintegrowanego z systemem zewnętrznym.
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "group:TinderSwipeView.xcodeproj">
</FileRef>
<FileRef
location = "group:Pods/Pods.xcodeproj">
</FileRef>
</Workspace>
| {
"pile_set_name": "Github"
} |
<?php
/*
* $Id: BufferedReader.php 3076 2006-12-18 08:52:12Z fabien $
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This software consists of voluntary contributions made by many individuals
* and is licensed under the LGPL. For more information please see
* <http://phing.info>.
*/
include_once 'phing/system/io/Reader.php';
/*
* Convenience class for reading files.
*
* @author <a href="mailto:yl@seasonfive.com">Yannick Lecaillez</a>
* @version $Revision: 1.6 $ $Date: 2005/12/27 19:12:13 $
* @access public
* @see FilterReader
* @package phing.system.io
*/
class BufferedReader extends Reader {

    /** @var integer Size (in bytes) of each chunk read from the wrapped reader. */
    private $bufferSize = 0;

    /** @var string|null Current buffered chunk, or null when the buffer is empty. */
    private $buffer = null;

    /** @var integer Position of the next unread char within $buffer. */
    private $bufferPos = 0;

    /**
     * The Reader we are buffering for.
     */
    private $in;

    /**
     *
     * @param Reader  $reader   The reader (e.g. FileReader).
     * @param integer $buffsize The size of the buffer we should use for reading files.
     *                          A large buffer ensures that most files (all scripts?) are parsed in 1 buffer.
     */
    function __construct(Reader $reader, $buffsize = 65536) {
        $this->in = $reader;
        $this->bufferSize = $buffsize;
    }

    /**
     * Reads and returns a $bufferSize chunk of data, truncated at the last
     * complete line so that line-oriented filters never see a split line.
     *
     * @param  integer $len Ignored; this reader always reads $bufferSize bytes.
     * @return mixed buffer or -1 if EOF.
     */
    function read($len = null) {
        // The $len parameter is intentionally ignored: this reader always
        // pulls $bufferSize bytes from the underlying reader.
        // (A previous revision dereferenced $this->fd here, a property this
        // class never defines — that dead/broken branch has been removed.)
        if ( ($data = $this->in->read($this->bufferSize)) !== -1 ) {

            // not all files end with a newline character, so we also need to check EOF
            if (!$this->in->eof()) {

                $notValidPart = strrchr($data, "\n");
                $notValidPartSize = strlen($notValidPart);

                if ( $notValidPartSize > 1 ) {
                    // Block doesn't finish on a EOL
                    // Find the last EOL and forget all following stuff
                    $dataSize = strlen($data);
                    $validSize = $dataSize - $notValidPartSize + 1;

                    $data = substr($data, 0, $validSize);

                    // Rewind to the beginning of the forgotten stuff.
                    $this->in->skip(-$notValidPartSize+1);
                }

            } // if !EOF
        }
        return $data;
    }

    /**
     * Skips $n bytes in the underlying reader.
     */
    function skip($n) {
        return $this->in->skip($n);
    }

    /**
     * Resets the underlying reader to its start position.
     */
    function reset() {
        return $this->in->reset();
    }

    function close() {
        return $this->in->close();
    }

    function open() {
        return $this->in->open();
    }

    /**
     * Read a line from input stream.
     *
     * @return string|null The line without its trailing "\n", "" for an empty
     *                     line, or null at EOF.
     */
    function readLine() {
        $line = null;
        while ( ($ch = $this->readChar()) !== -1 ) {
            if ( $ch === "\n" ) {
                break;
            }
            $line .= $ch;
        }

        // Warning : Not considering an empty line as an EOF
        if ( $line === null && $ch !== -1 )
            return "";

        return $line;
    }

    /**
     * Reads a single char from the reader.
     * @return string single char or -1 if EOF.
     */
    function readChar() {
        if ( $this->buffer === null ) {
            // Buffer is empty, fill it ...
            $read = $this->in->read($this->bufferSize);
            if ($read === -1) {
                $ch = -1;
            } else {
                $this->buffer = $read;
                return $this->readChar(); // recurse
            }
        } else {
            // Get next buffered char ...
            // handle case where buffer is read-in, but is empty. The next readChar() will return -1 EOF,
            // so we just return empty string (char) at this point. (Probably could also return -1 ...?)
            // NOTE: bracket offset syntax replaces the curly-brace form, which
            // was deprecated in PHP 7.4 and removed in PHP 8.0.
            $ch = ($this->buffer !== "") ? $this->buffer[$this->bufferPos] : '';
            $this->bufferPos++;
            if ( $this->bufferPos >= strlen($this->buffer) ) {
                $this->buffer = null;
                $this->bufferPos = 0;
            }
        }
        return $ch;
    }

    /**
     * Returns whether eof has been reached in stream.
     * This is important, because filters may want to know if the end of the file (and not just buffer)
     * has been reached.
     * @return boolean
     */
    function eof() {
        return $this->in->eof();
    }

    function getResource() {
        return $this->in->getResource();
    }
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>script.aculo.us Drag and drop functional test file</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<script type="text/javascript" src="../../MochiKit/MochiKit.js"></script>
<script type="text/javascript" src="../../MochiKit/Position.js"></script>
<script type="text/javascript" src="../../MochiKit/Visual.js"></script>
<script type="text/javascript" src="../../MochiKit/DragAndDrop.js"></script>
</head>
<body style="background-color:#eee;">
<div style="position:relative;margin-left:100px;top:40px;background-color:white;">
(inside position:relative container)
<h1>script.aculo.us Drag and drop functional test file</h1>
<h2>Draggables/Droppables</h2>
<div id="absolute_positioned_element" class="box1" style="width:150px;height:150px;background:#bbf;">
<b>Ghosting effect</b>
<input type="text" value="blah"/>
<div id="hurradiegams">test!</div>
<br/>
</div>
<a href="#" onclick="alert($('hurradiegams').innerHTML); return false;">alert contents of test div</a>
<div id="absolute_positioned_element2" class="box1" style="width:150px;height:150px;background:#bbf;">
<span id="handle1">Ghost effect</span><br/>
</div>
<script type="text/javascript" language="javascript" charset="utf-8">
// <![CDATA[
new MochiKit.DragAndDrop.Draggable('absolute_positioned_element',{ghosting: true});
new MochiKit.DragAndDrop.Draggable('absolute_positioned_element2',{ghosting: true, revert:true});
// ]]>
</script>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"]
[[constraint]]
name = "github.com/modern-go/concurrent"
version = "1.0.0"
[prune]
go-tests = true
unused-packages = true
| {
"pile_set_name": "Github"
} |
#include "precompiled.h"
//
// Copyright (c) 2012-2013 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// SwapChain9.cpp: Implements a back-end specific class for the D3D9 swap chain.
#include "libGLESv2/renderer/d3d9/SwapChain9.h"
#include "libGLESv2/renderer/d3d9/renderer9_utils.h"
#include "libGLESv2/renderer/d3d9/Renderer9.h"
namespace rx
{
// Constructs a D3D9 swap chain wrapper. All D3D resource pointers start out
// NULL and the dimensions/interval start at -1; they are populated lazily by
// reset().
// Fix: the mem-initializer list previously named mRenderer before the base
// class SwapChain. Initialization order is base-first regardless of list
// order, so the old list was misleading and triggered -Wreorder.
SwapChain9::SwapChain9(Renderer9 *renderer, HWND window, HANDLE shareHandle,
                       GLenum backBufferFormat, GLenum depthBufferFormat)
    : SwapChain(window, shareHandle, backBufferFormat, depthBufferFormat),
      mRenderer(renderer)
{
    mSwapChain = NULL;
    mBackBuffer = NULL;
    mDepthStencil = NULL;
    mRenderTarget = NULL;
    mOffscreenTexture = NULL;

    mWidth = -1;
    mHeight = -1;
    mSwapInterval = -1;
}
// Releases all owned D3D9 resources via release().
SwapChain9::~SwapChain9()
{
    release();
}
// Releases every COM resource this swap chain owns (swap chain, back buffer,
// depth-stencil, render target and offscreen texture), resetting each pointer
// to NULL so release() is safe to call repeatedly.
void SwapChain9::release()
{
    if (mSwapChain)
    {
        mSwapChain->Release();
        mSwapChain = NULL;
    }

    if (mBackBuffer)
    {
        mBackBuffer->Release();
        mBackBuffer = NULL;
    }

    if (mDepthStencil)
    {
        mDepthStencil->Release();
        mDepthStencil = NULL;
    }

    if (mRenderTarget)
    {
        mRenderTarget->Release();
        mRenderTarget = NULL;
    }

    if (mOffscreenTexture)
    {
        mOffscreenTexture->Release();
        mOffscreenTexture = NULL;
    }

    // NOTE(review): for windowed swap chains the share handle is dropped here
    // without being released — reset() only fills mShareHandle for offscreen
    // (windowless) surfaces, so this appears to clear a handle this object
    // never owned. Confirm ownership semantics against the EGL surface code.
    if (mWindow)
        mShareHandle = NULL;
}
// Maps an EGL swap interval (0..4) onto the corresponding
// D3DPRESENT_INTERVAL_* flag. Out-of-range values are a programming error
// (UNREACHABLE) and fall back to the D3D default interval. When the build
// forces vsync off, every interval maps to IMMEDIATE.
static DWORD convertInterval(EGLint interval)
{
#if ANGLE_FORCE_VSYNC_OFF
    return D3DPRESENT_INTERVAL_IMMEDIATE;
#else
    static const DWORD d3dIntervals[] =
    {
        D3DPRESENT_INTERVAL_IMMEDIATE,   // 0
        D3DPRESENT_INTERVAL_ONE,         // 1
        D3DPRESENT_INTERVAL_TWO,         // 2
        D3DPRESENT_INTERVAL_THREE,       // 3
        D3DPRESENT_INTERVAL_FOUR,        // 4
    };

    if (interval >= 0 && interval <= 4)
    {
        return d3dIntervals[interval];
    }

    UNREACHABLE();
    return D3DPRESENT_INTERVAL_DEFAULT;
#endif
}
// Resizes the swap chain's buffers to the given dimensions, keeping the
// current swap interval. Returns an EGL error code from reset().
EGLint SwapChain9::resize(int backbufferWidth, int backbufferHeight)
{
    // D3D9 does not support resizing swap chains without recreating them
    return reset(backbufferWidth, backbufferHeight, mSwapInterval);
}
// Recreates all size-dependent resources at the requested back buffer
// dimensions and swap interval: the offscreen render-target texture, a D3D
// swap chain (windowed surfaces only) and the depth-stencil surface. The old
// render target's contents are preserved by blitting them into the new one.
// Returns EGL_SUCCESS, or EGL_BAD_ACCESS / EGL_BAD_ALLOC / EGL_CONTEXT_LOST
// on failure (releasing everything on the error paths).
EGLint SwapChain9::reset(int backbufferWidth, int backbufferHeight, EGLint swapInterval)
{
    IDirect3DDevice9 *device = mRenderer->getDevice();

    if (device == NULL)
    {
        return EGL_BAD_ACCESS;
    }

    // Evict all non-render target textures to system memory and release all resources
    // before reallocating them to free up as much video memory as possible.
    device->EvictManagedResources();

    HRESULT result;

    // Release specific resources to free up memory for the new render target, while the
    // old render target still exists for the purpose of preserving its contents.
    if (mSwapChain)
    {
        mSwapChain->Release();
        mSwapChain = NULL;
    }

    if (mBackBuffer)
    {
        mBackBuffer->Release();
        mBackBuffer = NULL;
    }

    if (mOffscreenTexture)
    {
        mOffscreenTexture->Release();
        mOffscreenTexture = NULL;
    }

    if (mDepthStencil)
    {
        mDepthStencil->Release();
        mDepthStencil = NULL;
    }

    // A share handle is only requested for offscreen (windowless) surfaces,
    // and only when the renderer supports shared resources.
    HANDLE *pShareHandle = NULL;
    if (!mWindow && mRenderer->getShareHandleSupport())
    {
        pShareHandle = &mShareHandle;
    }

    result = device->CreateTexture(backbufferWidth, backbufferHeight, 1, D3DUSAGE_RENDERTARGET,
                                   gl_d3d9::ConvertRenderbufferFormat(mBackBufferFormat), D3DPOOL_DEFAULT,
                                   &mOffscreenTexture, pShareHandle);
    if (FAILED(result))
    {
        ERR("Could not create offscreen texture: %08lX", result);
        release();

        if (d3d9::isDeviceLostError(result))
        {
            return EGL_CONTEXT_LOST;
        }
        else
        {
            return EGL_BAD_ALLOC;
        }
    }

    IDirect3DSurface9 *oldRenderTarget = mRenderTarget;

    result = mOffscreenTexture->GetSurfaceLevel(0, &mRenderTarget);
    ASSERT(SUCCEEDED(result));

    if (oldRenderTarget)
    {
        // Copy the old render target's contents into the new one, clamping the
        // copy rectangle to the new (possibly smaller) back buffer size.
        RECT rect =
        {
            0, 0,
            mWidth, mHeight
        };

        if (rect.right > static_cast<LONG>(backbufferWidth))
        {
            rect.right = backbufferWidth;
        }

        if (rect.bottom > static_cast<LONG>(backbufferHeight))
        {
            rect.bottom = backbufferHeight;
        }

        mRenderer->endScene();
        result = device->StretchRect(oldRenderTarget, &rect, mRenderTarget, &rect, D3DTEXF_NONE);
        ASSERT(SUCCEEDED(result));

        oldRenderTarget->Release();
    }

    if (mWindow)
    {
        D3DPRESENT_PARAMETERS presentParameters = {0};
        presentParameters.AutoDepthStencilFormat = gl_d3d9::ConvertRenderbufferFormat(mDepthBufferFormat);
        presentParameters.BackBufferCount = 1;
        presentParameters.BackBufferFormat = gl_d3d9::ConvertRenderbufferFormat(mBackBufferFormat);
        presentParameters.EnableAutoDepthStencil = FALSE;
        presentParameters.Flags = 0;
        presentParameters.hDeviceWindow = mWindow;
        presentParameters.MultiSampleQuality = 0;                  // FIXME: Unimplemented
        presentParameters.MultiSampleType = D3DMULTISAMPLE_NONE;   // FIXME: Unimplemented
        presentParameters.PresentationInterval = convertInterval(swapInterval);
        presentParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
        presentParameters.Windowed = TRUE;
        presentParameters.BackBufferWidth = backbufferWidth;
        presentParameters.BackBufferHeight = backbufferHeight;

        // http://crbug.com/140239
        // http://crbug.com/143434
        //
        // Some AMD/Intel switchable systems / drivers appear to round swap chain surfaces to a multiple of 64 pixels in width
        // when using the integrated Intel. This rounds the width up rather than down.
        //
        // Some non-switchable AMD GPUs / drivers do not respect the source rectangle to Present. Therefore, when the vendor ID
        // is not Intel, the back buffer width must be exactly the same width as the window or horizontal scaling will occur.
        if (mRenderer->getAdapterVendor() == VENDOR_ID_INTEL)
        {
            presentParameters.BackBufferWidth = (presentParameters.BackBufferWidth + 63) / 64 * 64;
        }

        result = device->CreateAdditionalSwapChain(&presentParameters, &mSwapChain);

        if (FAILED(result))
        {
            ASSERT(result == D3DERR_OUTOFVIDEOMEMORY || result == E_OUTOFMEMORY || result == D3DERR_INVALIDCALL || result == D3DERR_DEVICELOST);

            ERR("Could not create additional swap chains or offscreen surfaces: %08lX", result);
            release();

            if (d3d9::isDeviceLostError(result))
            {
                return EGL_CONTEXT_LOST;
            }
            else
            {
                return EGL_BAD_ALLOC;
            }
        }

        result = mSwapChain->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &mBackBuffer);
        ASSERT(SUCCEEDED(result));
        InvalidateRect(mWindow, NULL, FALSE);
    }

    if (mDepthBufferFormat != GL_NONE)
    {
        result = device->CreateDepthStencilSurface(backbufferWidth, backbufferHeight,
                                                   gl_d3d9::ConvertRenderbufferFormat(mDepthBufferFormat),
                                                   D3DMULTISAMPLE_NONE, 0, FALSE, &mDepthStencil, NULL);

        if (FAILED(result))
        {
            ASSERT(result == D3DERR_OUTOFVIDEOMEMORY || result == E_OUTOFMEMORY || result == D3DERR_INVALIDCALL);

            ERR("Could not create depthstencil surface for new swap chain: 0x%08X", result);
            release();

            if (d3d9::isDeviceLostError(result))
            {
                return EGL_CONTEXT_LOST;
            }
            else
            {
                return EGL_BAD_ALLOC;
            }
        }
    }

    mWidth = backbufferWidth;
    mHeight = backbufferHeight;
    mSwapInterval = swapInterval;

    return EGL_SUCCESS;
}
// parameters should be validated/clamped by caller
//
// Copies the (x, y, width, height) sub-rectangle of the offscreen texture
// onto the back buffer with a textured quad, then Presents the same
// rectangle to the window.  Returns EGL_SUCCESS, EGL_BAD_ALLOC or
// EGL_CONTEXT_LOST.
EGLint SwapChain9::swapRect(EGLint x, EGLint y, EGLint width, EGLint height)
{
// Nothing to present until reset() has created the swap chain.
if (!mSwapChain)
{
return EGL_SUCCESS;
}
IDirect3DDevice9 *device = mRenderer->getDevice();
// Disable all pipeline operations
device->SetRenderState(D3DRS_ZENABLE, D3DZB_FALSE);
device->SetRenderState(D3DRS_FILLMODE, D3DFILL_SOLID);
device->SetRenderState(D3DRS_ALPHATESTENABLE, FALSE);
device->SetRenderState(D3DRS_ALPHABLENDENABLE, FALSE);
device->SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE);
device->SetRenderState(D3DRS_STENCILENABLE, FALSE);
device->SetRenderState(D3DRS_CLIPPLANEENABLE, 0);
device->SetRenderState(D3DRS_COLORWRITEENABLE, D3DCOLORWRITEENABLE_ALPHA | D3DCOLORWRITEENABLE_BLUE | D3DCOLORWRITEENABLE_GREEN | D3DCOLORWRITEENABLE_RED);
device->SetRenderState(D3DRS_SRGBWRITEENABLE, FALSE);
device->SetRenderState(D3DRS_SCISSORTESTENABLE, FALSE);
device->SetPixelShader(NULL);
device->SetVertexShader(NULL);
// Blit the offscreen texture to the back buffer through the fixed-function
// pipeline: no shaders, first texture stage passes the texture through,
// point filtering, clamped addressing.
device->SetRenderTarget(0, mBackBuffer);
device->SetDepthStencilSurface(NULL);
device->SetTexture(0, mOffscreenTexture);
device->SetTextureStageState(0, D3DTSS_COLOROP, D3DTOP_SELECTARG1);
device->SetTextureStageState(0, D3DTSS_COLORARG1, D3DTA_TEXTURE);
device->SetTextureStageState(1, D3DTSS_COLOROP, D3DTOP_DISABLE);
device->SetSamplerState(0, D3DSAMP_MAGFILTER, D3DTEXF_POINT);
device->SetSamplerState(0, D3DSAMP_MINFILTER, D3DTEXF_POINT);
device->SetSamplerState(0, D3DSAMP_ADDRESSU, D3DTADDRESS_CLAMP);
device->SetSamplerState(0, D3DSAMP_ADDRESSV, D3DTADDRESS_CLAMP);
// Pre-transformed (screen-space) vertices with one set of texture coords.
device->SetFVF(D3DFVF_XYZRHW | D3DFVF_TEX1);
// Reset any instancing stream frequencies left over from earlier draws.
for (UINT streamIndex = 0; streamIndex < gl::MAX_VERTEX_ATTRIBS; streamIndex++)
{
device->SetStreamSourceFreq(streamIndex, 1);
}
D3DVIEWPORT9 viewport = {0, 0, mWidth, mHeight, 0.0f, 1.0f};
device->SetViewport(&viewport);
// Quad corners in window space.  Y is flipped (the incoming rectangle uses
// a bottom-left origin) and the -0.5f offsets follow D3D9's half-pixel
// convention so texels map exactly onto pixels.
float x1 = x - 0.5f;
float y1 = (mHeight - y - height) - 0.5f;
float x2 = (x + width) - 0.5f;
float y2 = (mHeight - y) - 0.5f;
float u1 = x / float(mWidth);
float v1 = y / float(mHeight);
float u2 = (x + width) / float(mWidth);
float v2 = (y + height) / float(mHeight);
float quad[4][6] = {{x1, y1, 0.0f, 1.0f, u1, v2},
{x2, y1, 0.0f, 1.0f, u2, v2},
{x2, y2, 0.0f, 1.0f, u2, v1},
{x1, y2, 0.0f, 1.0f, u1, v1}}; // x, y, z, rhw, u, v
mRenderer->startScene();
device->DrawPrimitiveUP(D3DPT_TRIANGLEFAN, 2, quad, 6 * sizeof(float));
mRenderer->endScene();
device->SetTexture(0, NULL);
// Present only the updated rectangle (same source and destination rect).
RECT rect =
{
x, mHeight - y - height,
x + width, mHeight - y
};
HRESULT result = mSwapChain->Present(&rect, &rect, NULL, NULL, 0);
// The raw device calls above bypassed the renderer's state cache; force it
// to reapply everything before the next draw.
mRenderer->markAllStateDirty();
if (result == D3DERR_OUTOFVIDEOMEMORY || result == E_OUTOFMEMORY || result == D3DERR_DRIVERINTERNALERROR)
{
return EGL_BAD_ALLOC;
}
// http://crbug.com/313210
// If our swap failed, trigger a device lost event. Resetting will work around an AMD-specific
// device removed bug with lost contexts when reinstalling drivers.
if (FAILED(result))
{
mRenderer->notifyDeviceLost();
return EGL_CONTEXT_LOST;
}
return EGL_SUCCESS;
}
// Increments refcount on surface.
// caller must Release() the returned surface
IDirect3DSurface9 *SwapChain9::getRenderTarget()
{
    IDirect3DSurface9 *surface = mRenderTarget;

    if (surface != NULL)
    {
        surface->AddRef();
    }

    return surface;
}
// Increments refcount on surface.
// caller must Release() the returned surface
IDirect3DSurface9 *SwapChain9::getDepthStencil()
{
    IDirect3DSurface9 *surface = mDepthStencil;

    if (surface != NULL)
    {
        surface->AddRef();
    }

    return surface;
}
// Increments refcount on texture.
// caller must Release() the returned texture
IDirect3DTexture9 *SwapChain9::getOffscreenTexture()
{
    IDirect3DTexture9 *texture = mOffscreenTexture;

    if (texture != NULL)
    {
        texture->AddRef();
    }

    return texture;
}
// Checked downcast from the renderer-agnostic SwapChain interface to the
// D3D9 implementation; the dynamic type is verified in debug builds.
SwapChain9 *SwapChain9::makeSwapChain9(SwapChain *swapChain)
{
    ASSERT(HAS_DYNAMIC_TYPE(rx::SwapChain9*, swapChain));
    SwapChain9 *swapChain9 = static_cast<rx::SwapChain9*>(swapChain);
    return swapChain9;
}
// Recreates the underlying D3D9 swap chain with its current present
// parameters, replacing the cached swap chain and back buffer pointers.
// Silently keeps the old swap chain if recreation fails.
void SwapChain9::recreate()
{
// Only meaningful once reset() has created a swap chain.
if (!mSwapChain)
{
return;
}
IDirect3DDevice9 *device = mRenderer->getDevice();
if (device == NULL)
{
return;
}
// Clone the existing chain's configuration.
D3DPRESENT_PARAMETERS presentParameters;
HRESULT result = mSwapChain->GetPresentParameters(&presentParameters);
ASSERT(SUCCEEDED(result));
IDirect3DSwapChain9* newSwapChain = NULL;
result = device->CreateAdditionalSwapChain(&presentParameters, &newSwapChain);
if (FAILED(result))
{
// Creation failed: leave the old swap chain untouched.
return;
}
// Swap in the replacement only after creation succeeded, then refresh the
// cached back buffer to point at the new chain's surface.
mSwapChain->Release();
mSwapChain = newSwapChain;
mBackBuffer->Release();
result = mSwapChain->GetBackBuffer(0, D3DBACKBUFFER_TYPE_MONO, &mBackBuffer);
ASSERT(SUCCEEDED(result));
}
}
| {
"pile_set_name": "Github"
} |
<?xml version='1.0' encoding='utf-8'?>
<section xmlns="https://code.dccouncil.us/schemas/dc-library" xmlns:codified="https://code.dccouncil.us/schemas/codified" xmlns:codify="https://code.dccouncil.us/schemas/codify" xmlns:xi="http://www.w3.org/2001/XInclude" containing-doc="D.C. Code">
<num>38-405</num>
<heading>Sale of part of lot 14 in square 263.</heading>
<text>The proceeds of that portion of lot No. 14, in square No. 263, which was authorized to be sold by an Act of Congress dated June 4, 1872, shall be invested by the authorities of the District in another lot or part of a lot in the City of Washington, and in improvements thereon; and the property so purchased shall be used for the purpose of the public schools, and for no other purpose.</text>
<annotations>
<annotation type="History" doc="R.S., D.C." path="§318">R.S., D.C., § 318</annotation>
<annotation type="Prior Codifications">1973 Ed., § 31-806.</annotation>
<annotation type="Prior Codifications">1981 Ed., § 31-205.</annotation>
<annotation type="Section References">This section is referenced in <cite path="§38-404">§ 38-404</cite>.</annotation>
</annotations>
</section>
| {
"pile_set_name": "Github"
} |
/*
Jonathan Dummer
image helper functions
MIT license
*/
#include "image_helper.h"
#include <stdlib.h>
#include <math.h>
/* Upscaling the image uses simple bilinear interpolation */
/*
	orig:  source pixels, width x height, interleaved channels
	resampled: destination buffer, resampled_width x resampled_height
	Returns 1 on success, 0 on invalid arguments.
	NOTE(review): as in the commented-out guards below, a source width or
	height of exactly 1 drives the index clamp negative; callers are
	expected to pass sources of at least 2x2.
*/
int
up_scale_image
(
	const unsigned char* const orig,
	int width, int height, int channels,
	unsigned char* resampled,
	int resampled_width, int resampled_height
)
{
	float step_x, step_y;
	int col, row, chan;

	/* reject degenerate sizes and NULL buffers */
	if( (width < 1) || (height < 1) ||
	    (resampled_width < 2) || (resampled_height < 2) ||
	    (channels < 1) ||
	    (NULL == orig) || (NULL == resampled) )
	{
		return 0;
	}

	/*
	   map each destination pixel back onto the source; the -1 terms make
	   the first/last destination samples land exactly on the first/last
	   source pixels
	*/
	step_x = (width - 1.0f) / (resampled_width - 1.0f);
	step_y = (height - 1.0f) / (resampled_height - 1.0f);

	for( row = 0; row < resampled_height; ++row )
	{
		/* integer source row plus fractional offset into it */
		float sample_y = row * step_y;
		int base_y = (int)sample_y;
		/* if( base_y < 0 ) { base_y = 0; } else */
		if( base_y > height - 2 ) { base_y = height - 2; }
		sample_y -= base_y;

		for( col = 0; col < resampled_width; ++col )
		{
			/* integer source column plus fractional offset */
			float sample_x = col * step_x;
			int base_x = (int)sample_x;
			int src_index;
			/* if( base_x < 0 ) { base_x = 0; } else */
			if( base_x > width - 2 ) { base_x = width - 2; }
			sample_x -= base_x;

			/* index of the top-left pixel of the 2x2 footprint */
			src_index = (base_y * width + base_x) * channels;
			for( chan = 0; chan < channels; ++chan )
			{
				/* starting at 0.5 rounds the final truncation to nearest */
				float value = 0.5f;
				value += orig[src_index]
							*(1.0f-sample_x)*(1.0f-sample_y);
				value += orig[src_index+channels]
							*(sample_x)*(1.0f-sample_y);
				value += orig[src_index+width*channels]
							*(1.0f-sample_x)*(sample_y);
				value += orig[src_index+width*channels+channels]
							*(sample_x)*(sample_y);
				/* advance the footprint to the next channel */
				++src_index;
				resampled[row*resampled_width*channels+col*channels+chan] =
						(unsigned char)(value);
			}
		}
	}
	return 1;
}
/*
	Box-filter downsample ("mipmap") of orig into resampled.

	Each destination pixel is the rounded average of a block_size_x x
	block_size_y block of source pixels.  The destination size is
	max(1, width/block_size_x) x max(1, height/block_size_y) with the
	same channel count; resampled must be large enough to hold it.

	Returns 1 on success, 0 on invalid arguments.
*/
int
mipmap_image
(
	const unsigned char* const orig,
	int width, int height, int channels,
	unsigned char* resampled,
	int block_size_x, int block_size_y
)
{
	int mip_width, mip_height;
	int i, j, c;

	/* error check */
	if( (width < 1) || (height < 1) ||
		(channels < 1) || (orig == NULL) ||
		(resampled == NULL) ||
		(block_size_x < 1) || (block_size_y < 1) )
	{
		/* nothing to do */
		return 0;
	}
	mip_width = width / block_size_x;
	mip_height = height / block_size_y;
	if( mip_width < 1 )
	{
		mip_width = 1;
	}
	if( mip_height < 1 )
	{
		mip_height = 1;
	}
	for( j = 0; j < mip_height; ++j )
	{
		for( i = 0; i < mip_width; ++i )
		{
			for( c = 0; c < channels; ++c )
			{
				/* top-left source sample of this block */
				const int index = (j*block_size_y)*width*channels + (i*block_size_x)*channels + c;
				int sum_value;
				int u,v;
				int u_block = block_size_x;
				int v_block = block_size_y;
				int block_area;
				/* do a bit of checking so we don't over-run the boundaries
					(necessary for non-square textures!) */
				if( block_size_x * (i+1) > width )
				{
					/* bugfix: was "width - i*block_size_y", which mixed the
						x and y block sizes when clamping the last column */
					u_block = width - i*block_size_x;
				}
				if( block_size_y * (j+1) > height )
				{
					v_block = height - j*block_size_y;
				}
				block_area = u_block*v_block;
				/* for this pixel, see what the average
					of all the values in the block are.
					note: start the sum at the rounding value, not at 0 */
				sum_value = block_area >> 1;
				for( v = 0; v < v_block; ++v )
				for( u = 0; u < u_block; ++u )
				{
					sum_value += orig[index + v*width*channels + u*channels];
				}
				resampled[j*mip_width*channels + i*channels + c] = sum_value / block_area;
			}
		}
	}
	return 1;
}
/*
	Compress every color channel of the image from [0,255] into the
	NTSC-safe range [16,235], in place.  For 2- and 4-channel images the
	last (alpha) channel is left untouched.
	Returns 1 on success, 0 on invalid arguments.
*/
int
scale_image_RGB_to_NTSC_safe
(
	unsigned char* orig,
	int width, int height, int channels
)
{
	/* target range, padded by just under 0.5 so the LUT rounds nicely */
	const float range_lo = 16.0f - 0.499f;
	const float range_hi = 235.0f + 0.499f;
	unsigned char lut[256];
	int idx, chan;
	int color_channels = channels;

	/* error check */
	if( (width < 1) || (height < 1) ||
	    (channels < 1) || (orig == NULL) )
	{
		return 0;
	}
	/* precompute the mapping for every possible byte value */
	for( idx = 0; idx < 256; ++idx )
	{
		lut[idx] = (unsigned char)((range_hi - range_lo) * idx / 255.0f + range_lo);
	}
	/* even channel counts carry alpha in the last channel: skip it */
	color_channels -= 1 - (channels & 1);
	/* remap all non-alpha components through the LUT */
	for( idx = 0; idx < width*height*channels; idx += channels )
	{
		for( chan = 0; chan < color_channels; ++chan )
		{
			orig[idx+chan] = lut[orig[idx+chan]];
		}
	}
	return 1;
}
/* Clamp an int into the unsigned byte range [0,255]. */
unsigned char clamp_byte( int x )
{
	if( x < 0 )
	{
		return 0;
	}
	if( x > 255 )
	{
		return 255;
	}
	return (unsigned char)x;
}
/*
	This function takes the RGB components of the image
	and converts them into YCoCg. 3 components will be
	re-ordered to CoYCg (for optimum DXT1 compression),
	while 4 components will be ordered CoCgAY (for DXT5
	compression).
*/
int
convert_RGB_to_YCoCg
(
	unsigned char* orig,
	int width, int height, int channels
)
{
	int idx;
	const int total = width*height*channels;

	/* only 3- or 4-channel images can be converted */
	if( (width < 1) || (height < 1) ||
	    (channels < 3) || (channels > 4) ||
	    (orig == NULL) )
	{
		return -1;
	}
	if( channels == 3 )
	{
		/* in-place: RGB -> Co,Y,Cg */
		for( idx = 0; idx < total; idx += 3 )
		{
			int red = orig[idx+0];
			int half_green = (orig[idx+1] + 1) >> 1;
			int blue = orig[idx+2];
			int rb_avg = (2 + red + blue) >> 2;
			orig[idx+0] = clamp_byte( 128 + ((red - blue + 1) >> 1) );	/* Co */
			orig[idx+1] = clamp_byte( half_green + rb_avg );		/* Y */
			orig[idx+2] = clamp_byte( 128 + half_green - rb_avg );		/* Cg */
		}
	} else
	{
		/* in-place: RGBA -> Co,Cg,A,Y */
		for( idx = 0; idx < total; idx += 4 )
		{
			int red = orig[idx+0];
			int half_green = (orig[idx+1] + 1) >> 1;
			int blue = orig[idx+2];
			unsigned char alpha = orig[idx+3];
			int rb_avg = (2 + red + blue) >> 2;
			orig[idx+0] = clamp_byte( 128 + ((red - blue + 1) >> 1) );	/* Co */
			orig[idx+1] = clamp_byte( 128 + half_green - rb_avg );		/* Cg */
			orig[idx+2] = alpha;						/* Alpha */
			orig[idx+3] = clamp_byte( half_green + rb_avg );		/* Y */
		}
	}
	return 0;
}
/*
	This function takes the YCoCg components of the image
	and converts them into RGB. See above.
*/
int
convert_YCoCg_to_RGB
(
	unsigned char* orig,
	int width, int height, int channels
)
{
	int idx;
	const int total = width*height*channels;

	/* only the 3- and 4-channel layouts produced above are valid */
	if( (width < 1) || (height < 1) ||
	    (channels < 3) || (channels > 4) ||
	    (orig == NULL) )
	{
		return -1;
	}
	if( channels == 3 )
	{
		/* 3-channel layout is Co,Y,Cg */
		for( idx = 0; idx < total; idx += 3 )
		{
			int co = orig[idx+0] - 128;
			int y  = orig[idx+1];
			int cg = orig[idx+2] - 128;
			orig[idx+0] = clamp_byte( y + co - cg );	/* R */
			orig[idx+1] = clamp_byte( y + cg );		/* G */
			orig[idx+2] = clamp_byte( y - co - cg );	/* B */
		}
	} else
	{
		/* 4-channel layout is Co,Cg,A,Y */
		for( idx = 0; idx < total; idx += 4 )
		{
			int co = orig[idx+0] - 128;
			int cg = orig[idx+1] - 128;
			unsigned char alpha = orig[idx+2];
			int y  = orig[idx+3];
			orig[idx+0] = clamp_byte( y + co - cg );	/* R */
			orig[idx+1] = clamp_byte( y + cg );		/* G */
			orig[idx+2] = clamp_byte( y - co - cg );	/* B */
			orig[idx+3] = alpha;				/* A */
		}
	}
	return 0;
}
float
find_max_RGBE
(
unsigned char *image,
int width, int height
)
{
float max_val = 0.0f;
unsigned char *img = image;
int i, j;
for( i = width * height; i > 0; --i )
{
/* float scale = powf( 2.0f, img[3] - 128.0f ) / 255.0f; */
float scale = (float)ldexp( 1.0f / 255.0f, (int)(img[3]) - 128 );
for( j = 0; j < 3; ++j )
{
if( img[j] * scale > max_val )
{
max_val = img[j] * scale;
}
}
/* next pixel */
img += 4;
}
return max_val;
}
int
RGBE_to_RGBdivA
(
unsigned char *image,
int width, int height,
int rescale_to_max
)
{
/* local variables */
int i, iv;
unsigned char *img = image;
float scale = 1.0f;
/* error check */
if( (!image) || (width < 1) || (height < 1) )
{
return 0;
}
/* convert (note: no negative numbers, but 0.0 is possible) */
if( rescale_to_max )
{
scale = 255.0f / find_max_RGBE( image, width, height );
}
for( i = width * height; i > 0; --i )
{
/* decode this pixel, and find the max */
float r,g,b,e, m;
/* e = scale * powf( 2.0f, img[3] - 128.0f ) / 255.0f; */
e = scale * (float)ldexp( 1.0f / 255.0f, (int)(img[3]) - 128 );
r = e * img[0];
g = e * img[1];
b = e * img[2];
m = (r > g) ? r : g;
m = (b > m) ? b : m;
/* and encode it into RGBdivA */
iv = (m != 0.0f) ? (int)(255.0f / m) : 1;
iv = (iv < 1) ? 1 : iv;
img[3] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * r + 0.5f);
img[0] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * g + 0.5f);
img[1] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * b + 0.5f);
img[2] = (iv > 255) ? 255 : iv;
/* and on to the next pixel */
img += 4;
}
return 1;
}
int
RGBE_to_RGBdivA2
(
unsigned char *image,
int width, int height,
int rescale_to_max
)
{
/* local variables */
int i, iv;
unsigned char *img = image;
float scale = 1.0f;
/* error check */
if( (!image) || (width < 1) || (height < 1) )
{
return 0;
}
/* convert (note: no negative numbers, but 0.0 is possible) */
if( rescale_to_max )
{
scale = 255.0f * 255.0f / find_max_RGBE( image, width, height );
}
for( i = width * height; i > 0; --i )
{
/* decode this pixel, and find the max */
float r,g,b,e, m;
/* e = scale * powf( 2.0f, img[3] - 128.0f ) / 255.0f; */
e = scale * (float)ldexp( 1.0f / 255.0f, (int)(img[3]) - 128 );
r = e * img[0];
g = e * img[1];
b = e * img[2];
m = (r > g) ? r : g;
m = (b > m) ? b : m;
/* and encode it into RGBdivA */
iv = (m != 0.0f) ? (int)sqrtf( 255.0f * 255.0f / m ) : 1;
iv = (iv < 1) ? 1 : iv;
img[3] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * img[3] * r / 255.0f + 0.5f);
img[0] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * img[3] * g / 255.0f + 0.5f);
img[1] = (iv > 255) ? 255 : iv;
iv = (int)(img[3] * img[3] * b / 255.0f + 0.5f);
img[2] = (iv > 255) ? 255 : iv;
/* and on to the next pixel */
img += 4;
}
return 1;
}
| {
"pile_set_name": "Github"
} |
# recmetrics
A Python library of evaluation metrics and diagnostic tools for recommender systems.
Install with `pip install recmetrics`.
Full documentation coming soon.... In the interim, the python notebook in this repo, `example.ipynb`, contains examples of these plots and metrics in action using the [MovieLens 20M Dataset](https://grouplens.org/datasets/movielens/20m/). You can also view my [Medium Article](https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093).
<i>This library is an open source project. The goal is to create a go-to source for metrics related to recommender systems. I have begun by adding metrics and plots I found useful during my career as a Data Scientist at a retail company, and encourage the community to contribute. If you would like to see a new metric in this package, or find a bug, or have suggestions for improvement, please contribute!</i>
<img src="https://media.giphy.com/media/YAnpMSHcurJVS/giphy.gif" width=200>
## Long Tail Plot
`recmetrics.long_tail_plot()`
The Long Tail plot is used to explore popularity patterns in user-item interaction data. Typically, a small number of items will make up most of the volume of interactions and this is referred to as the "head". The "long tail" typically consists of most products, but make up a small percent of interaction volume.
<img src="images/long_tail_plot.png" alt="Long Tail Plot" width=600>
The items in the "long tail" usually do not have enough interactions to accurately be recommended using user-based recommender systems like collaborative filtering due to inherent popularity bias in these models and data sparsity. Many recommender systems require a certain level of sparsity to train. A good recommender must balance sparsity requirements with popularity bias.
## Mar@K and Map@K
`recmetrics.mark()`
`recmetrics.mark_plot()`
`recmetrics.mapk_plot()`
Mean Average Recall at K (Mar@k) measures the recall at the kth recommendation. Mar@k considers the order of recommendations, penalizing correct recommendations based on where they appear in the list. Map@k and Mar@k are ideal for evaluating an ordered list of recommendations. There is a fantastic implementation of Mean Average Precision at K (Map@k) available [here](https://github.com/benhamner/Metrics), so I have not included it in this repo.
<img src="images/mark_plot.png" alt="Mar@k" width=600>
Map@k and Mar@k metrics suffer from popularity bias. If a model works well on popular items, the majority of recommendations will be correct, and Mar@k and Map@k can appear to be high while the model may not be making useful or personalized recommendations.
## Coverage
`recmetrics.prediction_coverage()` `recmetrics.catalog_coverage()`
`recmetrics.coverage_plot()`
Coverage is the percent of items that the recommender is able to recommend. It is referred to as prediction coverage and is given by the following formula.
<img src="images/coverage_equation.gif" alt="Coverage Equation" width=200>
Where 'I' is the number of unique items the model recommends in the test data, and 'N' is the total number of unique items in the training data.
Catalog coverage is the rate of distinct items recommended to users over a period of time. For this purpose, the catalog coverage function also takes a parameter 'k', the number of observed recommendation lists. In essence, both metrics quantify the proportion of items that the system is able to work with.
<img src="images/coverage_plot.png" alt="Coverage Plot" width=400>
## Novelty
`recmetrics.novelty()`
Novelty measures the capacity of a recommender system to propose novel and unexpected items that a user is unlikely to already know about. It uses the self-information of the recommended items, calculating the mean self-information per top-N recommendation list and averaging over all users.
<img src="images/novelty.gif" alt="Coverage Equation" width=200>
Where |U| is the number of users, count(i) is the number of users who consumed the specific item, and N is the length of the recommendation list.
## Personalization
`recmetrics.personalization()`
Personalization is the dissimilarity between user's lists of recommendations.
A high score indicates that users' recommendations are different from each other.
A low personalization score indicates that users' recommendations are very similar.
For example, if two users have recommendations lists [A,B,C,D] and [A,B,C,Y], the personalization can be calculated as:
<img src="images/personalization_code.png" alt="Coverage Plot" width=400>
## Intra-list Similarity
`recmetrics.intra_list_similarity()`
Intra-list similarity uses a feature matrix to calculate the cosine similarity between the items in a list of recommendations.
The feature matrix is indexed by the item id and includes one-hot-encoded features.
If a recommender system is recommending lists of very similar items, the intra-list similarity will be high.
<img src="images/ils_matrix.png" alt="Coverage Plot" width=400>
<img src="images/ils_code.png" alt="Coverage Plot" width=400>
## MSE and RMSE
`recmetrics.mse()`
`recmetrics.rmse()`
Mean Squared Error (MSE) and Root Mean Squared Error (RMSE) are used to evaluate the accuracy of predicted values yhat such as ratings compared to the true value, y.
These can also be used to evaluate the reconstruction of a ratings matrix.
<img src="images/mse.gif" alt="MSE Equation" width=200>
<img src="images/rmse.gif" alt="RMSE Equation" width=200>
## Predicted Class Probability Distribution Plots
`recmetrics.class_separation_plot()`
This is a plot of the distribution of the predicted class probabilities from a classification model. The plot is typically used to visualize how well a model is able to distinguish between two classes, and can assist a Data Scientist in picking the optimal decision threshold to classify observations to class 1 (0.5 is usually the default threshold for this method). The color of the distribution plots represent true class 0 and 1, and everything to the right of the decision threshold is classified as class 0.
<img src="images/class_probs.png" alt="binary class probs" width=400>
This plot can also be used to visualize the recommendation scores in two ways.
In this example, and item is considered class 1 if it is rated more than 3 stars, and class 0 if it is not. This example shows the performance of a model that recommends an item when the predicted 5-star rating is greater than 3 (plotted as a vertical decision threshold line). This plot shows that the recommender model will perform better if items with a predicted rating of 3.5 stars or greater is recommended.
<img src="images/rec_scores.png" alt="ratings scores" width=500>
The raw predicted 5 star rating for all recommended movies could be visualized with this plot to see the optimal predicted rating score to threshold into a prediction of that movie. This plot also visualizes how well the model is able to distinguish between each rating value.
<img src="images/ratings_distribution.png" alt="ratings distributions" width=500>
## ROC and AUC
`recmetrics.roc_plot()`
The Receiver Operating Characteristic (ROC) plot is used to visualize the trade-off between true positives and false positives for binary classification. The Area Under the Curve (AUC) is sometimes used as an evaluation metrics.
<img src="images/ROC.png" alt="ROC" width=600>
## Recommender Precision and Recall
`recmetrics.recommender_precision()`
`recmetrics.recommender_recall()`
Recommender precision and recall uses all recommended items over all users to calculate traditional precision and recall. A recommended item that was actually interacted with in the test data is considered an accurate prediction, and a recommended item that is not interacted with, or received a poor interaction value, can be considered an inaccurate recommendation. The user can assign these values based on their judgment.
## Precision and Recall Curve
`recmetrics.precision_recall_plot()`
The Precision and Recall plot is used to visualize the trade-off between precision and recall for one class in a classification.
<img src="images/PrecisionRecallCurve.png" alt="PandRcurve" width=400>
## Confusion Matrix
`recmetrics.make_confusion_matrix()`
Traditional confusion matrix used to evaluate false positive and false negative trade-offs.
<img src="images/confusion_matrix.png" alt="PandRcurve" width=400>
## Rank Order Analysis
`recmetrics.rank_order_analysis()`
coming soon...
| {
"pile_set_name": "Github"
} |
// Hide empty <a> tag within highlighted CodeBlock for screen reader accessibility (see https://github.com/jgm/pandoc/issues/6352#issuecomment-626106786) -->
// v0.0.1
// Written by JooYoung Seo (jooyoung@psu.edu) and Atsushi Yasumoto on June 1st, 2020.
document.addEventListener('DOMContentLoaded', function() {
  // Highlighted blocks produced by pandoc carry the "sourceCode" class.
  var codeBlocks = document.getElementsByClassName("sourceCode");
  for (var blockIdx = 0; blockIdx < codeBlocks.length; blockIdx++) {
    var anchors = codeBlocks[blockIdx].getElementsByTagName('a');
    for (var anchorIdx = 0; anchorIdx < anchors.length; anchorIdx++) {
      // Anchors with no text content confuse screen readers; hide them.
      if (anchors[anchorIdx].innerHTML === "") {
        anchors[anchorIdx].setAttribute('aria-hidden', 'true');
      }
    }
  }
});
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2002-2020 "Neo4j,"
* Neo4j Sweden AB [http://neo4j.com]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.neo4j.cypher.internal.ast.semantics
import org.neo4j.cypher.internal.expressions
import org.neo4j.cypher.internal.util.DummyPosition
import org.neo4j.cypher.internal.util.symbols.CTBoolean
import org.neo4j.cypher.internal.util.symbols.CTInteger
import org.neo4j.cypher.internal.util.symbols.CTNode
// Semantic-analysis tests for the XOR infix operator: both operands must be
// Boolean, and the result type is Boolean.
class XorTest extends InfixExpressionTestBase(expressions.Xor(_, _)(DummyPosition(0))) {
// Boolean xor Boolean is well-typed and yields Boolean.
test("shouldCombineBooleans") {
testValidTypes(CTBoolean, CTBoolean)(CTBoolean)
}
// Integer operands are rejected with a type-mismatch error, on either side.
test("shouldCoerceArguments") {
testInvalidApplication(CTInteger, CTBoolean)("Type mismatch: expected Boolean but was Integer")
testInvalidApplication(CTBoolean, CTInteger)("Type mismatch: expected Boolean but was Integer")
}
// Non-boolean operands such as Node are likewise rejected, on either side.
test("shouldReturnErrorIfInvalidArgumentTypes") {
testInvalidApplication(CTNode, CTBoolean)("Type mismatch: expected Boolean but was Node")
testInvalidApplication(CTBoolean, CTNode)("Type mismatch: expected Boolean but was Node")
}
}
| {
"pile_set_name": "Github"
} |
# /etc/strongswan.conf - strongSwan configuration file
charon {
load = random nonce aes sha1 sha2 pem pkcs1 gmp x509 curl revocation hmac vici kernel-netlink socket-default updown sqlite sql attr-sql
plugins {
sql {
database = sqlite:///etc/db.d/ipsec.db
}
attr-sql {
database = sqlite:///etc/db.d/ipsec.db
}
}
}
pool {
load = sqlite
}
| {
"pile_set_name": "Github"
} |
/*
* Page cache for QEMU
* The cache is base on a hash of the page address
*
* Copyright 2012 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Orit Wasserman <owasserm@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <stdbool.h>
#include <glib.h>
#include "qemu-common.h"
#include "migration/page_cache.h"
#ifdef DEBUG_CACHE
#define DPRINTF(fmt, ...) \
do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
typedef struct CacheItem CacheItem;
/* One slot of the direct-mapped page cache. */
struct CacheItem {
/* guest address of the cached page; -1 marks an empty slot */
uint64_t it_addr;
/* insertion stamp; on resize collisions the newer entry wins */
uint64_t it_age;
/* page payload (page_size bytes), lazily allocated on first insert */
uint8_t *it_data;
};
/* Direct-mapped cache of guest pages, indexed by (addr / page_size). */
struct PageCache {
/* slot array of max_num_items entries */
CacheItem *page_cache;
/* size in bytes of each cached page */
unsigned int page_size;
/* number of slots; kept a power of two so lookup can mask */
int64_t max_num_items;
/* last age stamp handed out by cache_insert() */
uint64_t max_item_age;
/* number of slots whose it_data is currently allocated */
int64_t num_items;
};
/*
 * Allocate a direct-mapped page cache.
 *
 * num_pages: requested number of slots; rounded down to a power of two
 *            so that slot lookup can use a simple mask.
 * page_size: size in bytes of each cached page.
 *
 * Returns the new cache, or NULL on invalid arguments or allocation
 * failure (g_try_malloc is used deliberately so OOM is reported rather
 * than fatal).
 */
PageCache *cache_init(int64_t num_pages, unsigned int page_size)
{
    int64_t i;
    PageCache *cache;

    if (num_pages <= 0) {
        DPRINTF("invalid number of pages\n");
        return NULL;
    }
    /* a zero page size would make every later slot lookup divide by zero */
    if (page_size == 0) {
        DPRINTF("invalid page size\n");
        return NULL;
    }

    /* We prefer not to abort if there is no memory */
    cache = g_try_malloc(sizeof(*cache));
    if (!cache) {
        DPRINTF("Failed to allocate cache\n");
        return NULL;
    }
    /* round down to the nearest power of 2 */
    if (!is_power_of_2(num_pages)) {
        num_pages = pow2floor(num_pages);
        DPRINTF("rounding down to %" PRId64 "\n", num_pages);
    }
    cache->page_size = page_size;
    cache->num_items = 0;
    cache->max_item_age = 0;
    cache->max_num_items = num_pages;

    DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);

    /* We prefer not to abort if there is no memory */
    cache->page_cache = g_try_malloc((cache->max_num_items) *
                                     sizeof(*cache->page_cache));
    if (!cache->page_cache) {
        DPRINTF("Failed to allocate cache->page_cache\n");
        g_free(cache);
        return NULL;
    }
    /* mark every slot empty */
    for (i = 0; i < cache->max_num_items; i++) {
        cache->page_cache[i].it_data = NULL;
        cache->page_cache[i].it_age = 0;
        cache->page_cache[i].it_addr = -1;
    }

    return cache;
}
/* Free a cache, its slot array, and every cached page copy. */
void cache_fini(PageCache *cache)
{
    int64_t slot;

    g_assert(cache);
    g_assert(cache->page_cache);

    for (slot = 0; slot < cache->max_num_items; slot++) {
        g_free(cache->page_cache[slot].it_data);
    }

    g_free(cache->page_cache);
    cache->page_cache = NULL;
    g_free(cache);
}
/* Map a guest address to its direct-mapped slot index. */
static size_t cache_get_cache_pos(const PageCache *cache,
                                  uint64_t address)
{
    g_assert(cache->max_num_items);
    /* max_num_items is a power of two, so masking selects the slot */
    return (address / cache->page_size) & (cache->max_num_items - 1);
}
/* True if addr's slot currently holds exactly addr's page. */
bool cache_is_cached(const PageCache *cache, uint64_t addr)
{
    size_t slot;

    g_assert(cache);
    g_assert(cache->page_cache);

    slot = cache_get_cache_pos(cache, addr);
    return cache->page_cache[slot].it_addr == addr;
}
/* Return the slot that addr maps to (which may hold a different page). */
static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
{
    size_t slot;

    g_assert(cache);
    g_assert(cache->page_cache);

    slot = cache_get_cache_pos(cache, addr);
    return &cache->page_cache[slot];
}
/* Payload stored in addr's slot; NULL if nothing was ever inserted there. */
uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    CacheItem *item = cache_get_by_addr(cache, addr);
    return item->it_data;
}
/*
 * Store a copy of the page at guest address addr into its slot, evicting
 * whatever page occupied the slot before.  Returns 0 on success, -1 if
 * the page buffer could not be allocated.
 */
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata)
{
CacheItem *it = NULL;
g_assert(cache);
g_assert(cache->page_cache);
/* actual update of entry */
it = cache_get_by_addr(cache, addr);
/* allocate page */
if (!it->it_data) {
it->it_data = g_try_malloc(cache->page_size);
if (!it->it_data) {
DPRINTF("Error allocating page\n");
return -1;
}
cache->num_items++;
}
/* the slot buffer is reused, so a colliding older page is overwritten */
memcpy(it->it_data, pdata, cache->page_size);
/* the age stamp is used by cache_resize() to keep the newer of two
 * colliding entries */
it->it_age = ++cache->max_item_age;
it->it_addr = addr;
return 0;
}
/*
 * Resize the cache to (the power-of-two floor of) new_num_pages slots by
 * allocating a fresh cache and migrating every cached page into it.  When
 * two old pages collide in the new table, the entry with the newer age
 * stamp wins and the loser's buffer is freed.  Returns the new slot
 * count, or -1 on failure (the old cache is left intact in that case).
 */
int64_t cache_resize(PageCache *cache, int64_t new_num_pages)
{
PageCache *new_cache;
int64_t i;
CacheItem *old_it, *new_it;
g_assert(cache);
/* cache was not inited */
if (cache->page_cache == NULL) {
return -1;
}
/* same size */
if (pow2floor(new_num_pages) == cache->max_num_items) {
return cache->max_num_items;
}
new_cache = cache_init(new_num_pages, cache->page_size);
if (!(new_cache)) {
DPRINTF("Error creating new cache\n");
return -1;
}
/* move all data from old cache */
for (i = 0; i < cache->max_num_items; i++) {
old_it = &cache->page_cache[i];
if (old_it->it_addr != -1) {
/* check for collision, if there is, keep MRU page */
new_it = cache_get_by_addr(new_cache, old_it->it_addr);
if (new_it->it_data && new_it->it_age >= old_it->it_age) {
/* keep the MRU page */
g_free(old_it->it_data);
} else {
if (!new_it->it_data) {
new_cache->num_items++;
}
/* transfer buffer ownership from the old slot to the new one */
g_free(new_it->it_data);
new_it->it_data = old_it->it_data;
new_it->it_age = old_it->it_age;
new_it->it_addr = old_it->it_addr;
}
}
}
/* steal the new cache's slot array and discard its shell; every losing
 * page buffer was already freed above */
g_free(cache->page_cache);
cache->page_cache = new_cache->page_cache;
cache->max_num_items = new_cache->max_num_items;
cache->num_items = new_cache->num_items;
g_free(new_cache);
return cache->max_num_items;
}
| {
"pile_set_name": "Github"
} |
package headfirst.proxy.gumballmonitor;
/**
 * State entered right after the customer turns the crank: a gumball is
 * being dispensed, so every other action is refused until dispense()
 * transitions the machine to its next state.
 */
public class SoldState implements State {
GumballMachine gumballMachine;
// The machine whose state transitions this object drives.
public SoldState(GumballMachine gumballMachine) {
this.gumballMachine = gumballMachine;
}
// Refused: a dispense is already in progress.
public void insertQuarter() {
System.out.println("Please wait, we're already giving you a gumball");
}
// Refused: the quarter was already consumed by the crank turn.
public void ejectQuarter() {
System.out.println("Sorry, you already turned the crank");
}
// Refused: one crank turn dispenses exactly one gumball.
public void turnCrank() {
System.out.println("Turning twice doesn't get you another gumball!");
}
// Release a gumball, then move to NoQuarter or SoldOut depending on the
// remaining inventory.
public void dispense() {
gumballMachine.releaseBall();
try {
if (gumballMachine.getCount() > 0) {
gumballMachine.setState(gumballMachine.getNoQuarterState());
} else {
System.out.println("Oops, out of gumballs!");
gumballMachine.setState(gumballMachine.getSoldOutState());
}
} catch (Exception e) {
e.printStackTrace();
}
}
// Human-readable description of this state.
public String toString() {
return "dispensing a gumball";
}
}
| {
"pile_set_name": "Github"
} |
/***
*
* Copyright (c) 1996-2002, Valve LLC. All rights reserved.
*
* This product contains software technology licensed from Id
* Software, Inc. ("Id Technology"). Id Technology (c) 1996 Id Software, Inc.
* All Rights Reserved.
*
* Use, distribution, and modification of this source code and/or resulting
* object code is restricted to non-commercial enhancements to products from
* Valve LLC. All other use, distribution, or modification is prohibited
* without written permission from Valve LLC.
*
****/
#include "extdll.h"
#include "util.h"
#include "cbase.h"
#include "monsters.h"
#include "weapons.h"
#include "nodes.h"
#include "player.h"
#include "usercmd.h"
#include "entity_state.h"
#include "demo_api.h"
#include "pm_defs.h"
#include "event_api.h"
#include "r_efx.h"
#include "../hud_iface.h"
#include "../com_weapons.h"
#include "../demo.h"
extern globalvars_t *gpGlobals;
extern int g_iUser1;

// Pool of client side entities/entvars_t
static entvars_t ev[ 32 ];
static int num_ents = 0;

// The entity we'll use to represent the local client
static CBasePlayer player;

// Local version of game .dll global variables ( time, etc. )
static globalvars_t Globals;

// Weapon objects indexed by weapon id (filled in by HUD_PrepEntity)
static CBasePlayerWeapon *g_pWpns[ 32 ];

// Velocity magnitude to apply for the gauss kick (see HUD_PostRunCmd)
float g_flApplyVel = 0.0;
// Non-zero while a predicted gauss shot is pending
int g_irunninggausspred = 0;

// Last predicted eye position (see HUD_SetLastOrg / HUD_GetLastOrg)
vec3_t previousorigin;

// HLDM Weapon placeholder entities.
CGlock g_Glock;
CCrowbar g_Crowbar;
CPython g_Python;
CMP5 g_Mp5;
CCrossbow g_Crossbow;
CShotgun g_Shotgun;
CRpg g_Rpg;
CGauss g_Gauss;
CEgon g_Egon;
CHgun g_HGun;
CHandGrenade g_HandGren;
CSatchel g_Satchel;
CTripmine g_Tripmine;
CSqueak g_Snark;
/*
======================
AlertMessage

Print debug messages to console
======================
*/
void AlertMessage( ALERT_TYPE atype, char *szFmt, ... )
{
	va_list argptr;
	static char string[1024];

	va_start (argptr, szFmt);
	// Fix: use vsnprintf so an expansion longer than 1024 bytes cannot
	// overflow the static buffer (vsprintf has no bound).
	vsnprintf (string, sizeof( string ), szFmt, argptr);
	va_end (argptr);

	gEngfuncs.Con_Printf( "cl: " );
	// Fix: print through a "%s" format so any '%' inside the already
	// formatted message is not re-interpreted as a format specifier.
	gEngfuncs.Con_Printf( "%s", string );
}
// Returns whether the current game is multiplayer (max clients > 1).
// Mostly used by the client side weapons.
bool bIsMultiplayer ( void )
{
	return gEngfuncs.GetMaxClients() != 1;
}
//Just loads a v_ model.
// Loads szViewModel and assigns it to the player's viewmodel field.
void LoadVModel ( char *szViewModel, CBasePlayer *m_pPlayer )
{
	gEngfuncs.CL_LoadModel( szViewModel, &m_pPlayer->pev->viewmodel );
}
/*
=====================
HUD_PrepEntity

Links the raw entity to an entvars_s holder.  If a player is passed in as the owner, then
we set up the m_pPlayer field.
=====================
*/
void HUD_PrepEntity( CBaseEntity *pEntity, CBasePlayer *pWeaponOwner )
{
	// Fix: guard the fixed-size entvars pool so a 33rd registration cannot
	// write past the end of ev[].
	if ( num_ents >= (int)( sizeof( ev ) / sizeof( ev[ 0 ] ) ) )
		return;

	memset( &ev[ num_ents ], 0, sizeof( entvars_t ) );
	pEntity->pev = &ev[ num_ents++ ];

	pEntity->Precache();
	pEntity->Spawn();

	if ( pWeaponOwner )
	{
		ItemInfo info;

		((CBasePlayerWeapon *)pEntity)->m_pPlayer = pWeaponOwner;
		((CBasePlayerWeapon *)pEntity)->GetItemInfo( &info );

		// Fix: validate the weapon id before indexing g_pWpns[ 32 ].
		if ( info.iId >= 0 && info.iId < (int)( sizeof( g_pWpns ) / sizeof( g_pWpns[ 0 ] ) ) )
			g_pWpns[ info.iId ] = (CBasePlayerWeapon *)pEntity;
	}
}
/*
=====================
CBaseEntity :: Killed

If weapons code "kills" an entity, just set its effects to EF_NODRAW
(client side entities are never actually removed, only hidden).
=====================
*/
void CBaseEntity :: Killed( entvars_t *pevAttacker, int iGib )
{
	pev->effects |= EF_NODRAW;
}
/*
=====================
CBasePlayerWeapon :: DefaultReload

Begins a reload if there is reserve ammo and room in the clip; sets the
player's next-attack delay and plays the reload animation.
=====================
*/
BOOL CBasePlayerWeapon :: DefaultReload( int iClipSize, int iAnim, float fDelay, int body )
{
	const int iReserve = m_pPlayer->m_rgAmmo[m_iPrimaryAmmoType];

	// Nothing left in reserve -- cannot reload.
	if ( iReserve <= 0 )
		return FALSE;

	// Rounds that would actually be transferred into the clip.
	const int iTransfer = min( iClipSize - m_iClip, iReserve );
	if ( iTransfer == 0 )
		return FALSE;

	m_pPlayer->m_flNextAttack = UTIL_WeaponTimeBase() + fDelay;

	//!!UNDONE -- reload sound goes here !!!
	SendWeaponAnim( iAnim, UseDecrement(), body );

	m_fInReload = TRUE;
	m_flTimeWeaponIdle = UTIL_WeaponTimeBase() + 3;

	return TRUE;
}
/*
=====================
CBasePlayerWeapon :: CanDeploy

A weapon may be deployed if it uses no ammo at all, or if any of its
primary reserve, secondary reserve, or loaded clip is non-empty.
=====================
*/
BOOL CBasePlayerWeapon :: CanDeploy( void )
{
	if ( !pszAmmo1() )
	{
		// this weapon doesn't use ammo, can always deploy.
		return TRUE;
	}

	// Start with the primary reserve; the original redundant second
	// pszAmmo1() check is folded away.
	BOOL bHasAmmo = ( m_pPlayer->m_rgAmmo[m_iPrimaryAmmoType] != 0 );

	if ( pszAmmo2() )
		bHasAmmo |= ( m_pPlayer->m_rgAmmo[m_iSecondaryAmmoType] != 0 );

	if ( m_iClip > 0 )
		bHasAmmo = TRUE;

	return bHasAmmo ? TRUE : FALSE;
}
/*
=====================
CBasePlayerWeapon :: DefaultDeploy

Brings the weapon up: loads the view model, plays the draw animation and
sets the standard deploy delays.
=====================
*/
BOOL CBasePlayerWeapon :: DefaultDeploy( char *szViewModel, char *szWeaponModel, int iAnim, char *szAnimExt, int skiplocal, int body )
{
	if ( !CanDeploy() )
		return FALSE;

	gEngfuncs.CL_LoadModel( szViewModel, &m_pPlayer->pev->viewmodel );

	SendWeaponAnim( iAnim, skiplocal, body );

	// Deploying cancels any pending gauss prediction.
	g_irunninggausspred = false;

	// Standard draw delays: half a second before firing, one second idle.
	m_pPlayer->m_flNextAttack = 0.5;
	m_flTimeWeaponIdle = 1.0;

	return TRUE;
}
/*
=====================
CBasePlayerWeapon :: PlayEmptySound

Plays the dry-fire click once per trigger pull; the flag is re-armed by
ResetEmptySound.  Always returns 0 on the client.
=====================
*/
BOOL CBasePlayerWeapon :: PlayEmptySound( void )
{
	if ( !m_iPlayEmptySound )
		return 0;

	HUD_PlaySound( "weapons/357_cock1.wav", 0.8 );
	m_iPlayEmptySound = 0;
	return 0;
}
/*
=====================
CBasePlayerWeapon :: ResetEmptySound

Re-arms the dry-fire click so PlayEmptySound will play it again.
=====================
*/
void CBasePlayerWeapon :: ResetEmptySound( void )
{
	m_iPlayEmptySound = 1;
}
/*
=====================
CBasePlayerWeapon::Holster

Put away weapon: cancels any in-progress reload and gauss prediction,
and hides the view model.
=====================
*/
void CBasePlayerWeapon::Holster( int skiplocal /* = 0 */ )
{
	m_fInReload = FALSE; // cancel any reload in progress.
	g_irunninggausspred = false;
	m_pPlayer->pev->viewmodel = 0;
}
/*
=====================
CBasePlayerWeapon::SendWeaponAnim

Animate weapon model.  Records the animation on the player and forwards
it to the HUD.  Note: the skiplocal parameter is not used here.
=====================
*/
void CBasePlayerWeapon::SendWeaponAnim( int iAnim, int skiplocal, int body )
{
	m_pPlayer->pev->weaponanim = iAnim;

	HUD_SendWeaponAnim( iAnim, body, 0 );
}
/*
=====================
CBaseEntity::FireBulletsPlayer

Only produces random numbers to match the server ones, so client and
server prediction stay in sync.  Returns the x/y spread of the LAST shot
scaled by vecSpread (each loop iteration overwrites x and y, matching
the server implementation).
=====================
*/
Vector CBaseEntity::FireBulletsPlayer ( ULONG cShots, Vector vecSrc, Vector vecDirShooting, Vector vecSpread, float flDistance, int iBulletType, int iTracerFreq, int iDamage, entvars_t *pevAttacker, int shared_rand )
{
	// Fix: initialize so a call with cShots == 0 returns a zero vector
	// instead of reading uninitialized stack memory (UB).
	float x = 0, y = 0, z = 0;

	for ( ULONG iShot = 1; iShot <= cShots; iShot++ )
	{
		if ( pevAttacker == NULL )
		{
			// get circular gaussian spread
			do {
				x = RANDOM_FLOAT(-0.5, 0.5) + RANDOM_FLOAT(-0.5, 0.5);
				y = RANDOM_FLOAT(-0.5, 0.5) + RANDOM_FLOAT(-0.5, 0.5);
				z = x*x+y*y;
			} while (z > 1);
		}
		else
		{
			//Use player's random seed.
			// get circular gaussian spread
			x = UTIL_SharedRandomFloat( shared_rand + iShot, -0.5, 0.5 ) + UTIL_SharedRandomFloat( shared_rand + ( 1 + iShot ) , -0.5, 0.5 );
			y = UTIL_SharedRandomFloat( shared_rand + ( 2 + iShot ), -0.5, 0.5 ) + UTIL_SharedRandomFloat( shared_rand + ( 3 + iShot ), -0.5, 0.5 );
			z = x * x + y * y;
		}
	}

	return Vector ( x * vecSpread.x, y * vecSpread.y, 0.0 );
}
/*
=====================
CBasePlayerWeapon::ItemPostFrame

Handles weapon firing, reloading, etc.  Client-side mirror of the
server's per-frame weapon logic; the branch order (secondary attack,
primary attack, reload, idle) matches the server so prediction agrees.
=====================
*/
void CBasePlayerWeapon::ItemPostFrame( void )
{
	// Complete a pending reload once the player's next-attack delay has elapsed.
	if ((m_fInReload) && (m_pPlayer->m_flNextAttack <= 0.0))
	{
#if 0 // FIXME, need ammo on client to make this work right
		// complete the reload.
		int j = min( iMaxClip() - m_iClip, m_pPlayer->m_rgAmmo[m_iPrimaryAmmoType]);

		// Add them to the clip
		m_iClip += j;
		m_pPlayer->m_rgAmmo[m_iPrimaryAmmoType] -= j;
#else
		// NOTE(review): placeholder -- the client has no authoritative ammo
		// counts, so the clip is bumped by a fixed 10; the server corrects it.
		m_iClip += 10;
#endif
		m_fInReload = FALSE;
	}

	// Secondary attack has priority over primary.
	if ((m_pPlayer->pev->button & IN_ATTACK2) && (m_flNextSecondaryAttack <= 0.0))
	{
		if ( pszAmmo2() && !m_pPlayer->m_rgAmmo[SecondaryAmmoIndex()] )
		{
			m_fFireOnEmpty = TRUE;
		}

		SecondaryAttack();
		m_pPlayer->pev->button &= ~IN_ATTACK2;
	}
	else if ((m_pPlayer->pev->button & IN_ATTACK) && (m_flNextPrimaryAttack <= 0.0))
	{
		// Empty clip (for clip weapons) or empty reserve (for clipless
		// weapons) marks a dry-fire attempt.
		if ( (m_iClip == 0 && pszAmmo1()) || (iMaxClip() == -1 && !m_pPlayer->m_rgAmmo[PrimaryAmmoIndex()] ) )
		{
			m_fFireOnEmpty = TRUE;
		}

		PrimaryAttack();
	}
	else if ( m_pPlayer->pev->button & IN_RELOAD && iMaxClip() != WEAPON_NOCLIP && !m_fInReload )
	{
		// reload when reload is pressed, or if no buttons are down and weapon is empty.
		Reload();
	}
	else if ( !(m_pPlayer->pev->button & (IN_ATTACK|IN_ATTACK2) ) )
	{
		// no fire buttons down
		m_fFireOnEmpty = FALSE;

		// weapon is useable. Reload if empty and weapon has waited as long as it has to after firing
		if ( m_iClip == 0 && !(iFlags() & ITEM_FLAG_NOAUTORELOAD) && m_flNextPrimaryAttack < 0.0 )
		{
			Reload();
			return;
		}

		WeaponIdle( );
		return;
	}

	// catch all
	if ( ShouldWeaponIdle() )
	{
		WeaponIdle();
	}
}
/*
=====================
CBasePlayer::SelectItem

Switch weapons.
NOTE(review): on the client pItem is never looked up (it stays NULL), so
the early return below always fires and this function is effectively a
stub.  The remaining code appears to mirror the server-side version and
is kept for reference -- confirm before removing.
=====================
*/
void CBasePlayer::SelectItem(const char *pstr)
{
	if (!pstr)
		return;

	CBasePlayerItem *pItem = NULL;

	// Always true: pItem is never assigned above.
	if (!pItem)
		return;

	if (pItem == m_pActiveItem)
		return;

	if (m_pActiveItem)
		m_pActiveItem->Holster( );

	m_pLastItem = m_pActiveItem;
	m_pActiveItem = pItem;

	if (m_pActiveItem)
	{
		m_pActiveItem->Deploy( );
	}
}
/*
=====================
CBasePlayer::SelectLastItem
=====================
*/
void CBasePlayer::SelectLastItem(void)
{
if (!m_pLastItem)
{
return;
}
if ( m_pActiveItem && !m_pActiveItem->CanHolster() )
{
return;
}
if (m_pActiveItem)
m_pActiveItem->Holster( );
CBasePlayerItem *pTemp = m_pActiveItem;
m_pActiveItem = m_pLastItem;
m_pLastItem = pTemp;
m_pActiveItem->Deploy( );
}
/*
=====================
CBasePlayer::Killed

Called when predicted health drops to zero (see HUD_WeaponsPostThink).
=====================
*/
void CBasePlayer::Killed( entvars_t *pevAttacker, int iGib )
{
	// Holster weapon immediately, to allow it to cleanup
	if ( m_pActiveItem )
		m_pActiveItem->Holster( );

	g_irunninggausspred = false;
}
/*
=====================
CBasePlayer::Spawn

Called when predicted health goes back above zero: re-deploys the active
weapon and clears pending gauss prediction.
=====================
*/
void CBasePlayer::Spawn( void )
{
	if (m_pActiveItem)
		m_pActiveItem->Deploy( );

	g_irunninggausspred = false;
}
/*
=====================
UTIL_TraceLine

Don't actually trace, but act like the trace didn't hit anything
(flFraction == 1.0 means the full line was traversed unobstructed).
=====================
*/
void UTIL_TraceLine( const Vector &vecStart, const Vector &vecEnd, IGNORE_MONSTERS igmon, edict_t *pentIgnore, TraceResult *ptr )
{
	memset( ptr, 0, sizeof( *ptr ) );
	ptr->flFraction = 1.0;
}
/*
=====================
UTIL_ParticleBox

For debugging, draw a box around a player made out of particles.
=====================
*/
void UTIL_ParticleBox( CBasePlayer *player, float *mins, float *maxs, float life, unsigned char r, unsigned char g, unsigned char b )
{
	int i;
	vec3_t mmin, mmax;

	// Translate the player-relative bounds into world space.
	for ( i = 0; i < 3; i++ )
	{
		mmin[ i ] = player->pev->origin[ i ] + mins[ i ];
		mmax[ i ] = player->pev->origin[ i ] + maxs[ i ];
	}

	// NOTE(review): the life/r/g/b parameters are ignored; hard-coded
	// values (5.0, 0, 255, 0) are passed instead -- confirm intentional.
	gEngfuncs.pEfxAPI->R_ParticleBox( (float *)&mmin, (float *)&mmax, 5.0, 0, 255, 0 );
}
/*
=====================
UTIL_ParticleBoxes

For debugging, draw boxes for other collidable players using the
prediction physent list.
=====================
*/
void UTIL_ParticleBoxes( void )
{
	int idx;
	physent_t *pe;
	cl_entity_t *player;
	vec3_t mins, maxs;

	gEngfuncs.pEventAPI->EV_SetUpPlayerPrediction( false, true );

	// Store off the old count
	gEngfuncs.pEventAPI->EV_PushPMStates();

	player = gEngfuncs.GetLocalPlayer();
	// Now add in all of the players.
	gEngfuncs.pEventAPI->EV_SetSolidPlayers ( player->index - 1 );

	// Walk the physent list (capped at 100 slots); a NULL entry ends it.
	for ( idx = 1; idx < 100; idx++ )
	{
		pe = gEngfuncs.pEventAPI->EV_GetPhysent( idx );
		if ( !pe )
			break;

		// Only draw boxes for entries whose info field is a player index.
		if ( pe->info >= 1 && pe->info <= gEngfuncs.GetMaxClients() )
		{
			mins = pe->origin + pe->mins;
			maxs = pe->origin + pe->maxs;

			// NOTE(review): argument order here (0, 0, 255, 2.0) differs
			// from the UTIL_ParticleBox call above -- verify which layout
			// R_ParticleBox expects.
			gEngfuncs.pEfxAPI->R_ParticleBox( (float *)&mins, (float *)&maxs, 0, 0, 255, 2.0 );
		}
	}

	// Restore the physent state pushed above.
	gEngfuncs.pEventAPI->EV_PopPMStates();
}
/*
=====================
UTIL_ParticleLine

For debugging, draw a line made out of particles.  The player parameter
is unused; the call forwards directly to the engine effect API.
=====================
*/
void UTIL_ParticleLine( CBasePlayer *player, float *start, float *end, float life, unsigned char r, unsigned char g, unsigned char b )
{
	gEngfuncs.pEfxAPI->R_ParticleLine( start, end, r, g, b, life );
}
/*
=====================
HUD_InitClientWeapons

Set up weapons, player and functions needed to run weapons code client-side.
Runs exactly once; subsequent calls return immediately.
=====================
*/
void HUD_InitClientWeapons( void )
{
	static int initialized = 0;
	if ( initialized )
		return;

	initialized = 1;

	// Set up pointer ( dummy object )
	gpGlobals = &Globals;

	// Fill in current time ( probably not needed )
	gpGlobals->time = gEngfuncs.GetClientTime();

	// Fake functions
	g_engfuncs.pfnPrecacheModel = stub_PrecacheModel;
	g_engfuncs.pfnPrecacheSound = stub_PrecacheSound;
	// Fix: removed a dead store of stub_PrecacheEvent here -- it was
	// immediately overwritten by the engine passthrough assignment below.
	g_engfuncs.pfnNameForFunction = stub_NameForFunction;
	g_engfuncs.pfnSetModel = stub_SetModel;
	g_engfuncs.pfnSetClientMaxspeed = HUD_SetMaxSpeed;

	// Handled locally
	g_engfuncs.pfnPlaybackEvent = HUD_PlaybackEvent;
	g_engfuncs.pfnAlertMessage = AlertMessage;

	// Pass through to engine
	g_engfuncs.pfnPrecacheEvent = gEngfuncs.pfnPrecacheEvent;
	g_engfuncs.pfnRandomFloat = gEngfuncs.pfnRandomFloat;
	g_engfuncs.pfnRandomLong = gEngfuncs.pfnRandomLong;

	// Allocate a slot for the local player
	HUD_PrepEntity( &player , NULL );

	// Allocate slot(s) for each weapon that we are going to be predicting
	HUD_PrepEntity( &g_Glock , &player );
	HUD_PrepEntity( &g_Crowbar , &player );
	HUD_PrepEntity( &g_Python , &player );
	HUD_PrepEntity( &g_Mp5 , &player );
	HUD_PrepEntity( &g_Crossbow , &player );
	HUD_PrepEntity( &g_Shotgun , &player );
	HUD_PrepEntity( &g_Rpg , &player );
	HUD_PrepEntity( &g_Gauss , &player );
	HUD_PrepEntity( &g_Egon , &player );
	HUD_PrepEntity( &g_HGun , &player );
	HUD_PrepEntity( &g_HandGren , &player );
	HUD_PrepEntity( &g_Satchel , &player );
	HUD_PrepEntity( &g_Tripmine , &player );
	HUD_PrepEntity( &g_Snark , &player );
}
/*
=====================
HUD_GetLastOrg

Returns the last position that we stored for the egon beam endpoint.
=====================
*/
void HUD_GetLastOrg( float *org )
{
	// Copy the three stored components straight out.
	org[ 0 ] = previousorigin[ 0 ];
	org[ 1 ] = previousorigin[ 1 ];
	org[ 2 ] = previousorigin[ 2 ];
}
/*
=====================
HUD_SetLastOrg

Remember our exact predicted origin so we can draw the egon to the right position.
Reads from g_finalstate, which HUD_WeaponsPostThink points at the "to"
state before calling this.
=====================
*/
void HUD_SetLastOrg( void )
{
	int i;

	// Offset final origin by view_offset
	for ( i = 0; i < 3; i++ )
	{
		previousorigin[i] = g_finalstate->playerstate.origin[i] + g_finalstate->client.view_ofs[ i ];
	}
}
/*
=====================
HUD_WeaponsPostThink

Run Weapon firing code on client.  Copies the "from" state into the
local player/weapon objects, runs one frame of weapon logic, then writes
the predicted results back into "to".
=====================
*/
void HUD_WeaponsPostThink( local_state_s *from, local_state_s *to, usercmd_t *cmd, double time, unsigned int random_seed )
{
	int i;
	int buttonsChanged;
	CBasePlayerWeapon *pWeapon = NULL;
	CBasePlayerWeapon *pCurrent;
	weapon_data_t nulldata, *pfrom, *pto;
	static int lasthealth;

	memset( &nulldata, 0, sizeof( nulldata ) );

	HUD_InitClientWeapons();

	// Get current clock
	gpGlobals->time = time;

	// Fill in data based on selected weapon
	// FIXME, make this a method in each weapon?  where you pass in an entity_state_t *?
	switch ( from->client.m_iId )
	{
		case WEAPON_CROWBAR:
			pWeapon = &g_Crowbar;
			break;

		case WEAPON_GLOCK:
			pWeapon = &g_Glock;
			break;

		case WEAPON_PYTHON:
			pWeapon = &g_Python;
			break;

		case WEAPON_MP5:
			pWeapon = &g_Mp5;
			break;

		case WEAPON_CROSSBOW:
			pWeapon = &g_Crossbow;
			break;

		case WEAPON_SHOTGUN:
			pWeapon = &g_Shotgun;
			break;

		case WEAPON_RPG:
			pWeapon = &g_Rpg;
			break;

		case WEAPON_GAUSS:
			pWeapon = &g_Gauss;
			break;

		case WEAPON_EGON:
			pWeapon = &g_Egon;
			break;

		case WEAPON_HORNETGUN:
			pWeapon = &g_HGun;
			break;

		case WEAPON_HANDGRENADE:
			pWeapon = &g_HandGren;
			break;

		case WEAPON_SATCHEL:
			pWeapon = &g_Satchel;
			break;

		case WEAPON_TRIPMINE:
			pWeapon = &g_Tripmine;
			break;

		case WEAPON_SNARK:
			pWeapon = &g_Snark;
			break;
	}

	// Store pointer to our destination entity_state_t so we can get our origin, etc. from it
	//  for setting up events on the client
	g_finalstate = to;

	// If we are running events/etc. go ahead and see if we
	//  managed to die between last frame and this one
	// If so, run the appropriate player killed or spawn function
	if ( g_runfuncs )
	{
		if ( to->client.health <= 0 && lasthealth > 0 )
		{
			player.Killed( NULL, 0 );
		}
		else if ( to->client.health > 0 && lasthealth <= 0 )
		{
			player.Spawn();
		}

		lasthealth = to->client.health;
	}

	// We are not predicting the current weapon, just bow out here.
	if ( !pWeapon )
		return;

	// Pull the networked per-weapon state into the local weapon objects.
	for ( i = 0; i < 32; i++ )
	{
		pCurrent = g_pWpns[ i ];
		if ( !pCurrent )
		{
			continue;
		}

		pfrom = &from->weapondata[ i ];

		pCurrent->m_fInReload = pfrom->m_fInReload;
		pCurrent->m_fInSpecialReload = pfrom->m_fInSpecialReload;
//		pCurrent->m_flPumpTime = pfrom->m_flPumpTime;
		pCurrent->m_iClip = pfrom->m_iClip;
		pCurrent->m_flNextPrimaryAttack = pfrom->m_flNextPrimaryAttack;
		pCurrent->m_flNextSecondaryAttack = pfrom->m_flNextSecondaryAttack;
		pCurrent->m_flTimeWeaponIdle = pfrom->m_flTimeWeaponIdle;
		pCurrent->pev->fuser1 = pfrom->fuser1;
		pCurrent->m_flStartThrow = pfrom->fuser2;
		pCurrent->m_flReleaseThrow = pfrom->fuser3;
		pCurrent->m_chargeReady = pfrom->iuser1;
		pCurrent->m_fInAttack = pfrom->iuser2;
		pCurrent->m_fireState = pfrom->iuser3;

		pCurrent->m_iSecondaryAmmoType = (int)from->client.vuser3[ 2 ];
		pCurrent->m_iPrimaryAmmoType = (int)from->client.vuser4[ 0 ];
		player.m_rgAmmo[ pCurrent->m_iPrimaryAmmoType ] = (int)from->client.vuser4[ 1 ];
		player.m_rgAmmo[ pCurrent->m_iSecondaryAmmoType ] = (int)from->client.vuser4[ 2 ];
	}

	// For random weapon events, use this seed to seed random # generator
	player.random_seed = random_seed;

	// Get old buttons from previous state.
	player.m_afButtonLast = from->playerstate.oldbuttons;

	// Which buttons have changed this frame
	buttonsChanged = (player.m_afButtonLast ^ cmd->buttons);

	// Debounced button codes for pressed/released
	// The changed ones still down are "pressed"
	player.m_afButtonPressed = buttonsChanged & cmd->buttons;
	// The ones not down are "released"
	player.m_afButtonReleased = buttonsChanged & (~cmd->buttons);

	// Set player variables that weapons code might check/alter
	player.pev->button = cmd->buttons;

	player.pev->velocity = from->client.velocity;
	player.pev->flags = from->client.flags;
	player.pev->deadflag = from->client.deadflag;
	player.pev->waterlevel = from->client.waterlevel;
	player.pev->maxspeed = from->client.maxspeed;
	player.pev->fov = from->client.fov;
	player.pev->weaponanim = from->client.weaponanim;
	player.pev->viewmodel = from->client.viewmodel;
	player.m_flNextAttack = from->client.m_flNextAttack;
	player.m_flNextAmmoBurn = from->client.fuser2;
	player.m_flAmmoStartCharge = from->client.fuser3;

	//Stores all our ammo info, so the client side weapons can use them.
	player.ammo_9mm = (int)from->client.vuser1[0];
	player.ammo_357 = (int)from->client.vuser1[1];
	player.ammo_argrens = (int)from->client.vuser1[2];
	player.ammo_bolts = (int)from->client.ammo_nails; //is an int anyways...
	player.ammo_buckshot = (int)from->client.ammo_shells;
	player.ammo_uranium = (int)from->client.ammo_cells;
	player.ammo_hornets = (int)from->client.vuser2[0];
	player.ammo_rockets = (int)from->client.ammo_rockets;

	// Point to current weapon object
	if ( from->client.m_iId )
	{
		player.m_pActiveItem = g_pWpns[ from->client.m_iId ];
	}

	// Fix: m_pActiveItem can still be NULL here (no weapon id received yet,
	// or an unprepped slot) -- guard before dereferencing.
	if ( player.m_pActiveItem && player.m_pActiveItem->m_iId == WEAPON_RPG )
	{
		( ( CRpg * )player.m_pActiveItem)->m_fSpotActive = (int)from->client.vuser2[ 1 ];
		( ( CRpg * )player.m_pActiveItem)->m_cActiveRockets = (int)from->client.vuser2[ 2 ];
	}

	// Don't go firing anything if we have died or are spectating
	// Or if we don't have a weapon model deployed
	if ( ( player.pev->deadflag != ( DEAD_DISCARDBODY + 1 ) ) &&
		 !CL_IsDead() && player.pev->viewmodel && !g_iUser1 )
	{
		if ( player.m_flNextAttack <= 0 )
		{
			pWeapon->ItemPostFrame();
		}
	}

	// Assume that we are not going to switch weapons
	to->client.m_iId = from->client.m_iId;

	// Now see if we issued a changeweapon command ( and we're not dead )
	if ( cmd->weaponselect && ( player.pev->deadflag != ( DEAD_DISCARDBODY + 1 ) ) )
	{
		// Switched to a different weapon?
		if ( from->weapondata[ cmd->weaponselect ].m_iId == cmd->weaponselect )
		{
			CBasePlayerWeapon *pNew = g_pWpns[ cmd->weaponselect ];
			if ( pNew && ( pNew != pWeapon ) )
			{
				// Put away old weapon
				if (player.m_pActiveItem)
					player.m_pActiveItem->Holster( );

				player.m_pLastItem = player.m_pActiveItem;
				player.m_pActiveItem = pNew;

				// Deploy new weapon
				if (player.m_pActiveItem)
				{
					player.m_pActiveItem->Deploy( );
				}

				// Update weapon id so we can predict things correctly.
				to->client.m_iId = cmd->weaponselect;
			}
		}
	}

	// Copy in results of prediction code
	to->client.viewmodel = player.pev->viewmodel;
	to->client.fov = player.pev->fov;
	to->client.weaponanim = player.pev->weaponanim;
	to->client.m_flNextAttack = player.m_flNextAttack;
	to->client.fuser2 = player.m_flNextAmmoBurn;
	to->client.fuser3 = player.m_flAmmoStartCharge;
	to->client.maxspeed = player.pev->maxspeed;

	//HL Weapons
	to->client.vuser1[0] = player.ammo_9mm;
	to->client.vuser1[1] = player.ammo_357;
	to->client.vuser1[2] = player.ammo_argrens;

	to->client.ammo_nails = player.ammo_bolts;
	to->client.ammo_shells = player.ammo_buckshot;
	to->client.ammo_cells = player.ammo_uranium;
	to->client.vuser2[0] = player.ammo_hornets;
	to->client.ammo_rockets = player.ammo_rockets;

	// Fix: same NULL guard as above before the RPG-specific copy-back.
	if ( player.m_pActiveItem && player.m_pActiveItem->m_iId == WEAPON_RPG )
	{
		from->client.vuser2[ 1 ] = ( ( CRpg * )player.m_pActiveItem)->m_fSpotActive;
		from->client.vuser2[ 2 ] = ( ( CRpg * )player.m_pActiveItem)->m_cActiveRockets;
	}

	// Make sure that weapon animation matches what the game .dll is telling us
	//  over the wire ( fixes some animation glitches )
	if ( g_runfuncs && ( HUD_GetWeaponAnim() != to->client.weaponanim ) )
	{
		int body = 2;

		//Pop the model to body 0.
		if ( pWeapon == &g_Tripmine )
			body = 0;

		//Show laser sight/scope combo
		if ( pWeapon == &g_Python && bIsMultiplayer() )
			body = 1;

		// Force a fixed anim down to viewmodel
		HUD_SendWeaponAnim( to->client.weaponanim, body, 1 );
	}

	// Write the predicted per-weapon state back into "to".
	for ( i = 0; i < 32; i++ )
	{
		pCurrent = g_pWpns[ i ];

		pto = &to->weapondata[ i ];

		if ( !pCurrent )
		{
			memset( pto, 0, sizeof( weapon_data_t ) );
			continue;
		}

		pto->m_fInReload = pCurrent->m_fInReload;
		pto->m_fInSpecialReload = pCurrent->m_fInSpecialReload;
//		pto->m_flPumpTime = pCurrent->m_flPumpTime;
		pto->m_iClip = pCurrent->m_iClip;
		pto->m_flNextPrimaryAttack = pCurrent->m_flNextPrimaryAttack;
		pto->m_flNextSecondaryAttack = pCurrent->m_flNextSecondaryAttack;
		pto->m_flTimeWeaponIdle = pCurrent->m_flTimeWeaponIdle;
		pto->fuser1 = pCurrent->pev->fuser1;
		pto->fuser2 = pCurrent->m_flStartThrow;
		pto->fuser3 = pCurrent->m_flReleaseThrow;
		pto->iuser1 = pCurrent->m_chargeReady;
		pto->iuser2 = pCurrent->m_fInAttack;
		pto->iuser3 = pCurrent->m_fireState;

		// Decrement weapon counters, server does this at same time ( during post think, after doing everything else )
		pto->m_flNextReload -= cmd->msec / 1000.0;
		pto->m_fNextAimBonus -= cmd->msec / 1000.0;
		pto->m_flNextPrimaryAttack -= cmd->msec / 1000.0;
		pto->m_flNextSecondaryAttack -= cmd->msec / 1000.0;
		pto->m_flTimeWeaponIdle -= cmd->msec / 1000.0;
		pto->fuser1 -= cmd->msec / 1000.0;

		to->client.vuser3[2] = pCurrent->m_iSecondaryAmmoType;
		to->client.vuser4[0] = pCurrent->m_iPrimaryAmmoType;
		to->client.vuser4[1] = player.m_rgAmmo[ pCurrent->m_iPrimaryAmmoType ];
		to->client.vuser4[2] = player.m_rgAmmo[ pCurrent->m_iSecondaryAmmoType ];

/*		if ( pto->m_flPumpTime != -9999 )
		{
			pto->m_flPumpTime -= cmd->msec / 1000.0;
			if ( pto->m_flPumpTime < -0.001 )
				pto->m_flPumpTime = -0.001;
		}*/

		// Clamp the timers so they don't drift unboundedly negative.
		if ( pto->m_fNextAimBonus < -1.0 )
		{
			pto->m_fNextAimBonus = -1.0;
		}

		if ( pto->m_flNextPrimaryAttack < -1.0 )
		{
			pto->m_flNextPrimaryAttack = -1.0;
		}

		if ( pto->m_flNextSecondaryAttack < -0.001 )
		{
			pto->m_flNextSecondaryAttack = -0.001;
		}

		if ( pto->m_flTimeWeaponIdle < -0.001 )
		{
			pto->m_flTimeWeaponIdle = -0.001;
		}

		if ( pto->m_flNextReload < -0.001 )
		{
			pto->m_flNextReload = -0.001;
		}

		if ( pto->fuser1 < -0.001 )
		{
			pto->fuser1 = -0.001;
		}
	}

	// m_flNextAttack is now part of the weapons, but is part of the player instead
	to->client.m_flNextAttack -= cmd->msec / 1000.0;
	if ( to->client.m_flNextAttack < -0.001 )
	{
		to->client.m_flNextAttack = -0.001;
	}

	to->client.fuser2 -= cmd->msec / 1000.0;
	if ( to->client.fuser2 < -0.001 )
	{
		to->client.fuser2 = -0.001;
	}

	to->client.fuser3 -= cmd->msec / 1000.0;
	if ( to->client.fuser3 < -0.001 )
	{
		to->client.fuser3 = -0.001;
	}

	// Store off the last position from the predicted state.
	HUD_SetLastOrg();

	// Wipe it so we can't use it after this frame
	g_finalstate = NULL;
}
/*
=====================
HUD_PostRunCmd

Client calls this during prediction, after it has moved the player and updated any info changed into to->
time is the current client clock based on prediction
cmd is the command that caused the movement, etc
runfuncs is 1 if this is the first time we've predicted this command.  If so, sounds and effects should play, otherwise, they should
be ignored
=====================
*/
void CL_DLLEXPORT HUD_PostRunCmd( struct local_state_s *from, struct local_state_s *to, struct usercmd_s *cmd, int runfuncs, double time, unsigned int random_seed )
{
//	RecClPostRunCmd(from, to, cmd, runfuncs, time, random_seed);

	// Remember whether effects/sounds may fire for this command.
	g_runfuncs = runfuncs;

#if defined( CLIENT_WEAPONS )
	// Only run client-side weapon prediction when the cl_lw cvar is on.
	if ( cl_lw && cl_lw->value )
	{
		HUD_WeaponsPostThink( from, to, cmd, time, random_seed );
	}
	else
#endif
	{
		// Prediction disabled: just carry the last known FOV forward.
		to->client.fov = g_lastFOV;
	}

	// Presumably the predicted gauss recoil: push velocity opposite to the
	// view direction by g_flApplyVel * 5, then clear the pending flag.
	if ( g_irunninggausspred == 1 )
	{
		Vector forward;
		gEngfuncs.pfnAngleVectors( v_angles, forward, NULL, NULL );
		to->client.velocity = to->client.velocity - forward * g_flApplyVel * 5;
		g_irunninggausspred = false;
	}

	// All games can use FOV state
	g_lastFOV = to->client.fov;
}
| {
"pile_set_name": "Github"
} |
/*
* Minimal 'console' binding.
*
* https://github.com/DeveloperToolsWG/console-object/blob/master/api.md
* https://developers.google.com/web/tools/chrome-devtools/debug/console/console-reference
* https://developer.mozilla.org/en/docs/Web/API/console
*/
#include <stdio.h>
#include <stdarg.h>
#include "duktape.h"
#include "duk_console.h"
/* XXX: Add some form of log level filtering. */
/* XXX: For now logs everything to stdout, V8/Node.js logs debug/info level
* to stdout, warn and above to stderr. Should this extra do the same?
*/
/* XXX: Should all output be written via e.g. console.write(formattedMsg)?
* This would make it easier for user code to redirect all console output
* to a custom backend.
*/
/* XXX: Init console object using duk_def_prop() when that call is available. */
/* Shared implementation behind all console.* logging entry points.
 *
 * Runs each object argument through console.format, joins all arguments
 * with a single space, optionally wraps the message in an Error object
 * named error_name (so its .stack renders e.g. 'Trace: 1 2 3'), writes
 * the line to stdout, and flushes if the DUK_CONSOLE_FLUSH flag is set.
 */
static duk_ret_t duk__console_log_helper(duk_context *ctx, const char *error_name) {
	duk_idx_t i, n;
	duk_uint_t flags;

	/* Behavior flags were stashed in the function magic at registration. */
	flags = (duk_uint_t) duk_get_current_magic(ctx);

	n = duk_get_top(ctx);

	duk_get_global_string(ctx, "console");
	duk_get_prop_string(ctx, -1, "format");

	for (i = 0; i < n; i++) {
		if (duk_check_type_mask(ctx, i, DUK_TYPE_MASK_OBJECT)) {
			/* Slow path formatting. */
			duk_dup(ctx, -1);  /* console.format */
			duk_dup(ctx, i);
			duk_call(ctx, 1);
			duk_replace(ctx, i);  /* arg[i] = console.format(arg[i]); */
		}
	}

	/* Drop console and console.format; only the args remain. */
	duk_pop_2(ctx);

	duk_push_string(ctx, " ");
	duk_insert(ctx, 0);
	duk_join(ctx, n);

	if (error_name) {
		duk_push_error_object(ctx, DUK_ERR_ERROR, "%s", duk_require_string(ctx, -1));
		duk_push_string(ctx, "name");
		duk_push_string(ctx, error_name);
		duk_def_prop(ctx, -3, DUK_DEFPROP_FORCE | DUK_DEFPROP_HAVE_VALUE);  /* to get e.g. 'Trace: 1 2 3' */
		duk_get_prop_string(ctx, -1, "stack");
	}

	fprintf(stdout, "%s\n", duk_to_string(ctx, -1));
	if (flags & DUK_CONSOLE_FLUSH) {
		fflush(stdout);
	}
	return 0;
}
/* console.assert(): silent on a truthy first argument; otherwise logs
 * the remaining arguments as an AssertionError. */
static duk_ret_t duk__console_assert(duk_context *ctx) {
	if (!duk_to_boolean(ctx, 0)) {
		/* Drop the condition so only the message arguments are logged. */
		duk_remove(ctx, 0);
		return duk__console_log_helper(ctx, "AssertionError");
	}
	return 0;
}
/* console.log(): plain logging, no error wrapping. */
static duk_ret_t duk__console_log(duk_context *ctx) {
	return duk__console_log_helper(ctx, NULL);
}
/* console.trace(): logs with a 'Trace' error wrapper so a stack is shown. */
static duk_ret_t duk__console_trace(duk_context *ctx) {
	return duk__console_log_helper(ctx, "Trace");
}
/* console.info(): same output as console.log(). */
static duk_ret_t duk__console_info(duk_context *ctx) {
	return duk__console_log_helper(ctx, NULL);
}
/* console.warn(): same output as console.log() (no stderr split yet). */
static duk_ret_t duk__console_warn(duk_context *ctx) {
	return duk__console_log_helper(ctx, NULL);
}
/* console.error(): logs with an 'Error' wrapper so a stack is shown. */
static duk_ret_t duk__console_error(duk_context *ctx) {
	return duk__console_log_helper(ctx, "Error");
}
/* console.dir(): for now, just share the formatting of .log().
 * Fix: pass NULL (not the integer literal 0) for the const char *
 * error_name argument, matching the other no-wrapper entry points. */
static duk_ret_t duk__console_dir(duk_context *ctx) {
	return duk__console_log_helper(ctx, NULL);
}
/* Registers a vararg native function as a property of the object at the
 * stack top.  The .name property is forced via defineProperty, and the
 * behavior flags are stored in the function magic for the log helper. */
static void duk__console_reg_vararg_func(duk_context *ctx, duk_c_function func, const char *name, duk_uint_t flags) {
	duk_push_c_function(ctx, func, DUK_VARARGS);
	duk_push_string(ctx, "name");
	duk_push_string(ctx, name);
	duk_def_prop(ctx, -3, DUK_DEFPROP_HAVE_VALUE | DUK_DEFPROP_FORCE);  /* Improve stacktraces by displaying function name */
	duk_set_magic(ctx, -1, (duk_int_t) flags);
	duk_put_prop_string(ctx, -2, name);
}
/* Installs the global 'console' object with the standard logging methods.
 * flags are forwarded to each method (e.g. DUK_CONSOLE_FLUSH) and may
 * request a Proxy wrapper that silently ignores undefined methods. */
void duk_console_init(duk_context *ctx, duk_uint_t flags) {
	duk_push_object(ctx);

	/* Custom function to format objects; user can replace.
	 * For now, try JX-formatting and if that fails, fall back
	 * to ToString(v).
	 */
	duk_eval_string(ctx,
		"(function (E) {"
		    "return function format(v){"
		        "try{"
		            "return E('jx',v);"
		        "}catch(e){"
		            "return String(v);"  /* String() allows symbols, ToString() internal algorithm doesn't. */
		        "}"
		    "};"
		"})(Duktape.enc)");
	duk_put_prop_string(ctx, -2, "format");

	/* Register each console method; some share an implementation. */
	duk__console_reg_vararg_func(ctx, duk__console_assert, "assert", flags);
	duk__console_reg_vararg_func(ctx, duk__console_log, "log", flags);
	duk__console_reg_vararg_func(ctx, duk__console_log, "debug", flags);  /* alias to console.log */
	duk__console_reg_vararg_func(ctx, duk__console_trace, "trace", flags);
	duk__console_reg_vararg_func(ctx, duk__console_info, "info", flags);
	duk__console_reg_vararg_func(ctx, duk__console_warn, "warn", flags);
	duk__console_reg_vararg_func(ctx, duk__console_error, "error", flags);
	duk__console_reg_vararg_func(ctx, duk__console_error, "exception", flags);  /* alias to console.error */
	duk__console_reg_vararg_func(ctx, duk__console_dir, "dir", flags);

	duk_put_global_string(ctx, "console");

	/* Proxy wrapping: ensures any undefined console method calls are
	 * ignored silently.  This is required specifically by the
	 * DeveloperToolsWG proposal (and is implemented also by Firefox:
	 * https://bugzilla.mozilla.org/show_bug.cgi?id=629607).
	 */
	if (flags & DUK_CONSOLE_PROXY_WRAPPER) {
		/* Tolerate errors: Proxy may be disabled. */
		duk_peval_string_noresult(ctx,
			"(function(){"
			    "var D=function(){};"
			    "console=new Proxy(console,{"
			        "get:function(t,k){"
			            "var v=t[k];"
			            "return typeof v==='function'?v:D;"
			        "}"
			    "});"
			"})();"
		);
	}
}
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <locale>
// template <class charT> class numpunct;
// string grouping() const;
#include <locale>
#include <cassert>
#include "test_macros.h"
int main(int, char**)
{
    // The classic "C" locale specifies no digit grouping, so grouping()
    // must return an empty string for both narrow and wide facets.
    std::locale l = std::locale::classic();
    {
        typedef char C;
        const std::numpunct<C>& np = std::use_facet<std::numpunct<C> >(l);
        assert(np.grouping() == std::string());
    }
    {
        typedef wchar_t C;
        const std::numpunct<C>& np = std::use_facet<std::numpunct<C> >(l);
        assert(np.grouping() == std::string());
    }

    return 0;
}
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !noedac
package collector
import (
"fmt"
"path"
"path/filepath"
"regexp"
"github.com/prometheus/client_golang/prometheus"
)
const (
	// edacSubsystem is the metric subsystem label for EDAC collectors.
	edacSubsystem = "edac"
)

var (
	// Extract the memory controller / csrow number from sysfs paths.
	edacMemControllerRE = regexp.MustCompile(`.*devices/system/edac/mc/mc([0-9]*)`)
	edacMemCsrowRE      = regexp.MustCompile(`.*devices/system/edac/mc/mc[0-9]*/csrow([0-9]*)`)
)
// edacCollector exposes EDAC (memory error detection and correction)
// counters read from sysfs.
type edacCollector struct {
	ceCount      *prometheus.Desc // correctable errors per controller
	ueCount      *prometheus.Desc // uncorrectable errors per controller
	csRowCECount *prometheus.Desc // correctable errors per controller+csrow
	csRowUECount *prometheus.Desc // uncorrectable errors per controller+csrow
}
// init registers this collector under the name "edac", enabled by default.
func init() {
	registerCollector("edac", defaultEnabled, NewEdacCollector)
}
// NewEdacCollector returns a new Collector exposing edac stats.
// All four descriptors are counters labeled by controller (and csrow for
// the per-csrow variants).
func NewEdacCollector() (Collector, error) {
	return &edacCollector{
		ceCount: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"),
			"Total correctable memory errors.",
			[]string{"controller"}, nil,
		),
		ueCount: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, edacSubsystem, "uncorrectable_errors_total"),
			"Total uncorrectable memory errors.",
			[]string{"controller"}, nil,
		),
		csRowCECount: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, edacSubsystem, "csrow_correctable_errors_total"),
			"Total correctable memory errors for this csrow.",
			[]string{"controller", "csrow"}, nil,
		),
		csRowUECount: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, edacSubsystem, "csrow_uncorrectable_errors_total"),
			"Total uncorrectable memory errors for this csrow.",
			[]string{"controller", "csrow"}, nil,
		),
	}, nil
}
// Update implements Collector. It walks every memory controller under
// devices/system/edac/mc, exporting the controller-wide correctable and
// uncorrectable counters (the *_noinfo_count values are attributed to a
// pseudo csrow "unknown"), then walks each csrow subdirectory and
// exports the per-csrow counters. It returns the first error hit.
func (c *edacCollector) Update(ch chan<- prometheus.Metric) error {
	memControllers, err := filepath.Glob(sysFilePath("devices/system/edac/mc/mc[0-9]*"))
	if err != nil {
		return err
	}
	for _, controller := range memControllers {
		controllerMatch := edacMemControllerRE.FindStringSubmatch(controller)
		if controllerMatch == nil {
			return fmt.Errorf("controller string didn't match regexp: %s", controller)
		}
		controllerNumber := controllerMatch[1]

		value, err := readUintFromFile(path.Join(controller, "ce_count"))
		if err != nil {
			return fmt.Errorf("couldn't get ce_count for controller %s: %s", controllerNumber, err)
		}
		ch <- prometheus.MustNewConstMetric(
			c.ceCount, prometheus.CounterValue, float64(value), controllerNumber)

		// Errors lacking csrow attribution are exported as csrow "unknown".
		value, err = readUintFromFile(path.Join(controller, "ce_noinfo_count"))
		if err != nil {
			return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %s", controllerNumber, err)
		}
		ch <- prometheus.MustNewConstMetric(
			c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown")

		value, err = readUintFromFile(path.Join(controller, "ue_count"))
		if err != nil {
			return fmt.Errorf("couldn't get ue_count for controller %s: %s", controllerNumber, err)
		}
		ch <- prometheus.MustNewConstMetric(
			c.ueCount, prometheus.CounterValue, float64(value), controllerNumber)

		value, err = readUintFromFile(path.Join(controller, "ue_noinfo_count"))
		if err != nil {
			return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %s", controllerNumber, err)
		}
		ch <- prometheus.MustNewConstMetric(
			c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown")

		// For each controller, walk the csrow directories.
		csrows, err := filepath.Glob(filepath.Join(controller, "csrow[0-9]*"))
		if err != nil {
			return err
		}
		for _, csrow := range csrows {
			csrowMatch := edacMemCsrowRE.FindStringSubmatch(csrow)
			if csrowMatch == nil {
				return fmt.Errorf("csrow string didn't match regexp: %s", csrow)
			}
			csrowNumber := csrowMatch[1]

			value, err = readUintFromFile(path.Join(csrow, "ce_count"))
			if err != nil {
				return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err)
			}
			ch <- prometheus.MustNewConstMetric(
				c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber)

			value, err = readUintFromFile(path.Join(csrow, "ue_count"))
			if err != nil {
				return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %s", controllerNumber, csrowNumber, err)
			}
			ch <- prometheus.MustNewConstMetric(
				c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber)
		}
	}
	// err is necessarily nil here (every failure path returned above);
	// return nil explicitly rather than the stale loop variable.
	return nil
}
| {
"pile_set_name": "Github"
} |
/****************************************************************************
* arch/arm/src/s32k1xx/s32k1xx_flexcan.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <debug.h>
#include <errno.h>
#include <nuttx/can.h>
#include <nuttx/wdog.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <nuttx/wqueue.h>
#include <nuttx/signal.h>
#include <nuttx/net/netdev.h>
#include <nuttx/net/can.h>
#include "arm_arch.h"
#include "chip.h"
#include "s32k1xx_config.h"
#include "hardware/s32k1xx_flexcan.h"
#include "hardware/s32k1xx_pinmux.h"
#include "s32k1xx_periphclocks.h"
#include "s32k1xx_pin.h"
#include "s32k1xx_flexcan.h"
#include <arch/board/board.h>
#ifdef CONFIG_NET_CMSG
#include <sys/time.h>
#endif
#ifdef CONFIG_S32K1XX_FLEXCAN
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* If processing is not done at the interrupt level, then work queue support
* is required.
*/
/* If processing is not done at the interrupt level, then work queue support
 * is required.
 */

#define CANWORK LPWORK

/* CONFIG_S32K1XX_FLEXCAN_NETHIFS determines the number of physical
 * interfaces that will be supported.
 */

#define MASKSTDID      0x000007ff
#define MASKEXTID      0x1fffffff

/* 1u: shifting a signed 1 into bit 31 is undefined behavior */

#define FLAGEFF        (1u << 31) /* Extended frame format */
#define FLAGRTR        (1 << 30)  /* Remote transmission request */

/* Mailbox partitioning: the first RXMBCOUNT mailboxes receive, the next
 * TXMBCOUNT transmit.
 */

#define RXMBCOUNT      5
#define TXMBCOUNT      2

/* Parenthesized so the macro expands safely inside larger expressions
 * (e.g. "x * TOTALMBCOUNT").
 */

#define TOTALMBCOUNT   (RXMBCOUNT + TXMBCOUNT)

#define IFLAG1_RX      ((1 << RXMBCOUNT)-1)
#define IFLAG1_TX      (((1 << TXMBCOUNT)-1) << RXMBCOUNT)

#define CAN_FIFO_NE    (1 << 5)
#define CAN_FIFO_OV    (1 << 6)
#define CAN_FIFO_WARN  (1 << 7)
#define CAN_EFF_FLAG   0x80000000 /* EFF/SFF is set in the MSB */

#define POOL_SIZE      1

/* Extra room appended to each frame buffer for a struct timeval
 * timestamp when control messages are enabled.
 */

#ifdef CONFIG_NET_CMSG
#define MSG_DATA       sizeof(struct timeval)
#else
#define MSG_DATA       0
#endif

/* CAN bit timing values */

#define CLK_FREQ       80000000
#define PRESDIV_MAX    256

#define SEG_MAX        8
#define SEG_MIN        1
#define TSEG_MIN       2
#define TSEG1_MAX      17
#define TSEG2_MAX      9
#define NUMTQ_MAX      26

#define SEG_FD_MAX     32
#define SEG_FD_MIN     1
#define TSEG_FD_MIN    2
#define TSEG1_FD_MAX   39
#define TSEG2_FD_MAX   9
#define NUMTQ_FD_MAX   49

#ifdef CONFIG_NET_CAN_RAW_TX_DEADLINE
#  if !defined(CONFIG_SCHED_WORKQUEUE)
#    error Work queue support is required
#  endif

#define TX_TIMEOUT_WQ
#endif

/* Interrupt flags for RX fifo */

#define IFLAG1_RXFIFO  (CAN_FIFO_NE | CAN_FIFO_WARN | CAN_FIFO_OV)

/* Highest TX mailbox index handed out so far (diagnostic high-water mark,
 * updated in s32k1xx_transmit).
 */

static int peak_tx_mailbox_index_ = 0;
/****************************************************************************
* Private Types
****************************************************************************/
/* Control/status word of a FlexCAN message buffer: accessible as one raw
 * 32-bit word or as the individual hardware bit-fields.
 */

union cs_e
{
  volatile uint32_t cs;                /* Raw word access */
  struct
  {
    volatile uint32_t time_stamp : 16; /* Free-running timer capture */
    volatile uint32_t dlc : 4;         /* Data length code */
    volatile uint32_t rtr : 1;         /* Remote transmission request */
    volatile uint32_t ide : 1;         /* Extended (29-bit) identifier */
    volatile uint32_t srr : 1;         /* Substitute remote request */
    volatile uint32_t res : 1;         /* Reserved */
    volatile uint32_t code : 4;        /* Mailbox code (INACTIVE, DATA, ...) */
    volatile uint32_t res2 : 1;        /* Reserved */
    volatile uint32_t esi : 1;         /* Error state indicator (CAN FD) */
    volatile uint32_t brs : 1;         /* Bit rate switch (CAN FD) */
    volatile uint32_t edl : 1;         /* Extended data length (CAN FD) */
  };
};
/* Identifier word of a message buffer, overlaid for the two CAN ID
 * formats: a 29-bit extended ID or an 11-bit standard ID.
 */

union id_e
{
  volatile uint32_t w;           /* Raw word access */
  struct
  {
    volatile uint32_t ext : 29;  /* Extended identifier */
    volatile uint32_t resex : 3; /* Reserved */
  };
  struct
  {
    volatile uint32_t res : 18;   /* Reserved */
    volatile uint32_t std : 11;   /* Standard identifier */
    volatile uint32_t resstd : 3; /* Reserved */
  };
};
/* One 32-bit payload word of a message buffer, addressable as a word or
 * as its four bytes (the hardware stores payload big-endian; the word is
 * byte-swapped with __builtin_bswap32 on access).
 */

union data_e
{
  volatile uint32_t w00;        /* Raw word access */
  struct
  {
    volatile uint32_t b03 : 8;  /* Byte 3 */
    volatile uint32_t b02 : 8;  /* Byte 2 */
    volatile uint32_t b01 : 8;  /* Byte 1 */
    volatile uint32_t b00 : 8;  /* Byte 0 */
  };
};
/* Layout of one hardware message buffer: control/status, identifier and
 * payload (64 bytes for CAN FD, 8 bytes otherwise).
 */

struct mb_s
{
  union cs_e cs;          /* Control and status */
  union id_e id;          /* Frame identifier */
#ifdef CONFIG_NET_CAN_CANFD
  union data_e data[16];  /* 64-byte CAN FD payload */
#else
  union data_e data[2];   /* 8-byte classic CAN payload */
#endif
};
#ifdef CONFIG_NET_CAN_RAW_TX_DEADLINE
/* Per-TX-mailbox software state used for deadline tracking */

#define TX_ABORT -1
#define TX_FREE 0
#define TX_BUSY 1

struct txmbstats
{
  struct timeval deadline;  /* Absolute deadline for this transmission */
  uint32_t pending;         /* -1 = abort, 0 = free, 1 = busy
                             * NOTE(review): TX_ABORT (-1) stored into an
                             * unsigned field wraps to 0xffffffff; works
                             * for == comparisons but int32_t would be
                             * clearer -- confirm against other users of
                             * this field outside this chunk. */
};
#endif
/* FlexCAN Device hardware configuration: the static pin and IRQ wiring
 * of one controller instance (see s32k1xx_flexcanN_config below).
 */

struct flexcan_config_s
{
  uint32_t tx_pin;      /* GPIO configuration for TX */
  uint32_t rx_pin;      /* GPIO configuration for RX */
  uint32_t enable_pin;  /* Optional enable pin (0 if absent) */
  uint32_t enable_high; /* Optional enable high/low polarity */
  uint32_t bus_irq;     /* BUS IRQ */
  uint32_t error_irq;   /* ERROR IRQ */
  uint32_t lprx_irq;    /* LPRX IRQ (0 if the instance has none) */
  uint32_t mb_irq;      /* MB 0-15 IRQ */
};
/* CAN bit-timing parameters; filled in by s32k1xx_bitratetotimeseg() */

struct flexcan_timeseg
{
  uint32_t bitrate;  /* Nominal bitrate in bits/second */
  int32_t samplep;   /* Sample point in percent of the bit time */
  uint8_t propseg;   /* Propagation segment */
  uint8_t pseg1;     /* Phase segment 1 */
  uint8_t pseg2;     /* Phase segment 2 */
  uint8_t presdiv;   /* Prescaler divisor */
};
/* FlexCAN device structures */

/* Pin and IRQ wiring for the FlexCAN0 instance */

#ifdef CONFIG_S32K1XX_FLEXCAN0
static const struct flexcan_config_s s32k1xx_flexcan0_config =
{
  .tx_pin      = PIN_CAN0_TX,
  .rx_pin      = PIN_CAN0_RX,
#ifdef PIN_CAN0_ENABLE
  .enable_pin  = PIN_CAN0_ENABLE,
  .enable_high = CAN0_ENABLE_OUT,
#else
  .enable_pin  = 0,
  .enable_high = 0,
#endif
  .bus_irq     = S32K1XX_IRQ_CAN0_BUS,
  .error_irq   = S32K1XX_IRQ_CAN0_ERROR,
  .lprx_irq    = S32K1XX_IRQ_CAN0_LPRX,
  .mb_irq      = S32K1XX_IRQ_CAN0_0_15,
};
#endif
/* Pin and IRQ wiring for the FlexCAN1 instance (no LPRX IRQ line) */

#ifdef CONFIG_S32K1XX_FLEXCAN1
static const struct flexcan_config_s s32k1xx_flexcan1_config =
{
  .tx_pin      = PIN_CAN1_TX,
  .rx_pin      = PIN_CAN1_RX,
#ifdef PIN_CAN1_ENABLE
  .enable_pin  = PIN_CAN1_ENABLE,
  .enable_high = CAN1_ENABLE_OUT,
#else
  .enable_pin  = 0,
  .enable_high = 0,
#endif
  .bus_irq     = S32K1XX_IRQ_CAN1_BUS,
  .error_irq   = S32K1XX_IRQ_CAN1_ERROR,
  .lprx_irq    = 0,
  .mb_irq      = S32K1XX_IRQ_CAN1_0_15,
};
#endif
/* Pin and IRQ wiring for the FlexCAN2 instance (no LPRX IRQ line).
 *
 * Fix: the original initializer named ".rx_pin" twice (copy-paste from
 * the CAN0/CAN1 blocks), which clobbered the RX pin configuration with
 * the enable value and left ".enable_high" zero-initialized.  The
 * duplicated designators are the enable fields, as in the sibling
 * configs.
 */

#ifdef CONFIG_S32K1XX_FLEXCAN2
static const struct flexcan_config_s s32k1xx_flexcan2_config =
{
  .tx_pin      = PIN_CAN2_TX,
  .rx_pin      = PIN_CAN2_RX,
#ifdef PIN_CAN2_ENABLE
  .enable_pin  = PIN_CAN2_ENABLE,
  .enable_high = CAN2_ENABLE_HIGH,
#else
  .enable_pin  = 0,
  .enable_high = 0,
#endif
  .bus_irq     = S32K1XX_IRQ_CAN2_BUS,
  .error_irq   = S32K1XX_IRQ_CAN2_ERROR,
  .lprx_irq    = 0,
  .mb_irq      = S32K1XX_IRQ_CAN2_0_15,
};
#endif
/* The s32k1xx_driver_s encapsulates all state information for a single
 * hardware interface
 */

struct s32k1xx_driver_s
{
  uint32_t base;                 /* FLEXCAN base address */
  bool bifup;                    /* true:ifup false:ifdown */
#ifdef TX_TIMEOUT_WQ
  struct wdog_s txtimeout[TXMBCOUNT]; /* TX timeout timer (one per TX MB) */
#endif
  struct work_s irqwork;         /* For deferring interrupt work to the wq */
  struct work_s pollwork;        /* For deferring poll work to the work wq */
#ifdef CONFIG_NET_CAN_CANFD
  struct canfd_frame *txdesc;    /* A pointer to the list of TX descriptor */
  struct canfd_frame *rxdesc;    /* A pointer to the list of RX descriptors */
#else
  struct can_frame *txdesc;      /* A pointer to the list of TX descriptor */
  struct can_frame *rxdesc;      /* A pointer to the list of RX descriptors */
#endif

  /* This holds the information visible to the NuttX network */

  struct net_driver_s dev;       /* Interface understood by the network */
  struct mb_s *rx;               /* RX mailbox array */
  struct mb_s *tx;               /* TX mailbox array */
  struct flexcan_timeseg arbi_timing; /* Timing for arbitration phase */
#ifdef CONFIG_NET_CAN_CANFD
  struct flexcan_timeseg data_timing; /* Timing for data phase */
#endif
  const struct flexcan_config_s *config; /* Static pin/IRQ wiring */
#ifdef CONFIG_NET_CAN_RAW_TX_DEADLINE
  struct txmbstats txmb[TXMBCOUNT];  /* Per TX mailbox deadline state */
#endif
};
/****************************************************************************
 * Private Data
 ****************************************************************************/

/* One driver state instance per enabled FlexCAN peripheral */

#ifdef CONFIG_S32K1XX_FLEXCAN0
static struct s32k1xx_driver_s g_flexcan0;
#endif
#ifdef CONFIG_S32K1XX_FLEXCAN1
static struct s32k1xx_driver_s g_flexcan1;
#endif
#ifdef CONFIG_S32K1XX_FLEXCAN2
static struct s32k1xx_driver_s g_flexcan2;
#endif

/* Single TX and RX frame buffers (POOL_SIZE == 1); each is sized for one
 * frame plus optional struct timeval timestamp space (MSG_DATA).
 */

#ifdef CONFIG_NET_CAN_CANFD
static uint8_t g_tx_pool[(sizeof(struct canfd_frame)+MSG_DATA)*POOL_SIZE];
static uint8_t g_rx_pool[(sizeof(struct canfd_frame)+MSG_DATA)*POOL_SIZE];
#else
static uint8_t g_tx_pool[sizeof(struct can_frame)*POOL_SIZE];
static uint8_t g_rx_pool[sizeof(struct can_frame)*POOL_SIZE];
#endif
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/****************************************************************************
 * Name: arm_lsb
 *
 * Description:
 *   Calculate position of lsb that's equal to 1
 *
 *   Fix: the previous inline assembly used "rbit %1,%0", i.e. the RBIT
 *   destination was the *input* operand's register while the compiler
 *   copied back the (never written) output register -- correct only when
 *   both happened to share a register.  __builtin_ctz() expresses the
 *   same operation portably (GCC emits RBIT+CLZ on ARMv7-M) and this
 *   file already relies on GCC builtins (__builtin_bswap32).
 *
 * Input Parameters:
 *   value - The value to perform the operation on
 *
 * Returned Value:
 *   location of lsb which is equal to 1, returns 32 when value is 0
 *
 ****************************************************************************/

static inline uint32_t arm_lsb(unsigned int value)
{
  /* __builtin_ctz() is undefined for 0; mirror CLZ's 32 result instead */

  if (value == 0)
    {
      return 32;
    }

  return (uint32_t)__builtin_ctz(value);
}
/****************************************************************************
 * Name: s32k1xx_bitratetotimeseg
 *
 * Description:
 *   Convert bitrate to timeseg: search for a prescaler/segment
 *   combination that hits the requested bitrate exactly and places the
 *   sample point within sp_tolerance percent of the requested value.
 *
 * Input Parameters:
 *   timeseg - structure to store bit timing (bitrate and samplep are
 *             inputs; propseg/pseg1/pseg2/presdiv and the achieved
 *             samplep are outputs)
 *   sp_tolerance - allowed difference in sample point from calculated
 *                  bit timings (recommended value: 1)
 *   can_fd - if set to calculate CAN FD bit timings, otherwise calculate
 *            classical can timings
 *
 * Returned Value:
 *   return 1 on succes, return 0 on failure
 *
 ****************************************************************************/

uint32_t s32k1xx_bitratetotimeseg(struct flexcan_timeseg *timeseg,
                                  int32_t sp_tolerance,
                                  uint32_t can_fd)
{
  int32_t tmppresdiv;
  int32_t numtq;
  int32_t tmpsample;
  int32_t tseg1;
  int32_t tseg2;
  int32_t tmppseg1;
  int32_t tmppseg2;
  int32_t tmppropseg;

  /* CAN FD allows longer segments and more time quanta per bit */

  const int32_t TSEG1MAX = (can_fd ? TSEG1_FD_MAX : TSEG1_MAX);
  const int32_t TSEG2MAX = (can_fd ? TSEG2_FD_MAX : TSEG2_MAX);
  const int32_t SEGMAX = (can_fd ? SEG_FD_MAX : SEG_MAX);
  const int32_t NUMTQMAX = (can_fd ? NUMTQ_FD_MAX : NUMTQ_MAX);

  /* Try every prescaler until one divides CLK_FREQ into an acceptable
   * whole number of time quanta per bit.
   */

  for (tmppresdiv = 0; tmppresdiv < PRESDIV_MAX; tmppresdiv++)
    {
      numtq = (CLK_FREQ / ((tmppresdiv + 1) * timeseg->bitrate));

      if (numtq == 0)
        {
          continue;
        }

      /* The number of time quanta in 1 bit time must be
       * lower than the one supported
       */

      if ((CLK_FREQ / ((tmppresdiv + 1) * numtq) == timeseg->bitrate)
          && (numtq >= 8) && (numtq < NUMTQMAX))
        {
          /* Compute time segments based on the value of the sampling
           * point
           */

          tseg1 = (numtq * timeseg->samplep / 100) - 1;
          tseg2 = numtq - 1 - tseg1;

          /* Adjust time segment 1 and time segment 2 */

          while (tseg1 >= TSEG1MAX || tseg2 < TSEG_MIN)
            {
              tseg2++;
              tseg1--;
            }

          tmppseg2 = tseg2 - 1;

          /* Start from pseg1 = pseg2 and adjust until propseg is valid */

          tmppseg1 = tmppseg2;
          tmppropseg = tseg1 - tmppseg1 - 2;

          while (tmppropseg <= 0)
            {
              tmppropseg++;
              tmppseg1--;
            }

          while (tmppropseg >= SEGMAX)
            {
              tmppropseg--;
              tmppseg1++;
            }

          /* Reject any combination outside the hardware limits */

          if (((tseg1 >= TSEG1MAX) || (tseg2 >= TSEG2MAX) ||
              (tseg2 < TSEG_MIN) || (tseg1 < TSEG_MIN)) ||
              ((tmppropseg >= SEGMAX) || (tmppseg1 >= SEGMAX) ||
              (tmppseg2 < SEG_MIN) || (tmppseg2 >= SEGMAX)))
            {
              continue;
            }

          tmpsample = ((tseg1 + 1) * 100) / numtq;

          /* Accept the first solution whose achieved sample point is
           * within sp_tolerance percent of the requested one.
           */

          if ((tmpsample - timeseg->samplep) <= sp_tolerance &&
              (timeseg->samplep - tmpsample) <= sp_tolerance)
            {
              if (can_fd == 1)
                {
                  timeseg->propseg = tmppropseg + 1;
                }
              else
                {
                  timeseg->propseg = tmppropseg;
                }

              timeseg->pseg1 = tmppseg1;
              timeseg->pseg2 = tmppseg2;
              timeseg->presdiv = tmppresdiv;
              timeseg->samplep = tmpsample;

              return 1;
            }
        }
    }

  return 0;
}
/* Forward declarations of the private driver functions defined below */

/* Common TX logic */

static bool s32k1xx_txringfull(FAR struct s32k1xx_driver_s *priv);
static int s32k1xx_transmit(FAR struct s32k1xx_driver_s *priv);
static int s32k1xx_txpoll(struct net_driver_s *dev);

/* Helper functions */

static void s32k1xx_setenable(uint32_t base, uint32_t enable);
static void s32k1xx_setfreeze(uint32_t base, uint32_t freeze);
static uint32_t s32k1xx_waitmcr_change(uint32_t base,
                                       uint32_t mask,
                                       uint32_t target_state);

/* Interrupt handling */

static void s32k1xx_receive(FAR struct s32k1xx_driver_s *priv,
                            uint32_t flags);
static void s32k1xx_txdone(FAR void *arg);
static int s32k1xx_flexcan_interrupt(int irq, FAR void *context,
                                     FAR void *arg);

/* Watchdog timer expirations */

#ifdef TX_TIMEOUT_WQ
static void s32k1xx_txtimeout_work(FAR void *arg);
static void s32k1xx_txtimeout_expiry(wdparm_t arg);
#endif

/* NuttX callback functions */

static int s32k1xx_ifup(struct net_driver_s *dev);
static int s32k1xx_ifdown(struct net_driver_s *dev);
static void s32k1xx_txavail_work(FAR void *arg);
static int s32k1xx_txavail(struct net_driver_s *dev);
#ifdef CONFIG_NETDEV_IOCTL
static int s32k1xx_ioctl(struct net_driver_s *dev, int cmd,
                         unsigned long arg);
#endif

/* Initialization */

static int s32k1xx_initialize(struct s32k1xx_driver_s *priv);
static void s32k1xx_reset(struct s32k1xx_driver_s *priv);
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Function: s32k1xx_txringfull
*
* Description:
* Check if all of the TX descriptors are in use.
*
* Input Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* true is the TX ring is full; false if there are free slots at the
* head index.
*
****************************************************************************/
static bool s32k1xx_txringfull(FAR struct s32k1xx_driver_s *priv)
{
  uint32_t mb;

  /* Scan every TX mailbox; a single mailbox whose code is not
   * DATAORREMOTE means there is still a free slot.
   */

  for (mb = 0; mb < TXMBCOUNT; mb++)
    {
      if (priv->tx[mb].cs.code != CAN_TXMB_DATAORREMOTE)
        {
          return false;
        }
    }

  return true;
}
/****************************************************************************
* Function: s32k1xx_transmit
*
* Description:
* Start hardware transmission. Called either from the txdone interrupt
* handling or from watchdog based polling.
*
* Input Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* OK on success; a negated errno on failure
*
* Assumptions:
* May or may not be called from an interrupt handler. In either case,
* global interrupts are disabled, either explicitly or indirectly through
* interrupt handling logic.
*
****************************************************************************/
static int s32k1xx_transmit(FAR struct s32k1xx_driver_s *priv)
{
  /* Attempt to write frame */

  uint32_t mbi = 0;
  uint32_t mb_bit;
  uint32_t regval;
#ifdef CONFIG_NET_CAN_CANFD
  uint32_t *frame_data_word;
  uint32_t i;
#endif
#ifdef CONFIG_NET_CAN_RAW_TX_DEADLINE
  int32_t timeout;
#endif

  /* If ESR2 reports an inactive mailbox with a valid priority (IMB and
   * VPS both set), start searching at that mailbox instead of 0.
   */

  if ((getreg32(priv->base + S32K1XX_CAN_ESR2_OFFSET) &
      (CAN_ESR2_IMB | CAN_ESR2_VPS)) ==
      (CAN_ESR2_IMB | CAN_ESR2_VPS))
    {
      mbi = ((getreg32(priv->base + S32K1XX_CAN_ESR2_OFFSET) &
             CAN_ESR2_LPTM_MASK) >> CAN_ESR2_LPTM_SHIFT);
      mbi -= RXMBCOUNT;
    }

  mb_bit = 1 << (RXMBCOUNT + mbi);

  /* Search for the first TX mailbox not already queued for transmission */

  while (mbi < TXMBCOUNT)
    {
      if (priv->tx[mbi].cs.code != CAN_TXMB_DATAORREMOTE)
        {
          /* Clear any stale interrupt flag for the chosen mailbox */

          putreg32(mb_bit, priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
          break;
        }

      mb_bit <<= 1;
      mbi++;
    }

  if (mbi == TXMBCOUNT)
    {
      nwarn("No TX MB available mbi %i\r\n", mbi);
      return 0;       /* No transmission for you! */
    }

#ifdef CONFIG_NET_CAN_RAW_TX_DEADLINE
  struct timespec ts;
  clock_systime_timespec(&ts);

  if (priv->dev.d_sndlen > priv->dev.d_len)
    {
      /* The socket appended an explicit struct timeval deadline after
       * the frame payload; convert it to a relative tick timeout.
       */

      struct timeval *tv =
             (struct timeval *)(priv->dev.d_buf + priv->dev.d_len);
      priv->txmb[mbi].deadline = *tv;
      timeout  = (tv->tv_sec - ts.tv_sec)*CLK_TCK
                 + ((tv->tv_usec - ts.tv_nsec / 1000)*CLK_TCK) / 1000000;
      if (timeout < 0)
        {
          return 0;       /* No transmission for you! */
        }
    }
  else
    {
      /* Default TX deadline defined in NET_CAN_RAW_DEFAULT_TX_DEADLINE */

      if (CONFIG_NET_CAN_RAW_DEFAULT_TX_DEADLINE > 0)
        {
          timeout = ((CONFIG_NET_CAN_RAW_DEFAULT_TX_DEADLINE / 1000000)
              *CLK_TCK);
          priv->txmb[mbi].deadline.tv_sec = ts.tv_sec +
              CONFIG_NET_CAN_RAW_DEFAULT_TX_DEADLINE / 1000000;
          priv->txmb[mbi].deadline.tv_usec = (ts.tv_nsec / 1000) +
              CONFIG_NET_CAN_RAW_DEFAULT_TX_DEADLINE % 1000000;
        }
      else
        {
          /* No deadline: mark the slot and skip the watchdog below */

          priv->txmb[mbi].deadline.tv_sec = 0;
          priv->txmb[mbi].deadline.tv_usec = 0;
          timeout = -1;
        }
    }
#endif

  /* Track the highest mailbox index ever used (diagnostics only) */

  peak_tx_mailbox_index_ =
    (peak_tx_mailbox_index_ > mbi ? peak_tx_mailbox_index_ : mbi);

  union cs_e cs;
  cs.code = CAN_TXMB_DATAORREMOTE;
  struct mb_s *mb = &priv->tx[mbi];

  /* Deactivate the mailbox while its ID and payload are being written */

  mb->cs.code = CAN_TXMB_INACTIVE;

  if (priv->dev.d_len <= sizeof(struct can_frame))
    {
      /* Classic CAN 2.0 frame */

      struct can_frame *frame = (struct can_frame *)priv->dev.d_buf;

      if (frame->can_id & CAN_EFF_FLAG)
        {
          cs.ide = 1;
          mb->id.ext = frame->can_id & MASKEXTID;
        }
      else
        {
          mb->id.std = frame->can_id & MASKSTDID;
        }

      cs.rtr = frame->can_id & FLAGRTR ? 1 : 0;
      cs.dlc = frame->can_dlc;

      /* Mailbox payload words are big-endian; swap on the way in */

      mb->data[0].w00 = __builtin_bswap32(*(uint32_t *)&frame->data[0]);
      mb->data[1].w00 = __builtin_bswap32(*(uint32_t *)&frame->data[4]);
    }
#ifdef CONFIG_NET_CAN_CANFD
  else /* CAN FD frame */
    {
      struct canfd_frame *frame = (struct canfd_frame *)priv->dev.d_buf;

      cs.edl = 1; /* CAN FD Frame */

      if (frame->can_id & CAN_EFF_FLAG)
        {
          cs.ide = 1;
          mb->id.ext = frame->can_id & MASKEXTID;
        }
      else
        {
          mb->id.std = frame->can_id & MASKSTDID;
        }

      cs.rtr = frame->can_id & FLAGRTR ? 1 : 0;
      cs.dlc = len_to_can_dlc[frame->len];

      /* Copy the payload one byte-swapped 32-bit word at a time,
       * rounding the byte count up to whole words.
       */

      frame_data_word = (uint32_t *)&frame->data[0];

      for (i = 0; i < (frame->len + 4 - 1) / 4; i++)
        {
          mb->data[i].w00 = __builtin_bswap32(frame_data_word[i]);
        }
    }
#endif

  /* Writing the CS word with the DATAORREMOTE code hands the mailbox to
   * the hardware for transmission.
   */

  mb->cs = cs; /* Go. */

  /* Enable the TX-complete interrupt for this mailbox */

  regval  = getreg32(priv->base + S32K1XX_CAN_IMASK1_OFFSET);
  regval |= mb_bit;
  putreg32(regval, priv->base + S32K1XX_CAN_IMASK1_OFFSET);

  /* Increment statistics */

  NETDEV_TXPACKETS(&priv->dev);

#ifdef TX_TIMEOUT_WQ
  /* Setup the TX timeout watchdog (perhaps restarting the timer) */

  if (timeout >= 0)
    {
      wd_start(&priv->txtimeout[mbi], timeout + 1,
               s32k1xx_txtimeout_expiry, (wdparm_t)priv);
    }
#endif

  return OK;
}
/****************************************************************************
* Function: s32k1xx_txpoll
*
* Description:
* The transmitter is available, check if the network has any outgoing
* packets ready to send. This is a callback from devif_poll().
* devif_poll() may be called:
*
* 1. When the preceding TX packet send is complete,
* 2. When the preceding TX packet send timesout and the interface is reset
* 3. During normal TX polling
*
* Input Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* OK on success; a negated errno on failure
*
* Assumptions:
* May or may not be called from an interrupt handler. In either case,
* global interrupts are disabled, either explicitly or indirectly through
* interrupt handling logic.
*
****************************************************************************/
static int s32k1xx_txpoll(struct net_driver_s *dev)
{
  FAR struct s32k1xx_driver_s *priv =
    (FAR struct s32k1xx_driver_s *)dev->d_private;

  /* Only act when the poll produced outgoing data (d_len > 0) that is
   * not looped back internally.  Short-circuit keeps the original
   * evaluation order: devif_loopback() runs only when d_len > 0.
   */

  if (priv->dev.d_len > 0 && !devif_loopback(&priv->dev))
    {
      /* Hand the frame to the hardware */

      s32k1xx_transmit(priv);

      /* Terminate the poll with a non-zero return when no TX mailbox
       * can hold another frame.
       */

      if (s32k1xx_txringfull(priv))
        {
          return -EBUSY;
        }
    }

  /* If zero is returned, the polling will continue until all connections
   * have been examined.
   */

  return 0;
}
/****************************************************************************
* Function: s32k1xx_receive
*
* Description:
* An interrupt was received indicating the availability of a new RX packet
*
* Input Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by interrupt handling logic.
*
****************************************************************************/
static void s32k1xx_receive(FAR struct s32k1xx_driver_s *priv,
                            uint32_t flags)
{
  uint32_t mb_index;
  struct mb_s *rf;
#ifdef CONFIG_NET_CAN_CANFD
  uint32_t *frame_data_word;
  uint32_t i;
#endif

  /* Service pending RX mailboxes from the lowest set flag bit upwards;
   * arm_lsb() returns 32 once no flag remains.
   */

  while ((mb_index = arm_lsb(flags)) != 32)
    {
      rf = &priv->rx[mb_index];

      /* Read the frame contents */

#ifdef CONFIG_NET_CAN_CANFD
      if (rf->cs.edl) /* CAN FD frame */
        {
          struct canfd_frame *frame = (struct canfd_frame *)priv->rxdesc;

          if (rf->cs.ide)
            {
              frame->can_id = MASKEXTID & rf->id.ext;
              frame->can_id |= FLAGEFF;
            }
          else
            {
              frame->can_id = MASKSTDID & rf->id.std;
            }

          if (rf->cs.rtr)
            {
              frame->can_id |= FLAGRTR;
            }

          frame->len = can_dlc_to_len[rf->cs.dlc];

          /* Payload words are big-endian in the mailbox; swap them into
           * CPU order, whole words at a time.
           */

          frame_data_word = (uint32_t *)&frame->data[0];

          for (i = 0; i < (frame->len + 4 - 1) / 4; i++)
            {
              frame_data_word[i] = __builtin_bswap32(rf->data[i].w00);
            }

          /* Clear MB interrupt flag (write-1-to-clear) */

          putreg32(1 << mb_index,
                   priv->base + S32K1XX_CAN_IFLAG1_OFFSET);

          /* Copy the buffer pointer to priv->dev..  Set amount of data
           * in priv->dev.d_len
           */

          priv->dev.d_len = sizeof(struct canfd_frame);
          priv->dev.d_buf = (uint8_t *)frame;
        }
      else /* CAN 2.0 Frame */
#endif
        {
          struct can_frame *frame = (struct can_frame *)priv->rxdesc;

          if (rf->cs.ide)
            {
              frame->can_id = MASKEXTID & rf->id.ext;
              frame->can_id |= FLAGEFF;
            }
          else
            {
              frame->can_id = MASKSTDID & rf->id.std;
            }

          if (rf->cs.rtr)
            {
              frame->can_id |= FLAGRTR;
            }

          frame->can_dlc = rf->cs.dlc;

          /* Swap the two big-endian payload words into CPU order */

          *(uint32_t *)&frame->data[0] = __builtin_bswap32(rf->data[0].w00);
          *(uint32_t *)&frame->data[4] = __builtin_bswap32(rf->data[1].w00);

          /* Clear MB interrupt flag (write-1-to-clear) */

          putreg32(1 << mb_index,
                   priv->base + S32K1XX_CAN_IFLAG1_OFFSET);

          /* Copy the buffer pointer to priv->dev..  Set amount of data
           * in priv->dev.d_len
           */

          priv->dev.d_len = sizeof(struct can_frame);
          priv->dev.d_buf = (uint8_t *)frame;
        }

      /* Send to socket interface */

      NETDEV_RXPACKETS(&priv->dev);
      can_input(&priv->dev);

      /* Point the packet buffer back to the next Tx buffer that will be
       * used during the next write.  If the write queue is full, then
       * this will point at an active buffer, which must not be written
       * to.  This is OK because devif_poll won't be called unless the
       * queue is not full.
       */

      priv->dev.d_buf = (uint8_t *)priv->txdesc;
      flags &= ~(1 << mb_index);

      /* Reread interrupt flags and process them in this loop */

      if (flags == 0)
        {
          flags  = getreg32(priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
          flags &= IFLAG1_RX;
        }
    }
}
/****************************************************************************
* Function: s32k1xx_txdone
*
* Description:
* An interrupt was received indicating that the last TX packet(s) is done
*
* Input Parameters:
* priv - Reference to the driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by the watchdog logic.
* We are not in an interrupt context so that we can lock the network.
*
****************************************************************************/
static void s32k1xx_txdone(FAR void *arg)
{
  FAR struct s32k1xx_driver_s *priv = (FAR struct s32k1xx_driver_s *)arg;
  uint32_t flags;
  uint32_t mbi;
  uint32_t mb_bit;

  /* Collect the pending TX completion flags */

  flags  = getreg32(priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
  flags &= IFLAG1_TX;

  /* TODO First Process Error aborts */

  /* Process TX completions */

  mb_bit = 1 << RXMBCOUNT;
  for (mbi = 0; flags && mbi < TXMBCOUNT; mbi++)
    {
      if (flags & mb_bit)
        {
          /* Acknowledge the completion (write-1-to-clear) */

          putreg32(mb_bit, priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
          flags &= ~mb_bit;
          NETDEV_TXDONE(&priv->dev);
#ifdef TX_TIMEOUT_WQ
          /* We are here because a transmission completed, so the
           * corresponding watchdog can be canceled.
           */

          wd_cancel(&priv->txtimeout[mbi]);
#endif
        }

      mb_bit <<= 1;
    }

  /* There should be space for a new TX in any event.  Poll the network
   * for new XMIT data
   */

  net_lock();
  devif_poll(&priv->dev, s32k1xx_txpoll);
  net_unlock();
}
/****************************************************************************
* Function: s32k1xx_flexcan_interrupt
*
* Description:
* Three interrupt sources will vector this this function:
* 1. CAN MB transmit interrupt handler
* 2. CAN MB receive interrupt handler
* 3.
*
* Input Parameters:
* irq - Number of the IRQ that generated the interrupt
* context - Interrupt register state save info (architecture-specific)
*
* Returned Value:
* OK on success
*
* Assumptions:
*
****************************************************************************/
static int s32k1xx_flexcan_interrupt(int irq, FAR void *context,
                                     FAR void *arg)
{
  FAR struct s32k1xx_driver_s *priv = (struct s32k1xx_driver_s *)arg;

  /* Only the mailbox IRQ is handled; bus/error/lprx IRQs fall through */

  if (irq == priv->config->mb_irq)
    {
      uint32_t flags;

      /* RX first */

      flags  = getreg32(priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
      flags &= IFLAG1_RX;

      if (flags)
        {
          /* Process immediately since scheduling a workqueue is too slow
           * which causes us to drop CAN frames
           */

          s32k1xx_receive(priv, flags);
        }

      flags  = getreg32(priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
      flags &= IFLAG1_TX;

      if (flags)
        {
          /* Disable further TX MB CAN interrupts.  There can be no race
           * condition here.
           */

          flags  = getreg32(priv->base + S32K1XX_CAN_IMASK1_OFFSET);
          flags &= ~(IFLAG1_TX);
          putreg32(flags, priv->base + S32K1XX_CAN_IMASK1_OFFSET);

          /* Defer completion handling to the work queue */

          work_queue(CANWORK, &priv->irqwork, s32k1xx_txdone, priv, 0);
        }
    }

  return OK;
}
/****************************************************************************
* Function: s32k1xx_txtimeout_work
*
* Description:
* Perform TX timeout related work from the worker thread
*
* Input Parameters:
* arg - The argument passed when work_queue() as called.
*
* Returned Value:
* OK on success
*
* Assumptions:
*
****************************************************************************/
#ifdef TX_TIMEOUT_WQ
/* Fix: the expiry test used "sec > dsec || usec > dusec", which fires
 * spuriously whenever now.tv_usec exceeds the deadline's tv_usec even
 * though now.tv_sec is still BEFORE the deadline second.  The correct
 * lexicographic comparison only consults tv_usec when the seconds are
 * equal.
 */

static void s32k1xx_txtimeout_work(FAR void *arg)
{
  FAR struct s32k1xx_driver_s *priv = (FAR struct s32k1xx_driver_s *)arg;
  uint32_t mbi;
  struct timespec ts;

  /* Reinterpret the timespec as a timeval in place: tv_sec overlays
   * tv_sec, and tv_usec overlays tv_nsec (rewritten just below).
   */

  struct timeval *now = (struct timeval *)&ts;

  clock_systime_timespec(&ts);
  now->tv_usec = ts.tv_nsec / 1000; /* timespec to timeval conversion */

  /* The watchdog timed out, yet we still check mailboxes in case the
   * transmit function transmitted a new frame
   */

  for (mbi = 0; mbi < TXMBCOUNT; mbi++)
    {
      /* deadline.tv_sec == 0 marks a mailbox with no deadline armed */

      if (priv->txmb[mbi].deadline.tv_sec != 0
          && (now->tv_sec > priv->txmb[mbi].deadline.tv_sec
              || (now->tv_sec == priv->txmb[mbi].deadline.tv_sec
                  && now->tv_usec > priv->txmb[mbi].deadline.tv_usec)))
        {
          NETDEV_TXTIMEOUTS(&priv->dev);

          /* Request the hardware to abort the expired transmission */

          struct mb_s *mb = &priv->tx[mbi];
          mb->cs.code = CAN_TXMB_ABORT;
          priv->txmb[mbi].pending = TX_ABORT;
        }
    }
}
/****************************************************************************
* Function: s32k1xx_txtimeout_expiry
*
* Description:
* Our TX watchdog timed out. Called from the timer interrupt handler.
* The last TX never completed. Reset the hardware and start again.
*
* Input Parameters:
* arg - The argument
*
* Returned Value:
* None
*
* Assumptions:
* Global interrupts are disabled by the watchdog logic.
*
****************************************************************************/
static void s32k1xx_txtimeout_expiry(wdparm_t arg)
{
  FAR struct s32k1xx_driver_s *priv = (FAR struct s32k1xx_driver_s *)arg;

  /* Schedule to perform the TX timeout processing on the worker thread
   * (we are in the timer interrupt context here).
   */

  work_queue(CANWORK, &priv->irqwork, s32k1xx_txtimeout_work, priv, 0);
}
#endif
/* Enable (clear MDIS) or disable (set MDIS) the FlexCAN module, then
 * wait for the low-power-mode acknowledge bit.
 */

static void s32k1xx_setenable(uint32_t base, uint32_t enable)
{
  uint32_t mcr = getreg32(base + S32K1XX_CAN_MCR_OFFSET);

  if (enable)
    {
      mcr &= ~(CAN_MCR_MDIS);
    }
  else
    {
      mcr |= CAN_MCR_MDIS;
    }

  putreg32(mcr, base + S32K1XX_CAN_MCR_OFFSET);

  s32k1xx_waitmcr_change(base, CAN_MCR_LPMACK, 1);
}
/* Request entry to (freeze != 0) or exit from (freeze == 0) freeze mode
 * by toggling the HALT and FRZ bits together.
 */

static void s32k1xx_setfreeze(uint32_t base, uint32_t freeze)
{
  uint32_t mcr = getreg32(base + S32K1XX_CAN_MCR_OFFSET);

  if (freeze)
    {
      /* Enter freeze mode */

      mcr |= (CAN_MCR_HALT | CAN_MCR_FRZ);
    }
  else
    {
      /* Exit freeze mode */

      mcr &= ~(CAN_MCR_HALT | CAN_MCR_FRZ);
    }

  putreg32(mcr, base + S32K1XX_CAN_MCR_OFFSET);
}
/* Poll the MCR register until (MCR & mask) reaches target_state, waiting
 * 10us between reads.  Returns true on success, false after 1000 tries.
 */

static uint32_t s32k1xx_waitmcr_change(uint32_t base, uint32_t mask,
                                       uint32_t target_state)
{
  uint32_t attempts = 1000;

  while (attempts-- > 0)
    {
      const bool state =
        (getreg32(base + S32K1XX_CAN_MCR_OFFSET) & mask) != 0;

      if (state == target_state)
        {
          return true;
        }

      up_udelay(10);
    }

  return false;
}
/* Wait until the freeze-mode acknowledge bit (FRZACK) reaches
 * target_state; thin wrapper around s32k1xx_waitmcr_change().
 */

static uint32_t s32k1xx_waitfreezeack_change(uint32_t base,
                                             uint32_t target_state)
{
  return s32k1xx_waitmcr_change(base, CAN_MCR_FRZACK, target_state);
}
/****************************************************************************
* Function: s32k1xx_ifup
*
* Description:
* NuttX Callback: Bring up the Ethernet interface when an IP address is
* provided
*
* Input Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
static int s32k1xx_ifup(struct net_driver_s *dev)
{
  FAR struct s32k1xx_driver_s *priv =
    (FAR struct s32k1xx_driver_s *)dev->d_private;

  /* Bring up the hardware; s32k1xx_initialize() returns zero on failure */

  if (!s32k1xx_initialize(priv))
    {
      nerr("initialize failed");
      return -1;
    }

  priv->bifup = true;

  /* Attach the statically allocated TX/RX frame buffers */

#ifdef CONFIG_NET_CAN_CANFD
  priv->txdesc = (struct canfd_frame *)&g_tx_pool;
  priv->rxdesc = (struct canfd_frame *)&g_rx_pool;
#else
  priv->txdesc = (struct can_frame *)&g_tx_pool;
  priv->rxdesc = (struct can_frame *)&g_rx_pool;
#endif

  priv->dev.d_buf = (uint8_t *)priv->txdesc;

  /* Set interrupts */

  up_enable_irq(priv->config->bus_irq);
  up_enable_irq(priv->config->error_irq);

  /* Not every instance has a dedicated LPRX interrupt line (see the
   * per-instance config tables: lprx_irq is 0 for CAN1/CAN2).
   */

  if (priv->config->lprx_irq > 0)
    {
      up_enable_irq(priv->config->lprx_irq);
    }

  up_enable_irq(priv->config->mb_irq);

  return OK;
}
/****************************************************************************
* Function: s32k1xx_ifdown
*
* Description:
* NuttX Callback: Stop the interface.
*
* Input Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
*
****************************************************************************/
static int s32k1xx_ifdown(struct net_driver_s *dev)
{
FAR struct s32k1xx_driver_s *priv =
(FAR struct s32k1xx_driver_s *)dev->d_private;
s32k1xx_reset(priv);
priv->bifup = false;
return OK;
}
/****************************************************************************
* Function: s32k1xx_txavail_work
*
* Description:
* Perform an out-of-cycle poll on the worker thread.
*
* Input Parameters:
* arg - Reference to the NuttX driver state structure (cast to void*)
*
* Returned Value:
* None
*
* Assumptions:
* Called on the higher priority worker thread.
*
****************************************************************************/
static void s32k1xx_txavail_work(FAR void *arg)
{
  FAR struct s32k1xx_driver_s *priv = (FAR struct s32k1xx_driver_s *)arg;

  /* Ignore the notification if the interface is not yet up */

  net_lock();
  if (priv->bifup)
    {
      /* Check if there is room in the hardware to hold another outgoing
       * packet.
       */

      if (!s32k1xx_txringfull(priv))
        {
          /* No, there is space for another transfer.  Poll the network for
           * new XMIT data.
           */

          devif_poll(&priv->dev, s32k1xx_txpoll);
        }
    }

  net_unlock();
}
/****************************************************************************
* Function: s32k1xx_txavail
*
* Description:
* Driver callback invoked when new TX data is available. This is a
* stimulus perform an out-of-cycle poll and, thereby, reduce the TX
* latency.
*
* Input Parameters:
* dev - Reference to the NuttX driver state structure
*
* Returned Value:
* None
*
* Assumptions:
* Called in normal user mode
*
****************************************************************************/
static int s32k1xx_txavail(struct net_driver_s *dev)
{
  FAR struct s32k1xx_driver_s *priv =
    (FAR struct s32k1xx_driver_s *)dev->d_private;

  /* Is our single work structure available?  It may not be if there are
   * pending interrupt actions and we will have to ignore the Tx
   * availability action.
   */

  if (work_available(&priv->pollwork))
    {
      /* Schedule to serialize the poll on the worker thread.
       * NOTE: the poll work is invoked directly here rather than queued.
       */

      s32k1xx_txavail_work(priv);
    }

  return OK;
}
/****************************************************************************
* Function: s32k1xx_ioctl
*
* Description:
* PHY ioctl command handler
*
* Input Parameters:
* dev - Reference to the NuttX driver state structure
* cmd - ioctl command
* arg - Argument accompanying the command
*
* Returned Value:
* Zero (OK) on success; a negated errno value on failure.
*
* Assumptions:
*
****************************************************************************/
#ifdef CONFIG_NETDEV_CAN_BITRATE_IOCTL
static int s32k1xx_ioctl(struct net_driver_s *dev, int cmd,
                         unsigned long arg)
{
  FAR struct s32k1xx_driver_s *priv =
    (FAR struct s32k1xx_driver_s *)dev->d_private;
  int ret;

  switch (cmd)
    {
      case SIOCGCANBITRATE: /* Get bitrate from a CAN controller */
        {
          struct can_ioctl_data_s *req =
            (struct can_ioctl_data_s *)((uintptr_t)arg);
          req->arbi_bitrate = priv->arbi_timing.bitrate / 1000; /* kbit/s */
          req->arbi_samplep = priv->arbi_timing.samplep;
#ifdef CONFIG_NET_CAN_CANFD
          req->data_bitrate = priv->data_timing.bitrate / 1000; /* kbit/s */
          req->data_samplep = priv->data_timing.samplep;
#else
          req->data_bitrate = 0;
          req->data_samplep = 0;
#endif
          ret = OK;
        }
        break;

      case SIOCSCANBITRATE: /* Set bitrate of a CAN controller */
        {
          struct can_ioctl_data_s *req =
            (struct can_ioctl_data_s *)((uintptr_t)arg);

          /* Validate the requested arbitration-phase timing in a local
           * before committing anything to the driver state.
           */

          struct flexcan_timeseg arbi_timing;
          arbi_timing.bitrate = req->arbi_bitrate * 1000;
          arbi_timing.samplep = req->arbi_samplep;

          if (s32k1xx_bitratetotimeseg(&arbi_timing, 10, 0))
            {
              ret = OK;
            }
          else
            {
              ret = -EINVAL;
            }

#ifdef CONFIG_NET_CAN_CANFD
          /* Likewise validate the CAN FD data-phase timing; only checked
           * when the arbitration phase already validated successfully.
           */

          struct flexcan_timeseg data_timing;
          data_timing.bitrate = req->data_bitrate * 1000;
          data_timing.samplep = req->data_samplep;

          if (ret == OK && s32k1xx_bitratetotimeseg(&data_timing, 10, 1))
            {
              ret = OK;
            }
          else
            {
              ret = -EINVAL;
            }
#endif

          if (ret == OK)
            {
              /* Reset CAN controller and start with new timings */

              priv->arbi_timing = arbi_timing;
#ifdef CONFIG_NET_CAN_CANFD
              priv->data_timing = data_timing;
#endif
              s32k1xx_ifup(dev);
            }
        }
        break;

      default:
        ret = -ENOTTY;
        break;
    }

  return ret;
}
#endif /* CONFIG_NETDEV_CAN_BITRATE_IOCTL */
/****************************************************************************
 * Function: s32k1xx_initialize
 *
 * Description:
 *   Initialize FLEXCAN device
 *
 * Input Parameters:
 *   priv  - Reference to the private FLEXCAN driver state structure
 *
 * Returned Value:
 *   1 on success; -1 on failure to enter/exit freeze mode.
 *
 * Assumptions:
 *
 ****************************************************************************/
static int s32k1xx_initialize(struct s32k1xx_driver_s *priv)
{
  uint32_t regval;
  uint32_t i;

  /* initialize CAN device */

  s32k1xx_setenable(priv->base, 0);

  /* Set SYS_CLOCK src (must be done while the module is disabled) */

  regval = getreg32(priv->base + S32K1XX_CAN_CTRL1_OFFSET);
  regval |= CAN_CTRL1_CLKSRC;
  putreg32(regval, priv->base + S32K1XX_CAN_CTRL1_OFFSET);

  s32k1xx_setenable(priv->base, 1);
  s32k1xx_reset(priv);

  /* Enter freeze mode: bit timing and MB configuration below require it */

  s32k1xx_setfreeze(priv->base, 1);
  if (!s32k1xx_waitfreezeack_change(priv->base, 1))
    {
      ninfo("FLEXCAN: freeze fail\r\n");
      return -1;
    }

#ifndef CONFIG_NET_CAN_CANFD
  /* Classic CAN: bit timing is programmed through CTRL1 */

  regval = getreg32(priv->base + S32K1XX_CAN_CTRL1_OFFSET);
  regval |= CAN_CTRL1_PRESDIV(priv->arbi_timing.presdiv) | /* Prescaler divisor factor */
            CAN_CTRL1_PROPSEG(priv->arbi_timing.propseg) | /* Propagation segment */
            CAN_CTRL1_PSEG1(priv->arbi_timing.pseg1) |     /* Phase buffer segment 1 */
            CAN_CTRL1_PSEG2(priv->arbi_timing.pseg2) |     /* Phase buffer segment 2 */
            CAN_CTRL1_RJW(1);                              /* Resynchronization jump width */
  putreg32(regval, priv->base + S32K1XX_CAN_CTRL1_OFFSET);
#else
  regval = getreg32(priv->base + S32K1XX_CAN_CBT_OFFSET);
  regval |= CAN_CBT_BTF |                                  /* Enable extended bit timing
                                                            * configurations for CAN-FD for setting up
                                                            * separately nominal and data phase */
            CAN_CBT_EPRESDIV(priv->arbi_timing.presdiv) |  /* Prescaler divisor factor */
            CAN_CBT_EPROPSEG(priv->arbi_timing.propseg) |  /* Propagation segment */
            CAN_CBT_EPSEG1(priv->arbi_timing.pseg1) |      /* Phase buffer segment 1 */
            CAN_CBT_EPSEG2(priv->arbi_timing.pseg2) |      /* Phase buffer segment 2 */
            CAN_CBT_ERJW(1);                               /* Resynchronization jump width */
  putreg32(regval, priv->base + S32K1XX_CAN_CBT_OFFSET);

  /* Enable CAN FD feature */

  regval = getreg32(priv->base + S32K1XX_CAN_MCR_OFFSET);
  regval |= CAN_MCR_FDEN;
  putreg32(regval, priv->base + S32K1XX_CAN_MCR_OFFSET);

  /* Data-phase bit timing (used after the bit-rate switch) */

  regval = getreg32(priv->base + S32K1XX_CAN_FDCBT_OFFSET);
  regval |= CAN_FDCBT_FPRESDIV(priv->data_timing.presdiv) | /* Prescaler divisor factor of 1 */
            CAN_FDCBT_FPROPSEG(priv->data_timing.propseg) | /* Propagation
                                                             * segment (only register that doesn't add 1) */
            CAN_FDCBT_FPSEG1(priv->data_timing.pseg1) |     /* Phase buffer segment 1 */
            CAN_FDCBT_FPSEG2(priv->data_timing.pseg2) |     /* Phase buffer segment 2 */
            CAN_FDCBT_FRJW(priv->data_timing.pseg2);        /* Resynchorinzation jump width same as PSEG2 */
  putreg32(regval, priv->base + S32K1XX_CAN_FDCBT_OFFSET);

  /* Additional CAN-FD configurations */

  regval = getreg32(priv->base + S32K1XX_CAN_FDCTRL_OFFSET);
  regval |= CAN_FDCTRL_FDRATE |    /* Enable bit rate switch in data phase of frame */
            CAN_FDCTRL_TDCEN |     /* Enable transceiver delay compensation */
            CAN_FDCTRL_TDCOFF(5) | /* Setup 5 cycles for data phase sampling delay */
            CAN_FDCTRL_MBDSR0(3);  /* Setup 64 bytes per message buffer (7 MB's) */
  putreg32(regval, priv->base + S32K1XX_CAN_FDCTRL_OFFSET);

  regval = getreg32(priv->base + S32K1XX_CAN_CTRL2_OFFSET);
  regval |= CAN_CTRL2_ISOCANFDEN;
  putreg32(regval, priv->base + S32K1XX_CAN_CTRL2_OFFSET);
#endif

  /* Clear the IDs of all message buffers past the TX region */

  for (i = TXMBCOUNT; i < TOTALMBCOUNT; i++)
    {
      priv->rx[i].id.w = 0x0;

      /* FIXME sometimes we get a hard fault here */
    }

  putreg32(0x0, priv->base + S32K1XX_CAN_RXFGMASK_OFFSET);

  /* Zero the individual RX masks: accept everything */

  for (i = 0; i < TOTALMBCOUNT; i++)
    {
      putreg32(0, priv->base + S32K1XX_CAN_RXIMR_OFFSET(i));
    }

  /* Arm the RX message buffers (code = 4; presumably the EMPTY/ready-to-
   * receive MB code -- confirm against the reference manual).
   */

  for (i = 0; i < RXMBCOUNT; i++)
    {
      ninfo("Set MB%i to receive %p\r\n", i, &priv->rx[i]);
      priv->rx[i].cs.edl = 0x1;
      priv->rx[i].cs.brs = 0x1;
      priv->rx[i].cs.esi = 0x0;
      priv->rx[i].cs.code = 4;
      priv->rx[i].cs.srr = 0x0;
      priv->rx[i].cs.ide = 0x1;
      priv->rx[i].cs.rtr = 0x0;
    }

  /* Acknowledge and unmask the RX interrupt flags */

  putreg32(IFLAG1_RX, priv->base + S32K1XX_CAN_IFLAG1_OFFSET);
  putreg32(IFLAG1_RX, priv->base + S32K1XX_CAN_IMASK1_OFFSET);

  /* Exit freeze mode */

  s32k1xx_setfreeze(priv->base, 0);
  if (!s32k1xx_waitfreezeack_change(priv->base, 0))
    {
      ninfo("FLEXCAN: unfreeze fail\r\n");
      return -1;
    }

  return 1;
}
/****************************************************************************
 * Function: s32k1xx_reset
 *
 * Description:
 *   Put the FlexCAN controller in the non-operational, reset state
 *
 * Input Parameters:
 *   priv  - Reference to the private FLEXCAN driver state structure
 *
 * Returned Value:
 *   None
 *
 * Assumptions:
 *
 ****************************************************************************/
static void s32k1xx_reset(struct s32k1xx_driver_s *priv)
{
  uint32_t regval;
  uint32_t i;

  /* Request a soft reset and wait for the SOFTRST bit to self-clear */

  regval = getreg32(priv->base + S32K1XX_CAN_MCR_OFFSET);
  regval |= CAN_MCR_SOFTRST;
  putreg32(regval, priv->base + S32K1XX_CAN_MCR_OFFSET);

  if (!s32k1xx_waitmcr_change(priv->base, CAN_MCR_SOFTRST, 0))
    {
      nerr("Reset failed");
      return;
    }

  /* Clear the supervisor bit in MCR */

  regval = getreg32(priv->base + S32K1XX_CAN_MCR_OFFSET);
  regval &= ~(CAN_MCR_SUPV);
  putreg32(regval, priv->base + S32K1XX_CAN_MCR_OFFSET);

  /* Initialize all MB rx and tx */

  for (i = 0; i < TOTALMBCOUNT; i++)
    {
      ninfo("MB %i %p\r\n", i, &priv->rx[i]);
      ninfo("MB %i %p\r\n", i, &priv->rx[i].id.w);
      priv->rx[i].cs.cs = 0x0;
      priv->rx[i].id.w = 0x0;
      priv->rx[i].data[0].w00 = 0x0;
      priv->rx[i].data[1].w00 = 0x0;
    }

  /* Configure MCR: self wakeup, warning interrupts, no self reception,
   * individual RX masking, abort enable, and the highest usable MB index.
   */

  regval = getreg32(priv->base + S32K1XX_CAN_MCR_OFFSET);
  regval |= CAN_MCR_SLFWAK | CAN_MCR_WRNEN | CAN_MCR_SRXDIS |
            CAN_MCR_IRMQ | CAN_MCR_AEN |
            (((TOTALMBCOUNT - 1) << CAN_MCR_MAXMB_SHIFT) &
             CAN_MCR_MAXMB_MASK);
  putreg32(regval, priv->base + S32K1XX_CAN_MCR_OFFSET);

  regval = CAN_CTRL2_RRS | CAN_CTRL2_EACEN;
  putreg32(regval, priv->base + S32K1XX_CAN_CTRL2_OFFSET);

  /* Clear all individual RX masks */

  for (i = 0; i < TOTALMBCOUNT; i++)
    {
      putreg32(0, priv->base + S32K1XX_CAN_RXIMR_OFFSET(i));
    }

  /* Filtering catchall */

  putreg32(0x3fffffff, priv->base + S32K1XX_CAN_RX14MASK_OFFSET);
  putreg32(0x3fffffff, priv->base + S32K1XX_CAN_RX15MASK_OFFSET);
  putreg32(0x3fffffff, priv->base + S32K1XX_CAN_RXMGMASK_OFFSET);
  putreg32(0x0, priv->base + S32K1XX_CAN_RXFGMASK_OFFSET);
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Function: s32k1xx_caninitialize
*
* Description:
* Initialize the CAN controller and driver
*
* Input Parameters:
* intf - In the case where there are multiple CAN devices, this value
* identifies which CAN device is to be initialized.
*
* Returned Value:
* OK on success; Negated errno on failure.
*
* Assumptions:
*
****************************************************************************/
int s32k1xx_caninitialize(int intf)
{
  struct s32k1xx_driver_s *priv;

  /* Select the driver state and board configuration for the requested
   * interface and load the default (Kconfig-provided) bit timing.
   */

  switch (intf)
    {
#ifdef CONFIG_S32K1XX_FLEXCAN0
      case 0:
        priv = &g_flexcan0;
        memset(priv, 0, sizeof(struct s32k1xx_driver_s));
        priv->base = S32K1XX_FLEXCAN0_BASE;
        priv->config = &s32k1xx_flexcan0_config;

        /* Default bitrate configuration */

#  ifdef CONFIG_NET_CAN_CANFD
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN0_ARBI_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN0_ARBI_SAMPLEP;
        priv->data_timing.bitrate = CONFIG_FLEXCAN0_DATA_BITRATE;
        priv->data_timing.samplep = CONFIG_FLEXCAN0_DATA_SAMPLEP;
#  else
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN0_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN0_SAMPLEP;
#  endif
        break;
#endif

#ifdef CONFIG_S32K1XX_FLEXCAN1
      case 1:
        priv = &g_flexcan1;
        memset(priv, 0, sizeof(struct s32k1xx_driver_s));
        priv->base = S32K1XX_FLEXCAN1_BASE;
        priv->config = &s32k1xx_flexcan1_config;

        /* Default bitrate configuration */

#  ifdef CONFIG_NET_CAN_CANFD
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN1_ARBI_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN1_ARBI_SAMPLEP;
        priv->data_timing.bitrate = CONFIG_FLEXCAN1_DATA_BITRATE;
        priv->data_timing.samplep = CONFIG_FLEXCAN1_DATA_SAMPLEP;
#  else
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN1_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN1_SAMPLEP;
#  endif
        break;
#endif

#ifdef CONFIG_S32K1XX_FLEXCAN2
      case 2:
        priv = &g_flexcan2;
        memset(priv, 0, sizeof(struct s32k1xx_driver_s));
        priv->base = S32K1XX_FLEXCAN2_BASE;
        priv->config = &s32k1xx_flexcan2_config;

        /* Default bitrate configuration */

#  ifdef CONFIG_NET_CAN_CANFD
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN2_ARBI_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN2_ARBI_SAMPLEP;
        priv->data_timing.bitrate = CONFIG_FLEXCAN2_DATA_BITRATE;
        priv->data_timing.samplep = CONFIG_FLEXCAN2_DATA_SAMPLEP;
#  else
        priv->arbi_timing.bitrate = CONFIG_FLEXCAN2_BITRATE;
        priv->arbi_timing.samplep = CONFIG_FLEXCAN2_SAMPLEP;
#  endif
        break;
#endif

      default:
        return -ENODEV;
    }

  /* Verify that a valid time segment configuration exists for the
   * requested bitrate/sample point before touching the hardware.
   */

  if (!s32k1xx_bitratetotimeseg(&priv->arbi_timing, 1, 0))
    {
      nerr("ERROR: Invalid CAN timings please try another sample point "
           "or refer to the reference manual\n");
      return -1;
    }

#ifdef CONFIG_NET_CAN_CANFD
  if (!s32k1xx_bitratetotimeseg(&priv->data_timing, 1, 1))
    {
      nerr("ERROR: Invalid CAN data phase timings please try another "
           "sample point or refer to the reference manual\n");
      return -1;
    }
#endif

  /* Configure the CAN TX/RX pins and the optional transceiver enable */

  s32k1xx_pinconfig(priv->config->tx_pin);
  s32k1xx_pinconfig(priv->config->rx_pin);
  if (priv->config->enable_pin > 0)
    {
      s32k1xx_pinconfig(priv->config->enable_pin);
      s32k1xx_gpiowrite(priv->config->enable_pin, priv->config->enable_high);
    }

  /* Attach the flexcan interrupt handler */

  if (irq_attach(priv->config->bus_irq, s32k1xx_flexcan_interrupt, priv))
    {
      /* We could not attach the ISR to the interrupt */

      nerr("ERROR: Failed to attach CAN bus IRQ\n");
      return -EAGAIN;
    }

  if (irq_attach(priv->config->error_irq, s32k1xx_flexcan_interrupt, priv))
    {
      /* We could not attach the ISR to the interrupt */

      nerr("ERROR: Failed to attach CAN error IRQ\n");
      return -EAGAIN;
    }

  if (priv->config->lprx_irq > 0)
    {
      if (irq_attach(priv->config->lprx_irq,
                     s32k1xx_flexcan_interrupt, priv))
        {
          /* We could not attach the ISR to the interrupt */

          nerr("ERROR: Failed to attach CAN LPRX IRQ\n");
          return -EAGAIN;
        }
    }

  if (irq_attach(priv->config->mb_irq, s32k1xx_flexcan_interrupt, priv))
    {
      /* We could not attach the ISR to the interrupt */

      nerr("ERROR: Failed to attach CAN OR'ed Message buffer (0-15) IRQ\n");
      return -EAGAIN;
    }

  /* Initialize the driver structure */

  priv->dev.d_ifup    = s32k1xx_ifup;    /* I/F up (new IP address) callback */
  priv->dev.d_ifdown  = s32k1xx_ifdown;  /* I/F down callback */
  priv->dev.d_txavail = s32k1xx_txavail; /* New TX data callback */
#ifdef CONFIG_NETDEV_IOCTL
  priv->dev.d_ioctl   = s32k1xx_ioctl;   /* Support CAN ioctl() calls */
#endif
  priv->dev.d_private = priv;            /* Used to recover private state from dev */

  /* The message buffer RAM is split into RXMBCOUNT RX buffers followed by
   * the TX buffers.
   */

  priv->rx = (struct mb_s *)(priv->base + S32K1XX_CAN_MB_OFFSET);
  priv->tx = (struct mb_s *)(priv->base + S32K1XX_CAN_MB_OFFSET +
                             (sizeof(struct mb_s) * RXMBCOUNT));

  /* Put the interface in the down state.  This usually amounts to resetting
   * the device and/or calling s32k1xx_ifdown().
   */

  ninfo("callbacks done\r\n");
  s32k1xx_ifdown(&priv->dev);

  /* Register the device with the OS so that socket IOCTLs can be
   * performed.  Propagate any registration failure (negated errno) to the
   * caller instead of silently discarding it.
   */

  return netdev_register(&priv->dev, NET_LL_CAN);
}
/****************************************************************************
* Name: arm_netinitialize
*
* Description:
* Initialize the enabled CAN device interfaces. If there are more
* different network devices in the chip, then board-specific logic will
* have to provide this function to determine which, if any, network
* devices should be initialized.
*
****************************************************************************/
#if !defined(CONFIG_NETDEV_LATEINIT)
void arm_netinitialize(void)
{
  /* Bring up every FlexCAN instance selected in the configuration.
   * Initialization failures are reported inside s32k1xx_caninitialize()
   * and do not prevent the remaining interfaces from initializing.
   */

#ifdef CONFIG_S32K1XX_FLEXCAN0
  s32k1xx_caninitialize(0);
#endif
#ifdef CONFIG_S32K1XX_FLEXCAN1
  s32k1xx_caninitialize(1);
#endif
#ifdef CONFIG_S32K1XX_FLEXCAN2
  s32k1xx_caninitialize(2);
#endif
}
#endif
#endif /* CONFIG_S32K1XX_FLEXCAN */
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<title>Group console messages: Simple logs</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link href="../../_common/testcase.css" type="text/css" rel="stylesheet"/>
<script>
// Test fixture: each statement logs an identical value twice, so the
// console UI under test should collapse every pair into one entry with a
// group counter of "2".  Do not change the logged values -- they ARE the
// test.
function groupedLogs()
{
    console.info("info"); console.info("info");
    console.log(null); console.log(null);
    console.log(undefined); console.log(undefined);
    console.log(true); console.log(true);
    console.log(1); console.log(1);
    console.log(-0); console.log(0);
    console.log("test"); console.log("test");
    console.log(Math.sqrt(-1)); console.log(Math.sqrt(-1));
    console.debug("debug"); console.debug("debug");
    console.info("info"); console.info("info");
    console.warn("warning"); console.warn("warning");
    console.error("error"); console.error("error");
    cleanUp();
}
// Test fixture: each pair of logs differs in value, type, or identity, so
// the console UI under test must NOT group them.  Do not change the logged
// values -- they ARE the test.
function notGroupedLogs()
{
    console.log(1); console.log(2);
    console.log(true); console.log(false);
    console.log(1); console.log("1");
    console.log([1,2,3]); console.log([1,2,4]);
    console.log(1, 23); console.log(12, 3);
    console.log(["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]);
    console.log(["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaacaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"]);
    console.log(/test/i); console.log(/test/i);
    var obj = {1:1,x:2};
    console.log(obj); console.log(obj);
    var obj2 = [{a:1},2,3];
    console.log(obj2); console.log(obj2);
    var obj3 = Object.create(null);
    console.log(obj3); console.log(obj3);
    console.log(document.head.firstChild); console.log(document.head.firstChild);
    console.log([{a:1},2,3]); console.log([{a:1},2,3]);
    console.log([{a:1},2,4]); console.log([{a:1},2,5]);
    console.log({a:1,b:2,c:3,d:4}); console.log({a:1,b:2,c:3,d:5});
    var date1 = new Date();
    var date2 = new Date(date1.getTime());
    console.log(date1); console.log(date2);
    var date3 = new Date();
    var date4 = new Date(date1.getTime());
    date4.setMilliseconds(date4.getMilliseconds() + 1);
    console.log(date3); console.log(date4);
    console.log(1,1); console.log(1,2);
    console.log("1"); console.log(1);
    console.log("null"); console.log(null); console.log(undefined);
    console.log(document); console.log(document);
    // Same node logged twice, but mutated in between -- must not group.
    console.log(document.head.firstChild); document.head.firstChild.textContent = " "; console.log(document.head.firstChild);
    var arr = [1];
    console.log(arr); arr[0] = 2; console.log(arr);
    logTable(); logTable();
    /* This is currently not supported by Firebug
    var th = function() { throw 0; };
    var throwy1 = Proxy.create({getOwnPropertyDescriptor: th, getPropertyDescriptor: th, getOwnPropertyNames: th, getPropertyNames: th}, null);
    var throwy2 = Proxy.create({getOwnPropertyDescriptor: th, getPropertyDescriptor: th, getOwnPropertyNames: th, getPropertyNames: th}, null);
    console.log(throwy1); console.log(throwy2);
    */
    appendChildAndLog(document.createTextNode("a")); appendChildAndLog(document.createTextNode("a"));
    appendChildInArrayAndLog(document.createTextNode("a")); appendChildInArrayAndLog(document.createTextNode("a"));
    cleanUp();
}
// Remove every child node of the #output container, restoring the page to
// its initial state between test runs.
function cleanUp()
{
    var container = document.getElementById("output");
    while (container.lastChild)
    {
        container.removeChild(container.lastChild);
    }
}
// Emit a fixed two-row table via console.table(); called twice by
// notGroupedLogs() (fresh array literals each call).
function logTable()
{
    var objects =
    [
        {firstName: "Steven", lastName: "Mayers"},
        {firstName: "John", lastName: "Travers"}
    ];
    console.table(objects);
}
// Append a node to #output, then log the node itself.
function appendChildAndLog(ch)
{
    var output = document.getElementById("output");
    output.appendChild(ch);
    console.log(ch);
}
// Append a node to #output, then log it wrapped in a one-element array.
function appendChildInArrayAndLog(ch)
{
    var output = document.getElementById("output");
    output.appendChild(ch);
    console.log([ch]);
}
</script>
</head>
<body>
<header>
<h1><a href="http://code.google.com/p/fbug/issues/detail?id=4979">Group console messages</a>: Simple logs</h1>
</header>
<div>
<section id="content">
<button id="testButton1" onclick="groupedLogs()">Grouped logs</button>
<button id="testButton2" onclick="notGroupedLogs()">Not grouped logs</button>
</section>
<section id="description">
<h3>Steps to reproduce</h3>
<ol>
<li>Open Firebug</li>
<li>Enable and switch to the <em>Console</em> panel</li>
<li>
Press the <em>Grouped logs</em> button above<br/>
<span class="ok">⇒ Every log in the console should have the group counter set to "2".</span>
</li>
<li>
Press the <em>Not grouped logs</em> button above<br/>
<span class="ok">⇒ None of the logs in the console should have a group counter.</span>
</li>
</ol>
<h3>Expected result</h3>
<ul>
<li>
All logs created by clicking the <em>Grouped logs</em> button should be grouped and
all created by clicking the <em>Not grouped logs</em> button should not be grouped.
</li>
</ul>
<div id="output"></div>
</section>
<footer>Jan Odvarko <odvarko@gmail.com></footer>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<map id="icpPointRobust" name="icpPointRobust">
<area shape="rect" id="node2" href="$ar_util_8c.html#ae28eeba3e16224a7d77b46bd34b8f4fe" title="arUtilMatMul" alt="" coords="215,5,311,31"/>
<area shape="rect" id="node3" href="$icp_core_8c.html#aceeb3fd72a762c2078d8fb88ec03ccfb" title="icpGetU_from_X_by_MatX2U" alt="" coords="164,55,361,80"/>
</map>
| {
"pile_set_name": "Github"
} |
// Minimal placeholder script: print a greeting to the console.
console.log("hello world!");
| {
"pile_set_name": "Github"
} |
/**
* @license AngularJS v1.2.7
* (c) 2010-2014 Google, Inc. http://angularjs.org
* License: MIT
*/
(function() {'use strict';
/**
* @description
*
* This object provides a utility for producing rich Error messages within
* Angular. It can be called as follows:
*
* var exampleMinErr = minErr('example');
* throw exampleMinErr('one', 'This {0} is {1}', foo, bar);
*
* The above creates an instance of minErr in the example namespace. The
* resulting error will have a namespaced error code of example.one. The
* resulting error will replace {0} with the value of foo, and {1} with the
* value of bar. The object is not restricted in the number of arguments it can
* take.
*
* If fewer arguments are specified than necessary for interpolation, the extra
* interpolation markers will be preserved in the final string.
*
* Since data will be parsed statically during a build step, some restrictions
* are applied with respect to how minErr instances are created and called.
* Instances should have names of the form namespaceMinErr for a minErr created
* using minErr('namespace') . Error codes, namespaces and template strings
* should all be static strings, not variables or general expressions.
*
* @param {string} module The namespace to use for the new minErr instance.
* @returns {function(string, string, ...): Error} instance
*/
function minErr(module) {
return function () {
var code = arguments[0],
prefix = '[' + (module ? module + ':' : '') + code + '] ',
template = arguments[1],
templateArgs = arguments,
stringify = function (obj) {
if (typeof obj === 'function') {
return obj.toString().replace(/ \{[\s\S]*$/, '');
} else if (typeof obj === 'undefined') {
return 'undefined';
} else if (typeof obj !== 'string') {
return JSON.stringify(obj);
}
return obj;
},
message, i;
message = prefix + template.replace(/\{\d+\}/g, function (match) {
var index = +match.slice(1, -1), arg;
if (index + 2 < templateArgs.length) {
arg = templateArgs[index + 2];
if (typeof arg === 'function') {
return arg.toString().replace(/ ?\{[\s\S]*$/, '');
} else if (typeof arg === 'undefined') {
return 'undefined';
} else if (typeof arg !== 'string') {
return toJson(arg);
}
return arg;
}
return match;
});
message = message + '\nhttp://errors.angularjs.org/1.2.7/' +
(module ? module + '/' : '') + code;
for (i = 2; i < arguments.length; i++) {
message = message + (i == 2 ? '?' : '&') + 'p' + (i-2) + '=' +
encodeURIComponent(stringify(arguments[i]));
}
return new Error(message);
};
}
/**
* @ngdoc interface
* @name angular.Module
* @description
*
* Interface for configuring angular {@link angular.module modules}.
*/
function setupModuleLoader(window) {

  var $injectorMinErr = minErr('$injector');
  var ngMinErr = minErr('ng');

  /* Return obj[name], lazily creating it with factory() on first access */
  function ensure(obj, name, factory) {
    return obj[name] || (obj[name] = factory());
  }

  var angular = ensure(window, 'angular', Object);

  // We need to expose `angular.$$minErr` to modules such as `ngResource` that reference it during bootstrap
  angular.$$minErr = angular.$$minErr || minErr;

  return ensure(angular, 'module', function() {
    /** @type {Object.<string, angular.Module>} */
    var modules = {};

    /**
     * @ngdoc function
     * @name angular.module
     * @description
     *
     * The `angular.module` is a global place for creating, registering and retrieving Angular
     * modules.
     * All modules (angular core or 3rd party) that should be available to an application must be
     * registered using this mechanism.
     *
     * When passed two or more arguments, a new module is created.  If passed only one argument, an
     * existing module (the name passed as the first argument to `module`) is retrieved.
     *
     *
     * # Module
     *
     * A module is a collection of services, directives, filters, and configuration information.
     * `angular.module` is used to configure the {@link AUTO.$injector $injector}.
     *
     * <pre>
     * // Create a new module
     * var myModule = angular.module('myModule', []);
     *
     * // register a new service
     * myModule.value('appName', 'MyCoolApp');
     *
     * // configure existing services inside initialization blocks.
     * myModule.config(function($locationProvider) {
     *   // Configure existing providers
     *   $locationProvider.hashPrefix('!');
     * });
     * </pre>
     *
     * Then you can create an injector and load your modules like this:
     *
     * <pre>
     * var injector = angular.injector(['ng', 'MyModule'])
     * </pre>
     *
     * However it's more likely that you'll just use
     * {@link ng.directive:ngApp ngApp} or
     * {@link angular.bootstrap} to simplify this process for you.
     *
     * @param {!string} name The name of the module to create or retrieve.
     * @param {Array.<string>=} requires If specified then new module is being created. If
     *        unspecified then the module is being retrieved for further configuration.
     * @param {Function} configFn Optional configuration function for the module. Same as
     *        {@link angular.Module#methods_config Module#config()}.
     * @returns {module} new module with the {@link angular.Module} api.
     */
    return function module(name, requires, configFn) {
      var assertNotHasOwnProperty = function(name, context) {
        /* 'hasOwnProperty' as a module name would shadow the Object
         * method used on the modules map below. */
        if (name === 'hasOwnProperty') {
          throw ngMinErr('badname', 'hasOwnProperty is not a valid {0} name', context);
        }
      };

      assertNotHasOwnProperty(name, 'module');

      /* Redefining an existing module (requires given) discards the
       * previously registered instance. */
      if (requires && modules.hasOwnProperty(name)) {
        modules[name] = null;
      }
      return ensure(modules, name, function() {
        if (!requires) {
          throw $injectorMinErr('nomod', "Module '{0}' is not available! You either misspelled " +
             "the module name or forgot to load it. If registering a module ensure that you " +
             "specify the dependencies as the second argument.", name);
        }

        /** @type {!Array.<Array.<*>>} */
        var invokeQueue = [];

        /** @type {!Array.<Function>} */
        var runBlocks = [];

        var config = invokeLater('$injector', 'invoke');

        /** @type {angular.Module} */
        var moduleInstance = {
          // Private state
          _invokeQueue: invokeQueue,
          _runBlocks: runBlocks,

          /**
           * @ngdoc property
           * @name angular.Module#requires
           * @propertyOf angular.Module
           * @returns {Array.<string>} List of module names which must be loaded before this module.
           * @description
           * Holds the list of modules which the injector will load before the current module is
           * loaded.
           */
          requires: requires,

          /**
           * @ngdoc property
           * @name angular.Module#name
           * @propertyOf angular.Module
           * @returns {string} Name of the module.
           * @description
           */
          name: name,


          /**
           * @ngdoc method
           * @name angular.Module#provider
           * @methodOf angular.Module
           * @param {string} name service name
           * @param {Function} providerType Construction function for creating new instance of the
           *                                service.
           * @description
           * See {@link AUTO.$provide#provider $provide.provider()}.
           */
          provider: invokeLater('$provide', 'provider'),

          /**
           * @ngdoc method
           * @name angular.Module#factory
           * @methodOf angular.Module
           * @param {string} name service name
           * @param {Function} providerFunction Function for creating new instance of the service.
           * @description
           * See {@link AUTO.$provide#factory $provide.factory()}.
           */
          factory: invokeLater('$provide', 'factory'),

          /**
           * @ngdoc method
           * @name angular.Module#service
           * @methodOf angular.Module
           * @param {string} name service name
           * @param {Function} constructor A constructor function that will be instantiated.
           * @description
           * See {@link AUTO.$provide#service $provide.service()}.
           */
          service: invokeLater('$provide', 'service'),

          /**
           * @ngdoc method
           * @name angular.Module#value
           * @methodOf angular.Module
           * @param {string} name service name
           * @param {*} object Service instance object.
           * @description
           * See {@link AUTO.$provide#value $provide.value()}.
           */
          value: invokeLater('$provide', 'value'),

          /**
           * @ngdoc method
           * @name angular.Module#constant
           * @methodOf angular.Module
           * @param {string} name constant name
           * @param {*} object Constant value.
           * @description
           * Because the constant are fixed, they get applied before other provide methods.
           * See {@link AUTO.$provide#constant $provide.constant()}.
           */
          constant: invokeLater('$provide', 'constant', 'unshift'),

          /**
           * @ngdoc method
           * @name angular.Module#animation
           * @methodOf angular.Module
           * @param {string} name animation name
           * @param {Function} animationFactory Factory function for creating new instance of an
           *                                    animation.
           * @description
           *
           * **NOTE**: animations take effect only if the **ngAnimate** module is loaded.
           *
           *
           * Defines an animation hook that can be later used with
           * {@link ngAnimate.$animate $animate} service and directives that use this service.
           *
           * <pre>
           * module.animation('.animation-name', function($inject1, $inject2) {
           *   return {
           *     eventName : function(element, done) {
           *       //code to run the animation
           *       //once complete, then run done()
           *       return function cancellationFunction(element) {
           *         //code to cancel the animation
           *       }
           *     }
           *   }
           * })
           * </pre>
           *
           * See {@link ngAnimate.$animateProvider#register $animateProvider.register()} and
           * {@link ngAnimate ngAnimate module} for more information.
           */
          animation: invokeLater('$animateProvider', 'register'),

          /**
           * @ngdoc method
           * @name angular.Module#filter
           * @methodOf angular.Module
           * @param {string} name Filter name.
           * @param {Function} filterFactory Factory function for creating new instance of filter.
           * @description
           * See {@link ng.$filterProvider#register $filterProvider.register()}.
           */
          filter: invokeLater('$filterProvider', 'register'),

          /**
           * @ngdoc method
           * @name angular.Module#controller
           * @methodOf angular.Module
           * @param {string|Object} name Controller name, or an object map of controllers where the
           *    keys are the names and the values are the constructors.
           * @param {Function} constructor Controller constructor function.
           * @description
           * See {@link ng.$controllerProvider#register $controllerProvider.register()}.
           */
          controller: invokeLater('$controllerProvider', 'register'),

          /**
           * @ngdoc method
           * @name angular.Module#directive
           * @methodOf angular.Module
           * @param {string|Object} name Directive name, or an object map of directives where the
           *    keys are the names and the values are the factories.
           * @param {Function} directiveFactory Factory function for creating new instance of
           * directives.
           * @description
           * See {@link ng.$compileProvider#methods_directive $compileProvider.directive()}.
           */
          directive: invokeLater('$compileProvider', 'directive'),

          /**
           * @ngdoc method
           * @name angular.Module#config
           * @methodOf angular.Module
           * @param {Function} configFn Execute this function on module load. Useful for service
           *    configuration.
           * @description
           * Use this method to register work which needs to be performed on module loading.
           */
          config: config,

          /**
           * @ngdoc method
           * @name angular.Module#run
           * @methodOf angular.Module
           * @param {Function} initializationFn Execute this function after injector creation.
           *    Useful for application initialization.
           * @description
           * Use this method to register work which should be performed when the injector is done
           * loading all modules.
           */
          run: function(block) {
            runBlocks.push(block);
            return this;
          }
        };

        if (configFn) {
          config(configFn);
        }

        return  moduleInstance;

        /**
         * Build a registration method that records the call on the module's
         * invoke queue for the injector to replay later.
         *
         * @param {string} provider
         * @param {string} method
         * @param {String=} insertMethod
         * @returns {angular.Module}
         */
        function invokeLater(provider, method, insertMethod) {
          return function() {
            invokeQueue[insertMethod || 'push']([provider, method, arguments]);
            return moduleInstance;
          };
        }
      });
    };
  });

}
setupModuleLoader(window);
})(window);
/**
* Closure compiler type information
*
* @typedef { {
* requires: !Array.<string>,
* invokeQueue: !Array.<Array.<*>>,
*
* service: function(string, Function):angular.Module,
* factory: function(string, Function):angular.Module,
* value: function(string, *):angular.Module,
*
* filter: function(string, Function):angular.Module,
*
* init: function(Function):angular.Module
* } }
*/
angular.Module;
| {
"pile_set_name": "Github"
} |
"""
A simple script that uses bpy to render views of a single object by
move the camera around it.
Original source:
https://github.com/panmari/stanford-shapenet-renderer
"""
import os
import bpy
import math
from math import radians
from tqdm import tqdm
from PIL import Image
import numpy as np
import cv2
def resize_padding(im, desired_size):
    """Resize an image to fit a square canvas, preserving aspect ratio.

    The image is scaled so its longer side equals ``desired_size``, then
    pasted centered onto a transparent RGBA square canvas.

    :param im: input ``PIL.Image``.
    :param desired_size: side length in pixels of the square output.
    :return: new RGBA ``PIL.Image`` of size (desired_size, desired_size).
    """
    # scale so the longer side exactly matches desired_size
    old_size = im.size
    ratio = float(desired_size) / max(old_size)
    new_size = tuple(int(x * ratio) for x in old_size)
    # Image.LANCZOS replaces Image.ANTIALIAS (deprecated alias, removed in
    # Pillow 10); both select the same Lanczos resampling filter.
    im = im.resize(new_size, Image.LANCZOS)
    # create a new transparent canvas and paste the resized image centered
    new_im = Image.new("RGBA", (desired_size, desired_size))
    new_im.paste(im, ((desired_size - new_size[0]) // 2, (desired_size - new_size[1]) // 2))
    return new_im
def resize_padding_v2(im, desired_size_in, desired_size_out):
    """Resize an image so its longer side is ``desired_size_in`` pixels, then
    center it on a transparent square canvas of side ``desired_size_out``.

    :param im: input ``PIL.Image``.
    :param desired_size_in: target length in pixels of the image's longer side.
    :param desired_size_out: side length in pixels of the square output canvas.
    :return: new RGBA ``PIL.Image`` of size (desired_size_out, desired_size_out).
    """
    # scale so the longer side exactly matches desired_size_in
    old_size = im.size
    ratio = float(desired_size_in) / max(old_size)
    new_size = tuple(int(x * ratio) for x in old_size)
    # Image.LANCZOS replaces Image.ANTIALIAS (deprecated alias, removed in
    # Pillow 10); both select the same Lanczos resampling filter.
    im = im.resize(new_size, Image.LANCZOS)
    # create a new transparent canvas and paste the resized image centered
    new_im = Image.new("RGBA", (desired_size_out, desired_size_out))
    new_im.paste(im, ((desired_size_out - new_size[0]) // 2, (desired_size_out - new_size[1]) // 2))
    return new_im
def makeLamp(lamp_name, rad):
    """Create a point lamp with energy ``rad``, link it into the current
    scene, and return the new lamp object."""
    data = bpy.data.lamps.new(name=lamp_name, type='POINT')
    data.energy = rad
    # modify the distance when the object is not normalized
    # data.distance = rad * 2.5
    lamp = bpy.data.objects.new(name=lamp_name, object_data=data)
    # link the lamp into the scene so it will appear in renders
    bpy.context.scene.objects.link(lamp)
    return lamp
def parent_obj_to_camera(b_camera):
    """Parent the camera to a fresh empty located at the world origin,
    make that empty the active object, and return it."""
    b_empty = bpy.data.objects.new("Empty", None)
    b_empty.location = (0, 0, 0)
    b_camera.parent = b_empty
    scene = bpy.context.scene
    scene.objects.link(b_empty)
    scene.objects.active = b_empty
    return b_empty
def clean_obj_lamp_and_mesh(context):
    """Remove every mesh and lamp object from the scene and free all mesh
    data blocks.

    Iterates over snapshots (``list(...)``) of the bpy collections because
    removing elements from a collection while iterating over it skips
    entries / has undefined results.
    """
    scene = context.scene
    objs = bpy.data.objects
    meshes = bpy.data.meshes
    for obj in list(objs):
        if obj.type == "MESH" or obj.type == 'LAMP':
            scene.objects.unlink(obj)
            objs.remove(obj)
    for mesh in list(meshes):
        meshes.remove(mesh)
def render_obj_grid(obj, output_dir, shape=[256, 256], step=30, light_main=5, light_add=1, r=2, normalize=False, forward=None, up=None):
    """Render an OBJ model from a grid of viewpoints on a semi-sphere.

    One image is rendered every 5 degrees of azimuth at elevations
    0, step, 2*step, ... (< 90).  Raw renders are written to
    ``output_dir/no_texture`` and 224x224 padded crops to ``output_dir/crop``.

    :param obj: path of the .obj file to import.
    :param output_dir: directory receiving the 'no_texture' and 'crop' subdirs.
    :param shape: [height, width] of the rendered images.
    :param step: elevation step in degrees.
    :param light_main: energy of the main lamp.
    :param light_add: energy of the secondary (fill) lamp.
    :param r: camera distance from the origin.
    :param normalize: if True, rescale each mesh so its longest side is 1.
    :param forward: optional axis_forward hint for the OBJ importer.
    :param up: optional axis_up hint for the OBJ importer.
    """
    clean_obj_lamp_and_mesh(bpy.context)
    # Set up rendering of depth map:
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    links = tree.links
    # clear default nodes; iterate over a snapshot since we mutate the
    # collection while looping
    for n in list(tree.nodes):
        tree.nodes.remove(n)
    # create input render layer node
    rl = tree.nodes.new('CompositorNodeRLayers')
    # renamed from `map` to avoid shadowing the Python builtin
    map_node = tree.nodes.new(type="CompositorNodeMapValue")
    # Size is chosen kind of arbitrarily, try out until you're satisfied
    # with the resulting depth map.
    map_node.offset = [-0.7]
    map_node.size = [0.8]
    map_node.use_min = True
    map_node.min = [0]
    map_node.use_max = True
    map_node.max = [255]
    try:
        links.new(rl.outputs['Z'], map_node.inputs[0])
    except KeyError:
        # some versions of blender don't like this?
        pass
    invert = tree.nodes.new(type="CompositorNodeInvert")
    links.new(map_node.outputs[0], invert.inputs[1])
    # Setting up the environment
    scene = bpy.context.scene
    scene.render.resolution_x = shape[1]
    scene.render.resolution_y = shape[0]
    scene.render.resolution_percentage = 100
    scene.render.alpha_mode = 'TRANSPARENT'
    # Camera setting
    cam = scene.objects['Camera']
    cam_constraint = cam.constraints.new(type='TRACK_TO')
    cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    cam_constraint.up_axis = 'UP_Y'
    b_empty = parent_obj_to_camera(cam)
    cam_constraint.target = b_empty
    # Light setting
    lamp_object = makeLamp('Lamp1', light_main)
    lamp_add = makeLamp('Lamp2', light_add)
    # Output setting
    fp = os.path.join(output_dir, 'no_texture')
    crop_dir = os.path.join(output_dir, 'crop')
    os.makedirs(crop_dir, exist_ok=True)  # race-free replacement for isdir+makedirs
    scene.render.image_settings.file_format = 'PNG'
    # import object
    if forward is not None and up is not None:
        bpy.ops.import_scene.obj(filepath=obj, axis_forward=forward, axis_up=up)
    else:
        bpy.ops.import_scene.obj(filepath=obj)
    # normalize the object so its longest dimension is 1
    if normalize:
        for object in bpy.context.scene.objects:
            if object.name in ['Camera', 'Lamp'] or object.type in ['EMPTY', 'LAMP']:
                continue
            bpy.context.scene.objects.active = object
            max_dim = max(object.dimensions)
            object.dimensions = object.dimensions / max_dim if max_dim != 0 else object.dimensions
    # Separate viewpoints on the surface of a semi-sphere of radius r
    n_azi = int(360 / 5)  # one render image every 5 degrees
    n_view = n_azi * int(90 / step)  # number of tours depending on the elevation step
    for i in range(0, n_view):
        azi = (i * 5) % 360
        ele = (i // n_azi) * step
        scene.render.filepath = os.path.join(fp, 'rendering_%03d_%03d' % (ele, azi))
        loc_y = r * math.cos(radians(ele)) * math.cos(radians(azi))
        loc_x = r * math.cos(radians(ele)) * math.sin(radians(azi))
        loc_z = r * math.sin(radians(ele))
        cam.location = (loc_x, loc_y, loc_z)
        lamp_object.location = (loc_x, loc_y, 10)
        lamp_add.location = (loc_x, loc_y, -10)
        # render image
        bpy.ops.render.render(write_still=True)
        # crop the render to the object bounding box, then pad to 224x224
        im_path = 'rendering_%03d_%03d.png' % (ele, azi)
        im = Image.open(os.path.join(fp, im_path)).copy()
        bbox = im.getbbox()
        im = im.crop(bbox)
        im_new = resize_padding(im, 224)
        im_new.save(os.path.join(crop_dir, im_path))
def render_obj_with_view(obj, output_dir, csv_file, texture_img, views=20, shape=[512, 512]):
    """Render `obj` from `views` random viewpoints with a randomly colored,
    textured material; save raw renders, binary object masks and padded
    crops, and append one CSV annotation row per view.

    Outputs (under output_dir): renders/, masks/, images/.
    CSV row format: image_path,cat_id,example_id,azimuth,elevation.

    NOTE(review): uses the Blender 2.7x API (scene.objects, bpy.data.lamps,
    texture_slots) -- confirm before porting to 2.8+.

    :param obj: path of the .obj file to import.
    :param output_dir: destination directory; its last two path components
        are reused as example_id and cat_id in the CSV annotation.
    :param csv_file: CSV file the annotation row is appended to.
    :param texture_img: image file applied as a texture to every mesh.
    :param views: number of random viewpoints to render.
    :param shape: [height, width] of the rendered images.
    """
    # Clean old objects
    clean_obj_lamp_and_mesh(bpy.context)
    # import object
    bpy.ops.import_scene.obj(filepath=obj)
    # Setting up the environment
    scene = bpy.context.scene
    scene.render.resolution_x = shape[1]
    scene.render.resolution_y = shape[0]
    scene.render.resolution_percentage = 100
    scene.render.alpha_mode = 'TRANSPARENT'
    # Camera setting: camera always tracks the empty at the origin
    cam = scene.objects['Camera']
    cam_constraint = cam.constraints.new(type='TRACK_TO')
    cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    cam_constraint.up_axis = 'UP_Y'
    b_empty = parent_obj_to_camera(cam)
    cam_constraint.target = b_empty
    # Output setting
    fp = os.path.join(output_dir, 'renders')
    fm = os.path.join(output_dir, 'masks')
    fi = os.path.join(output_dir, 'images')
    if not os.path.isdir(fm):
        os.makedirs(fm)
    if not os.path.isdir(fi):
        os.makedirs(fi)
    scene.render.image_settings.file_format = 'PNG'
    # Light setting and Camera radian setting
    lamp_object = makeLamp('Lamp1', 3)
    lamp_add = makeLamp('Lamp2', 3 * 0.1)
    # Random lighting energies and viewpoints
    energies = np.random.rand(views) + .1
    r = 1.5
    azis = 360.0 * np.random.rand(views)
    azis[azis == 360.0] = 0.0  # prevent symmetry
    # Sample elevations uniformly on the sphere, then fold into [0, 90)
    eles = 180.0 * np.arccos(2 * np.random.rand(views) - 1) / np.pi
    eles = np.abs(eles - 90.0)
    eles[eles == 90.0] = 89.0  # prevent absolute bird-view
    # Assign a new material (random diffuse color + given texture image)
    # to every mesh in the scene
    for object in bpy.context.scene.objects:
        if object.name in ['Camera', 'Lamp'] or object.type in ['EMPTY', 'LAMP']:
            continue
        mat = bpy.data.materials.new(name='Material')
        mat.diffuse_color = tuple(np.random.rand(3))
        tex = bpy.data.textures.new('UVMapping', 'IMAGE')
        tex.image = bpy.data.images.load(texture_img)
        slot = mat.texture_slots.add()
        slot.texture = tex
        if object.data.materials:
            for i in range(len(object.data.materials)):
                object.data.materials[i] = mat
        else:
            object.data.materials.append(mat)
    # Render images and crop the object and resize it to desired BBox size
    for n in range(0, views):
        loc_y = r * math.cos(radians(eles[n])) * math.cos(radians(azis[n]))
        loc_x = r * math.cos(radians(eles[n])) * math.sin(radians(azis[n]))
        loc_z = r * math.sin(radians(eles[n]))
        cam.location = (loc_x, loc_y, loc_z)
        lamp_object.location = (loc_x, loc_y, 10)
        lamp_add.location = (loc_x, loc_y, -10)
        # Modify the lightness per view using the random energies
        for object in bpy.context.scene.objects:
            if not object.type == 'LAMP':
                continue
            if object.name == 'Lamp1':
                object.data.energy = 3 * energies[n] + 2.
            else:
                object.data.energy = 3 * energies[n] * 0.3 + .2
        # Render the object
        scene.render.filepath = fp + '/%03d_%03d_%03d' % (n, int(eles[n]), int(azis[n]))
        bpy.ops.render.render(write_still=True)
        # obtain the object mask from the alpha channel of the render
        img = cv2.imread(scene.render.filepath + '.png', cv2.IMREAD_UNCHANGED)
        threshold = img[:, :, 3]
        mask = threshold != 0
        # reinterpret the bool array as uint8 in place (same itemsize),
        # then scale to a 0/255 mask image
        mask.dtype = np.uint8
        mask *= 255
        fm_path = os.path.join(fm, '%03d_%03d_%03d.png' % (n, int(eles[n]), int(azis[n])))
        cv2.imwrite(fm_path, mask)
        # resize and pad the original render to standard-sized render
        render_old = Image.open(scene.render.filepath + '.png').copy()
        bbox = render_old.getbbox()
        render_old = render_old.crop(bbox)
        render_new = resize_padding_v2(render_old, 224, 256)
        render_new.save(os.path.join(fi, '%03d_%03d_%03d.png' % (n, int(eles[n]), int(azis[n]))))
        # add annotation for this sample
        cat_id = os.path.split(os.path.split(output_dir)[0])[1]
        example_id = os.path.split(output_dir)[1]
        image_path = os.path.join(output_dir, 'images', '%03d_%03d_%03d.png' % (n, int(eles[n]), int(azis[n])))
        with open(csv_file, 'a') as f:
            f.write(image_path + ',' + cat_id + ',' + example_id + ',' + str(int(azis[n])) + ',' + str(int(eles[n])) + '\n')
def render_obj(obj, output_dir, azi, ele, rol, name, shape=[512, 512], forward=None, up=None):
    """Render a single view of `obj` at the given azimuth/elevation/in-plane
    rotation (all in degrees) and save it as a PNG in `output_dir`.

    The output file is named `<name>_rendering_<azi>_<ele>_<rol>.png`.

    :param obj: path of the .obj file to import.
    :param output_dir: directory receiving the rendered PNG.
    :param azi: azimuth angle in degrees.
    :param ele: elevation angle in degrees.
    :param rol: in-plane (roll) rotation in degrees.
    :param name: prefix used in the output file name.
    :param shape: [height, width] of the rendered image.
    :param forward: optional axis_forward hint for the OBJ importer.
    :param up: optional axis_up hint for the OBJ importer.
    """
    clean_obj_lamp_and_mesh(bpy.context)
    # Setting up the environment
    scene = bpy.context.scene
    scene.render.resolution_x = shape[1]
    scene.render.resolution_y = shape[0]
    scene.render.resolution_percentage = 100
    scene.render.alpha_mode = 'TRANSPARENT'
    # Camera setting: camera always tracks the empty at the origin
    cam = scene.objects['Camera']
    cam_constraint = cam.constraints.new(type='TRACK_TO')
    cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    cam_constraint.up_axis = 'UP_Y'
    b_empty = parent_obj_to_camera(cam)
    cam_constraint.target = b_empty
    # Light setting
    lamp_object = makeLamp('Lamp1', 5)
    lamp_add = makeLamp('Lamp2', 1)
    if forward is not None and up is not None:
        bpy.ops.import_scene.obj(filepath=obj, axis_forward=forward, axis_up=up)
    else:
        bpy.ops.import_scene.obj(filepath=obj)
    # normalize it and set the center
    for object in bpy.context.scene.objects:
        if object.name in ['Camera', 'Lamp'] or object.type == 'EMPTY':
            continue
        bpy.context.scene.objects.active = object
        max_dim = max(object.dimensions)
        object.dimensions = object.dimensions / max_dim if max_dim != 0 else object.dimensions
    # Output setting
    scene.render.image_settings.file_format = 'PNG'
    scene.render.filepath = os.path.join(output_dir, name + '_rendering_%03d_%03d_%03d' % (int(azi), int(ele), int(rol)))
    # transform Euler angles from degrees into radians
    azi = radians(azi)
    ele = radians(ele)
    rol = radians(rol)
    # camera placed on a sphere of radius r, lifted 0.5 on z
    r = 2.5
    loc_y = r * math.cos(ele) * math.cos(azi)
    loc_x = r * math.cos(ele) * math.sin(azi)
    loc_z = r * math.sin(ele)
    cam.location = (loc_x, loc_y, loc_z + 0.5)
    lamp_object.location = (loc_x, loc_y, 10)
    lamp_add.location = (loc_x, loc_y, -10)
    # Change the in-plane rotation: rotate the camera about the unit
    # vector from the origin towards (loc_x, loc_y, loc_z)
    cam_ob = bpy.context.scene.camera
    bpy.context.scene.objects.active = cam_ob  # select the camera object
    distance = np.sqrt(loc_x ** 2 + loc_y ** 2 + loc_z ** 2)
    bpy.ops.transform.rotate(value=rol, axis=(loc_x / distance, loc_y / distance, loc_z / distance),
                             constraint_axis=(False, False, False), constraint_orientation='GLOBAL', mirror=False,
                             proportional='DISABLED', proportional_edit_falloff='SMOOTH',
                             proportional_size=1)
    bpy.ops.render.render(write_still=True)
if __name__ == '__main__':
    # Demo entry point: render the sample armadillo mesh from a grid of
    # viewpoints (normalized, 512x512, elevation step 30 degrees).
    obj = '/home/xiao/Projects/PoseFromShape/demo/armadillo.obj'
    render_dir = '/home/xiao/Projects/PoseFromShape/demo/armadillo_multiviews'
    render_obj_grid(obj, render_dir, shape=[512, 512], step=30, light_main=5,
                    light_add=1, r=2, normalize=True, forward=None, up=None)
| {
"pile_set_name": "Github"
} |
.TH sar\-c.d 1m "$Date:: 2007-08-05 #$" "USER COMMANDS"
.SH NAME
sar\-c.d \- sar \-c demo in DTrace. Uses DTrace.
.SH SYNOPSIS
.B sar\-c.d
.SH DESCRIPTION
This has been written to demonstrate fetching the same data as sar \-c
from DTrace. This program is intended as a starting point for other
DTrace scripts, by beginning with familiar statistics.
Since this uses DTrace, only the root user or users with the
dtrace_kernel privilege can run this command.
.SH OS
Solaris
.SH STABILITY
stable - needs the syscall and sysinfo providers.
.SH EXAMPLES
.TP
Print system call counts every second,
#
.B sar\-c.d
.PP
.SH FIELDS
.TP
scall/s
System calls
.TP
sread/s
reads
.TP
swrit/s
writes
.TP
fork/s
forks
.TP
exec/s
execs
.TP
rchar/s
read characters
.TP
wchar/s
write characters
.PP
.SH IDEA
David Rubio, who also wrote the original.
.PP
.SH NOTES
As this program does not use Kstat, there is no summary since boot line.
.PP
.SH DOCUMENTATION
See the DTraceToolkit for further documentation under the
Docs directory. The DTraceToolkit docs may include full worked
examples with verbose descriptions explaining the output.
.SH EXIT
sar\-c.d will run until Ctrl\-C is hit.
.SH AUTHOR
Brendan Gregg
[Sydney, Australia]
.SH SEE ALSO
sar(1M), dtrace(1M)
| {
"pile_set_name": "Github"
} |
/**
 * Builds the GitHub release download URL for a Hugo binary archive.
 *
 * @param os       Target platform as it appears in the release asset name
 *                 (e.g. 'Windows', 'Linux', 'macOS').
 * @param extended 'true' selects the extended Hugo build, which carries an
 *                 'extended_' marker in the archive name; any other value
 *                 selects the standard build.
 * @param version  Hugo version without the leading 'v' (e.g. '0.80.0').
 * @returns        Full download URL for the archive.
 */
export default function getURL(os: string, extended: string, version: string): string {
  // Removed dead commented-out code and inner closures whose parameters
  // shadowed the function's own `extended`/`os` parameters.
  const variant = extended === 'true' ? 'extended_' : '';
  // Windows releases ship as .zip archives; all other platforms use .tar.gz.
  const archiveExt = os === 'Windows' ? 'zip' : 'tar.gz';
  const hugoName = `hugo_${variant}${version}_${os}-64bit`;
  const baseURL = 'https://github.com/gohugoio/hugo/releases/download';
  return `${baseURL}/v${version}/${hugoName}.${archiveExt}`;
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2003-2008 The University of Wroclaw.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the University may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
using Nemerle;
using Nemerle.Assertions;
using Nemerle.Collections;
using Nemerle.Compiler.Parsetree;
using Nemerle.Compiler;
using Nemerle.Imperative;
using Nemerle.Macros;
using Nemerle.Text;
using System;
using System.Globalization;
using System.IO;
using System.Text.RegularExpressions;
using System.Text;
namespace Nemerle.Utility
{
  // Assembly-level macro: expands environment references in `str`
  // (falling back to `defaults` when an entry is missing) and emits a
  // System.Reflection.AssemblyVersion attribute with the expanded value.
  [Nemerle.MacroUsage (Nemerle.MacroPhase.BeforeInheritance,
                       Nemerle.MacroTargets.Assembly)]
  macro GeneratedAssemblyVersion(str, defaults = null)
  {
    def version = ExpandEnvHelper.Expand(str, defaults);
    Manager().Hierarchy.AddAssemblyAttribute(Manager().CoreEnv,
      <[ System.Reflection.AssemblyVersion($(version : string)) ]>);
  }
  // Queries the local git repository for revision information
  // (tag, revision count, commit hash) by running "git describe".
  module GitRevisionHelper
  {
    // Runs "git describe --tags --long" in `path` and parses its output.
    // Returns Some(tag, rev, commit) when a line matching
    // "<tag>-<rev>-<commit>" is found (tag is null when it contains no
    // digits or dots), or None() when git cannot be started, produces no
    // parseable line, or throws.  Results are memoized per path.
    [Memoize]
    public GetRevisionGeneric(path : string) : option[string * string * string]
    {
      def execGit(startInfoConfigurator)
      {
        // Execute "git describe" as a child process with captured output
        def process = System.Diagnostics.Process();
        process.StartInfo.UseShellExecute = false;
        process.StartInfo.RedirectStandardOutput = true;
        process.StartInfo.RedirectStandardError = true;
        process.StartInfo.WorkingDirectory = path;
        process.StartInfo.CreateNoWindow = true;
        startInfoConfigurator(process.StartInfo);
        // Read git output line by line until the regex is matched
        def loop(reader)
        {
          match (reader.ReadLine())
          {
            | null => None()
            | line =>
              regexp match (line)
              {
                | @"(?<tag>.+)\-(?<rev>.+)\-(?<commit>.+)" =>
                  // keep only digits and dots from the tag name
                  def tag = Regex.Replace(tag, @"[^\d\.]", "");
                  if (tag == ".")
                    Some(null, rev, commit)
                  else
                    Some(tag, rev, commit);
                | _ => loop(reader);
              }
          }
        }
        try
        {
          if (process.Start())
          {
            def revision = loop (process.StandardOutput);
            // Wait for git client process to terminate; kill it after 2s
            unless (process.WaitForExit (2000))
              process.Kill ();
            revision;
          }
          else
          {
            None()
          }
        }
        catch
        {
          | _ => None();
        }
      }
      def configCommon(si) // mono git or msysgit with git.exe in PATH
      {
        si.FileName = Environment.GetEnvironmentVariable("GIT_PATH") ?? "git";
        si.Arguments = "describe --tags --long";
      }
      def configCmd(si) // PATH contains git.cmd only workaround
      {
        si.FileName = "cmd";
        si.Arguments = "/C git describe --tags --long";
      }
      // Try the plain git executable first; fall back to cmd.exe dispatch
      match (execGit(configCommon))
      {
        | Some(_) as x => x
        | None() => execGit(configCmd);
      }
    }
  }
}
| {
"pile_set_name": "Github"
} |
<?xml version='1.0' encoding='UTF-8'?>
<resources>
<string name="app_name">Orbot</string>
<string name="menu_settings">Zvamada</string>
<string name="menu_stop">Mira</string>
<string name="menu_about">Maererano</string>
<string name="button_about">Maererano</string>
<string name="menu_exit">Buda</string>
<string name="btn_cancel">Kanzura</string>
<!--Welcome Wizard strings (DJH)-->
<!--END Welcome Wizard strings (DJH)-->
<!--New Wizard Strings-->
<!--Title Screen-->
<!--Warning screen-->
<!--Permissions screen-->
<!--TipsAndTricks screen-->
<!--Transparent Proxy screen-->
<string name="error">Paita kanganiso</string>
<string name="set_locale_title">Mutauro</string>
<string name="name">Zita</string>
<string name="disable">Rambidza</string>
<string name="enable">Bvumidza</string>
</resources>
| {
"pile_set_name": "Github"
} |
# FIXME: Add support for global settings.
# FIXME: Add support for custom getters/setters.
defaultTextEncodingName type=String
ftpDirectoryTemplatePath type=String
localStorageDatabasePath type=String
editableLinkBehavior type=EditableLinkBehavior, initial=EditableLinkDefaultBehavior
textDirectionSubmenuInclusionBehavior type=TextDirectionSubmenuInclusionBehavior, initial=TextDirectionSubmenuAutomaticallyIncluded
passwordEchoDurationInSeconds type=double, initial=1
# Sets the magnification value for the validation message timer. If the
# magnification value is N, a validation message disappears automatically after
# <message length> * N / 1000 seconds. If N is equal to or less than 0, a
# validation message doesn't disappear automatically.
validationMessageTimerMagnification type=int, initial=50
# Number of pixels below which 2D canvas is rendered in software
# even if hardware acceleration is enabled.
# Hardware acceleration is useful for large canvases where it can avoid the
# pixel bandwidth between the CPU and GPU. But GPU acceleration comes at
# a price - extra back-buffer and texture copy. Small canvases are also
# widely used for stylized fonts. Anti-aliasing text in hardware at that
# scale is generally slower. So below a certain size it is better to
# draw canvas in software.
minimumAccelerated2dCanvasSize type=int, initial=257*256
layoutFallbackWidth type=int, initial=980
deviceWidth type=int, initial=0
deviceHeight type=int, initial=0
# Allow clients concerned with memory consumption to set a quota on session storage
# since the memory used won't be released until the Page is destroyed.
sessionStorageQuota type=unsigned, initial=StorageMap::noQuota
minimumFontSize type=int, initial=0, setNeedsStyleRecalcInAllFrames=1
minimumLogicalFontSize type=int, initial=0, setNeedsStyleRecalcInAllFrames=1
defaultFontSize type=int, initial=16, setNeedsStyleRecalcInAllFrames=1
defaultFixedFontSize type=int, initial=0, setNeedsStyleRecalcInAllFrames=1
editingBehaviorType type=EditingBehaviorType, initial=editingBehaviorTypeForPlatform()
maximumHTMLParserDOMTreeDepth type=unsigned, initial=defaultMaximumHTMLParserDOMTreeDepth
# This setting only affects site icon image loading if loadsImagesAutomatically setting is false and this setting is true.
# All other permutations still heed loadsImagesAutomatically setting.
loadsSiteIconsIgnoringImageLoadingSetting initial=false
caretBrowsingEnabled initial=false
preventKeyboardDOMEventDispatch initial=false
localStorageEnabled initial=false
allowUniversalAccessFromFileURLs initial=true
allowFileAccessFromFileURLs initial=true
javaScriptCanOpenWindowsAutomatically initial=false
javaScriptCanAccessClipboard initial=false
shouldPrintBackgrounds initial=false
usesDashboardBackwardCompatibilityMode initial=false, conditional=DASHBOARD_SUPPORT
textAreasAreResizable initial=false, setNeedsStyleRecalcInAllFrames=1
authorAndUserStylesEnabled initial=true, setNeedsStyleRecalcInAllFrames=1
acceleratedCompositingEnabled initial=true, setNeedsStyleRecalcInAllFrames=1
acceleratedCompositedAnimationsEnabled initial=true, setNeedsStyleRecalcInAllFrames=1
showDebugBorders initial=false, setNeedsStyleRecalcInAllFrames=1
showRepaintCounter initial=false, setNeedsStyleRecalcInAllFrames=1
visibleDebugOverlayRegions type=DebugOverlayRegions, initial=0
# This is a quirk we are pro-actively applying to old applications. It changes keyboard event dispatching,
# making keyIdentifier available on keypress events, making charCode available on keydown/keyup events,
# and getting keypress dispatched in more cases.
needsKeyboardEventDisambiguationQuirks initial=false
treatsAnyTextCSSLinkAsStylesheet initial=false
shrinksStandaloneImagesToFit initial=true
pageCacheSupportsPlugins initial=false
showsURLsInToolTips initial=false
showsToolTipOverTruncatedText initial=false
forceFTPDirectoryListings initial=false
developerExtrasEnabled initial=false
scriptMarkupEnabled initial=true
needsSiteSpecificQuirks initial=false
domTimersThrottlingEnabled initial=true
webArchiveDebugModeEnabled initial=false, conditional=WEB_ARCHIVE
localFileContentSniffingEnabled initial=false
offlineWebApplicationCacheEnabled initial=false
enforceCSSMIMETypeInNoQuirksMode initial=true
usesEncodingDetector initial=false
allowScriptsToCloseWindows initial=false
canvasUsesAcceleratedDrawing initial=false
acceleratedDrawingEnabled initial=false
displayListDrawingEnabled initial=false
acceleratedFiltersEnabled initial=false
useLegacyTextAlignPositionedElementBehavior initial=false
javaScriptRuntimeFlags type=JSC::RuntimeFlags
# PLATFORM(QT) only
offlineStorageDatabaseEnabled initial=true
# FIXME: This should really be disabled by default as it makes platforms that don't support the feature download files
# they can't use. Leaving it enabled for now to avoid changing existing behavior.
downloadableBinaryFontsEnabled initial=true
xssAuditorEnabled initial=false
unsafePluginPastingEnabled initial=true
acceleratedCompositingForFixedPositionEnabled initial=defaultAcceleratedCompositingForFixedPositionEnabled
acceleratedCompositingForOverflowScrollEnabled initial=false
rubberBandingForSubScrollableRegionsEnabled initial=true, conditional=RUBBER_BANDING
experimentalNotificationsEnabled initial=false
webGLEnabled initial=false
webGLErrorsToConsoleEnabled initial=true
openGLMultisamplingEnabled initial=true
forceSoftwareWebGLRendering initial=false
accelerated2dCanvasEnabled initial=false
antialiased2dCanvasEnabled initial=true
loadDeferringEnabled initial=true
webAudioEnabled initial=false
paginateDuringLayoutEnabled initial=false
fullScreenEnabled initial=false, conditional=FULLSCREEN_API
asynchronousSpellCheckingEnabled initial=false
mediaStreamEnabled initial=false
# This feature requires an implementation of ValidationMessageClient.
interactiveFormValidationEnabled initial=false
usePreHTML5ParserQuirks initial=false
hyperlinkAuditingEnabled initial=false
crossOriginCheckInGetMatchedCSSRulesDisabled initial=false
forceCompositingMode initial=false
shouldInjectUserScriptsInInitialEmptyDocument initial=false
fixedElementsLayoutRelativeToFrame initial=false
allowDisplayOfInsecureContent initial=false
allowRunningOfInsecureContent initial=false
requiresUserGestureForMediaPlayback initial=defaultRequiresUserGestureForMediaPlayback
audioPlaybackRequiresUserGesture initial=defaultAudioPlaybackRequiresUserGesture
allowsInlineMediaPlayback initial=defaultAllowsInlineMediaPlayback
inlineMediaPlaybackRequiresPlaysInlineAttribute initial=defaultInlineMediaPlaybackRequiresPlaysInlineAttribute
allowsPictureInPictureMediaPlayback initial=defaultAllowsPictureInPictureMediaPlayback
mediaControlsScaleWithPageZoom initial=defaultMediaControlsScaleWithPageZoom
invisibleAutoplayNotPermitted initial=false
passwordEchoEnabled initial=false
suppressesIncrementalRendering initial=false
incrementalRenderingSuppressionTimeoutInSeconds type=double, initial=defaultIncrementalRenderingSuppressionTimeoutInSeconds
backspaceKeyNavigationEnabled initial=true
shouldDisplaySubtitles initial=false, conditional=VIDEO_TRACK
shouldDisplayCaptions initial=false, conditional=VIDEO_TRACK
shouldDisplayTextDescriptions initial=false, conditional=VIDEO_TRACK
scrollingCoordinatorEnabled initial=false
scrollingTreeIncludesFrames initial=defaultScrollingTreeIncludesFrames
scrollAnimatorEnabled initial=true, conditional=SMOOTH_SCROLLING
forceUpdateScrollbarsOnMainThreadForPerformanceTesting initial=false
notificationsEnabled initial=true
# Some apps needs isLoadingInAPISense to account for active subresource loaders.
needsIsLoadingInAPISenseQuirk initial=false
shouldRespectImageOrientation initial=defaultShouldRespectImageOrientation
imageSubsamplingEnabled initial=defaultImageSubsamplingEnabled
wantsBalancedSetDefersLoadingBehavior initial=false
requestAnimationFrameEnabled initial=true
# For touch adjustment to apply, the compile option TOUCH_ADJUSTMENT must also be enabled.
# This setting adds a means to dynamically disable the feature at runtime on systems with
# support for touch adjustment.
touchAdjustmentEnabled initial=true
fixedPositionCreatesStackingContext initial=defaultFixedPositionCreatesStackingContext
syncXHRInDocumentsEnabled initial=true
cookieEnabled initial=true
mediaEnabled initial=true
DOMPasteAllowed initial=false
# When enabled, window.blur() does not change focus, and
# window.focus() only changes focus when invoked from the context that
# created the window.
windowFocusRestricted initial=true
diagnosticLoggingEnabled initial=false
delegatesPageScaling initial=false
plugInSnapshottingEnabled initial=false
snapshotAllPlugIns initial=false
autostartOriginPlugInSnapshottingEnabled initial=true
primaryPlugInSnapshotDetectionEnabled initial=true
maximumPlugInSnapshotAttempts type=unsigned, initial=20
frameFlatteningEnabled initial=false
allowCustomScrollbarInMainFrame initial=true
webSecurityEnabled initial=true
spatialNavigationEnabled initial=false
autoscrollForDragAndDropEnabled initial=false
unifiedTextCheckerEnabled initial=defaultUnifiedTextCheckerEnabled
logsPageMessagesToSystemConsoleEnabled initial=false
backForwardCacheExpirationInterval type=double, initial=1800
# Some apps could have a default video poster if it is not set.
defaultVideoPosterURL type=String
smartInsertDeleteEnabled initial=defaultSmartInsertDeleteEnabled
selectTrailingWhitespaceEnabled initial=defaultSelectTrailingWhitespaceEnabled
selectionIncludesAltImageText initial=true
useLegacyBackgroundSizeShorthandBehavior initial=false
fixedBackgroundsPaintRelativeToDocument initial=defaultFixedBackgroundsPaintRelativeToDocument
minimumZoomFontSize type=float, initial=15, conditional=IOS_TEXT_AUTOSIZING
simpleLineLayoutEnabled initial=true, setNeedsStyleRecalcInAllFrames=1
simpleLineLayoutDebugBordersEnabled initial=false, setNeedsStyleRecalcInAllFrames=1
subpixelCSSOMElementMetricsEnabled initial=false
useGiantTiles initial=false
mediaSourceEnabled initial=true, conditional=MEDIA_SOURCE
# FIXME: Rename to allowMultiElementImplicitFormSubmission once we upstream the iOS changes to WebView.mm.
allowMultiElementImplicitSubmission initial=false
allowsAirPlayForMediaPlayback initial=true, conditional=WIRELESS_PLAYBACK_TARGET
shouldConvertPositionStyleOnCopy initial=false
maxParseDuration type=double, initial=-1
standalone initial=false
telephoneNumberParsingEnabled initial=false
mediaDataLoadsAutomatically initial=defaultMediaDataLoadsAutomatically
shouldTransformsAffectOverflow initial=true
shouldDispatchJavaScriptWindowOnErrorEvents initial=false
alwaysUseAcceleratedOverflowScroll initial=false
imageControlsEnabled initial=false, conditional=SERVICE_CONTROLS
enableInheritURIQueryComponent initial=false
aggressiveTileRetentionEnabled initial=false
temporaryTileCohortRetentionEnabled initial=true
useImageDocumentForSubframePDF initial=false
dataDetectorTypes type=DataDetectorTypes, initial=DataDetectorTypeNone, conditional=DATA_DETECTION
# Allow SourceBuffers to store up to 304MB each, enough for approximately five minutes
# of 1080p video and stereo audio.
maximumSourceBufferSize type=int, initial=318767104, conditional=MEDIA_SOURCE
serviceControlsEnabled initial=false, conditional=SERVICE_CONTROLS
appleMailPaginationQuirkEnabled initial=false
attachmentElementEnabled initial=true, conditional=ATTACHMENT_ELEMENT
newBlockInsideInlineModelEnabled initial=false, setNeedsStyleRecalcInAllFrames=1
httpEquivEnabled initial=true
# Some ports (e.g. iOS) might choose to display attachments inline, regardless of whether the response includes the
# HTTP header "Content-Disposition: attachment". This setting enables a sandbox around these attachments. The sandbox
# enforces all frame sandbox flags (see enum SandboxFlag in SecurityContext.h), and also disables <meta http-equiv>
# processing and subframe loading.
contentDispositionAttachmentSandboxEnabled initial=false
| {
"pile_set_name": "Github"
} |
package jmespath
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
// token represents a single lexed token of a JMESPath expression.
type token struct {
	tokenType tokType // Kind of token (see the tokType constants).
	value     string  // Literal text of the token.
	position  int     // Offset of the token within the expression.
	length    int     // Length of the token's text.
}
// tokType identifies the kind of a lexed token.
type tokType int

// eof is the sentinel rune returned by next() at the end of the expression.
const eof = -1
// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The width of the current rune; lets back() un-read it.
	buf        bytes.Buffer // Internal buffer used for building up values.
}
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
// Offset and Expression are also consumed by HighlightLocation to point at
// the failing position.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}
// Error implements the error interface, returning a short
// human-readable description of the syntax error.
func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific
	// location where the error occurred.
	return fmt.Sprintf("SyntaxError: %s", e.msg)
}
// HighlightLocation will show where the syntax error occurred.
// It renders the expression followed by a second line carrying a "^"
// marker directly under the offending offset.
func (e SyntaxError) HighlightLocation() string {
	padding := strings.Repeat(" ", e.Offset)
	return e.Expression + "\n" + padding + "^"
}
//go:generate stringer -type=tokType

// The complete set of token types the lexer can produce; string forms
// are generated by stringer (see the go:generate directive above).
const (
	tUnknown tokType = iota
	tStar
	tDot
	tFilter
	tFlatten
	tLparen
	tRparen
	tLbracket
	tRbracket
	tLbrace
	tRbrace
	tOr
	tPipe
	tNumber
	tUnquotedIdentifier
	tQuotedIdentifier
	tComma
	tColon
	tLT
	tLTE
	tGT
	tGTE
	tEQ
	tNE
	tJSONLiteral
	tStringLiteral
	tCurrent
	tExpref
	tAnd
	tNot
	tEOF
)
// basicTokens maps runes that form complete single-character tokens on
// their own, requiring no lookahead. Runes needing lookahead ('[', '|',
// '<', '>', '!', '=', '&') are handled separately in tokenize.
var basicTokens = map[rune]tokType{
	'.': tDot,
	'*': tStar,
	',': tComma,
	':': tColon,
	'{': tLbrace,
	'}': tRbrace,
	']': tRbracket, // tLbracket not included because it could be "[]"
	'(': tLparen,
	')': tRparen,
	'@': tCurrent,
}
// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270

// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
// Index with r/64 and test bit r%64; only valid for runes below 128.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}

// whiteSpace lists the rune values that separate tokens without producing
// a token of their own.
var whiteSpace = map[rune]bool{
	' ': true, '\t': true, '\n': true, '\r': true,
}
// String returns a human-readable rendering of the token for debugging.
func (t token) String() string {
	return fmt.Sprintf(
		"Token{%+v, %s, %d, %d}", t.tokenType, t.value, t.position, t.length)
}
// NewLexer creates a new JMESPath lexer. The returned lexer is reusable:
// each call to tokenize resets its internal state.
func NewLexer() *Lexer {
	return &Lexer{}
}
// next decodes and returns the rune at the current position, advancing
// past it. Once the expression is exhausted it records a zero width and
// returns eof.
func (lexer *Lexer) next() rune {
	if lexer.currentPos < len(lexer.expression) {
		r, width := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
		lexer.lastWidth = width
		lexer.currentPos += width
		return r
	}
	lexer.lastWidth = 0
	return eof
}
// back rewinds the lexer by exactly one rune, undoing the most recent
// call to next(). After next() has returned eof, lastWidth is zero, so
// back is a harmless no-op.
func (lexer *Lexer) back() {
	lexer.currentPos -= lexer.lastWidth
}
// peek returns the next rune without consuming it. Note that it still
// updates lastWidth (which becomes zero at eof); callers such as
// consumeRawStringLiteral rely on that side effect for EOF detection.
func (lexer *Lexer) peek() rune {
	r := lexer.next()
	lexer.back()
	return r
}
// tokenize scans the supplied expression and returns the corresponding
// token stream, always terminated by a tEOF token on success. On a lexing
// error, the tokens produced so far are returned together with a
// SyntaxError. The rune classes handled below (identifier starts, basic
// single-char tokens, numbers, the lookahead runes, whitespace) are
// mutually disjoint, so dispatch order does not affect the result.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
	var tokens []token
	lexer.expression = expression
	lexer.currentPos = 0
	lexer.lastWidth = 0
	for {
		r := lexer.next()
		if r == eof {
			break
		}
		if whiteSpace[r] {
			// Whitespace separates tokens but never produces one.
			continue
		}
		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
			tokens = append(tokens, lexer.consumeUnquotedIdentifier())
			continue
		}
		if simple, ok := basicTokens[r]; ok {
			// Complete single-character token; no lookahead required.
			tokens = append(tokens, token{
				tokenType: simple,
				value:     string(r),
				position:  lexer.currentPos - lexer.lastWidth,
				length:    1,
			})
			continue
		}
		if r == '-' || (r >= '0' && r <= '9') {
			tokens = append(tokens, lexer.consumeNumber())
			continue
		}
		switch r {
		case '[':
			tokens = append(tokens, lexer.consumeLBracket())
		case '"':
			t, err := lexer.consumeQuotedIdentifier()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		case '\'':
			t, err := lexer.consumeRawStringLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		case '`':
			t, err := lexer.consumeLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		case '|':
			tokens = append(tokens, lexer.matchOrElse(r, '|', tOr, tPipe))
		case '<':
			tokens = append(tokens, lexer.matchOrElse(r, '=', tLTE, tLT))
		case '>':
			tokens = append(tokens, lexer.matchOrElse(r, '=', tGTE, tGT))
		case '!':
			tokens = append(tokens, lexer.matchOrElse(r, '=', tNE, tNot))
		case '=':
			tokens = append(tokens, lexer.matchOrElse(r, '=', tEQ, tUnknown))
		case '&':
			tokens = append(tokens, lexer.matchOrElse(r, '&', tAnd, tExpref))
		default:
			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
		}
	}
	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
	return tokens, nil
}
// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
// Backslash-escaped runes are skipped over verbatim (so an escaped
// delimiter does not terminate the scan); callers are expected to
// post-process escape sequences themselves.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
	start := lexer.currentPos
	current := lexer.next()
	for current != end && current != eof {
		if current == '\\' && lexer.peek() != eof {
			// Consume the escaped rune without inspecting it.
			lexer.next()
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// lastWidth is zero only when the last read hit eof, i.e. we
		// never reached the closing delimiter.
		return "", SyntaxError{
			msg:        "Unclosed delimiter: " + string(end),
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	// Slice off the terminating rune that the loop just consumed.
	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
// consumeLiteral scans a backtick-delimited JSON literal into a
// tJSONLiteral token, unescaping any escaped backticks. The opening
// backtick has already been consumed by the caller.
func (lexer *Lexer) consumeLiteral() (token, error) {
	position := lexer.currentPos
	raw, err := lexer.consumeUntil('`')
	if err != nil {
		return token{}, err
	}
	unescaped := strings.Replace(raw, "\\`", "`", -1)
	t := token{
		tokenType: tJSONLiteral,
		value:     unescaped,
		position:  position,
		length:    len(unescaped),
	}
	return t, nil
}
// consumeRawStringLiteral scans a single-quote delimited raw string
// literal into a tStringLiteral token. Only the escape sequence \' is
// interpreted (as a literal quote); all other bytes are copied verbatim.
// The opening quote has already been consumed by the caller.
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
	start := lexer.currentPos
	currentIndex := start
	current := lexer.next()
	for current != '\'' && lexer.peek() != eof {
		if current == '\\' && lexer.peek() == '\'' {
			// Flush everything up to (but excluding) the backslash,
			// append a literal quote, and skip past the escaped quote.
			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
			lexer.buf.WriteString(chunk)
			lexer.buf.WriteString("'")
			lexer.next()
			currentIndex = lexer.currentPos
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// lastWidth can only be zero here because peek()/next() hit eof,
		// meaning the closing quote was never found.
		return token{}, SyntaxError{
			msg:        "Unclosed delimiter: '",
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	if currentIndex < lexer.currentPos {
		// Flush the final chunk, excluding the closing quote.
		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
	}
	value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
	lexer.buf.Reset()
	return token{
		tokenType: tStringLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
// syntaxError builds a SyntaxError anchored at the rune that was just
// read from the expression.
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
	err := SyntaxError{Expression: lexer.expression}
	err.msg = msg
	// currentPos has already advanced past the offending rune; step back
	// one byte to point at it (assumes a single-byte rune — TODO confirm
	// behavior for multi-byte runes).
	err.Offset = lexer.currentPos - 1
	return err
}
// Checks for a two char token, otherwise matches a single character
// token. This is used whenever a two char token overlaps a single
// char token, e.g. "||" -> tPipe, "|" -> tOr.
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
	start := lexer.currentPos - lexer.lastWidth
	if lexer.next() == second {
		return token{
			tokenType: matchedType,
			value:     string(first) + string(second),
			position:  start,
			length:    2,
		}
	}
	// Not the two-char form: un-read the lookahead rune and emit the
	// single-char token instead.
	lexer.back()
	return token{
		tokenType: singleCharType,
		value:     string(first),
		position:  start,
		length:    1,
	}
}
// consumeLBracket disambiguates the three tokens that begin with "[":
// the filter expression "[?", the flatten operator "[]", and a bare "[".
// The "[" itself has already been consumed by the caller.
func (lexer *Lexer) consumeLBracket() token {
	start := lexer.currentPos - lexer.lastWidth
	switch lexer.next() {
	case '?':
		return token{tokenType: tFilter, value: "[?", position: start, length: 2}
	case ']':
		return token{tokenType: tFlatten, value: "[]", position: start, length: 2}
	default:
		// Just a bare bracket; push the lookahead rune back.
		lexer.back()
		return token{tokenType: tLbracket, value: "[", position: start, length: 1}
	}
}
// consumeQuotedIdentifier scans a double-quote delimited identifier into a
// tQuotedIdentifier token. The contents are decoded as a JSON string so
// escape sequences such as \" and \uXXXX are interpreted. The opening
// quote has already been consumed by the caller; the token's position
// points at that quote.
func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('"')
	if err != nil {
		return token{}, err
	}
	var decoded string
	asJSON := []byte("\"" + value + "\"")
	// asJSON is already a []byte; the original code wrapped it in a
	// redundant []byte(...) conversion here.
	if err := json.Unmarshal(asJSON, &decoded); err != nil {
		return token{}, err
	}
	return token{
		tokenType: tQuotedIdentifier,
		value:     decoded,
		position:  start - 1, // include the opening quote
		length:    len(decoded),
	}, nil
}
// consumeUnquotedIdentifier scans the remainder of an unquoted identifier
// whose first rune has already been consumed by the caller; trailing
// runes are accepted per the identifierTrailingBits bitmask.
func (lexer *Lexer) consumeUnquotedIdentifier() token {
	// Consume runes until we reach the end of an unquoted
	// identifier.
	start := lexer.currentPos - lexer.lastWidth
	for {
		r := lexer.next()
		// Guard must be r >= 128 (the original used r > 128):
		// identifierTrailingBits only covers the 128 ASCII code points,
		// so rune U+0080 (128) would index element [2] of a [2]uint64
		// and panic with index out of range.
		if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
			lexer.back()
			break
		}
	}
	value := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tUnquotedIdentifier,
		value:     value,
		position:  start,
		length:    lexer.currentPos - start,
	}
}
// consumeNumber scans a run of ASCII digits into a tNumber token. The
// leading '-' or first digit has already been consumed by the caller.
func (lexer *Lexer) consumeNumber() token {
	start := lexer.currentPos - lexer.lastWidth
	// Advance while we keep seeing digits.
	for r := lexer.next(); r >= '0' && r <= '9'; r = lexer.next() {
	}
	// The loop read one rune past the number; un-read it.
	lexer.back()
	digits := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tNumber,
		value:     digits,
		position:  start,
		length:    lexer.currentPos - start,
	}
}
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.