repo_name
stringlengths
6
101
path
stringlengths
4
300
text
stringlengths
7
1.31M
centauroWaRRIor/VulkanSamples
ExternalLibraries/NvGameworksFramework/externals/src/cg4cpp/upper3x2.cpp
/* * Copyright 2009 by NVIDIA Corporation. All rights reserved. All * information contained herein is proprietary and confidential to NVIDIA * Corporation. Any use, reproduction, or disclosure without the written * permission of NVIDIA Corporation is prohibited. */ #include <Cg/double.hpp> #include <Cg/upper3x2.hpp> namespace Cg { float3x2 upper3x2(float1x1 m) { return upper3x2<float,1,1>(m); } float3x2 upper3x2(float1x2 m) { return upper3x2<float,1,2>(m); } float3x2 upper3x2(float1x3 m) { return upper3x2<float,1,3>(m); } float3x2 upper3x2(float1x4 m) { return upper3x2<float,1,4>(m); } float3x2 upper3x2(float2x1 m) { return upper3x2<float,2,1>(m); } float3x2 upper3x2(float2x2 m) { return upper3x2<float,2,2>(m); } float3x2 upper3x2(float2x3 m) { return upper3x2<float,2,3>(m); } float3x2 upper3x2(float2x4 m) { return upper3x2<float,2,4>(m); } float3x2 upper3x2(float3x1 m) { return upper3x2<float,3,1>(m); } float3x2 upper3x2(float3x2 m) { return upper3x2<float,3,2>(m); } float3x2 upper3x2(float3x3 m) { return upper3x2<float,3,3>(m); } float3x2 upper3x2(float3x4 m) { return upper3x2<float,3,4>(m); } float3x2 upper3x2(float4x1 m) { return upper3x2<float,4,1>(m); } float3x2 upper3x2(float4x2 m) { return upper3x2<float,4,2>(m); } float3x2 upper3x2(float4x3 m) { return upper3x2<float,4,3>(m); } float3x2 upper3x2(float4x4 m) { return upper3x2<float,4,4>(m); } double3x2 upper3x2(double1x1 m) { return upper3x2<double,1,1>(m); } double3x2 upper3x2(double1x2 m) { return upper3x2<double,1,2>(m); } double3x2 upper3x2(double1x3 m) { return upper3x2<double,1,3>(m); } double3x2 upper3x2(double1x4 m) { return upper3x2<double,1,4>(m); } double3x2 upper3x2(double2x1 m) { return upper3x2<double,2,1>(m); } double3x2 upper3x2(double2x2 m) { return upper3x2<double,2,2>(m); } double3x2 upper3x2(double2x3 m) { return upper3x2<double,2,3>(m); } double3x2 upper3x2(double2x4 m) { return upper3x2<double,2,4>(m); } double3x2 upper3x2(double3x1 m) { return upper3x2<double,3,1>(m); } double3x2 
upper3x2(double3x2 m) { return upper3x2<double,3,2>(m); } double3x2 upper3x2(double3x3 m) { return upper3x2<double,3,3>(m); } double3x2 upper3x2(double3x4 m) { return upper3x2<double,3,4>(m); } double3x2 upper3x2(double4x1 m) { return upper3x2<double,4,1>(m); } double3x2 upper3x2(double4x2 m) { return upper3x2<double,4,2>(m); } double3x2 upper3x2(double4x3 m) { return upper3x2<double,4,3>(m); } double3x2 upper3x2(double4x4 m) { return upper3x2<double,4,4>(m); } } // namespace Cg
lindsayad/misc_programming
cpp/namespace-std.cpp
#include <iostream> namespace std { void foo() { cout << "Im in the std namespace bitch!\n"; } } // namespace std int main() { std::foo(); }
consulo/consulo-spring
web/src/com/intellij/spring/web/mvc/jam/SpringMVCJamReferenceContributor.java
package com.intellij.spring.web.mvc.jam; import com.intellij.patterns.PsiJavaPatterns; import com.intellij.patterns.PatternCondition; import com.intellij.psi.*; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.impl.source.resolve.reference.PsiReferenceProviderBase; import com.intellij.util.ProcessingContext; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.NonNls; /** * @author <NAME> */ public class SpringMVCJamReferenceContributor extends PsiReferenceContributor { public void registerReferenceProviders(final PsiReferenceRegistrar registrar) { registrar.registerReferenceProvider( PsiJavaPatterns.literalExpression().with(new PatternCondition<PsiLiteralExpression>("foo") { @Override public boolean accepts(@NotNull final PsiLiteralExpression psiLiteralExpression, final ProcessingContext context) { final PsiNameValuePair pair = PsiTreeUtil.getParentOfType(psiLiteralExpression, PsiNameValuePair.class); if (pair != null) { @NonNls final String name = pair.getName(); if (name == null || name.equals("value")) { final String qualifiedName = ((PsiAnnotation)pair.getParent().getParent()).getQualifiedName(); return qualifiedName != null && (qualifiedName.equals(SpringMVCRequestMapping.REQUEST_MAPPING) || qualifiedName.equals(SpringMVCModelAttribute.MODEL_ATTRIBUTE)); } } return false; } }), new PsiReferenceProviderBase() { @NotNull @Override public PsiReference[] getReferencesByElement(@NotNull final PsiElement element, @NotNull final ProcessingContext context) { return new PsiReference[] { PsiReferenceBase.createSelfReference(element, PsiTreeUtil.getParentOfType(element, PsiAnnotation.class)) }; } }); } }
jyf588/nimblephysics
dart/realtime/RealTimeControlBuffer.cpp
#include "dart/realtime/RealTimeControlBuffer.hpp" #include <iostream> #include "dart/simulation/World.hpp" namespace dart { namespace realtime { RealTimeControlBuffer::RealTimeControlBuffer( int forceDim, int steps, int millisPerStep) : mForceDim(forceDim), mNumSteps(steps), mMillisPerStep(millisPerStep), mActiveBuffer(UNINITIALIZED), mBufA(Eigen::MatrixXs::Zero(forceDim, steps)), mBufB(Eigen::MatrixXs::Zero(forceDim, steps)), mControlLog(ControlLog(forceDim, millisPerStep)) { } /// Gets the force at a given timestep Eigen::VectorXs RealTimeControlBuffer::getPlannedForce(long time, bool dontLog) { if (mActiveBuffer == UNINITIALIZED) { // Unitialized, default to no force return Eigen::VectorXs::Zero(mForceDim); } int elapsed = time - mLastWroteBufferAt; if (elapsed < 0) { // Asking for some time in the past, default to no force return Eigen::VectorXs::Zero(mForceDim); } int step = (int)floor((s_t)elapsed / mMillisPerStep); if (step < mNumSteps) { if (mActiveBuffer == BUF_A) { if (!dontLog) mControlLog.record(time, mBufA.col(step)); return mBufA.col(step); } else if (mActiveBuffer == BUF_B) { if (!dontLog) mControlLog.record(time, mBufB.col(step)); return mBufB.col(step); } else assert(false && "Should never reach this point"); } else { // std::cout << "WARNING: MPC isn't keeping up!" << std::endl; Eigen::VectorXs oob = Eigen::VectorXs::Zero(mForceDim); if (!dontLog) mControlLog.record(time, oob); return oob; } // The code should never reach here, but it's here to keep the compiler happy throw std::runtime_error{"Execution should never reach this point"}; } /// This gets planned forces starting at `start`, and continuing for the /// length of our buffer size `mSteps`. This is useful for initializing MPC /// runs. It supports walking off the end of known future, and assumes 0 /// forces in all extrapolation. 
void RealTimeControlBuffer::getPlannedForcesStartingAt( long start, Eigen::Ref<Eigen::MatrixXs> forcesOut) { if (mActiveBuffer == UNINITIALIZED) { // Unitialized, default to 0 forcesOut.setZero(); return; } int elapsed = start - mLastWroteBufferAt; if (elapsed < 0) { // Asking for some time in the past, default to 0 forcesOut.setZero(); return; } int startStep = (int)floor((s_t)elapsed / mMillisPerStep); if (startStep < mNumSteps) { // Copy the appropriate block of our active buffer to the forcesOut block if (mActiveBuffer == BUF_A) { forcesOut.block(0, 0, mForceDim, mNumSteps - startStep) = mBufA.block(0, startStep, mForceDim, mNumSteps - startStep); } else if (mActiveBuffer == BUF_B) { forcesOut.block(0, 0, mForceDim, mNumSteps - startStep) = mBufB.block(0, startStep, mForceDim, mNumSteps - startStep); } else assert(false && "Should never reach this point"); // Zero out the remainder of the forcesOut block forcesOut.block(0, mNumSteps - startStep, mForceDim, startStep).setZero(); } else { // std::cout << "WARNING: MPC isn't keeping up!" << std::endl; forcesOut.setZero(); } } /// This swaps in a new buffer of forces. The assumption is that "startAt" is /// before "now", because we'll erase old data in this process. void RealTimeControlBuffer::setControlForcePlan( long startAt, long now, Eigen::MatrixXs forces) { if (startAt > now) { long padMillis = startAt - now; int padSteps = (int)floor((s_t)padMillis / mMillisPerStep); // If we're trying to set the force plan too far out in the future, this // whole exercise is a no-op if (padSteps >= mNumSteps) { return; } // Otherwise, we're going to copy part of the existing plan int currentStep = (int)floor((s_t)(now - mLastWroteBufferAt) / mMillisPerStep); int remainingSteps = mNumSteps - currentStep; mLastWroteBufferAt = now; // If we've overflowed our old buffer, this is bad, but recoverable. We'll // just not copy anything from our old plan, since it's all in the past now // anyways. 
if (remainingSteps < 0) { mBufA = forces; mActiveBuffer = BUF_A; return; } int copySteps = padSteps; int zeroSteps = 0; int useSteps = mNumSteps - padSteps; if (padSteps > remainingSteps) { copySteps = remainingSteps; zeroSteps = padSteps - remainingSteps; useSteps = mNumSteps - padSteps; } assert(copySteps + zeroSteps + useSteps == mNumSteps); if (mActiveBuffer == UNINITIALIZED) { mBufA.block(0, 0, mForceDim, copySteps).setZero(); mBufA.block(0, copySteps, mForceDim, zeroSteps).setZero(); mBufA.block(0, copySteps + zeroSteps, mForceDim, useSteps) = forces.block(0, 0, mForceDim, useSteps); mActiveBuffer = BUF_A; } else if (mActiveBuffer == BUF_B) { mBufA.block(0, 0, mForceDim, copySteps) = mBufB.block(0, mNumSteps - copySteps, mForceDim, copySteps); mBufA.block(0, copySteps, mForceDim, zeroSteps).setZero(); mBufA.block(0, copySteps + zeroSteps, mForceDim, useSteps) = forces.block(0, 0, mForceDim, useSteps); mActiveBuffer = BUF_A; } else if (mActiveBuffer == BUF_A) { mBufB.block(0, 0, mForceDim, copySteps) = mBufA.block(0, mNumSteps - copySteps, mForceDim, copySteps); mBufB.block(0, copySteps, mForceDim, zeroSteps).setZero(); mBufB.block(0, copySteps + zeroSteps, mForceDim, useSteps) = forces.block(0, 0, mForceDim, useSteps); mActiveBuffer = BUF_B; } } else { mLastWroteBufferAt = startAt; if (mActiveBuffer == UNINITIALIZED || mActiveBuffer == BUF_B) { mBufA = forces; // Crucial for lock-free behavior: copy the buffer BEFORE setting the // active buffer. Not a huge deal if we're a bit off here in the // optimizer, but not ideal. mActiveBuffer = BUF_A; } else { mBufB = forces; // Crucial for lock-free behavior: copy the buffer BEFORE setting the // active buffer. Not a huge deal if we're a bit off here in the // optimizer, but not ideal. mActiveBuffer = BUF_B; } } } /// This retrieves the state of the world at a given time, assuming that we've /// been applying forces from the buffer since the last state that we fully /// observed. 
void RealTimeControlBuffer::estimateWorldStateAt( std::shared_ptr<simulation::World> world, ObservationLog* log, long time) { Observation obs = log->getClosestObservationBefore(time); int elapsedSinceObservation = time - obs.time; if (elapsedSinceObservation < 0) { assert( elapsedSinceObservation >= 0 && "estimateWorldStateAt() cannot ask far a time before the earliest available observation."); } int stepsSinceObservation = (int)floor((s_t)elapsedSinceObservation / mMillisPerStep); /* std::cout << "RealTimeControlBuffer time: " << time << std::endl; std::cout << "RealTimeControlBuffer obs.time: " << obs.time << std::endl; std::cout << "RealTimeControlBuffer elapsedSinceObservation: " << elapsedSinceObservation << std::endl; std::cout << "RealTimeControlBuffer stepsSinceObservation: " << stepsSinceObservation << std::endl; */ world->setPositions(obs.pos); world->setVelocities(obs.vel); world->setMasses(log->getMass()); for (int i = 0; i < stepsSinceObservation; i++) { long at = obs.time + i * mMillisPerStep; // In the future, project assuming planned forces if (at > mControlLog.last()) { world->setControlForces(getPlannedForce(at, true)); } // In the past, project using known forces read from the buffer else { world->setControlForces(mControlLog.get(at)); } world->step(); } } /// This rescales the timestep size. This is useful because larger timesteps /// mean fewer time steps per real unit of time, and thus we can run our /// optimization slower and still keep up with real life. void RealTimeControlBuffer::setMillisPerStep(int newMillisPerStep) { mControlLog.setMillisPerStep(newMillisPerStep); if (mActiveBuffer == BUF_A) { rescaleBuffer(mBufA, mMillisPerStep, newMillisPerStep); } else if (mActiveBuffer == BUF_B) { rescaleBuffer(mBufB, mMillisPerStep, newMillisPerStep); } mMillisPerStep = newMillisPerStep; } /// This changes the number of steps. Fewer steps mean we can compute a buffer /// faster, but it also means we have less time to compute the buffer. 
This /// probably has a nonlinear effect on runtime. void RealTimeControlBuffer::setNumSteps(int newNumSteps) { Eigen::MatrixXs newBuf = Eigen::MatrixXs::Zero(mForceDim, newNumSteps); int minLen = newNumSteps; if (mNumSteps < minLen) minLen = mNumSteps; if (mActiveBuffer == BUF_A) { newBuf.block(0, 0, mForceDim, minLen) = mBufA.block(0, 0, mForceDim, minLen); } else if (mActiveBuffer == BUF_B) { newBuf.block(0, 0, mForceDim, minLen) = mBufB.block(0, 0, mForceDim, minLen); } mBufA = newBuf; mBufB = newBuf; } /// This returns the number of millis we have left in the plan after `time`. /// This can be a negative number. long RealTimeControlBuffer::getPlanBufferMillisAfter(long time) { long planEnd = mLastWroteBufferAt + (mNumSteps * mMillisPerStep); return planEnd - time; } /// This is useful when we're replicating a log across a network boundary, /// which comes up in distributed MPC. void RealTimeControlBuffer::manuallyRecordObservedForce( long time, Eigen::VectorXs observation) { mControlLog.record(time, observation); } /// This is a helper to rescale the timestep size of a buffer while leaving /// the data otherwise unchanged. void RealTimeControlBuffer::rescaleBuffer( Eigen::MatrixXs& buf, int oldMillisPerStep, int newMillisPerStep) { Eigen::MatrixXs newBuf = Eigen::MatrixXs::Zero(buf.rows(), buf.cols()); for (int i = mNumSteps - 1; i >= 0; i--) { if (newMillisPerStep > oldMillisPerStep) { // If we're increasing the step size, there's more than one old column per // new column, so map from old to new int newCol = static_cast<int>( floor(static_cast<s_t>(i * oldMillisPerStep) / newMillisPerStep)); newBuf.col(newCol) = buf.col(i); } else { // If we're increasing the step size, there's more than one new column per // old column, so map from new to old int oldCol = static_cast<int>( floor(static_cast<s_t>(i * newMillisPerStep) / oldMillisPerStep)); newBuf.col(i) = buf.col(oldCol); } } buf = newBuf; } } // namespace realtime } // namespace dart
agustafson/sbt-avrohugger
src/sbt-test/avrohugger/filesorter/build.sbt
<gh_stars>100-1000 name := "filesorter-test" sourceGenerators in Compile += (avroScalaGenerate in Compile).taskValue
umontreal-simul/contactcenters
source/umontreal/iro/lecuyer/contactcenters/app/trace/ContactTrace.java
<gh_stars>0 package umontreal.iro.lecuyer.contactcenters.app.trace; /** * Represents an object capable of creating a contact-by-contact trace. * The format and location of the produced trace depends on the * implementation. * * The tracing facility is used as follows. * An * implementation of this interface is initialized at the beginning of the * simulation using the {@link #init()} method. * Each time a contact is processed, the * {@link #writeLine(int,int,int,double,double,String,int,double)} * method is called by some listener. * At the end of the simulation, the {@link #close()} * method is called to close the file or database connection the trace * is sent to. */ public interface ContactTrace { public static final String OUTCOME_BLOCKED = "Blocked"; public static final String OUTCOME_ABANDONED = "Abandoned"; public static final String OUTCOME_SERVED = "Served"; public static final String OUTCOME_FAILED = "Failed"; /** * Initializes the tracing mechanism. This method opens the trace file or * database connection, and writes headers, etc. If an error occurs during * the initialization, this method should log the error, and disable tracing. */ public void init (); /** * Closes the trace facility after a simulation. This method closes files, * database connections, etc. */ public void close (); /** * Writes a new line in the trace representing a simulated contact. * The line includes the step of the simulation at which the contact * occurred, the type of the contact, the period of its arrival, * its time spent in queue, its outcome, the group of its serving agent, and * its service time. * Some of these fields might be {@link Double#NaN} * if the information does not exist. For example, * a blocked or abandoned call does not have a serving agent group, or * a service time. * @param step the step, in the experiment, during which the call occurred. * @param type the type of the call. * @param period the period of arrival of the call. 
* @param arvTime the arrival time. * @param queueTime the time spent by the call in the queue. * @param outcome the outcome of the call. * @param group the group of the serving agent. * @param srvTime the service time of the call. */ public void writeLine (int step, int type, int period, double arvTime, double queueTime, String outcome, int group, double srvTime); }
chenying-wang/usc-ee-coursework-public
ee569/hw4/feature/FeatureMatcher.cpp
<filename>ee569/hw4/feature/FeatureMatcher.cpp<gh_stars>1-10 /** * Name: <NAME> * Email: <EMAIL> * USC ID: ****-****-** * Date: Friday, March 20, 2020 **/ #include <iostream> #include <vector> #include <unordered_map> #include "opencv2/core.hpp" #include "opencv2/features2d.hpp" #include "opencv2/xfeatures2d.hpp" #include "FeatureMatcher.h" #define NUM_OF_SIFT_FEATURES (20) #define NUM_OF_SIFT_BOW_FEATURES (500) #define NUM_OF_SIFT_OCTAVES (3) #define SIFT_CONTRAST_THRESHOLD (0.05) #define SIFT_EDGE_THRESHOLD (12.0) #define SIFT_SIGMA (2.0) #define NUM_OF_BOW_BINS (8) FeatureMatcher::FeatureMatcher() { this->m_pcFeatureDetector = cv::xfeatures2d::SIFT::create(NUM_OF_SIFT_FEATURES, NUM_OF_SIFT_OCTAVES, SIFT_CONTRAST_THRESHOLD, SIFT_EDGE_THRESHOLD, SIFT_SIGMA); this->m_pcBOWFeatureDetector = cv::xfeatures2d::SIFT::create(NUM_OF_SIFT_BOW_FEATURES, NUM_OF_SIFT_OCTAVES, SIFT_CONTRAST_THRESHOLD, SIFT_EDGE_THRESHOLD, SIFT_SIGMA); this->m_pcDescriptorMatcher = cv::DescriptorMatcher::create(cv::DescriptorMatcher::BRUTEFORCE_SL2); } FeatureMatcher::~FeatureMatcher() {} Image *FeatureMatcher::getKeypoints(const cv::Mat *image) const { if (image == nullptr || image->empty()) { return nullptr; } auto keypoints = std::vector<cv::KeyPoint>(); this->m_pcFeatureDetector->detect(*image, keypoints, cv::noArray()); auto keypointsImage = this->drawKeypoints(image, keypoints); keypoints.clear(); return keypointsImage; } Image *FeatureMatcher::getLargestScaleKeypoints(const cv::Mat *image) const { if (image == nullptr || image->empty()) { return nullptr; } auto keypoints = std::vector<cv::KeyPoint>(); this->m_pcFeatureDetector->detect(*image, keypoints, cv::noArray()); auto maxIdx = 0u; for (auto i = 0u; i < keypoints.size(); ++i) { if (keypoints.at(i).size > keypoints.at(maxIdx).size) { maxIdx = i; } } auto maxScaleKeypoint = std::vector<cv::KeyPoint>(1u, keypoints.at(maxIdx)); auto largestScaleKeypointImage = this->drawKeypoints(image, maxScaleKeypoint); keypoints.clear(); return 
largestScaleKeypointImage; } Image *FeatureMatcher::match(const cv::Mat *queryImage, const cv::Mat *trainImage) const { if (queryImage == nullptr || trainImage == nullptr || queryImage->empty() || trainImage->empty()) { return nullptr; } auto queryKeypoints = std::vector<cv::KeyPoint>(), trainKeypoints = std::vector<cv::KeyPoint>(); auto queryDescriptors = cv::Mat(), trainDescriptors = cv::Mat(); this->m_pcFeatureDetector->detectAndCompute(*queryImage, cv::noArray(), queryKeypoints, queryDescriptors); this->m_pcFeatureDetector->detectAndCompute(*trainImage, cv::noArray(), trainKeypoints, trainDescriptors); auto matches = std::vector<cv::DMatch>(); this->m_pcDescriptorMatcher->match(queryDescriptors, trainDescriptors, matches); queryDescriptors.release(); trainDescriptors.release(); Image *resultImage = this->drawMatches(queryImage, queryKeypoints, trainImage, trainKeypoints, matches); queryKeypoints.clear(); trainKeypoints.clear(); return resultImage; } Image *FeatureMatcher::matchLargestScaleKeypoint(const cv::Mat *queryImage, const cv::Mat *trainImage) const { if (queryImage == nullptr || trainImage == nullptr || queryImage->empty() || trainImage->empty()) { return nullptr; } auto queryKeypoints = std::vector<cv::KeyPoint>(), trainKeypoints = std::vector<cv::KeyPoint>(); auto queryDescriptors = cv::Mat(), trainDescriptors = cv::Mat(); this->m_pcFeatureDetector->detect(*queryImage, queryKeypoints, cv::noArray()); auto maxIdx = 0u; for (auto i = 0u; i < queryKeypoints.size(); ++i) { if (queryKeypoints.at(i).size > queryKeypoints.at(maxIdx).size) { maxIdx = i; } } auto maxScaleKeypoint = std::vector<cv::KeyPoint>(1u, queryKeypoints.at(maxIdx)); this->m_pcFeatureDetector->compute(*queryImage, maxScaleKeypoint, queryDescriptors); this->m_pcFeatureDetector->detectAndCompute(*trainImage, cv::noArray(), trainKeypoints, trainDescriptors); auto matches = std::vector<cv::DMatch>(); this->m_pcDescriptorMatcher->match(queryDescriptors, trainDescriptors, matches); 
queryDescriptors.release(); trainDescriptors.release(); auto resultImage = this->drawMatches(queryImage, maxScaleKeypoint, trainImage, trainKeypoints, matches); queryKeypoints.clear(); trainKeypoints.clear(); maxScaleKeypoint.clear(); return resultImage; } Image *FeatureMatcher::bagOfWords(const cv::Mat *image, const char *outputCsvFilename) const { if (image->empty() || image->empty()) { return nullptr; } auto keypoints = std::vector<cv::KeyPoint>(); auto descriptors = cv::Mat(); this->m_pcBOWFeatureDetector->detectAndCompute(*image, cv::noArray(), keypoints, descriptors); auto labels = cv::Mat(); cv::kmeans(descriptors, NUM_OF_BOW_BINS, labels, cv::TermCriteria(cv::TermCriteria::MAX_ITER, 2000u, 1e-7), 6, cv::KmeansFlags::KMEANS_PP_CENTERS); keypoints.clear(); descriptors.release(); std::unordered_map<int, uint> mCount; for (auto i = 0u; i < (uint) labels.rows; ++i) { auto label = labels.at<int>(i, 0); if (!mCount.count(label)) { mCount.insert(std::make_pair(label, 0u)); } ++mCount.at(label); } labels.release(); std::vector<std::vector<std::string>> csv; auto i = 0u; for (auto count : mCount) { csv.push_back(std::vector<std::string>({std::to_string(i++), std::to_string(count.second)})); } this->m_cCSVWriter.write(outputCsvFilename, csv); mCount.clear(); csv.clear(); return this->drawKeypoints(image, keypoints); } Image *FeatureMatcher::matchBagOfWords(const cv::Mat *image, const cv::Mat *referenceImages, const char *outputCsvFilename) const { if (image == nullptr || referenceImages == nullptr || image->empty() || referenceImages->empty()) { return nullptr; } auto keypoints = std::vector<cv::KeyPoint>(), referenceKeypoints = std::vector<cv::KeyPoint>(); auto descriptors = cv::Mat(), referenceDescriptors = cv::Mat(); this->m_pcBOWFeatureDetector->detectAndCompute(*image, cv::noArray(), keypoints, descriptors); this->m_pcBOWFeatureDetector->detectAndCompute(*referenceImages, cv::noArray(), referenceKeypoints, referenceDescriptors); auto referenceLablesMat = 
cv::Mat(), centroids = cv::Mat(); cv::kmeans(referenceDescriptors, NUM_OF_BOW_BINS, referenceLablesMat, cv::TermCriteria(cv::TermCriteria::MAX_ITER, 2000u, 1e-7), 6, cv::KmeansFlags::KMEANS_PP_CENTERS, centroids); auto labels = std::vector<uint>(NUM_OF_BOW_BINS, 0u); auto referenceLabels = std::vector<uint>(NUM_OF_BOW_BINS, 0u); for (auto i = 0u; i < (uint) descriptors.rows; ++i) { auto descriptor = descriptors.row(i); auto minDistance = std::numeric_limits<double>::max(); auto label = 0u; for (auto j = 0u; j < (uint) NUM_OF_BOW_BINS; ++j) { auto distance = cv::norm(descriptor, centroids.row(j), cv::NormTypes::NORM_L2SQR); if (distance < minDistance) { minDistance = distance; label = j; } } ++labels.at(label); } for (auto i = 0u; i < (uint) referenceLablesMat.rows; ++i) { ++referenceLabels.at(referenceLablesMat.at<int>(i, 0)); } referenceLablesMat.release(); std::vector<std::vector<std::string>> csv; for (auto i = 0u; i < (uint) NUM_OF_BOW_BINS; ++i) { csv.push_back(std::vector<std::string>({ std::to_string(i), std::to_string(labels.at(i)), std::to_string(referenceLabels.at(i)) })); } this->m_cCSVWriter.write(outputCsvFilename, csv); labels.clear(); csv.clear(); return this->drawKeypoints(image, keypoints); } Image *FeatureMatcher::drawMatches(const cv::Mat *image1, const std::vector<cv::KeyPoint> keypoints1, const cv::Mat *image2, const std::vector<cv::KeyPoint> keypoints2, const std::vector<cv::DMatch> matches) const { if (matches.empty()) { return nullptr; } const auto pcMathesImageMat = new cv::Mat(); cv::drawMatches(*image1, keypoints1, *image2, keypoints2, matches, *pcMathesImageMat, cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); const auto pcMatchesImage = this->m_cImageCvAdapter.fromCvMat(pcMathesImageMat); pcMathesImageMat->release(); delete pcMathesImageMat; return pcMatchesImage; } Image *FeatureMatcher::drawKeypoints(const cv::Mat *image, const std::vector<cv::KeyPoint> keypoints) const { 
const auto pcKeypointsImageMat = new cv::Mat(); cv::drawKeypoints(*image, keypoints, *pcKeypointsImageMat, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS); const auto pcKeypointsImage = this->m_cImageCvAdapter.fromCvMat(pcKeypointsImageMat); pcKeypointsImageMat->release(); delete pcKeypointsImageMat; return pcKeypointsImage; }
jeffkala/batfish
projects/batfish/src/main/java/org/batfish/vendor/a10/representation/A10Configuration.java
<filename>projects/batfish/src/main/java/org/batfish/vendor/a10/representation/A10Configuration.java package org.batfish.vendor.a10.representation; import static com.google.common.base.MoreObjects.firstNonNull; import static java.util.Comparator.naturalOrder; import static org.batfish.datamodel.Configuration.DEFAULT_VRF_NAME; import static org.batfish.datamodel.FirewallSessionInterfaceInfo.Action.POST_NAT_FIB_LOOKUP; import static org.batfish.datamodel.Prefix.MAX_PREFIX_LENGTH; import static org.batfish.datamodel.acl.AclLineMatchExprs.and; import static org.batfish.datamodel.acl.AclLineMatchExprs.matchDst; import static org.batfish.datamodel.acl.AclLineMatchExprs.matchIcmp; import static org.batfish.datamodel.acl.AclLineMatchExprs.matchIpProtocol; import static org.batfish.datamodel.acl.AclLineMatchExprs.not; import static org.batfish.datamodel.tracking.TrackMethods.negatedReference; import static org.batfish.datamodel.tracking.TrackMethods.reachability; import static org.batfish.vendor.a10.representation.A10Conversion.VIRTUAL_TCP_PORT_TYPES; import static org.batfish.vendor.a10.representation.A10Conversion.VIRTUAL_UDP_PORT_TYPES; import static org.batfish.vendor.a10.representation.A10Conversion.computeAclName; import static org.batfish.vendor.a10.representation.A10Conversion.convertAccessList; import static org.batfish.vendor.a10.representation.A10Conversion.createBgpProcess; import static org.batfish.vendor.a10.representation.A10Conversion.findHaSourceAddress; import static org.batfish.vendor.a10.representation.A10Conversion.findVrrpAEnabledSourceAddress; import static org.batfish.vendor.a10.representation.A10Conversion.generatedFailedTrackMethodName; import static org.batfish.vendor.a10.representation.A10Conversion.generatedServerTrackMethodName; import static org.batfish.vendor.a10.representation.A10Conversion.getEnabledVrids; import static org.batfish.vendor.a10.representation.A10Conversion.getFloatingIpKernelRoutes; import static 
org.batfish.vendor.a10.representation.A10Conversion.getFloatingIps; import static org.batfish.vendor.a10.representation.A10Conversion.getFloatingIpsByHaGroup; import static org.batfish.vendor.a10.representation.A10Conversion.getFloatingIpsForAllVrids; import static org.batfish.vendor.a10.representation.A10Conversion.getInterfaceEnabledEffective; import static org.batfish.vendor.a10.representation.A10Conversion.getNatPoolIps; import static org.batfish.vendor.a10.representation.A10Conversion.getNatPoolIpsByHaGroup; import static org.batfish.vendor.a10.representation.A10Conversion.getNatPoolIpsForAllVrids; import static org.batfish.vendor.a10.representation.A10Conversion.getNatPoolKernelRoutes; import static org.batfish.vendor.a10.representation.A10Conversion.getVirtualServerIps; import static org.batfish.vendor.a10.representation.A10Conversion.getVirtualServerIpsByHaGroup; import static org.batfish.vendor.a10.representation.A10Conversion.getVirtualServerIpsForAllVrids; import static org.batfish.vendor.a10.representation.A10Conversion.getVirtualServerKernelRoutes; import static org.batfish.vendor.a10.representation.A10Conversion.isVrrpAEnabled; import static org.batfish.vendor.a10.representation.A10Conversion.toDstTransformationSteps; import static org.batfish.vendor.a10.representation.A10Conversion.toMatchExpr; import static org.batfish.vendor.a10.representation.A10Conversion.toSnatTransformationStep; import static org.batfish.vendor.a10.representation.A10Conversion.toVrrpGroup; import static org.batfish.vendor.a10.representation.Interface.DEFAULT_MTU; import static org.batfish.vendor.a10.representation.StaticRoute.DEFAULT_STATIC_ROUTE_DISTANCE; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSetMultimap; import com.google.common.collect.ImmutableSortedMap; import 
com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.SetMultimap; import com.google.common.collect.Streams; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.stream.Stream; import javax.annotation.Nonnull; import javax.annotation.Nullable; import org.batfish.common.VendorConversionException; import org.batfish.datamodel.AclIpSpace; import org.batfish.datamodel.ConcreteInterfaceAddress; import org.batfish.datamodel.Configuration; import org.batfish.datamodel.ConfigurationFormat; import org.batfish.datamodel.ConnectedRouteMetadata; import org.batfish.datamodel.DeviceModel; import org.batfish.datamodel.FirewallSessionInterfaceInfo; import org.batfish.datamodel.IcmpCode; import org.batfish.datamodel.IntegerSpace; import org.batfish.datamodel.InterfaceAddress; import org.batfish.datamodel.InterfaceType; import org.batfish.datamodel.Ip; import org.batfish.datamodel.IpProtocol; import org.batfish.datamodel.IpRange; import org.batfish.datamodel.LineAction; import org.batfish.datamodel.Prefix; import org.batfish.datamodel.SwitchportMode; import org.batfish.datamodel.Vrf; import org.batfish.datamodel.VrrpGroup; import org.batfish.datamodel.acl.AclLineMatchExpr; import org.batfish.datamodel.acl.DeniedByAcl; import org.batfish.datamodel.acl.TrueExpr; import org.batfish.datamodel.packet_policy.ApplyFilter; import org.batfish.datamodel.packet_policy.ApplyTransformation; import org.batfish.datamodel.packet_policy.Drop; import org.batfish.datamodel.packet_policy.FibLookup; import org.batfish.datamodel.packet_policy.If; import org.batfish.datamodel.packet_policy.IngressInterfaceVrf; import org.batfish.datamodel.packet_policy.PacketMatchExpr; import org.batfish.datamodel.packet_policy.PacketPolicy; import org.batfish.datamodel.packet_policy.Return; import 
org.batfish.datamodel.packet_policy.Statement; import org.batfish.datamodel.route.nh.NextHopIp; import org.batfish.datamodel.tracking.DecrementPriority; import org.batfish.datamodel.tracking.TrackAction; import org.batfish.datamodel.tracking.TrackMethod; import org.batfish.datamodel.transformation.ApplyAll; import org.batfish.datamodel.transformation.ApplyAny; import org.batfish.datamodel.transformation.Noop; import org.batfish.datamodel.transformation.Transformation; import org.batfish.datamodel.transformation.TransformationStep; import org.batfish.referencelibrary.AddressGroup; import org.batfish.referencelibrary.GeneratedRefBookUtils; import org.batfish.referencelibrary.GeneratedRefBookUtils.BookType; import org.batfish.referencelibrary.ReferenceBook; import org.batfish.vendor.VendorConfiguration; import org.batfish.vendor.a10.representation.A10Conversion.VirtualServerTargetVirtualAddressExtractor; import org.batfish.vendor.a10.representation.Interface.Type; /** Datamodel class representing an A10 device configuration. */ public final class A10Configuration extends VendorConfiguration { @VisibleForTesting public static final String VIRTUAL_SERVERS_PACKET_POLICY_NAME = "~VIRTUAL_SERVERS_PACKET_POLICY~"; public A10Configuration() { _accessLists = new HashMap<>(); _floatingIps = new HashMap<>(); _healthMonitors = new HashMap<>(); _interfacesEthernet = new HashMap<>(); _interfacesLoopback = new HashMap<>(); _interfacesTrunk = new HashMap<>(); _interfacesVe = new HashMap<>(); _natPools = new HashMap<>(); _serviceGroups = new HashMap<>(); _servers = new HashMap<>(); _staticRoutes = new HashMap<>(); _virtualServers = new HashMap<>(); _vlans = new HashMap<>(); } @Nullable public BgpProcess getBgpProcess() { return _bgpProcess; } /** Gets the {@link BgpProcess} for this device, creating a new one if none already exists. 
*/ @Nonnull public BgpProcess getOrCreateBgpProcess(long number) { if (_bgpProcess == null) { _bgpProcess = new BgpProcess(number); } return _bgpProcess; } @Nonnull public AccessList getOrCreateAccessList(String name) { if (!_accessLists.containsKey(name)) { _accessLists.put(name, new AccessList(name)); } return _accessLists.get(name); } @Nonnull public Map<String, AccessList> getAccessLists() { return _accessLists; } /** ACOSv2 {@code floating-ip}s. */ public @Nonnull Map<Ip, FloatingIp> getV2FloatingIps() { return _floatingIps; } /** ACOSv2 {@code ha} configuration. */ public @Nullable Ha getHa() { return _ha; } public @Nonnull Ha getOrCreateHa() { if (_ha == null) { _ha = new Ha(); } return _ha; } @Nonnull public Map<String, HealthMonitor> getHealthMonitors() { return _healthMonitors; } public void createHealthMonitorIfAbsent(String name) { if (_healthMonitors.get(name) == null) { _healthMonitors.put(name, new HealthMonitor(name)); } } @Override public String getHostname() { return _hostname; } @Nonnull public Map<Integer, Interface> getInterfacesEthernet() { return _interfacesEthernet; } @Nonnull public Map<Integer, Interface> getInterfacesLoopback() { return _interfacesLoopback; } @Nonnull public Map<Integer, TrunkInterface> getInterfacesTrunk() { return _interfacesTrunk; } @Nonnull public Map<Integer, Interface> getInterfacesVe() { return _interfacesVe; } @Nonnull public Map<String, NatPool> getNatPools() { return _natPools; } @Nonnull public Map<String, ServiceGroup> getServiceGroups() { return ImmutableMap.copyOf(_serviceGroups); } @Nonnull public ServiceGroup getOrCreateServiceGroup(String name, ServerPort.Type type) { return _serviceGroups.computeIfAbsent(name, n -> new ServiceGroup(name, type)); } @Nonnull public Map<String, Server> getServers() { return _servers; } @Nonnull public Map<String, VirtualServer> getVirtualServers() { return Collections.unmodifiableMap(_virtualServers); } @Nullable public VirtualServer getVirtualServer(String name) { return 
_virtualServers.get(name); } /** * Get the {@link VirtualServer} corresponding to the provided name, creating it (with the * specified {@link VirtualServerTarget}) if it doesn't already exist. */ @Nonnull public VirtualServer getOrCreateVirtualServer(String name, VirtualServerTarget target) { return _virtualServers.computeIfAbsent(name, n -> new VirtualServer(n, target)); } /** Map of route {@link Prefix} to {@link StaticRouteManager} for that prefix. */ @Nonnull public Map<Prefix, StaticRouteManager> getStaticRoutes() { return _staticRoutes; } @Override public void setHostname(String hostname) { _hostname = hostname.toLowerCase(); _rawHostname = hostname; } @Override public void setVendor(ConfigurationFormat format) { _vendor = format; } @VisibleForTesting public static int getInterfaceMtuEffective(Interface iface) { return firstNonNull(iface.getMtu(), DEFAULT_MTU); } @Nonnull public Map<Integer, Vlan> getVlans() { return _vlans; } @Nonnull public static InterfaceType getInterfaceType(Interface iface) { switch (iface.getType()) { case ETHERNET: return InterfaceType.PHYSICAL; case LOOPBACK: return InterfaceType.LOOPBACK; case VE: return InterfaceType.VLAN; case TRUNK: return InterfaceType.AGGREGATED; default: assert false; return InterfaceType.UNKNOWN; } } @Nonnull public static String getInterfaceName(InterfaceReference ref) { return getInterfaceName(ref.getType(), ref.getNumber()); } @Nonnull public static String getInterfaceName(Interface iface) { return getInterfaceName(iface.getType(), iface.getNumber()); } @Nonnull public static String getInterfaceName(Interface.Type type, int num) { return getInterfaceHumanName(type, num).replace(" ", ""); } @Nonnull public static String getInterfaceHumanName(Interface iface) { return getInterfaceHumanName(iface.getType(), iface.getNumber()); } @Nonnull public static String getInterfaceHumanName(Interface.Type type, int num) { if (type == Interface.Type.VE) { return String.format("VirtualEthernet %s", num); } String typeStr 
= type.toString(); // Only the first letter should be capitalized, similar to A10 `show` data return String.format( "%s%s %s", typeStr.substring(0, 1), typeStr.substring(1).toLowerCase(), num); } public @Nullable VrrpA getVrrpA() { return _vrrpA; } public @Nonnull VrrpA getOrCreateVrrpA() { if (_vrrpA == null) { _vrrpA = new VrrpA(); } return _vrrpA; } @Nonnull @Override public List<Configuration> toVendorIndependentConfigurations() throws VendorConversionException { String hostname = getHostname(); _c = new Configuration(hostname, _vendor); _c.setHumanName(_rawHostname); _c.setDeviceModel(DeviceModel.A10); _c.setDefaultCrossZoneAction(LineAction.DENY); _c.setDefaultInboundAction(LineAction.PERMIT); _c.setExportBgpFromBgpRib(true); // Generated default VRF Vrf vrf = new Vrf(DEFAULT_VRF_NAME); _c.setVrfs(ImmutableMap.of(DEFAULT_VRF_NAME, vrf)); _ifaceNametoIface = new HashMap<>(); _interfacesLoopback.forEach( (num, iface) -> { convertInterface(iface, vrf); _ifaceNametoIface.put(getInterfaceName(iface), iface); }); _interfacesEthernet.forEach( (num, iface) -> { convertInterface(iface, vrf); _ifaceNametoIface.put(getInterfaceName(iface), iface); }); _interfacesVe.forEach( (num, iface) -> { convertInterface(iface, vrf); _ifaceNametoIface.put(getInterfaceName(iface), iface); }); _interfacesTrunk.forEach((num, iface) -> convertInterface(iface, vrf)); _staticRoutes.forEach( (prefix, manager) -> manager.getVariants().forEach((ip, sr) -> convertStaticRoute(vrf, prefix, sr))); convertAccessLists(); // Must be done after interface conversion convertVirtualServers(); convertHealthChecks(); convertVrrpA(); convertHa(); createKernelRoutes(); convertBgp(); markStructures(); generateReferenceBook(); generateNatPoolIpSpaces(); return ImmutableList.of(_c); } /** * Creates {@link TrackMethod}s for each enabled IPv4 server health check, searchable by server * IPv4 address. 
*/ private void convertHealthChecks() { for (Server server : _servers.values()) { if (!(server.getTarget() instanceof ServerTargetAddress)) { // Not an IPv4 server continue; } if (!firstNonNull(server.getEnable(), true)) { // server is disabled continue; } if (firstNonNull(server.getHealthCheckDisable(), false)) { // health check is disabled continue; } String healthMonitorName = server.getHealthCheck(); if (healthMonitorName == null) { // no associated health monitor continue; } HealthMonitor healthMonitor = _healthMonitors.get(healthMonitorName); if (healthMonitor == null) { // undefined health monitor continue; } Ip ip = ((ServerTargetAddress) server.getTarget()).getAddress(); // TODO: Use configured health monitor method (e.g. ICMP, TCP/123, etc.) // instead of forcing ICMP (which is the default) TrackMethod method = reachability(ip, DEFAULT_VRF_NAME); String methodName = generatedServerTrackMethodName(ip); _c.getTrackingGroups().put(methodName, method); } } /** Returns map: template name -> generated track method name -> action */ private @Nonnull Map<String, Map<String, TrackAction>> convertFailOverPolicyTemplates() { if (_vrrpA == null) { return ImmutableMap.of(); } ImmutableMap.Builder<String, Map<String, TrackAction>> builder = ImmutableMap.builder(); _vrrpA .getFailOverPolicyTemplates() .forEach( (templateName, template) -> { ImmutableMap.Builder<String, TrackAction> actionsBuilder = ImmutableMap.builder(); template .getGateways() .forEach( (ip, decrement) -> { String trackMethodName = generatedServerTrackMethodName(ip); if (!_c.getTrackingGroups().containsKey(trackMethodName)) { // unusable gateway health check return; } TrackAction action = new DecrementPriority(decrement); String failedTrackMethodName = createFailedTrackIfNeeded(trackMethodName); actionsBuilder.put(failedTrackMethodName, action); }); builder.put(templateName, actionsBuilder.build()); }); return builder.build(); } /** Creates and puts a {@link ReferenceBook} for virtual servers defined 
in the configuration */ private void generateReferenceBook() { String virtualAddressesBookname = GeneratedRefBookUtils.getName(_hostname, BookType.VirtualAddresses); _c.getGeneratedReferenceBooks() .put( virtualAddressesBookname, ReferenceBook.builder(virtualAddressesBookname) .setAddressGroups( _virtualServers.values().stream() .map( vServer -> new AddressGroup( ImmutableSortedSet.of( VirtualServerTargetVirtualAddressExtractor.INSTANCE .visit(vServer.getTarget()) .toString()), vServer.getName())) .collect(ImmutableList.toImmutableList())) .build()); } /** Creates named IpSpaces from configured NAT pools. */ private void generateNatPoolIpSpaces() { _natPools.forEach( (name, pool) -> _c.getIpSpaces() .put(ipSpaceNameForNatPool(name), IpRange.range(pool.getStart(), pool.getEnd()))); } @VisibleForTesting static String ipSpaceNameForNatPool(String natPoolName) { return String.format("NatPool~%s", natPoolName); } private void convertBgp() { if (_bgpProcess == null) { return; } createBgpProcess(_bgpProcess, _c, _w); } /** * Create {@link org.batfish.datamodel.KernelRoute}s from {@code ip nat pool} networks, {@code slb * virtual-server}s, ACOSv5 {@code vrrp-a vrid floating-ip}s, and ACOSv2 {@code floating-ip}s. */ private void createKernelRoutes() { _c.getDefaultVrf() .setKernelRoutes( Streams.concat( getNatPoolKernelRoutes(_natPools.values()), getVirtualServerKernelRoutes(_virtualServers.values()), _vrrpA != null ? getFloatingIpKernelRoutes(_vrrpA) : Stream.of(), getFloatingIpKernelRoutes(_floatingIps.keySet())) .collect(ImmutableSortedSet.toImmutableSortedSet(naturalOrder()))); } private void convertVrrpA() { Map<String, Map<String, TrackAction>> failOverPolicyTemplateActions = convertFailOverPolicyTemplates(); // If vrrp-a is disabled, then the device should act as if it owns all addresses that would have // been part of vrrp-a. // If vrrp-a is enabled, then the device should own all addresses for VRIDs that are enabled. 
if (isVrrpAEnabled(_vrrpA)) { convertVrrpAEnabled(failOverPolicyTemplateActions); } else if (_ha == null) { convertVrrpADisabled(); } } /** * Process vrrp-a vrids in the case vrrp-a is disabled. Causes the device to own all virtual IPs * for all vrids. */ private void convertVrrpADisabled() { // Overview: // - Add all virtual addresses to every inteface with a concrete IPv4 address. // - Set address metadata so no connected nor local routes are generated for virtual addresses. Set<ConcreteInterfaceAddress> virtualAddresses = Streams.concat( getNatPoolIpsForAllVrids(_natPools.values()), getVirtualServerIpsForAllVrids(_virtualServers.values()), _vrrpA != null ? getFloatingIpsForAllVrids(_vrrpA) : Stream.of()) .map(ip -> ConcreteInterfaceAddress.create(ip, MAX_PREFIX_LENGTH)) .collect(ImmutableSet.toImmutableSet()); ConnectedRouteMetadata connectedRouteMetadata = ConnectedRouteMetadata.builder() .setGenerateConnectedRoute(false) .setGenerateLocalRoute(false) .build(); SortedMap<ConcreteInterfaceAddress, ConnectedRouteMetadata> addressMetadata = virtualAddresses.stream() .collect( ImmutableSortedMap.toImmutableSortedMap( naturalOrder(), virtualAddress -> virtualAddress, unused -> connectedRouteMetadata)); _c.getAllInterfaces().values().stream() .filter(A10Conversion::vrrpADisabledAppliesToInterface) .forEach( iface -> { iface.setAllAddresses( ImmutableSortedSet.<InterfaceAddress>naturalOrder() .addAll(iface.getAllAddresses()) .addAll(virtualAddresses) .build()); iface.setAddressMetadata( ImmutableSortedMap .<ConcreteInterfaceAddress, ConnectedRouteMetadata>naturalOrder() .putAll(iface.getAddressMetadata()) .putAll(addressMetadata) .build()); }); } /** * Process vrrp-a vrids in the case vrrp-a is enabled. Causes the device to own all virtual * addresses for each vrid for which it is converted VRRP master. 
*/ private void convertVrrpAEnabled( // template name -> generated track method name -> action Map<String, Map<String, TrackAction>> failOverPolicyTemplateActions) { // Overview: // - Add a VrrpGroup for each enabled vrid on the first found L3 interface owning a subnet // containing any vrrp-a peer-group ip // - abort if no peer-group ips are set // - Each VrrpGroup should assign the addresses to every OTHER L3 interface with a primary // ConcreteInterfaceAddress. // - Each created VrrpGroup contains all the virtual addresses the device should own when it is // master for the corresponding vrid. assert _vrrpA != null; Set<Ip> peerIps = _vrrpA.getPeerGroup(); if (peerIps.isEmpty()) { _w.redFlag("Batfish does not support vrrp-a without at least one peer-group peer-ip"); return; } ConcreteInterfaceAddress sourceAddress = null; org.batfish.datamodel.Interface peerInterface = null; for (org.batfish.datamodel.Interface iface : _c.getAllInterfaces().values()) { Optional<ConcreteInterfaceAddress> maybeSourceAddress = findVrrpAEnabledSourceAddress(iface, peerIps); if (maybeSourceAddress.isPresent()) { sourceAddress = maybeSourceAddress.get(); peerInterface = iface; break; } } if (peerInterface == null) { _w.redFlag( String.format( "Could not find any interface in a subnet containing any of the peer IPs: %s", peerIps)); return; } // vrid -> virtual addresses ImmutableSetMultimap.Builder<Integer, Ip> virtualAddressesByEnabledVridBuilder = ImmutableSetMultimap.builder(); // VRID 0 always exists, but may be disabled. Other VRIDs exist only if they are declared, and // cannot be disabled independently. 
// Grab the virtual addresses for each VRID from NAT pools and virtual-servers getEnabledVrids(_vrrpA) .forEach( vrid -> { Streams.concat( getNatPoolIps(_natPools.values(), vrid), getVirtualServerIps(_virtualServers.values(), vrid), getFloatingIps(_vrrpA, vrid)) .forEach(ip -> virtualAddressesByEnabledVridBuilder.put(vrid, ip)); }); SetMultimap<Integer, Ip> virtualAddressesByEnabledVrid = virtualAddressesByEnabledVridBuilder.build(); // VRID 0 may be used even if it is not configured explicitly. assert virtualAddressesByEnabledVrid.keySet().stream() .allMatch(vrid -> vrid == 0 || _vrrpA.getVrids().containsKey(vrid)); // Create VrrpGroup builders for each vrid. We cannot make final VrrpGroups because we are // missing source address, which varies per interface. ImmutableSortedMap.Builder<Integer, VrrpGroup> vrrpGroupsBuilder = ImmutableSortedMap.naturalOrder(); // Addresses should be assigned to all non-loopback L3 interfaces other than the peer interface. final org.batfish.datamodel.Interface finalPeerInterface = peerInterface; List<String> ipOwnerInterfaces = _c.getAllInterfaces().values().stream() .filter( i -> i != finalPeerInterface && i.getInterfaceType() != InterfaceType.LOOPBACK && !i.getAllConcreteAddresses().isEmpty()) .map(org.batfish.datamodel.Interface::getName) .collect(ImmutableList.toImmutableList()); final ConcreteInterfaceAddress finalSourceAddress = sourceAddress; virtualAddressesByEnabledVrid .asMap() .forEach( (vrid, virtualAddresses) -> vrrpGroupsBuilder.put( vrid, toVrrpGroup( _vrrpA.getVrids().get(vrid), finalSourceAddress, virtualAddresses, ipOwnerInterfaces, failOverPolicyTemplateActions))); // Assign the VRRP groups to the peer interface peerInterface.setVrrpGroups(vrrpGroupsBuilder.build()); } /** Convert ha configuration for ACOSv2. */ private void convertHa() { // TODO: Support virtual-addresses in case ha is disabled on ACOSv2. // May need different behavior than convertVrrpADisabled(), which happens now in that // case. 
if (_ha != null) { convertHaEnabled(); } } /** * Process ha groups in the case ha is enabled. Causes the device to own all virtual addresses for * each ha-group for which it is converted VRRP master. */ private void convertHaEnabled() { // Overview: // - Add a VrrpGroup for each enabled ha-group on the L3 interface owning subnet containing // conn-mirror ip (the HA heartbeat interface) // - abort if no conn-mirror ip is set // - Each created VrrpGroup contains all the virtual addresses the device should own when it is // master for the corresponding vrid (using ha group id as vrid). // - Each created VrrpGroup should set the owned IPs on all L3 interfaces EXCEPT the heartbeat // interface. assert _ha != null; Ip connMirror = _ha.getConnMirror(); if (connMirror == null) { _w.redFlag("Batfish does not support ha without explicit conn-mirror"); return; } Map<String, TrackAction> trackActions = convertHaChecks(); // Find the ha heartbeat interface and source IP org.batfish.datamodel.Interface heartbeatInterface = null; ConcreteInterfaceAddress sourceAddress = null; for (org.batfish.datamodel.Interface iface : _c.getAllInterfaces().values()) { Optional<ConcreteInterfaceAddress> maybeSourceAddress = findHaSourceAddress(iface, connMirror); if (maybeSourceAddress.isPresent()) { heartbeatInterface = iface; sourceAddress = maybeSourceAddress.get(); break; } } if (heartbeatInterface == null) { // Abort, since we couldn't find the heartbeat interface. _w.redFlag( String.format( "Could not find any interface with address in subnet of ha conn-mirror IP %s", connMirror)); return; } // ha group id -> virtual addresses ImmutableSetMultimap.Builder<Integer, Ip> virtualAddressesByEnabledHaGroupBuilder = ImmutableSetMultimap.builder(); // VRID 0 always exists, but may be disabled. Other VRIDs exist only if they are declared, and // cannot be disabled independently. 
// Grab the virtual addresses for each VRID from NAT pools and virtual-servers _ha.getGroups() .forEach( (haGroupId, haGroup) -> { Streams.concat( getNatPoolIpsByHaGroup(_natPools.values(), haGroupId), getVirtualServerIpsByHaGroup(_virtualServers.values(), haGroupId), getFloatingIpsByHaGroup(_floatingIps, haGroupId)) .forEach(ip -> virtualAddressesByEnabledHaGroupBuilder.put(haGroupId, ip)); }); SetMultimap<Integer, Ip> virtualAddressesByEnabledHaGroup = virtualAddressesByEnabledHaGroupBuilder.build(); // Addresses should be assigned to all non-loopback L3 interfaces other than the heartbeat // interface final org.batfish.datamodel.Interface finalHeartbeatInterface = heartbeatInterface; List<String> ipOwnerInterfaces = _c.getAllInterfaces().values().stream() .filter( i -> i != finalHeartbeatInterface && i.getInterfaceType() != InterfaceType.LOOPBACK && !i.getAllConcreteAddresses().isEmpty()) .map(org.batfish.datamodel.Interface::getName) .collect(ImmutableList.toImmutableList()); // Create VrrpGroups for each ha group, with addresses assigned to all other L3 interfaces.. 
ImmutableSortedMap.Builder<Integer, VrrpGroup> vrrpGroupsBuilder = ImmutableSortedMap.naturalOrder(); final ConcreteInterfaceAddress finalSourceAddress = sourceAddress; virtualAddressesByEnabledHaGroup .asMap() .forEach( (haGroupId, virtualAddresses) -> vrrpGroupsBuilder.put( haGroupId, toVrrpGroup( haGroupId, _ha, finalSourceAddress, virtualAddresses, ipOwnerInterfaces, trackActions))); // Assign the VRRP groups to the heartbeat interface heartbeatInterface.setVrrpGroups(vrrpGroupsBuilder.build()); } /** Returns map: trackMethodName -> action */ private @Nonnull Map<String, TrackAction> convertHaChecks() { assert _ha != null; ImmutableMap.Builder<String, TrackAction> builder = ImmutableMap.builder(); for (Ip ip : _ha.getCheckGateways()) { String trackMethodName = generatedServerTrackMethodName(ip); if (!_c.getTrackingGroups().containsKey(trackMethodName)) { // unusable gateway health check continue; } // TODO: Docs say this device should no longer participate in HA if check fails, but we don't // currently have an action for that. For now, best we can do is reduce priority to // minimum. String failedTrackMethodName = createFailedTrackIfNeeded(trackMethodName); builder.put(failedTrackMethodName, new DecrementPriority(255)); } // TODO: other check types return builder.build(); } private @Nonnull String createFailedTrackIfNeeded(String trackMethodName) { String failedTrackMethodName = generatedFailedTrackMethodName(trackMethodName); if (!_c.getTrackingGroups().containsKey(failedTrackMethodName)) { _c.getTrackingGroups().put(failedTrackMethodName, negatedReference(trackMethodName)); } return failedTrackMethodName; } private void convertAccessLists() { _accessLists.forEach((name, acl) -> convertAccessList(acl, _c, _filename)); } /** * Convert virtual-servers to load-balancing VI constructs and attach resulting ACLs and * transformations to interfaces. Modifies VI interfaces and must be called after those are * created. 
*/ private void convertVirtualServers() { // Build transformation statements Return returnFibLookup = new Return(new FibLookup(IngressInterfaceVrf.instance())); List<Statement> transformationStatements = getTransformationStatements(returnFibLookup); // Apply transformations at each interface, along with incoming access list if present _c.getAllInterfaces() .forEach( (name, iface) -> { iface.setFirewallSessionInterfaceInfo( new FirewallSessionInterfaceInfo( POST_NAT_FIB_LOOKUP, ImmutableList.of(iface.getName()), null, null)); String incomingFilter = // TODO Can trunk interfaces configure access-list? // They are not included in _ifaceNametoIface. Optional.ofNullable(_ifaceNametoIface.get(name)) .map(Interface::getAccessListIn) .orElse(null); String policyName = packetPolicyName(incomingFilter); // Create packet policy if it doesn't already exist if (!_c.getPacketPolicies().containsKey(policyName)) { PacketPolicy policy; if (incomingFilter == null) { policy = new PacketPolicy(policyName, transformationStatements, returnFibLookup); } else { List<Statement> policyStatements = ImmutableList.<Statement>builder() .add( new If( new PacketMatchExpr( new DeniedByAcl(computeAclName(incomingFilter))), ImmutableList.of(new Return(Drop.instance())))) .addAll(transformationStatements) .build(); policy = new PacketPolicy(policyName, policyStatements, returnFibLookup); } _c.getPacketPolicies().put(policyName, policy); } iface.setPacketPolicy(policyName); }); } @VisibleForTesting public static String packetPolicyName(@Nullable String incomingFilter) { if (incomingFilter == null) { return VIRTUAL_SERVERS_PACKET_POLICY_NAME; } return String.format("~PACKET_POLICY_%s~", incomingFilter); } /** * Generate {@link Statement packet policy statements} encoding the NAT transformations for this * device. 
*/ private List<Statement> getTransformationStatements(Return returnFibLookup) { ImmutableList.Builder<Statement> statements = ImmutableList.builder(); // Apply transformations to traffic matching virtual servers. These statements return FibLookup // after applying the transformation. _virtualServers.values().stream() .filter(A10Conversion::isVirtualServerEnabled) .forEach(vs -> statements.add(toStatement(vs, returnFibLookup))); // Drop any remaining non-ping traffic destined to a VIP. Set<Ip> vips = _virtualServers.values().stream() .filter(A10Conversion::isVirtualServerEnabled) .map(VirtualServer::getTarget) .map(VirtualServerTargetVirtualAddressExtractor.INSTANCE::visit) .collect(ImmutableSet.toImmutableSet()); if (!vips.isEmpty()) { AclLineMatchExpr matchesVip = matchDst( AclIpSpace.union( vips.stream().map(Ip::toIpSpace).collect(ImmutableSet.toImmutableSet()))); AclLineMatchExpr ping = and(matchIpProtocol(IpProtocol.ICMP), matchIcmp(IcmpCode.ECHO_REQUEST)); statements.add( new If( new PacketMatchExpr(and(matchesVip, not(ping))), ImmutableList.of(new Return(Drop.instance())))); } return statements.build(); } /** * Convert specified {@link VirtualServer}'s transformations into a packet policy {@link * Statement}. */ private Statement toStatement(VirtualServer vs, Return returnFibLookup) { return new If( new PacketMatchExpr(toMatchExpr(vs, _filename)), vs.getPorts().values().stream() .filter(A10Conversion::isVirtualServerPortEnabled) .map(vsPort -> toStatement(vsPort, returnFibLookup)) .collect(ImmutableList.toImmutableList())); } /** * Convert specified {@link VirtualServerPort}'s transformations into a packet policy {@link * Statement}. This statement returns a {@link FibLookup} for matching traffic, or a {@link Drop} * for matching traffic not permitted by the virtual server's (optional) ACL. 
*/ private Statement toStatement(VirtualServerPort port, Return returnFibLookup) { ImmutableList.Builder<Statement> trueStatements = ImmutableList.builder(); String aclName = port.getAccessList(); if (aclName != null) { String viAclName = computeAclName(aclName); assert _c.getIpAccessLists().containsKey(viAclName); trueStatements.add(new ApplyFilter(viAclName)); } trueStatements.add( new ApplyTransformation( new Transformation( TrueExpr.INSTANCE, ImmutableList.of(toTransformationStep(port)), null, null))); trueStatements.add(returnFibLookup); return new If(new PacketMatchExpr(toMatchExpr(port)), trueStatements.build()); } /** * Returns the full transformation (DNAT and SNAT if applicable) for the specified virtual-server * port. */ @Nonnull private TransformationStep toTransformationStep(VirtualServerPort port) { // No service-group means no load balancing String serviceGroupName = port.getServiceGroup(); if (serviceGroupName == null) { return Noop.NOOP_DEST_NAT; } ServiceGroup serviceGroup = _serviceGroups.get(serviceGroupName); assert serviceGroup != null; ApplyAny dnatStep = new ApplyAny(toDstTransformationSteps(serviceGroup, _servers)); String snatName = port.getSourceNat(); if (snatName == null) { return dnatStep; } NatPool natPool = _natPools.get(snatName); TransformationStep snatStep = toSnatTransformationStep(natPool); return new ApplyAll(snatStep, dnatStep); } private void convertStaticRoute(Vrf vrf, Prefix prefix, StaticRoute staticRoute) { vrf.getStaticRoutes() .add( org.batfish.datamodel.StaticRoute.builder() .setNetwork(prefix) .setNextHop(NextHopIp.of(staticRoute.getForwardingRouterAddress())) .setAdministrativeCost( firstNonNull(staticRoute.getDistance(), DEFAULT_STATIC_ROUTE_DISTANCE)) .setRecursive(false) .build()); } private void markStructures() { A10StructureType.CONCRETE_STRUCTURES.forEach(this::markConcreteStructure); A10StructureType.ABSTRACT_STRUCTURES.asMap().forEach(this::markAbstractStructureAllUsages); } /** * Convert specified VS {@link 
Interface} in provided {@link Vrf} to a VI model {@link * org.batfish.datamodel.Interface} attached to the VI {@link Configuration}. */ private void convertInterface(Interface iface, Vrf vrf) { String name = getInterfaceName(iface); boolean enabledEffective = getInterfaceEnabledEffective(iface, _majorVersionNumber); org.batfish.datamodel.Interface.Builder newIface = org.batfish.datamodel.Interface.builder() .setAdminUp(enabledEffective) .setMtu(getInterfaceMtuEffective(iface)) .setType(getInterfaceType(iface)) .setName(name) .setVrf(vrf) .setOwner(_c); // A10 interface `name` is more like a description than an actual name newIface.setDescription(iface.getName()); newIface.setHumanName(getInterfaceHumanName(iface)); newIface.setDeclaredNames(ImmutableList.of(name)); if (iface.getIpAddress() != null) { ConcreteInterfaceAddress address = iface.getIpAddress(); ConnectedRouteMetadata meta = ConnectedRouteMetadata.builder().setGenerateLocalRoute(false).build(); newIface.setAddress(address); newIface.setAddressMetadata(ImmutableMap.of(address, meta)); if (iface.getType() != Type.LOOPBACK) { newIface.setProxyArp(true); } } // VLANs boolean vlanIsConfigured = hasVlanSettings(iface); if (vlanIsConfigured) { setVlanSettings(iface, newIface); } // Aggregates and members - must happen after initial VLAN settings are set if (iface instanceof TrunkInterface) { TrunkInterface trunkIface = (TrunkInterface) iface; String trunkName = getInterfaceName(iface); ImmutableSet<String> memberNames = trunkIface.getMembers().stream() .map(A10Configuration::getInterfaceName) .filter( memberName -> { boolean ifaceExists = _ifaceNametoIface.containsKey(memberName); if (!ifaceExists) { // Cannot tell if this missing member would invalidate other members or not // So, optimistically leave other members _w.redFlag( String.format( "Trunk member %s does not exist, cannot add to %s", memberName, trunkName)); } return ifaceExists; }) .collect(ImmutableSet.toImmutableSet()); if (memberNames.isEmpty()) { 
_w.redFlag( String.format( "%s does not contain any member interfaces", getInterfaceName(Interface.Type.TRUNK, trunkIface.getNumber()))); } else { newIface.setChannelGroupMembers(memberNames); newIface.setDependencies( memberNames.stream() .map( member -> new org.batfish.datamodel.Interface.Dependency( member, org.batfish.datamodel.Interface.DependencyType.AGGREGATE)) .collect(ImmutableSet.toImmutableSet())); // If this trunk doesn't have VLAN configured directly (e.g. ACOS v2), inherit it if (!vlanIsConfigured) { if (vlanSettingsDifferent(memberNames)) { _w.redFlag( String.format( "VLAN settings for members of %s are different, ignoring their VLAN settings", trunkName)); } else { // All members have the same VLAN settings, so just use the first String firstMemberName = memberNames.iterator().next(); setVlanSettings(_ifaceNametoIface.get(firstMemberName), newIface); } } else { if (memberNames.stream() .anyMatch(memberName -> hasVlanSettings(_ifaceNametoIface.get(memberName)))) { _w.redFlag( String.format( "Cannot configure VLAN settings on %s as well as its members. Member VLAN" + " settings will be ignored.", trunkName)); } } } } if (iface.getType() == Interface.Type.ETHERNET) { InterfaceReference ifaceRef = new InterfaceReference(iface.getType(), iface.getNumber()); _interfacesTrunk.values().stream() .filter(t -> t.getMembers().contains(ifaceRef)) .findFirst() .ifPresent( t -> { newIface.setChannelGroup(getInterfaceName(Interface.Type.TRUNK, t.getNumber())); // TODO determine if switchport settings need to be propagated to member interfaces }); } newIface.build(); } /** * Check if any VLAN settings for {@link Interface}s (specified by name) are different. * * <p>All specified interface names must correspond to existent interfaces, in the {@code * _ifaceNameToIface} map. 
*/ private boolean vlanSettingsDifferent(Collection<String> names) { Stream<org.batfish.datamodel.Interface> distinctVlanSettings = names.stream() .map( name -> { org.batfish.datamodel.Interface.Builder baseIface = org.batfish.datamodel.Interface.builder().setName(""); setVlanSettings(_ifaceNametoIface.get(name), baseIface); return baseIface.build(); }) .distinct(); return distinctVlanSettings.count() > 1; } /** * Set VLAN settings of the specified VI {@link org.batfish.datamodel.Interface.Builder} based on * the specified {@link Interface}. */ private void setVlanSettings(Interface iface, org.batfish.datamodel.Interface.Builder viIface) { viIface.setSwitchportMode(SwitchportMode.NONE); List<Vlan> taggedVlans = getTaggedVlans(iface); Optional<Vlan> untaggedVlan = getUntaggedVlan(iface); IntegerSpace.Builder allVlans = IntegerSpace.builder(); if (untaggedVlan.isPresent()) { viIface.setSwitchportMode(SwitchportMode.TRUNK); viIface.setSwitchport(true); viIface.setNativeVlan(untaggedVlan.get().getNumber()); allVlans.including(untaggedVlan.get().getNumber()); } if (!taggedVlans.isEmpty()) { viIface.setSwitchportMode(SwitchportMode.TRUNK); viIface.setSwitchport(true); taggedVlans.forEach(vlan -> allVlans.including(vlan.getNumber())); } viIface.setAllowedVlans(allVlans.build()); if (iface.getType() == Interface.Type.VE) { int vlanNumber = iface.getNumber(); viIface.setVlan(vlanNumber); } } /** Returns a boolean indicating if VLAN settings exist for the supplied {@link Interface}. */ private boolean hasVlanSettings(Interface iface) { List<Vlan> taggedVlans = getTaggedVlans(iface); Optional<Vlan> untaggedVlan = getUntaggedVlan(iface); return untaggedVlan.isPresent() || !taggedVlans.isEmpty() || iface.getType() == Interface.Type.VE; } /** Get the untagged VLAN for the specified interface, if one exists. 
*/ @Nonnull public Optional<Vlan> getUntaggedVlan(Interface iface) { InterfaceReference ref = new InterfaceReference(iface.getType(), iface.getNumber()); return _vlans.values().stream().filter(v -> v.getUntagged().contains(ref)).findFirst(); } /** Returns all VLANs associated with the specified tagged interface. */ @Nonnull private List<Vlan> getTaggedVlans(Interface iface) { InterfaceReference ref = new InterfaceReference(iface.getType(), iface.getNumber()); return _vlans.values().stream() .filter(v -> v.getTagged().contains(ref)) .collect(ImmutableList.toImmutableList()); } /** * Returns a boolean indicating if the specified {@link ServerPort.Type} and {@link * VirtualServerPort.Type} are compatible. */ public static boolean arePortTypesCompatible( ServerPort.Type realType, VirtualServerPort.Type virtualType) { if (realType == ServerPort.Type.UDP) { return VIRTUAL_UDP_PORT_TYPES.contains(virtualType); } assert realType == ServerPort.Type.TCP; return VIRTUAL_TCP_PORT_TYPES.contains(virtualType); } /** * Returns the major version number determined for this configuration. Returns {@code null} if no * version number could be determined. */ @Nullable public Integer getMajorVersionNumber() { return _majorVersionNumber; } public void setMajorVersionNumber(@Nullable Integer majorVersionNumber) { _majorVersionNumber = majorVersionNumber; } /** * Finalize configuration after it is finished being built. Does things like making structures * immutable. * * <p>This should only be called once, at the end of parsing and extraction. 
*/ public void finalizeStructures() { _accessLists = ImmutableMap.copyOf(_accessLists); _floatingIps = ImmutableMap.copyOf(_floatingIps); _healthMonitors = ImmutableMap.copyOf(_healthMonitors); _interfacesEthernet = ImmutableMap.copyOf(_interfacesEthernet); _interfacesLoopback = ImmutableMap.copyOf(_interfacesLoopback); _interfacesVe = ImmutableMap.copyOf(_interfacesVe); _interfacesTrunk = ImmutableMap.copyOf(_interfacesTrunk); _natPools = ImmutableMap.copyOf(_natPools); _servers = ImmutableMap.copyOf(_servers); _serviceGroups = ImmutableMap.copyOf(_serviceGroups); _staticRoutes = ImmutableMap.copyOf(_staticRoutes); _virtualServers = ImmutableMap.copyOf(_virtualServers); _vlans = ImmutableMap.copyOf(_vlans); } /** Map of interface names to interface. Used for converting aggregate interfaces. */ @Nullable private transient Map<String, Interface> _ifaceNametoIface; @Nonnull private Map<String, AccessList> _accessLists; @Nullable private BgpProcess _bgpProcess; private Configuration _c; private @Nonnull Map<Ip, FloatingIp> _floatingIps; private @Nullable Ha _ha; @Nonnull private Map<String, HealthMonitor> _healthMonitors; private String _hostname; /** Hostname as it appears in the config, uncanonicalized */ private String _rawHostname; @Nonnull private Map<Integer, Interface> _interfacesEthernet; @Nonnull private Map<Integer, Interface> _interfacesLoopback; @Nonnull private Map<Integer, TrunkInterface> _interfacesTrunk; @Nonnull private Map<Integer, Interface> _interfacesVe; @Nullable private Integer _majorVersionNumber; @Nonnull private Map<String, NatPool> _natPools; @Nonnull private Map<String, Server> _servers; @Nonnull private Map<String, ServiceGroup> _serviceGroups; @Nonnull private Map<Prefix, StaticRouteManager> _staticRoutes; @Nonnull private Map<String, VirtualServer> _virtualServers; @Nullable private VrrpA _vrrpA; @Nonnull private Map<Integer, Vlan> _vlans; private ConfigurationFormat _vendor; }
prizeGit/graphql-demo
src/main/java/io/redintro/graphqldemo/port/in/BookViewPort.java
package io.redintro.graphqldemo.port.in;

import io.redintro.graphqldemo.domain.Book;

import java.util.List;
import java.util.UUID;

/**
 * Inbound port exposing CRUD-style operations over {@link Book}
 * aggregates (hexagonal-architecture "driving" port consumed by the
 * GraphQL adapter, per the package name).
 */
public interface BookViewPort {

    /** Returns all known books; an empty list when none exist. */
    List<Book> showAll();

    /**
     * Stores the given book.
     *
     * @param book the book to persist
     * @return the persisted book (presumably with any generated fields
     *     populated — confirm against the implementing adapter)
     */
    Book create(Book book);

    /**
     * Looks up a single book by its identifier.
     *
     * @param id the book's UUID
     * @return the matching book; behavior when the id is unknown (null vs.
     *     exception) is implementation-defined — confirm with implementors
     */
    Book read(UUID id);

    /**
     * Replaces the stored book matching {@code book}'s identity.
     *
     * @param book the new state of the book
     * @return the updated book
     */
    Book update(Book book);

    /**
     * Removes the book with the given identifier.
     *
     * @param id the book's UUID
     * @return {@code true} if a book was removed, {@code false} otherwise
     */
    boolean delete(UUID id);

    /** Returns the number of stored books. */
    long count();
}
Nicanor008/JS-Algorithms-Challenge
hackerrank/InterviewPreparationKit/Strings/making-anagrams.js
// https://www.hackerrank.com/challenges/making-anagrams/problem

const s1 = "aaa";
const s2 = "bbb";

// Builds a character -> occurrence-count map for a string.
const countChars = (str) => {
  const counts = {};
  for (let i = 0; i < str.length; i++) {
    counts[str[i]] = (counts[str[i]] || 0) + 1;
  }
  return counts;
};

/**
 * Returns true when s1 and s2 are anagrams of each other (same characters,
 * same multiplicities).
 *
 * Fixes in this revision: the original used `output === false` / `output
 * === true` (no-op comparisons where assignments were intended) and compared
 * `Object.keys(...)` arrays with `!==`, which compares object identity and
 * is always true for distinct arrays — so the function always returned false.
 */
let anagramStrings = (s1, s2) => {
  // Anagrams must have identical lengths.
  if (s1.length !== s2.length) {
    return false;
  }
  const counts1 = countChars(s1);
  const counts2 = countChars(s2);
  // Every character must occur the same number of times in both strings.
  // Lengths are equal, so checking counts1's keys against counts2 suffices.
  return Object.keys(counts1).every((ch) => counts1[ch] === counts2[ch]);
};

/**
 * Removes, in place, every string that is an anagram of an earlier string in
 * the array, and returns the (mutated) array.
 *
 * Fixes in this revision: the original looped with `i <= strs.length`
 * (overrunning the array), only compared adjacent pairs, and had a
 * two-element special case whose both branches returned the input unchanged.
 */
function makeAnagram(strs) {
  for (let i = 0; i < strs.length; i++) {
    // Walk backwards so splice() does not shift elements we have yet to visit.
    for (let j = strs.length - 1; j > i; j--) {
      if (anagramStrings(strs[i], strs[j])) {
        strs.splice(j, 1);
      }
    }
  }
  return strs;
}

// const ar = ["aaa", "bbb", "ccc"];
const ar = ["code", "aaagmnrs", "anagrams", "doce"];
console.log(makeAnagram(ar));
// console.log(anagramStrings(s1, s2));
Sealdolphin/ParanoiaApp
src/test/java/paranoia/network/interfaces/OptimizeListenerMock.java
<reponame>Sealdolphin/ParanoiaApp
package paranoia.network.interfaces;

/**
 * Empty test double used by the network-interface tests.
 *
 * NOTE(review): the class currently declares no members — presumably it is a
 * placeholder for a future listener interface implementation; confirm against
 * the tests that reference it.
 */
public class OptimizeListenerMock {
}
io7m/jsx
com.io7m.jsx.parser/src/main/java/com/io7m/jsx/lexer/JSXLexer.java
<gh_stars>0
/*
 * Copyright © 2016 <<EMAIL>> http://io7m.com
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
 * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package com.io7m.jsx.lexer;

import com.io7m.jeucreader.UnicodeCharacterReaderPushBackType;
import com.io7m.jlexing.core.LexicalPosition;
import com.io7m.jlexing.core.LexicalPositionMutable;
import com.io7m.jsx.api.lexer.JSXLexerBareCarriageReturnException;
import com.io7m.jsx.api.lexer.JSXLexerComment;
import com.io7m.jsx.api.lexer.JSXLexerConfigurationType;
import com.io7m.jsx.api.lexer.JSXLexerException;
import com.io7m.jsx.api.lexer.JSXLexerInvalidCodePointException;
import com.io7m.jsx.api.lexer.JSXLexerNewLinesInStringsException;
import com.io7m.jsx.api.lexer.JSXLexerNotHexCharException;
import com.io7m.jsx.api.lexer.JSXLexerType;
import com.io7m.jsx.api.lexer.JSXLexerUnexpectedEOFException;
import com.io7m.jsx.api.lexer.JSXLexerUnknownEscapeCodeException;
import com.io7m.jsx.api.tokens.TokenComment;
import com.io7m.jsx.api.tokens.TokenEOF;
import com.io7m.jsx.api.tokens.TokenLeftParenthesis;
import com.io7m.jsx.api.tokens.TokenLeftSquare;
import com.io7m.jsx.api.tokens.TokenQuotedString;
import com.io7m.jsx.api.tokens.TokenRightParenthesis;
import com.io7m.jsx.api.tokens.TokenRightSquare;
import com.io7m.jsx.api.tokens.TokenSymbol;
import com.io7m.jsx.api.tokens.TokenType;
import java.io.IOException;
import java.net.URI;
import java.util.Objects;
import java.util.Optional;

/**
 * The default implementation of the {@link JSXLexerType} type.
 *
 * The lexer is a hand-written state machine ({@link State}) that reads code
 * points one at a time from a push-back reader and produces s-expression
 * tokens: parentheses, optional square brackets, quoted strings (with escape
 * sequences), bare symbols, and comments.
 */

public final class JSXLexer implements JSXLexerType
{
  // Accumulates the characters of the token currently being built
  // (string, symbol, or comment text).
  private final StringBuilder buffer;
  // Lexer options: square-bracket support, newline-in-string policy,
  // comment characters, starting line, file URI.
  private final JSXLexerConfigurationType config;
  // Push-back capable source of code points; push-back is used to return
  // a delimiter that terminates a symbol.
  private final UnicodeCharacterReaderPushBackType reader;
  // Position of the character most recently read.
  private final LexicalPositionMutable<URI> position;
  // Position at which the token currently in `buffer` started.
  private final LexicalPositionMutable<URI> buffer_position;
  // Current state of the lexing state machine.
  private State state;
  // The comment style in effect while in STATE_IN_COMMENT.
  private JSXLexerComment buffer_comment;

  private JSXLexer(
    final JSXLexerConfigurationType c,
    final UnicodeCharacterReaderPushBackType r)
  {
    this.config = Objects.requireNonNull(c, "Configuration");
    this.reader = Objects.requireNonNull(r, "Reader");
    this.state = State.STATE_INITIAL;
    this.buffer = new StringBuilder(256);

    // Both positions start at the configured first line, column 0.
    final int start = c.startAtLine();
    this.position = LexicalPositionMutable.create(start, 0, Optional.empty());
    this.buffer_position = LexicalPositionMutable.create(start, 0, Optional.empty());
    this.position.setFile(c.file());
    this.buffer_position.setFile(c.file());
  }

  /**
   * Construct a new lexer.
   *
   * @param c The lexer configuration
   * @param r The unicode character reader
   *
   * @return A new lexer
   */

  public static JSXLexerType newLexer(
    final JSXLexerConfigurationType c,
    final UnicodeCharacterReaderPushBackType r)
  {
    return new JSXLexer(c, r);
  }

  // Advance the position to the start of the next line and reset the state.
  private void completeNewline()
  {
    this.state = State.STATE_INITIAL;
    this.position.setLine(this.position.line() + 1);
    this.position.setColumn(0);
  }

  // Finish the quoted string in `buffer` and emit its token (positioned at
  // the string's start, not at the closing quote).
  private TokenType completeQuotedString()
  {
    this.state = State.STATE_INITIAL;
    final String text = Objects.requireNonNull(this.buffer.toString(), "Text");
    this.buffer.setLength(0);
    return new TokenQuotedString(this.buffer_position.toImmutable(), text);
  }

  // Finish the symbol in `buffer` and emit its token.
  private TokenType completeSymbol()
  {
    this.state = State.STATE_INITIAL;
    final String text = Objects.requireNonNull(this.buffer.toString(), "Text");
    this.buffer.setLength(0);
    return new TokenSymbol(this.buffer_position.toImmutable(), text);
  }

  // Finish the comment in `buffer` and emit its token, tagged with the
  // comment style that opened it.
  private TokenType completeComment()
  {
    this.state = State.STATE_INITIAL;
    final String text = Objects.requireNonNull(this.buffer.toString(), "Text");
    this.buffer.setLength(0);
    return new TokenComment(
      this.buffer_position.toImmutable(), this.buffer_comment, text);
  }

  // Error: a carriage return that is not followed by a line feed.
  private JSXLexerBareCarriageReturnException errorBareCarriageReturn()
  {
    final StringBuilder sb = new StringBuilder(32);
    sb.append("Bare carriage return (U+000D) in source");
    final String s = Objects.requireNonNull(sb.toString(), "Message");
    return new JSXLexerBareCarriageReturnException(
      this.snapshotPosition(), s);
  }

  // Error: \U escape decoded to a value that is not a valid code point.
  private JSXLexerInvalidCodePointException errorInvalidCodePoint(
    final long cp)
  {
    final StringBuilder sb = new StringBuilder(32);
    sb.append("Invalid code point given in escape (U+");
    sb.append(Long.toUnsignedString(cp, 16));
    sb.append(')');
    final String s = Objects.requireNonNull(sb.toString(), "Message");
    return new JSXLexerInvalidCodePointException(this.snapshotPosition(), s);
  }

  // Error: literal newline inside a quoted string when the configuration
  // forbids them.
  private JSXLexerNewLinesInStringsException errorNewLinesNotInQuotedStrings()
  {
    return new JSXLexerNewLinesInStringsException(
      this.snapshotPosition(),
      "Lexer configuration does not permit newlines (U+000A or U+000D) in quoted strings");
  }

  // Error: a non-hex character inside a \u or \U escape.
  private JSXLexerNotHexCharException errorNotHexChar(
    final int c)
  {
    final StringBuilder sb = new StringBuilder(16);
    sb.append("Expected a character [0123456789aAbBcCdDeEfF] (got ");
    sb.appendCodePoint(c);
    sb.append(')');
    final String s = Objects.requireNonNull(sb.toString(), "Message");
    return new JSXLexerNotHexCharException(this.snapshotPosition(), s);
  }

  // Error: end of input in a context where more characters were required
  // (inside a string, an escape, or after a carriage return).
  private JSXLexerUnexpectedEOFException errorUnexpectedEOF()
  {
    final StringBuilder sb = new StringBuilder(32);
    sb.append("Unexpected EOF");
    final String s = Objects.requireNonNull(sb.toString(), "Message");
    return new JSXLexerUnexpectedEOFException(this.snapshotPosition(), s);
  }

  // Error: a backslash escape other than \" \\ \r \n \t \u \U.
  private JSXLexerUnknownEscapeCodeException errorUnknownEscape(
    final int c)
  {
    final StringBuilder sb = new StringBuilder(64);
    sb.append("Unknown escape code (");
    sb.appendCodePoint(c);
    sb.append(')');
    final String s = Objects.requireNonNull(sb.toString(), "Message");
    return new JSXLexerUnknownEscapeCodeException(
      this.snapshotPosition(), s);
  }

  // Consume the character after a backslash inside a quoted string and
  // append the escaped character(s) to `buffer`.
  private void parseEscape()
    throws JSXLexerException, IOException
  {
    final int c = this.readCharNotEOF();

    if (c == (int) '"') {
      this.buffer.append('"');
      return;
    }
    if (c == (int) '\\') {
      this.buffer.append('\\');
      return;
    }
    if (c == (int) 'r') {
      this.buffer.append('\r');
      return;
    }
    if (c == (int) 'n') {
      this.buffer.append('\n');
      return;
    }
    if (c == (int) 't') {
      this.buffer.append('\t');
      return;
    }
    if (c == (int) 'u') {
      this.parseUnicode4();
      return;
    }
    if (c == (int) 'U') {
      this.parseUnicode8();
      return;
    }

    throw this.errorUnknownEscape(c);
  }

  // \uXXXX: four hex digits, appended as a single code point.
  private void parseUnicode4()
    throws JSXLexerException, IOException
  {
    final StringBuilder hexbuf = new StringBuilder(16);
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    final String hex = Objects.requireNonNull(hexbuf.toString(), "Hex code");
    final int code = Integer.parseInt(hex, 16);
    this.buffer.appendCodePoint(code);
  }

  // \UXXXXXXXX: eight hex digits; validated against the Unicode code point
  // range before being appended.
  private void parseUnicode8()
    throws JSXLexerException, IOException
  {
    final StringBuilder hexbuf = new StringBuilder(16);
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    hexbuf.appendCodePoint(this.readHexCharNotEOF());
    final String hex = Objects.requireNonNull(hexbuf.toString(), "Hex code");
    final long code = Long.parseUnsignedLong(hex, 16);
    final int cp = (int) code;
    if (!Character.isValidCodePoint(cp)) {
      throw this.errorInvalidCodePoint(code);
    }
    this.buffer.appendCodePoint(cp);
  }

  // Read one code point, returning -1 at EOF; advances the column counter
  // on every successful read.
  private int readChar()
    throws IOException
  {
    final int c = this.reader.readCodePoint();
    if (c != -1) {
      this.position.setColumn(this.position.column() + 1);
    }
    return c;
  }

  // Like readChar() but EOF is an error.
  private int readCharNotEOF()
    throws IOException, JSXLexerUnexpectedEOFException
  {
    final int c = this.readChar();
    if (c == -1) {
      throw this.errorUnexpectedEOF();
    }
    return c;
  }

  /*
   * It is not realistically possible to reduce the cyclomatic complexity
   * of this function, so Checkstyle must be disabled.
   */

  // CHECKSTYLE:OFF
  // Read one code point that must be a hex digit (either case).
  private int readHexCharNotEOF()
    throws JSXLexerException, IOException
  {
    final int c = this.readCharNotEOF();
    switch (c) {
      case '0':
      case '1':
      case '2':
      case '3':
      case '4':
      case '5':
      case '6':
      case '7':
      case '8':
      case '9':
      case 'a':
      case 'A':
      case 'b':
      case 'B':
      case 'c':
      case 'C':
      case 'd':
      case 'D':
      case 'e':
      case 'E':
      case 'f':
      case 'F':
        return c;
      default:
        throw this.errorNotHexChar(c);
    }
  }
  // CHECKSTYLE:ON

  // Record the start position of a quoted string and reset the buffer.
  private void startQuotedString()
  {
    this.state = State.STATE_IN_STRING_QUOTED;
    this.buffer_comment = null;
    this.buffer_position.setColumn(this.position.column());
    this.buffer_position.setLine(this.position.line());
    this.buffer.setLength(0);
  }

  // Record the start position of a symbol; `c` is its first character.
  private void startSymbol(
    final int c)
  {
    this.state = State.STATE_IN_SYMBOL;
    this.buffer_comment = null;
    this.buffer_position.setColumn(this.position.column());
    this.buffer_position.setLine(this.position.line());
    this.buffer.setLength(0);
    this.buffer.appendCodePoint(c);
  }

  // Record the start position of a comment; the opening comment character
  // itself is not buffered.
  private void startComment(
    final JSXLexerComment comment)
  {
    this.state = State.STATE_IN_COMMENT;
    this.buffer_comment = comment;
    this.buffer_position.setColumn(this.position.column());
    this.buffer_position.setLine(this.position.line());
    this.buffer.setLength(0);
  }

  @Override
  public TokenType token()
    throws IOException, JSXLexerException
  {
    return this.tokenRead();
  }

  /*
   * It is not realistically possible to reduce the cyclomatic complexity
   * of this function, so Checkstyle must be disabled.
   */

  // CHECKSTYLE:OFF
  // Core state machine: loops reading characters until a complete token
  // (or EOF) is produced. A bare '\r' transitions through STATE_IN_CRLF,
  // which requires the following character to be '\n'.
  private TokenType tokenRead()
    throws IOException, JSXLexerException
  {
    while (true) {
      switch (this.state) {
        case STATE_IN_COMMENT: {
          final int c = this.readChar();
          // EOF or end-of-line terminates the comment.
          if (c == -1) {
            return this.completeComment();
          }
          if (c == (int) '\n') {
            this.completeNewline();
            return this.completeComment();
          }
          if (c == (int) '\r') {
            this.state = State.STATE_IN_CRLF;
            return this.completeComment();
          }
          this.buffer.appendCodePoint(c);
          continue;
        }

        case STATE_INITIAL: {
          final int c = this.readChar();
          if (c == -1) {
            return new TokenEOF(this.snapshotPosition());
          }
          if (c == (int) '\n') {
            this.completeNewline();
            continue;
          }
          if (c == (int) '\r') {
            this.state = State.STATE_IN_CRLF;
            continue;
          }
          if (c == (int) '"') {
            this.startQuotedString();
            continue;
          }
          if (this.appearsToBeComment(c)) {
            continue;
          }
          if (c == (int) '(') {
            return new TokenLeftParenthesis(this.snapshotPosition());
          }
          if (c == (int) ')') {
            return new TokenRightParenthesis(this.snapshotPosition());
          }
          // When square brackets are disabled, '[' and ']' fall through
          // and become ordinary symbol characters.
          if (c == (int) '[') {
            if (this.config.squareBrackets()) {
              return new TokenLeftSquare(this.snapshotPosition());
            }
          }
          if (c == (int) ']') {
            if (this.config.squareBrackets()) {
              return new TokenRightSquare(this.snapshotPosition());
            }
          }
          // NOTE(review): Character.isSpaceChar does not match '\t', so a
          // tab would start a symbol here — confirm whether that is
          // intended or whether isWhitespace was meant.
          if (Character.isSpaceChar(c)) {
            continue;
          }
          this.startSymbol(c);
          continue;
        }

        case STATE_IN_CRLF: {
          final int c = this.readCharNotEOF();
          if (c == (int) '\n') {
            this.completeNewline();
            continue;
          }
          throw this.errorBareCarriageReturn();
        }

        case STATE_IN_STRING_QUOTED: {
          final int c = this.readCharNotEOF();
          if (c == (int) '\\') {
            this.parseEscape();
            continue;
          }
          // Literal newlines inside strings are only allowed when the
          // configuration explicitly permits them.
          if ((c == (int) '\r') || (c == (int) '\n')) {
            if (!this.config.newlinesInQuotedStrings()) {
              throw this.errorNewLinesNotInQuotedStrings();
            }
          }
          if (c == (int) '"') {
            return this.completeQuotedString();
          }
          this.buffer.appendCodePoint(c);
          continue;
        }

        case STATE_IN_SYMBOL: {
          final int c = this.readChar();
          if (c == -1) {
            return this.completeSymbol();
          }
          if (c == (int) '\n') {
            this.completeNewline();
            return this.completeSymbol();
          }
          if (c == (int) '\r') {
            this.state = State.STATE_IN_CRLF;
            return this.completeSymbol();
          }
          // A delimiter terminates the symbol; it is pushed back so the
          // next call re-reads it as its own token.
          if (c == (int) '"') {
            final TokenType s = this.completeSymbol();
            this.reader.pushCodePoint(c);
            return s;
          }
          if (c == (int) '(') {
            this.reader.pushCodePoint(c);
            return this.completeSymbol();
          }
          if (c == (int) ')') {
            this.reader.pushCodePoint(c);
            return this.completeSymbol();
          }
          if (c == (int) '[') {
            if (this.config.squareBrackets()) {
              this.reader.pushCodePoint(c);
              return this.completeSymbol();
            }
          }
          if (c == (int) ']') {
            if (this.config.squareBrackets()) {
              this.reader.pushCodePoint(c);
              return this.completeSymbol();
            }
          }
          if (Character.isSpaceChar(c)) {
            return this.completeSymbol();
          }
          this.buffer.appendCodePoint(c);
        }
      }
    }
  }
  // CHECKSTYLE:ON

  // If `c` opens one of the configured comment styles, enter comment state.
  private boolean appearsToBeComment(
    final int c)
  {
    for (final JSXLexerComment comment : this.config.comments()) {
      if (comment.token() == c) {
        this.startComment(comment);
        return true;
      }
    }
    return false;
  }

  // Immutable copy of the current read position, used for token/error
  // positions that refer to the character just read.
  private LexicalPosition<URI> snapshotPosition()
  {
    return this.position.toImmutable();
  }

  // Lexing states of the machine driven by tokenRead().
  private enum State
  {
    STATE_IN_CRLF,
    STATE_IN_STRING_QUOTED,
    STATE_IN_SYMBOL,
    STATE_IN_COMMENT,
    STATE_INITIAL
  }
}
TheChosenEvilOne/MindustryFork
core/src/io/anuke/mindustry/world/blocks/WeaponBlocks.java
<gh_stars>1-10
package io.anuke.mindustry.world.blocks;

import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.math.Vector2;
import io.anuke.mindustry.Vars;
import io.anuke.mindustry.entities.BulletType;
import io.anuke.mindustry.entities.effect.Fx;
import io.anuke.mindustry.entities.effect.TeslaOrb;
import io.anuke.mindustry.resource.Item;
import io.anuke.mindustry.world.Block;
import io.anuke.mindustry.world.Tile;
import io.anuke.mindustry.world.blocks.types.defense.LaserTurret;
import io.anuke.mindustry.world.blocks.types.defense.PowerTurret;
import io.anuke.mindustry.world.blocks.types.defense.Turret;
import io.anuke.ucore.core.Effects;
import io.anuke.ucore.util.Angles;
import io.anuke.ucore.util.Mathf;

/**
 * Static registry of all turret block definitions. Each field is an
 * anonymous {@link Turret}/{@link LaserTurret}/{@link PowerTurret} subclass
 * whose instance initializer sets the turret's stats; a few override
 * {@code shoot} for custom firing patterns.
 */
public class WeaponBlocks{
    // Basic stone-ammo turret.
    public static Block turret = new Turret("turret"){
        {
            formalName = "turret";
            range = 52;
            reload = 15f;
            bullet = BulletType.stone;
            health = 50;
            ammo = Item.stone;
            fullDescription = "A basic, cheap turret. Uses stone for ammo. Has slightly more range than the double-turret.";
        }
    },

    // Fires two parallel bullets per shot.
    doubleturret = new Turret("doubleturret"){
        {
            formalName = "double turret";
            range = 44;
            reload = 13f;
            bullet = BulletType.stone;
            ammo = Item.stone;
            // NOTE(review): health is assigned twice; the second assignment
            // (50) wins — presumably one of these is a leftover. Confirm
            // the intended value before removing either line.
            health = 55;
            health = 50;
            fullDescription = "A slightly more powerful version of the turret. Uses stone for ammo. Does significantly more damage, but has a lower range. Shoots two bullets.";
        }

        @Override
        protected void shoot(Tile tile){
            TurretEntity entity = tile.entity();

            // Offset each bullet 2 units to either side of the barrel.
            Angles.vector.set(4, -2).rotate(entity.rotation);
            bullet(tile, entity.rotation);
            Angles.vector.set(4, 2).rotate(entity.rotation);
            bullet(tile, entity.rotation);
        }
    },

    // Fast-firing iron-ammo turret.
    machineturret = new Turret("machineturret"){
        {
            formalName = "gattling turret";
            range = 65;
            reload = 7f;
            bullet = BulletType.iron;
            ammo = Item.iron;
            health = 65;
            fullDescription = "A standard all-around turret. Uses iron for ammo. Has a fast fire rate with decent damage.";
        }
    },

    // Shotgun-style spread turret.
    shotgunturret = new Turret("shotgunturret"){
        {
            formalName = "splitter turret";
            range = 50;
            reload = 30f;
            bullet = BulletType.iron;
            ammo = Item.iron;
            health = 70;
            // NOTE(review): shots is 5 but the description says "7 bullets";
            // one of the two is stale — confirm which.
            shots = 5;
            inaccuracy = 15f;
            shotDelayScale = 0.7f;
            fullDescription = "A standard turret. Uses iron for ammo. Shoots a spread of 7 bullets. " +
                    "Lower range, but higher damage output than the gattling turret.";
        }
    },

    // Short-range flamethrower.
    flameturret = new Turret("flameturret"){
        {
            formalName = "flamer turret";
            range = 35f;
            reload = 5f;
            bullet = BulletType.flame;
            ammo = Item.coal;
            health = 90;
            fullDescription = "Advanced close-range turret. Uses coal for ammo. Has very low range, but very high damage and damage. " +
                    "Good for close quarters. Recommended to be used behind walls.";
        }
    },

    // Long-range single-shot railgun.
    sniperturret = new Turret("sniperturret"){
        {
            shootsound = "railgun";
            formalName = "railgun turret";
            range = 120;
            reload = 50f;
            bullet = BulletType.sniper;
            ammo = Item.steel;
            health = 70;
            shootEffect = Fx.railshot;
            fullDescription = "Advanced long-range turret. Uses steel for ammo. Very high damage, but low fire rate. " +
                    "Expensive to use, but can be placed far away from enemy lines due to its range.";
        }
    },

    // Slow, splash-damage shell turret.
    mortarturret = new Turret("mortarturret"){
        {
            shootsound = "bigshot";
            rotatespeed = 0.1f;
            formalName = "flak turret";
            range = 120;
            reload = 100f;
            bullet = BulletType.shell;
            ammo = Item.coal;
            ammoMultiplier = 5;
            health = 110;
            fullDescription = "Advanced splash-damage turret. Uses coal for ammo. " +
                    "Very slow fire rate and bullets, but very high single-target and splash damage. " +
                    "Useful for large crowds of enemies.";
            shootEffect = Fx.mortarshot;
            shootShake = 2f;
        }
    },

    // Power-consuming hitscan beam turret.
    laserturret = new LaserTurret("laserturret"){
        {
            shootsound = "laser";
            beamColor = Color.SKY;
            formalName = "laser turret";
            range = 60;
            reload = 4f;
            damage = 10;
            health = 110;
            powerUsed = 0.2f;
            fullDescription = "Advanced single-target turret. Uses power. Good medium-range all-around turret. " +
                    "Single-target only. Never misses.";
        }
    },

    // Power-consuming chain-lightning turret; spawns a TeslaOrb instead of
    // a bullet.
    teslaturret = new PowerTurret("waveturret"){
        {
            shootsound = "tesla";
            formalName = "tesla turret";
            range = 70;
            reload = 15f;
            bullet = BulletType.shell;
            health = 140;
            fullDescription = "Advanced multi-target turret. Uses power. Medium range. Never misses." +
                    "Average to low damage, but can hit multiple enemies simultaneously with chain lighting.";
        }

        @Override
        public void shoot(Tile tile){
            TurretEntity entity = tile.entity();
            Angles.translation(entity.rotation, 4);

            // Hit count scales with the difficulty multiplier.
            new TeslaOrb(tile.worldx() + Angles.x(), tile.worldy() + Angles.y(),
                    range, (int)(9*Vars.multiplier)).add();
        }
    },

    // Upgraded flamethrower.
    plasmaturret = new Turret("plasmaturret"){
        {
            shootsound = "flame2";
            inaccuracy = 7f;
            formalName = "plasma turret";
            range = 60f;
            reload = 3f;
            bullet = BulletType.plasmaflame;
            ammo = Item.coal;
            health = 180;
            ammoMultiplier = 40;
            fullDescription = "Highly advanced version of the flamer turret. Uses coal as ammo. " +
                    "Very high damage, low to medium range.";
        }
    },

    // 2x2 rapid-fire turret firing two offset slugs per shot.
    chainturret = new Turret("chainturret"){
        {
            shootsound = "bigshot";
            inaccuracy = 8f;
            formalName = "chain turret";
            range = 80f;
            reload = 8f;
            bullet = BulletType.chain;
            ammo = Item.uranium;
            health = 430;
            width = height = 2;
            shootCone = 9f;
            ammoMultiplier = 8;
            shots = 2;
            fullDescription = "The ultimate rapid-fire turret. Uses uranium as ammo. Shoots large slugs at a high fire rate. " +
                    "Medium range. Spans multiple tiles. Extremely tough.";
            shootEffect = Fx.chainshot;
        }

        //TODO specify turret shoot effect in turret instead of doing it manually
        @Override
        protected void shoot(Tile tile){
            TurretEntity entity = tile.entity();
            Vector2 offset = getPlaceOffset();

            float len = 8;
            float space = 3.5f;

            // Two barrels: i = -1 and i = 0, offset to either side.
            for(int i = -1; i < 1; i ++){
                Angles.vector.set(len, Mathf.sign(i) * space).rotate(entity.rotation);
                bullet(tile, entity.rotation);
                Effects.effect(shootEffect, tile.worldx() + Angles.x() + offset.x,
                        tile.worldy()+ Angles.y() + offset.y, entity.rotation);
            }

            Effects.shake(1f, 1f, tile.worldx(), tile.worldy());
        }
    },

    // 3x3 heavy splash-damage cannon.
    titanturret = new Turret("titancannon"){
        {
            shootsound = "blast";
            formalName = "<NAME>non";
            range = 120f;
            reload = 23f;
            bullet = BulletType.titanshell;
            ammo = Item.uranium;
            health = 800;
            ammoMultiplier = 4;
            width = height = 3;
            rotatespeed = 0.07f;
            shootCone = 9f;
            fullDescription = "The ultimate long-range turret. Uses uranium as ammo. Shoots large splash-damage shells at a medium rate of fire. " +
                    "Long range. Spans multiple tiles. Extremely tough.";
            shootEffect = Fx.titanshot;
            shootShake = 3f;
        }
    };
}
fighting41love/TextAttack
textattack/models/classification/bert/bert_for_imdb_sentiment_classification.py
<filename>textattack/models/classification/bert/bert_for_imdb_sentiment_classification.py<gh_stars>1-10 from textattack.models.helpers import BERTForClassification class BERTForIMDBSentimentClassification(BERTForClassification): """ BERT fine-tuned on the IMDb Sentiment dataset for sentiment classification. """ MODEL_PATH_CASED = 'models/classification/bert/imdb-cased' MODEL_PATH_UNCASED = 'models/classification/bert/imdb-uncased' def __init__(self, cased=False): if cased: path = BERTForIMDBSentimentClassification.MODEL_PATH_CASED else: path = BERTForIMDBSentimentClassification.MODEL_PATH_UNCASED super().__init__(path)
rui278/rui278.github.io
maliCL/search/pages_73.js
<reponame>rui278/rui278.github.io
// Doxygen-generated search index: each entry maps a lowercase search key to
// its display name and target page(s). Do not edit by hand — regenerate
// with Doxygen instead.
var searchData=
[
  ['sgemm',['SGEMM',['../sgemm_tutorial.html',1,'tutorials']]],
  ['sobel_20filter',['Sobel Filter',['../sobel_tutorial.html',1,'tutorials']]],
  ['support',['Support',['../support.html',1,'']]]
];
richard2018/refactoring
src/main/java/replace/parameter/with/explicit/methods/Refactor.java
package replace.parameter.with.explicit.methods; public class Refactor { int _height; int _width; void setValue(String name, int value) { if (name.equals("height")) { _height = value; return; } if (name.equals("width")) { _width = value; return; } // Assert.shouldNeverReachHere(); } // --> void setHeight(int arg) { _height = arg; } void setWidth(int arg) { _width = arg; } // --- /*static final int ENGINEER = 0; static final int SALESMAN = 1; static final int MANAGER = 2; static Employee create(int type) { switch (type) { case ENGINEER: return new Engineer(); case SALESMAN: return new Salesman(); case MANAGER: return new Manager(); default: throw new IllegalArgumentException("Incorrect type code value"); } } // --> static Employee createEngineer() { return new Engineer(); } static Employee createSalesman() { return new Salesman(); } static Employee createManager() { return new Manager(); }*/ }
navalsynergy/jitsi
react/features/video-menu/components/web/VideoMenu.js
<reponame>navalsynergy/jitsi<gh_stars>0 // @flow import React from 'react'; /** * The type of the React {@code Component} props of {@link VideoMenu}. */ type Props = { /** * The components to place as the body of the {@code VideoMenu}. */ children: React$Node, /** * The id attribute to be added to the component's DOM for retrieval when * querying the DOM. Not used directly by the component. */ id: string }; /** * Click handler. * * @param {SyntheticEvent} event - The click event. * @returns {void} */ function onClick(event) { // If the event is propagated to the thumbnail container the participant will be pinned. That's why the propagation // needs to be stopped. event.stopPropagation(); } /** * React {@code Component} responsible for displaying other components as a menu * for manipulating participant state. * * @param {Props} props - The component's props. * @returns {Component} */ export default function VideoMenu(props: Props) { return ( <ul className = 'popupmenu' id = { props.id } onClick = { onClick }> { props.children } </ul> ); }
nms-htc/cmts-mo-portlet
docroot/WEB-INF/src/com/cmcti/cmts/domain/model/impl/MerchantModelImpl.java
<reponame>nms-htc/cmts-mo-portlet /** * Copyright (c) 2000-present Liferay, Inc. All rights reserved. * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. * * This library is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more * details. */ package com.cmcti.cmts.domain.model.impl; import com.cmcti.cmts.domain.model.Merchant; import com.cmcti.cmts.domain.model.MerchantModel; import com.cmcti.cmts.domain.model.MerchantSoap; import com.liferay.portal.kernel.bean.AutoEscapeBeanHandler; import com.liferay.portal.kernel.exception.SystemException; import com.liferay.portal.kernel.json.JSON; import com.liferay.portal.kernel.util.GetterUtil; import com.liferay.portal.kernel.util.ProxyUtil; import com.liferay.portal.kernel.util.StringBundler; import com.liferay.portal.kernel.util.StringPool; import com.liferay.portal.model.CacheModel; import com.liferay.portal.model.impl.BaseModelImpl; import com.liferay.portal.service.ServiceContext; import com.liferay.portal.util.PortalUtil; import com.liferay.portlet.expando.model.ExpandoBridge; import com.liferay.portlet.expando.util.ExpandoBridgeFactoryUtil; import java.io.Serializable; import java.sql.Types; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; /** * The base model implementation for the Merchant service. Represents a row in the &quot;CMTS_Merchant&quot; database table, with each column mapped to a property of this class. 
* * <p> * This implementation and its corresponding interface {@link com.cmcti.cmts.domain.model.MerchantModel} exist only as a container for the default property accessors generated by ServiceBuilder. Helper methods and all application logic should be put in {@link MerchantImpl}. * </p> * * @author richard * @see MerchantImpl * @see com.cmcti.cmts.domain.model.Merchant * @see com.cmcti.cmts.domain.model.MerchantModel * @generated */ @JSON(strict = true) public class MerchantModelImpl extends BaseModelImpl<Merchant> implements MerchantModel { /* * NOTE FOR DEVELOPERS: * * Never modify or reference this class directly. All methods that expect a merchant model instance should use the {@link com.cmcti.cmts.domain.model.Merchant} interface instead. */ public static final String TABLE_NAME = "CMTS_Merchant"; public static final Object[][] TABLE_COLUMNS = { { "merchantId", Types.BIGINT }, { "groupId", Types.BIGINT }, { "companyId", Types.BIGINT }, { "userId", Types.BIGINT }, { "userName", Types.VARCHAR }, { "createDate", Types.TIMESTAMP }, { "modifiedDate", Types.TIMESTAMP }, { "title", Types.VARCHAR }, { "code_", Types.VARCHAR }, { "parentId", Types.BIGINT }, { "parentCode", Types.VARCHAR }, { "description", Types.VARCHAR } }; public static final String TABLE_SQL_CREATE = "create table CMTS_Merchant (merchantId LONG not null primary key,groupId LONG,companyId LONG,userId LONG,userName VARCHAR(75) null,createDate DATE null,modifiedDate DATE null,title VARCHAR(75) null,code_ VARCHAR(75) null,parentId LONG,parentCode VARCHAR(75) null,description VARCHAR(75) null)"; public static final String TABLE_SQL_DROP = "drop table CMTS_Merchant"; public static final String ORDER_BY_JPQL = " ORDER BY merchant.merchantId ASC"; public static final String ORDER_BY_SQL = " ORDER BY CMTS_Merchant.merchantId ASC"; public static final String DATA_SOURCE = "liferayDataSource"; public static final String SESSION_FACTORY = "liferaySessionFactory"; public static final String TX_MANAGER = 
"liferayTransactionManager"; public static final boolean ENTITY_CACHE_ENABLED = GetterUtil.getBoolean(com.liferay.util.service.ServiceProps.get( "value.object.entity.cache.enabled.com.cmcti.cmts.domain.model.Merchant"), true); public static final boolean FINDER_CACHE_ENABLED = GetterUtil.getBoolean(com.liferay.util.service.ServiceProps.get( "value.object.finder.cache.enabled.com.cmcti.cmts.domain.model.Merchant"), true); public static final boolean COLUMN_BITMASK_ENABLED = GetterUtil.getBoolean(com.liferay.util.service.ServiceProps.get( "value.object.column.bitmask.enabled.com.cmcti.cmts.domain.model.Merchant"), true); public static long CODE_COLUMN_BITMASK = 1L; public static long USERID_COLUMN_BITMASK = 2L; public static long MERCHANTID_COLUMN_BITMASK = 4L; /** * Converts the soap model instance into a normal model instance. * * @param soapModel the soap model instance to convert * @return the normal model instance */ public static Merchant toModel(MerchantSoap soapModel) { if (soapModel == null) { return null; } Merchant model = new MerchantImpl(); model.setMerchantId(soapModel.getMerchantId()); model.setGroupId(soapModel.getGroupId()); model.setCompanyId(soapModel.getCompanyId()); model.setUserId(soapModel.getUserId()); model.setUserName(soapModel.getUserName()); model.setCreateDate(soapModel.getCreateDate()); model.setModifiedDate(soapModel.getModifiedDate()); model.setTitle(soapModel.getTitle()); model.setCode(soapModel.getCode()); model.setParentId(soapModel.getParentId()); model.setParentCode(soapModel.getParentCode()); model.setDescription(soapModel.getDescription()); return model; } /** * Converts the soap model instances into normal model instances. 
* * @param soapModels the soap model instances to convert * @return the normal model instances */ public static List<Merchant> toModels(MerchantSoap[] soapModels) { if (soapModels == null) { return null; } List<Merchant> models = new ArrayList<Merchant>(soapModels.length); for (MerchantSoap soapModel : soapModels) { models.add(toModel(soapModel)); } return models; } public static final long LOCK_EXPIRATION_TIME = GetterUtil.getLong(com.liferay.util.service.ServiceProps.get( "lock.expiration.time.com.cmcti.cmts.domain.model.Merchant")); public MerchantModelImpl() { } @Override public long getPrimaryKey() { return _merchantId; } @Override public void setPrimaryKey(long primaryKey) { setMerchantId(primaryKey); } @Override public Serializable getPrimaryKeyObj() { return _merchantId; } @Override public void setPrimaryKeyObj(Serializable primaryKeyObj) { setPrimaryKey(((Long)primaryKeyObj).longValue()); } @Override public Class<?> getModelClass() { return Merchant.class; } @Override public String getModelClassName() { return Merchant.class.getName(); } @Override public Map<String, Object> getModelAttributes() { Map<String, Object> attributes = new HashMap<String, Object>(); attributes.put("merchantId", getMerchantId()); attributes.put("groupId", getGroupId()); attributes.put("companyId", getCompanyId()); attributes.put("userId", getUserId()); attributes.put("userName", getUserName()); attributes.put("createDate", getCreateDate()); attributes.put("modifiedDate", getModifiedDate()); attributes.put("title", getTitle()); attributes.put("code", getCode()); attributes.put("parentId", getParentId()); attributes.put("parentCode", getParentCode()); attributes.put("description", getDescription()); return attributes; } @Override public void setModelAttributes(Map<String, Object> attributes) { Long merchantId = (Long)attributes.get("merchantId"); if (merchantId != null) { setMerchantId(merchantId); } Long groupId = (Long)attributes.get("groupId"); if (groupId != null) { 
setGroupId(groupId); } Long companyId = (Long)attributes.get("companyId"); if (companyId != null) { setCompanyId(companyId); } Long userId = (Long)attributes.get("userId"); if (userId != null) { setUserId(userId); } String userName = (String)attributes.get("userName"); if (userName != null) { setUserName(userName); } Date createDate = (Date)attributes.get("createDate"); if (createDate != null) { setCreateDate(createDate); } Date modifiedDate = (Date)attributes.get("modifiedDate"); if (modifiedDate != null) { setModifiedDate(modifiedDate); } String title = (String)attributes.get("title"); if (title != null) { setTitle(title); } String code = (String)attributes.get("code"); if (code != null) { setCode(code); } Long parentId = (Long)attributes.get("parentId"); if (parentId != null) { setParentId(parentId); } String parentCode = (String)attributes.get("parentCode"); if (parentCode != null) { setParentCode(parentCode); } String description = (String)attributes.get("description"); if (description != null) { setDescription(description); } } @JSON @Override public long getMerchantId() { return _merchantId; } @Override public void setMerchantId(long merchantId) { _merchantId = merchantId; } @JSON @Override public long getGroupId() { return _groupId; } @Override public void setGroupId(long groupId) { _groupId = groupId; } @JSON @Override public long getCompanyId() { return _companyId; } @Override public void setCompanyId(long companyId) { _companyId = companyId; } @JSON @Override public long getUserId() { return _userId; } @Override public void setUserId(long userId) { _columnBitmask |= USERID_COLUMN_BITMASK; if (!_setOriginalUserId) { _setOriginalUserId = true; _originalUserId = _userId; } _userId = userId; } @Override public String getUserUuid() throws SystemException { return PortalUtil.getUserValue(getUserId(), "uuid", _userUuid); } @Override public void setUserUuid(String userUuid) { _userUuid = userUuid; } public long getOriginalUserId() { return _originalUserId; } 
@JSON @Override public String getUserName() { if (_userName == null) { return StringPool.BLANK; } else { return _userName; } } @Override public void setUserName(String userName) { _userName = userName; } @JSON @Override public Date getCreateDate() { return _createDate; } @Override public void setCreateDate(Date createDate) { _createDate = createDate; } @JSON @Override public Date getModifiedDate() { return _modifiedDate; } @Override public void setModifiedDate(Date modifiedDate) { _modifiedDate = modifiedDate; } @JSON @Override public String getTitle() { if (_title == null) { return StringPool.BLANK; } else { return _title; } } @Override public void setTitle(String title) { _title = title; } @JSON @Override public String getCode() { if (_code == null) { return StringPool.BLANK; } else { return _code; } } @Override public void setCode(String code) { _columnBitmask |= CODE_COLUMN_BITMASK; if (_originalCode == null) { _originalCode = _code; } _code = code; } public String getOriginalCode() { return GetterUtil.getString(_originalCode); } @JSON @Override public long getParentId() { return _parentId; } @Override public void setParentId(long parentId) { _parentId = parentId; } @JSON @Override public String getParentCode() { if (_parentCode == null) { return StringPool.BLANK; } else { return _parentCode; } } @Override public void setParentCode(String parentCode) { _parentCode = parentCode; } @JSON @Override public String getDescription() { if (_description == null) { return StringPool.BLANK; } else { return _description; } } @Override public void setDescription(String description) { _description = description; } public long getColumnBitmask() { return _columnBitmask; } @Override public ExpandoBridge getExpandoBridge() { return ExpandoBridgeFactoryUtil.getExpandoBridge(getCompanyId(), Merchant.class.getName(), getPrimaryKey()); } @Override public void setExpandoBridgeAttributes(ServiceContext serviceContext) { ExpandoBridge expandoBridge = getExpandoBridge(); 
expandoBridge.setAttributes(serviceContext); } @Override public Merchant toEscapedModel() { if (_escapedModel == null) { _escapedModel = (Merchant)ProxyUtil.newProxyInstance(_classLoader, _escapedModelInterfaces, new AutoEscapeBeanHandler(this)); } return _escapedModel; } @Override public Object clone() { MerchantImpl merchantImpl = new MerchantImpl(); merchantImpl.setMerchantId(getMerchantId()); merchantImpl.setGroupId(getGroupId()); merchantImpl.setCompanyId(getCompanyId()); merchantImpl.setUserId(getUserId()); merchantImpl.setUserName(getUserName()); merchantImpl.setCreateDate(getCreateDate()); merchantImpl.setModifiedDate(getModifiedDate()); merchantImpl.setTitle(getTitle()); merchantImpl.setCode(getCode()); merchantImpl.setParentId(getParentId()); merchantImpl.setParentCode(getParentCode()); merchantImpl.setDescription(getDescription()); merchantImpl.resetOriginalValues(); return merchantImpl; } @Override public int compareTo(Merchant merchant) { long primaryKey = merchant.getPrimaryKey(); if (getPrimaryKey() < primaryKey) { return -1; } else if (getPrimaryKey() > primaryKey) { return 1; } else { return 0; } } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (!(obj instanceof Merchant)) { return false; } Merchant merchant = (Merchant)obj; long primaryKey = merchant.getPrimaryKey(); if (getPrimaryKey() == primaryKey) { return true; } else { return false; } } @Override public int hashCode() { return (int)getPrimaryKey(); } @Override public void resetOriginalValues() { MerchantModelImpl merchantModelImpl = this; merchantModelImpl._originalUserId = merchantModelImpl._userId; merchantModelImpl._setOriginalUserId = false; merchantModelImpl._originalCode = merchantModelImpl._code; merchantModelImpl._columnBitmask = 0; } @Override public CacheModel<Merchant> toCacheModel() { MerchantCacheModel merchantCacheModel = new MerchantCacheModel(); merchantCacheModel.merchantId = getMerchantId(); merchantCacheModel.groupId = getGroupId(); 
merchantCacheModel.companyId = getCompanyId(); merchantCacheModel.userId = getUserId(); merchantCacheModel.userName = getUserName(); String userName = merchantCacheModel.userName; if ((userName != null) && (userName.length() == 0)) { merchantCacheModel.userName = null; } Date createDate = getCreateDate(); if (createDate != null) { merchantCacheModel.createDate = createDate.getTime(); } else { merchantCacheModel.createDate = Long.MIN_VALUE; } Date modifiedDate = getModifiedDate(); if (modifiedDate != null) { merchantCacheModel.modifiedDate = modifiedDate.getTime(); } else { merchantCacheModel.modifiedDate = Long.MIN_VALUE; } merchantCacheModel.title = getTitle(); String title = merchantCacheModel.title; if ((title != null) && (title.length() == 0)) { merchantCacheModel.title = null; } merchantCacheModel.code = getCode(); String code = merchantCacheModel.code; if ((code != null) && (code.length() == 0)) { merchantCacheModel.code = null; } merchantCacheModel.parentId = getParentId(); merchantCacheModel.parentCode = getParentCode(); String parentCode = merchantCacheModel.parentCode; if ((parentCode != null) && (parentCode.length() == 0)) { merchantCacheModel.parentCode = null; } merchantCacheModel.description = getDescription(); String description = merchantCacheModel.description; if ((description != null) && (description.length() == 0)) { merchantCacheModel.description = null; } return merchantCacheModel; } @Override public String toString() { StringBundler sb = new StringBundler(25); sb.append("{merchantId="); sb.append(getMerchantId()); sb.append(", groupId="); sb.append(getGroupId()); sb.append(", companyId="); sb.append(getCompanyId()); sb.append(", userId="); sb.append(getUserId()); sb.append(", userName="); sb.append(getUserName()); sb.append(", createDate="); sb.append(getCreateDate()); sb.append(", modifiedDate="); sb.append(getModifiedDate()); sb.append(", title="); sb.append(getTitle()); sb.append(", code="); sb.append(getCode()); sb.append(", parentId="); 
sb.append(getParentId()); sb.append(", parentCode="); sb.append(getParentCode()); sb.append(", description="); sb.append(getDescription()); sb.append("}"); return sb.toString(); } @Override public String toXmlString() { StringBundler sb = new StringBundler(40); sb.append("<model><model-name>"); sb.append("com.cmcti.cmts.domain.model.Merchant"); sb.append("</model-name>"); sb.append( "<column><column-name>merchantId</column-name><column-value><![CDATA["); sb.append(getMerchantId()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>groupId</column-name><column-value><![CDATA["); sb.append(getGroupId()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>companyId</column-name><column-value><![CDATA["); sb.append(getCompanyId()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>userId</column-name><column-value><![CDATA["); sb.append(getUserId()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>userName</column-name><column-value><![CDATA["); sb.append(getUserName()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>createDate</column-name><column-value><![CDATA["); sb.append(getCreateDate()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>modifiedDate</column-name><column-value><![CDATA["); sb.append(getModifiedDate()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>title</column-name><column-value><![CDATA["); sb.append(getTitle()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>code</column-name><column-value><![CDATA["); sb.append(getCode()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>parentId</column-name><column-value><![CDATA["); sb.append(getParentId()); sb.append("]]></column-value></column>"); sb.append( "<column><column-name>parentCode</column-name><column-value><![CDATA["); sb.append(getParentCode()); 
sb.append("]]></column-value></column>"); sb.append( "<column><column-name>description</column-name><column-value><![CDATA["); sb.append(getDescription()); sb.append("]]></column-value></column>"); sb.append("</model>"); return sb.toString(); } private static ClassLoader _classLoader = Merchant.class.getClassLoader(); private static Class<?>[] _escapedModelInterfaces = new Class[] { Merchant.class }; private long _merchantId; private long _groupId; private long _companyId; private long _userId; private String _userUuid; private long _originalUserId; private boolean _setOriginalUserId; private String _userName; private Date _createDate; private Date _modifiedDate; private String _title; private String _code; private String _originalCode; private long _parentId; private String _parentCode; private String _description; private long _columnBitmask; private Merchant _escapedModel; }
xueduany/chii
node_modules/licia/slice.js
exports = function(arr, start, end) { var len = arr.length; if (start == null) { start = 0; } else if (start < 0) { start = Math.max(len + start, 0); } else { start = Math.min(start, len); } if (end == null) { end = len; } else if (end < 0) { end = Math.max(len + end, 0); } else { end = Math.min(end, len); } var ret = []; while (start < end) { ret.push(arr[start++]); } return ret; }; module.exports = exports;
PiotrLadyzynski/XChange
xchange-bitcointoyou/src/main/java/com/xeiam/xchange/bitcointoyou/service/BitcoinToYouDigest.java
<reponame>PiotrLadyzynski/XChange package com.xeiam.xchange.bitcointoyou.service; import javax.crypto.Mac; import si.mazi.rescu.RestInvocation; import com.xeiam.xchange.service.BaseParamsDigest; import com.xeiam.xchange.utils.Base64; /** * @author <NAME> */ public class BitcoinToYouDigest extends BaseParamsDigest { private final String key; private final long nonce; /** * Constructor * * @param nonce See {@link com.xeiam.xchange.bitcointoyou.BitcoinToYouUtils#getNonce()} */ private BitcoinToYouDigest(String key, String secret, Long nonce) { super(secret, HMAC_SHA_256); this.key = key; this.nonce = nonce; } public static BitcoinToYouDigest createInstance(String key, String secret, Long nonce) { return secret == null ? null : new BitcoinToYouDigest(key, secret, nonce); } @Override public String digestParams(RestInvocation restInvocation) { Mac hmac256 = getMac(); // ACCESS_NONCE + ACCESS_KEY String message = this.nonce + this.key; hmac256.update(message.getBytes()); return Base64.encodeBytes(hmac256.doFinal()).toUpperCase(); } }
SevenandTen/CodeLib
CodeLib/MyLib/ED_StringControl/ED_StringControl.h
//
//  ED_StringControl.h
//  CodeLib
//
//  Created by zw on 2019/12/25.
//  Copyright © 2019 seventeen. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

/// String character-classification helpers.
/// NOTE(review): only the declarations are visible here — whether each check
/// applies to the whole string or just its first character must be confirmed
/// against the implementation (.m) file.
@interface ED_StringControl : NSObject

/// Returns YES if `target` is classified as Chinese character content.
+ (BOOL)isChineseCharacter:(NSString *)target;

/// Returns YES if `target` is classified as English (Latin letter) content.
+ (BOOL)isEnglishCharacter:(NSString *)target;

/// Returns YES if `target` is classified as numeric (digit) content.
+ (BOOL)isNumberCharacter:(NSString *)target;

@end

NS_ASSUME_NONNULL_END
turchin-oleg/Enterprise-Template-API
src/main/java/com/example/api/security/audit/Auditable.java
package com.example.api.security.audit;

import lombok.Getter;
import org.springframework.data.annotation.CreatedBy;
import org.springframework.data.annotation.CreatedDate;
import org.springframework.data.annotation.LastModifiedBy;
import org.springframework.data.annotation.LastModifiedDate;
import org.springframework.data.jpa.domain.support.AuditingEntityListener;

import javax.persistence.Column;
import javax.persistence.EntityListeners;
import javax.persistence.MappedSuperclass;
import javax.persistence.Temporal;
import java.util.Date;

import static javax.persistence.TemporalType.TIMESTAMP;

/**
 * Base class for JPA entities that carry creation/modification audit columns.
 *
 * <p>All four fields are populated automatically by Spring Data's
 * {@link AuditingEntityListener}; entities never set them directly.
 *
 * @param <U> type identifying the auditing principal (e.g. a username or user id)
 */
@MappedSuperclass
@EntityListeners(AuditingEntityListener.class)
public abstract class Auditable<U> {

    /** Principal that created the entity; written once, never updated. */
    @Getter
    @Column(name = "created_by", nullable = false, updatable = false)
    @CreatedBy
    protected U createdBy;

    /** Timestamp of entity creation; written once, never updated. */
    @Getter
    @Column(name = "creation_date", nullable = false, updatable = false)
    @CreatedDate
    @Temporal(TIMESTAMP)
    protected Date creationDate;

    // NOTE(review): no @Column on the two fields below, so the column names are
    // whatever the naming strategy derives (e.g. last_modified_by) — confirm
    // against the schema if column names matter.

    /** Principal that last modified the entity. */
    @Getter
    @LastModifiedBy
    protected U lastModifiedBy;

    /** Timestamp of the last modification. */
    @Getter
    @LastModifiedDate
    @Temporal(TIMESTAMP)
    protected Date lastModifiedDate;
}
Markmu/algo_practicing
leet-code/src/main/java/com/mark/No_69_Sqrt/Solution.java
package com.mark.No_69_Sqrt; public class Solution { public int mySqrt(int x) { if (x == 0) return 0; long left = 1l; long right = x >> 1l; long mid = 0; while (left <= right) { mid = left + ((right - left) >> 1); if (mid*mid <= x) { if ((mid+1)*(mid+1) > x || mid*mid == x) return (int) mid; else left = mid+1; } else { right = mid - 1; } } return 1; } }
cgwalters/origin
pkg/cmd/cli/secrets/subcommand.go
package secrets import ( "io" "github.com/spf13/cobra" cmdutil "github.com/openshift/origin/pkg/cmd/util" "github.com/openshift/origin/pkg/cmd/util/clientcmd" ) const SecretsRecommendedName = "secrets" const ( secretsLong = `Manage secrets in your project. Secrets are used to store confidential information that should not be contained inside of an image. They are commonly used to hold things like keys for authentication to other internal systems like Docker registries.` ) func NewCmdSecrets(name, fullName string, f *clientcmd.Factory, out io.Writer, ocEditFullName string) *cobra.Command { // Parent command to which all subcommands are added. cmds := &cobra.Command{ Use: name, Short: "Manage secrets", Long: secretsLong, Run: cmdutil.DefaultSubCommandRun(out), } newSecretFullName := fullName + " " + NewSecretRecommendedCommandName cmds.AddCommand(NewCmdCreateSecret(NewSecretRecommendedCommandName, newSecretFullName, f, out)) cmds.AddCommand(NewCmdCreateDockerConfigSecret(CreateDockerConfigSecretRecommendedName, fullName+" "+CreateDockerConfigSecretRecommendedName, f.Factory, out, newSecretFullName, ocEditFullName)) return cmds }
wscheep/proto-beam
transform/src/main/java/io/anemos/protobeam/transform/beamsql/ProtoToRowDoFn.java
package io.anemos.protobeam.transform.beamsql;

import com.google.protobuf.Message;
import io.anemos.protobeam.convert.ProtoBeamSqlExecutionPlan;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.values.Row;

/**
 * Beam {@link DoFn} converting protobuf messages of type {@code T} into Beam
 * SQL {@link Row}s, one output row per input message.
 *
 * <p>The conversion strategy is computed once per DoFn instance from the
 * message class and reused for every element.
 *
 * @param <T> the concrete protobuf message type to convert
 */
public class ProtoToRowDoFn<T extends Message> extends DoFn<T, Row> {
    // Precomputed proto-to-Row conversion plan for the message class.
    // NOTE(review): DoFn instances are serialized by Beam runners — confirm
    // ProtoBeamSqlExecutionPlan is Serializable.
    private ProtoBeamSqlExecutionPlan plan;

    /**
     * @param messageClass concrete protobuf message class used to derive the
     *                     Row conversion plan
     */
    public ProtoToRowDoFn(Class<T> messageClass) {
        plan = new ProtoBeamSqlExecutionPlan(messageClass);
    }

    /** Emits the {@link Row} conversion of each incoming message. */
    @ProcessElement
    public void processElement(ProcessContext c) {
        c.output(plan.convert(c.element()));
    }
}
adjohnston/react-pattern-library
example/stories/colours/colours.js
<filename>example/stories/colours/colours.js<gh_stars>1-10 const constants = require('../../constants') module.exports = { group: constants.groups.pages, pageName: 'Our Colours', notesRef: 'our-colours', }
gokit/dbx
schema/data_type.go
package schema // the abstract DB type of this column type DataType string const ( TypeChar DataType = "char" TypeString DataType = "string" TypeText DataType = "text" TypeMediumText DataType = "mediumText" TypeLongText DataType = "longText" TypeTinyInt DataType = "tinyInt" TypeSmallInt DataType = "smallInt" TypeMediumInt DataType = "mediumInt" TypeInt DataType = "int" TypeBigInt DataType = "bigInt" TypeTinyBlob DataType = "tinyBlob" TypeBlob DataType = "blob" TypeMediumBlob DataType = "mediumBlob" TypeLongBlob DataType = "longBlob" TypeFloat DataType = "float" TypeDouble DataType = "double" TypeDecimal DataType = "decimal" TypeDateTime DataType = "datetime" TypeDateTimeTz DataType = "datetimeTz" TypeTimestamp DataType = "timestamp" TypeTimestampTz DataType = "timestampTz" TypeTime DataType = "time" TypeTimeTz DataType = "timeTz" TypeDate DataType = "date" TypeYear DataType = "year" TypeBinary DataType = "binary" TypeBoolean DataType = "boolean" TypeJson DataType = "json" TypeJsonb DataType = "jsonb" TypeEnum DataType = "enum" TypeSet DataType = "set" TypeUUID DataType = "uuid" TypeIpAddress DataType = "ipAddress" TypeMacAddress DataType = "macAddress" TypeGeometry DataType = "geometry" TypePoint DataType = "point" TypeLineString DataType = "lineString" TypePolygon DataType = "polygon" TypeGeometryCollection DataType = "geometryCollection" TypeMultiPoint DataType = "multiPoint" TypeMultiLineString DataType = "multiLineString" TypeMultiPolygon DataType = "multiPolygon" TypeMultiPolygonZ DataType = "MultiPolygonZ" )
sixthedge/cellar
src/thinkspace/api/thinkspace-test/test/timetable/helpers/all.rb
# Aggregates the Timetable test helpers into a single mixin.
# Including Test::Timetable::Helpers::All pulls in the casespace models plus
# the timetable model and assertion helpers used by the timetable test suite.
module Test; module Timetable; module Helpers; module All
  extend ActiveSupport::Concern
  included do
    include Casespace::Models          # casespace model helpers
    include Timetable::Helpers::Models # timetable model helpers
    include Timetable::Helpers::Assert # timetable-specific assertions
  end
end; end; end; end
Dbevan/SunderingShadows
d/common/mounts/barded_flying_mount.c
//Added to allow for barded flying mounts off of the Knight swords
//from Demongate. The appropriate flying mounts will be consistent
//with the normal barded horses - Octothorpe 7/6/11
#include <std.h>
inherit "/std/flying_mount.c";

// When set, the mount is topped back up to full HP on the next
// restrict_mount_ok() rescale; cleared once the heal is applied.
// NOTE(review): nothing in this file ever sets it to 1 — presumably the
// inherited /std/flying_mount.c does; confirm there.
nosave int healingStuff;

// This mount always counts as a warhorse.
int is_warhorse() { return 1; }

// Build the default (unridden) mount: limbs, baseline level-20 combat
// statistics, flight parameters, and its barding armour.
void create(){
    ::create();
    add_limb("right foreleg","torso",0,0,0);
    add_limb("left foreleg","torso",0,0,0);
    add_limb("right rear leg","torso",0,0,0);
    add_limb("left rear leg","torso",0,0,0);
    add_limb("torso","torso",0,0,0);
    add_limb("head","torso",0,0,0);
    set_attack_limbs( ({"right leg","left leg"}) );
    set_damage(1,10);
    set_stats("strength",18);
    set_body_type("equine");
    set_level(20);
    set_mlevel("fighter",20);
    set_hd(20,10);
    set_max_hp(200);
    set_hp(200);
    set_overall_ac(6);
    set_exp(1);
    // Flight tuning for this mount.
    set_max_distance(200);
    set_flight_delay(300);
    set_flying_prof(25);
    set_ward_pass(25);
    // Clone the barding armour onto the mount and wear it immediately.
    new("/d/common/obj/armour/barding")->move(TO);
    command("wear barding");
}

// Rider mounts up: knightly classes get improved barding AC scaling with
// their class level (lower overall AC is better here).
// NOTE(review): the three checks are independent, so if a character somehow
// matched more than one class, the last match would win — confirm acceptable.
int enter(string str){
    if(TP->is_class("paladin")) set_overall_ac(6 - (int)TP->query_class_level("paladin")/2);
    if(TP->is_class("cavalier")) set_overall_ac(6- (int)TP->query_class_level("cavalier")/2);
    if(TP->is_class("antipaladin")) set_overall_ac(6 - (int)TP->query_class_level("antipaladin")/2);
    return ::enter(str);
}

// Gate + rescale when someone tries to mount. Knightly riders (paladin,
// cavalier, antipaladin) rescale the mount to their own level while
// preserving the amount of damage already taken; everyone else resets the
// mount to its level-20 baseline. Always returns 1 (mounting is permitted).
int restrict_mount_ok(object who) {
    // NOTE(review): `newhp` is declared but never used.
    int lev, temp, hurt, newhp;
    lev = who->query_level();
    if (who->is_class("paladin") || who->is_class("cavalier") || who->is_class("antipaladin")) {
        // Remember current damage so the rescale keeps the mount equally hurt.
        temp = query_hp();
        hurt = query_max_hp() - temp;
        set_level(lev); // this wasn't working for some reason so using mlevel
        set_riding_level(who->query_skill("athletics"));
        set_mlevel("fighter",lev);
        set_hd(lev,10);
        set_max_hp(20*lev);
        set_hp(query_max_hp() - hurt);
        set_damage(1,lev/2);
        set_stats("strength",20);
        // Heal to full if flagged, or clamp if the rescale left hp > max.
        if (healingStuff || query_max_hp() < query_hp()) {
            set_hp(query_max_hp());
            healingStuff = 0;
        }
        return 1;
    }
    // Non-knight rider: reset to the baseline level-20 mount.
    set_level(20);
    set_riding_level(20);
    set_mlevel("fighter",20);
    set_hd(20,10);
    set_stats("strength",18);
    set_max_hp(200);
    if (healingStuff || query_max_hp() < query_hp()) {
        set_hp(query_max_hp());
        healingStuff = 0;
    }
    set_damage(1,10);
    return 1;
}
herokuro/noop-server
test/cli/help-flag.test.js
'use strict'

const _ = require('../utils')

// Both help flags must print the on-disk help text (minus its trailing
// newline) to stdout.
_.test('cli:help flag', async t => {
  const rawHelp = await _.readFile(_.path.helpText)
  const expected = rawHelp.slice(0, -1)

  const shortFlag = await _.run(`node ${_.cli} -h`)
  t.deepEqual(shortFlag.stdout, expected, '$ noop-server -h should return the help text')

  const longFlag = await _.run(`node ${_.cli} --help`)
  t.deepEqual(longFlag.stdout, expected, '$ noop-server --help should return the help text')

  t.end()
})
wohaaitinciu/zpublic
pellets/z_platform/impl/z_platform_win_thread.hpp
<reponame>wohaaitinciu/zpublic #pragma once #include <windows.h> typedef HANDLE zl_thread_handle; class zl_thread_impl { public: bool create(size_t stack_size, zl_delegate* pdelegate, zl_thread_handle* out_thread_handle) { zl_thread_handle h = ::CreateThread(NULL, stack_size, zl_thread_impl::thread_func, pdelegate, 0, NULL); if (h == NULL) { return false; } if (out_thread_handle) { *out_thread_handle = h; } else { ::CloseHandle(h); } return true; } private: static DWORD __stdcall thread_func(void* params) { zl_delegate* d = static_cast<zl_delegate*>(params); d->thread_main(); return 0; } };
athenianco/athenian-api
server/tests/controllers/miners/github/test_check_run.py
"""Tests for GitHub check run mining and check suite type grouping."""
from datetime import datetime, timezone
from pathlib import Path

import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest

from athenian.api.controllers.features.github.check_run_metrics_accelerated import \
    mark_check_suite_types
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.check_run import _postprocess_check_runs, \
    _split_duplicate_check_runs, mine_check_runs
from athenian.api.controllers.settings import LogicalRepositorySettings
from athenian.api.int_to_str import int_to_str
from athenian.api.models.metadata.github import CheckRun


# Each case pins the exact number of check runs mined from the fixture DB for a
# combination of time range, repositories, commit pushers, PR labels, and JIRA
# filter.
@pytest.mark.parametrize("time_from, time_to, repositories, pushers, labels, jira, size", [
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter.empty(), JIRAFilter.empty(), 4581),
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/hercules"], [], LabelFilter.empty(), JIRAFilter.empty(), 0),
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2018, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter.empty(), JIRAFilter.empty(), 2371),
    (datetime(2018, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter.empty(), JIRAFilter.empty(), 2213),
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], ["mcuadros"], LabelFilter.empty(), JIRAFilter.empty(), 1642),
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter({"bug", "plumbing", "enhancement"}, set()),
     JIRAFilter.empty(), 67),
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter.empty(),
     JIRAFilter(1, ["10003", "10009"], LabelFilter.empty(), set(), {"task"}, False, False),
     229),
    (datetime(2015, 10, 10, tzinfo=timezone.utc), datetime(2015, 10, 23, tzinfo=timezone.utc),
     ["src-d/go-git"], [], LabelFilter.empty(), JIRAFilter.empty(), 4),
])
async def test_check_run_smoke(
        mdb, time_from, time_to, repositories, pushers, labels, jira, size, logical_settings):
    # Mine with the full filter set and verify the row count plus schema:
    # every CheckRun column except committed_date_hack must be present, and
    # check run node ids must be unique.
    df = await mine_check_runs(
        time_from, time_to, repositories, pushers, labels, jira, False, logical_settings,
        (6366825,), mdb, None)
    assert len(df) == size
    for col in CheckRun.__table__.columns:
        if col.name not in (CheckRun.committed_date_hack.name,):
            assert col.name in df.columns
    assert len(df[CheckRun.check_run_node_id.name].unique()) == len(df)


@pytest.mark.parametrize("time_from, time_to, size", [
    (datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
     2766),
    (datetime(2018, 1, 1, tzinfo=timezone.utc), datetime(2019, 1, 1, tzinfo=timezone.utc),
     1068),
])
async def test_check_run_only_prs(mdb, time_from, time_to, size, logical_settings):
    # only_prs=True must yield rows that all reference a pull request.
    df = await mine_check_runs(
        time_from, time_to, ["src-d/go-git"], [], LabelFilter.empty(), JIRAFilter.empty(),
        True, logical_settings, (6366825,), mdb, None)
    assert (df[CheckRun.pull_request_node_id.name].values != 0).all()
    assert len(df) == size


# Logical repository splitting by PR title (settings come from the shared
# `logical_settings` fixture).
@pytest.mark.parametrize("repos, size", [
    (["src-d/go-git", "src-d/go-git/alpha", "src-d/go-git/beta"], 4662),
    (["src-d/go-git", "src-d/go-git/alpha"], 3922),
    (["src-d/go-git", "src-d/go-git/beta"], 3766),
    (["src-d/go-git"], 4581),
    (["src-d/go-git/alpha"], 896),
])
async def test_check_run_logical_repos_title(
        mdb, logical_settings, repos, size):
    df = await mine_check_runs(
        datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
        repos, [], LabelFilter.empty(), JIRAFilter.empty(), False, logical_settings,
        (6366825,), mdb, None)
    assert set(df[CheckRun.repository_full_name.name].unique()) == set(repos)
    assert len(df) == size


@pytest.fixture(scope="session")
def logical_settings_mixed():
    # alpha splits by PR labels, beta by PR title regexp.
    return LogicalRepositorySettings({
        "src-d/go-git/alpha": {"labels": ["bug", "enhancement"]},
        "src-d/go-git/beta": {"title": ".*[Aa]dd"},
    }, {})


# Same as the title-based test above but with mixed label/title splitting.
@pytest.mark.parametrize("repos, size", [
    (["src-d/go-git", "src-d/go-git/alpha", "src-d/go-git/beta"], 4581),
    (["src-d/go-git", "src-d/go-git/alpha"], 3841),
    (["src-d/go-git", "src-d/go-git/beta"], 4572),
    (["src-d/go-git"], 4581),
    (["src-d/go-git/alpha"], 9),
])
async def test_check_run_logical_repos_label(
        mdb, logical_settings_mixed, repos, size):
    df = await mine_check_runs(
        datetime(2015, 1, 1, tzinfo=timezone.utc), datetime(2020, 1, 1, tzinfo=timezone.utc),
        repos, [], LabelFilter.empty(), JIRAFilter.empty(), False, logical_settings_mixed,
        (6366825,), mdb, None)
    assert set(df[CheckRun.repository_full_name.name].unique()) == set(repos)
    assert len(df) == size


def test_mark_check_suite_types_smoke():
    # Suites are keyed by their sorted set of check run names; suites with the
    # same name set share a group id.
    names = np.array(["one", "two", "one", "three", "one", "one", "two"])
    suites = np.array([1, 1, 4, 3, 2, 5, 5])
    suite_indexes, group_ids = mark_check_suite_types(names, suites)
    assert_array_equal(suite_indexes, [0, 4, 3, 2, 5])
    assert_array_equal(group_ids, [2, 1, 0, 1, 2])


def test_mark_check_suite_types_empty():
    # Degenerate input: no check runs at all.
    suite_indexes, group_ids = mark_check_suite_types(
        np.array([], dtype="U"), np.array([], dtype=int))
    assert len(suite_indexes) == 0
    assert len(group_ids) == 0


@pytest.fixture(scope="module")
def alternative_facts() -> pd.DataFrame:
    # Real-world check run dump, re-run through the same dedup/postprocess
    # pipeline that mine_check_runs applies.
    df = pd.read_csv(
        Path(__file__).parent.parent.parent / "features" / "github" / "check_runs.csv.gz")
    for col in (CheckRun.started_at, CheckRun.completed_at, CheckRun.pull_request_created_at,
                CheckRun.pull_request_closed_at, CheckRun.committed_date):
        df[col.name] = df[col.name].astype(np.datetime64)
    df = _split_duplicate_check_runs(df)
    _postprocess_check_runs(df)
    return df


def test_mark_check_suite_types_real_world(alternative_facts):
    # Prefix each check run name with its repository id so identical names in
    # different repositories do not collapse into one group.
    repos = int_to_str(alternative_facts[CheckRun.repository_node_id.name].values)
    names = np.char.add(
        repos, np.char.encode(alternative_facts[CheckRun.name.name].values.astype("U"),
                              "UTF-8"))
    suite_indexes, group_ids = mark_check_suite_types(
        names, alternative_facts[CheckRun.check_suite_node_id.name].values)
    assert (suite_indexes < len(alternative_facts)).all()
    assert (suite_indexes >= 0).all()
    unique_groups, counts = np.unique(group_ids, return_counts=True)
    assert_array_equal(unique_groups, np.arange(21))
    assert_array_equal(
        counts,
        [1, 1, 110, 1, 275, 1, 928, 369, 2, 1472, 8490, 1, 707, 213, 354, 205, 190, 61, 731,
         251, 475])
JRL-CARI-CNR-UNIBS/cari_motion_planning
graph_core/src/graph_core/avoidance_goal_cost_function.cpp
/*
Copyright (c) 2019, <NAME>
CNR-STIIMA <EMAIL>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the <organization> nor the
      names of its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <graph_core/avoidance_goal_cost_function.h>

namespace pathplan
{

// Goal cost that penalizes configurations whose monitored links come close to
// a set of 3D avoidance points. Cost is max_penalty_ inside min_distance_,
// linearly decaying to 0 at max_distance_.
//
// Parameters read from the node namespace: base_frame, tool_frame (both
// required), max_penalty, max_avoidance_distance, min_avoidance_distance,
// display_bubbles, links.
//
// Throws std::invalid_argument when base_frame or tool_frame is missing.
AvoidanceGoalCostFunction::AvoidanceGoalCostFunction(const ros::NodeHandle &nh):
  GoalCostFunction(),
  nh_(nh)
{
  urdf::Model model;
  model.initParam("robot_description");
  std::string base_frame = "world";
  std::string tool_frame = "tip";
  if (!nh_.getParam("base_frame", base_frame))
  {
    ROS_ERROR("%s/base_frame not defined", nh_.getNamespace().c_str());
    throw std::invalid_argument("base_frame is not defined");
  }
  if (!nh_.getParam("tool_frame", tool_frame))
  {
    ROS_ERROR("%s/tool_frame not defined", nh_.getNamespace().c_str());
    // BUGFIX: previously threw "base_frame is not defined" (copy-paste slip),
    // masking which parameter was actually missing.
    throw std::invalid_argument("tool_frame is not defined");
  }
  // Optional parameters: fall back to documented defaults with a log message.
  if (!nh_.getParam("max_penalty", max_penalty_))
  {
    ROS_ERROR("%s/max_penalty not defined, use 1.0", nh_.getNamespace().c_str());
    max_penalty_ = 1.0;
  }
  if (!nh_.getParam("max_avoidance_distance", max_distance_))
  {
    ROS_ERROR("%s/max_avoidance_distance not defined, use 1.5 meter", nh_.getNamespace().c_str());
    max_distance_ = 1.5;
  }
  if (!nh_.getParam("min_avoidance_distance", min_distance_))
  {
    ROS_ERROR("%s/min_avoidance_distance not defined, use 0.5 meter", nh_.getNamespace().c_str());
    min_distance_ = 0.5;
  }
  if (!nh_.getParam("display_bubbles", plot))
  {
    ROS_ERROR("%s/display_bubbles not defined, use false", nh_.getNamespace().c_str());
    plot = false;
  }

  Eigen::Vector3d grav;
  grav << 0, 0, -9.806;
  chain_ = rosdyn::createChain(model, base_frame, tool_frame, grav);
  if (!nh_.getParam("links", links_))
  {
    ROS_ERROR("%s/links not defined, use all links", nh_.getNamespace().c_str());
    links_ = chain_->getLinksName();
  }

  // points_ is a 3xN matrix of avoidance points; start empty.
  points_.resize(3, 0);
  // Precompute 1/(max-min) for the linear penalty ramp in cost().
  inv_delta_distance_ = 1.0 / (max_distance_ - min_distance_);

  if (plot)
  {
    marker_id_ = 0;
    marker_pub_ = nh_.advertise<visualization_msgs::Marker>("/goal_cost_function/avoidance_points", 1000);
    // Clear any stale marker left over from a previous run; published a few
    // times because the subscriber may connect late.
    visualization_msgs::Marker marker;
    marker.type = visualization_msgs::Marker::SPHERE;
    marker.id = marker_id_;
    marker.ns = "avoidance";
    marker.header.frame_id = "world";
    marker.header.stamp = ros::Time::now();
    marker.action = visualization_msgs::Marker::DELETE;
    for (unsigned int idx = 0; idx < 5; idx++)
    {
      marker_pub_.publish(marker);
      ros::Duration(0.01).sleep();
    }
  }
}

// Drop all avoidance points and, when plotting, delete the published markers.
void AvoidanceGoalCostFunction::cleanPoints()
{
  points_.resize(3, 0);
  if (!plot)
    return;
  visualization_msgs::Marker marker;
  marker.type = visualization_msgs::Marker::SPHERE;
  marker.ns = "avoidance";
  marker.header.frame_id = "world";
  marker.header.stamp = ros::Time::now();
  marker.action = visualization_msgs::Marker::DELETEALL;
  marker_id_ = 0;
  marker_pub_.publish(marker);
  ros::Duration(0.1).sleep();
}

// Append an avoidance point, skipping near-duplicates (< 0.1 mm apart).
void AvoidanceGoalCostFunction::addPoint(const Eigen::Vector3d &point)
{
  for (int ic = 0; ic < points_.cols(); ic++)
  {
    if ((points_.col(ic) - point).norm() < 1e-4)
      return;  // already stored (within tolerance)
  }
  points_.conservativeResize(3, points_.cols() + 1);
  points_.col(points_.cols() - 1) = point;
  if (!plot)
    return;
  // Per-point markers are published in batch by publishPoints().
}

// Publish all avoidance points as a single SPHERE_LIST marker, where each
// sphere's radius equals max_distance_ (the outer influence bubble).
void AvoidanceGoalCostFunction::publishPoints()
{
  if (!plot)
    return;
  visualization_msgs::Marker marker;
  marker.type = visualization_msgs::Marker::SPHERE_LIST;
  marker.ns = "avoidance";
  marker.pose.orientation.w = 1.0;
  marker.header.frame_id = "world";
  marker.header.stamp = ros::Time::now();
  marker.action = visualization_msgs::Marker::ADD;
  marker.id = marker_id_++;
  marker.scale.x = 2.0 * max_distance_;
  marker.scale.y = 2.0 * max_distance_;
  marker.scale.z = 2.0 * max_distance_;
  marker.color.r = 1.0;
  marker.color.g = 0.0;
  marker.color.b = 0.0;
  marker.color.a = 0.05;
  for (int ic = 0; ic < points_.cols(); ic++)
  {
    Eigen::Vector3d point = points_.col(ic);
    geometry_msgs::Point p;
    tf::pointEigenToMsg(point, p);
    marker.points.push_back(p);
    marker.colors.push_back(marker.color);
  }
  marker_pub_.publish(marker);
  ros::Duration(0.15).sleep();
}

// Penalty for a joint configuration, driven by the minimum distance between
// any monitored link origin and any avoidance point:
//   dist <  min_distance_  -> max_penalty_
//   dist <  max_distance_  -> linear ramp down to 0
//   otherwise              -> 0
double AvoidanceGoalCostFunction::cost(const Eigen::VectorXd& configuration)
{
  double dist = std::numeric_limits<double>::infinity();
  for (const std::string& link : links_)
  {
    Eigen::Affine3d T_b_l = chain_->getTransformationLink(configuration, link);
    for (long ip = 0; ip < points_.cols(); ip++)
    {
      double d = (T_b_l.translation() - points_.col(ip)).norm();
      dist = (d < dist) ? d : dist;
      if (dist < min_distance_)
        break;  // already at maximum penalty; stop searching
    }
    if (dist < min_distance_)
      break;
  }
  if (dist < min_distance_)
    return max_penalty_;
  else if (dist < max_distance_)
    return max_penalty_ * (max_distance_ - dist) * inv_delta_distance_;
  return 0.0;
}
}
opengauss-mirror/DataStudio
code/datastudio/src/org.opengauss.mppdbide.view/src/org/opengauss/mppdbide/view/utils/ProgressMonitorControl.java
<reponame>opengauss-mirror/DataStudio /* * Copyright (c) 2022 Huawei Technologies Co.,Ltd. * * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * * http://license.coscl.org.cn/MulanPSL2 * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ package org.opengauss.mppdbide.view.utils; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; import org.eclipse.core.runtime.jobs.IJobChangeEvent; import org.eclipse.core.runtime.jobs.IJobChangeListener; import org.eclipse.core.runtime.jobs.IJobManager; import org.eclipse.core.runtime.jobs.Job; import org.eclipse.swt.SWT; import org.eclipse.swt.custom.TableEditor; import org.eclipse.swt.events.PaintEvent; import org.eclipse.swt.events.PaintListener; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.events.SelectionListener; import org.eclipse.swt.graphics.Point; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Display; import org.eclipse.swt.widgets.ProgressBar; import org.eclipse.swt.widgets.Table; import org.eclipse.swt.widgets.TableColumn; import org.eclipse.swt.widgets.TableItem; import org.opengauss.mppdbide.utils.IMessagesConstants; import org.opengauss.mppdbide.utils.MPPDBIDEConstants; import org.opengauss.mppdbide.utils.loader.MessageConfigLoader; import org.opengauss.mppdbide.view.utils.consts.UIConstants; import org.opengauss.mppdbide.view.utils.dialog.MPPDBIDEDialogs; import org.opengauss.mppdbide.view.utils.dialog.MPPDBIDEDialogs.MESSAGEDIALOGTYPE; import 
org.opengauss.mppdbide.view.utils.icon.IconUtility;
import org.opengauss.mppdbide.view.utils.icon.IiconPath;
import org.opengauss.mppdbide.view.utils.progressmonitorif.ProgressMonitorControlIf;
import org.opengauss.mppdbide.view.workerjob.UIWorkerJob;

/**
 *
 * Title: class
 *
 * Description: The Class ProgressMonitorControl. Renders a two-column table of
 * running cancelable jobs: column 0 shows an indeterminate progress bar labeled
 * with the job name, column 1 shows a cancel button per job. The table is
 * refreshed whenever a cancelable job completes.
 *
 * @since 3.0.0
 */
public class ProgressMonitorControl implements ProgressMonitorControlIf {
    private static final int TABLE_COLUMN_1_WIDTH = 2 * IScreenResolutionUtil.getScreenWidth() / 3;
    private static final int TABLE_COLUMN_2_WIDTH = 50;

    // True while at least one export/import (cancelable) job is running.
    private static boolean isExportImportInProgress;

    private DSJobChangeListener dsJobChangeListenerObj = new DSJobChangeListener();
    private Table table;
    // Editors hosting the progress bars (column 0) and the cancel buttons
    // (column 1); tracked so they can be disposed before each table rebuild.
    private List<TableEditor> editorLst = new ArrayList<TableEditor>();
    private List<TableEditor> editorBtnLst = new ArrayList<TableEditor>();
    private Composite parent;

    /**
     * Instantiates a new progress monitor control.
     */
    public ProgressMonitorControl() {
        parent = null;
        table = null;
    }

    /**
     * Creates the controls: a two-column table (job progress / cancel button).
     *
     * @param compParent the comp parent
     */
    @PostConstruct
    public void createControls(Composite compParent) {
        this.parent = compParent;
        table = new Table(compParent, SWT.NONE);
        table.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
        table.setHeaderVisible(true);
        table.setLinesVisible(true);
        // AvoidLooprForArrayOrObjectAllocationCheck
        for (int j = 0; j < 2; j++) {
            new TableColumn(table, SWT.NONE);
        }
        // AvoidLooprForArrayOrObjectAllocationCheck
        table.getColumn(0).setText(MessageConfigLoader.getProperty(IMessagesConstants.CANCEL_PROGRESS_TABLE_HEARDER));
        table.getColumn(1).setText(MessageConfigLoader.getProperty(IMessagesConstants.MPPDBIDE_DIA_BTN_CANC));
        table.getColumn(0).setWidth(TABLE_COLUMN_1_WIDTH);
        table.getColumn(1).setWidth(TABLE_COLUMN_2_WIDTH);
        table.redraw();
        compParent.redraw();
    }

    /**
     * Refresh jobs: marks export/import as in progress, repopulates the table
     * and (re-)registers the job-change listener.
     */
    public void refreshJobs() {
        if (!isExportImportInProgress) {
            setExportImportInProgress(true);
        }
        final IJobManager jm = Job.getJobManager();
        updateProgressTable(jm);
        jm.addJobChangeListener(dsJobChangeListenerObj);
    }

    /**
     * Removes the job listener.
     */
    public void removeJobListener() {
        final IJobManager jm = Job.getJobManager();
        jm.removeJobChangeListener(dsJobChangeListenerObj);
    }

    /**
     * Listener that refreshes the progress table when a cancelable job
     * finishes, and hides the status bar once no cancelable jobs remain.
     */
    private class DSJobChangeListener implements IJobChangeListener {

        @Override
        public void sleeping(IJobChangeEvent event) {
        }

        @Override
        public void scheduled(IJobChangeEvent event) {
        }

        @Override
        public void running(IJobChangeEvent event) {
        }

        @Override
        public void done(IJobChangeEvent event) {
            final IJobManager jm = Job.getJobManager();
            Job[] allJobs = jm.find(MPPDBIDEConstants.CANCELABLEJOB);
            if (allJobs.length == 0) {
                UIElement.getInstance().hideStatusBarWindow();
                // Fixed: all cancelable jobs have completed, so the
                // export/import flag must be cleared; previously this
                // incorrectly passed true, leaving the flag permanently set.
                setExportImportInProgress(false);
            }
            if (null != jm.currentJob() && jm.currentJob().belongsTo(MPPDBIDEConstants.CANCELABLEJOB)) {
                // Table updates must run on the SWT UI thread.
                Display.getDefault().syncExec(new Runnable() {
                    @Override
                    public void run() {
                        updateProgressTable(jm);
                    }
                });
            }
        }

        @Override
        public void awake(IJobChangeEvent event) {
        }

        @Override
        public void aboutToRun(IJobChangeEvent event) {
        }
    }

    /**
     * Update progress table: disposes all existing row widgets and rebuilds
     * one row per currently known cancelable job.
     *
     * @param jm the jm
     */
    private void updateProgressTable(IJobManager jm) {
        Job[] allJobs = jm.find(MPPDBIDEConstants.CANCELABLEJOB);
        if (table.isDisposed()) {
            return;
        }
        table.removeAll();
        table.clearAll();
        // Dispose the old editors (and their hosted widgets) before rebuilding.
        for (TableEditor editor : editorLst) {
            functionDisposecomponent(editor);
        }
        editorLst.clear();
        for (TableEditor editor : editorBtnLst) {
            functionDisposecomponent(editor);
        }
        editorBtnLst.clear();
        for (final Job job : allJobs) {
            addProgressBarForAllJobs(job);
        }
        table.getColumn(0).setWidth(TABLE_COLUMN_1_WIDTH);
        table.getColumn(1).setWidth(TABLE_COLUMN_2_WIDTH);
        table.redraw();
        parent.redraw();
    }

    /**
     * Adds one table row for the given job: an indeterminate progress bar in
     * column 0 and a cancel button in column 1. A cancelling job gets a
     * "cancelling" label and a disabled button.
     *
     * @param job the job to render
     */
    private void addProgressBarForAllJobs(final Job job) {
        TableItem item = new TableItem(table, SWT.NONE);
        ProgressBar bar = new ProgressBar(table, SWT.SMOOTH | SWT.HORIZONTAL | SWT.INDETERMINATE | SWT.LEFT);
        item.setData(new GridData(SWT.FILL, SWT.FILL, true, true));
        TableEditor editor = new TableEditor(table);
        editor.grabHorizontal = true;
        editor.setEditor(bar, item, 0);
        editorLst.add(editor);

        // Note: a previous revision consulted a freshly allocated local
        // Map<Object, Button> for button reuse; since the map was always empty
        // the lookup could never hit, so the dead branch has been removed and
        // a new button is created for every row (the prior effective behavior).
        Button button = new Button(table, SWT.TRANSPARENT);
        button.addPaintListener(new ButtonPaintHelper());
        button.setData("row.id", job);
        if (job instanceof UIWorkerJob) {
            UIWorkerJob uiwkrJob = (UIWorkerJob) job;
            if (uiwkrJob.isCancel()) {
                bar.addPaintListener(new StatusBarPaintListener(
                        MessageConfigLoader.getProperty(IMessagesConstants.CANCELLING_JOB, uiwkrJob.getName()), bar));
                button.setEnabled(false);
                button.setGrayed(true);
            } else {
                bar.addPaintListener(new StatusBarPaintListener(uiwkrJob.getName(), bar));
            }
        }
        addEditorButton(item, button);
    }

    /**
     * Hosts the cancel button in column 1 of the given row and wires its
     * selection handler.
     *
     * @param item the table row
     * @param button the cancel button
     */
    private void addEditorButton(TableItem item, Button button) {
        TableEditor editorBtn = new TableEditor(item.getParent());
        editorBtn.grabHorizontal = true;
        editorBtn.grabVertical = true;
        editorBtn.setEditor(button, item, 1);
        button.addSelectionListener(new SelectionHelper());
        editorBtn.layout();
        editorBtnLst.add(editorBtn);
    }

    /**
     *
     * Title: class
     *
     * Description: The Class SelectionHelper. Handles a click on a row's
     * cancel button: confirms with the user, then cancels the associated job
     * and refreshes the table.
     */
    private final class SelectionHelper implements SelectionListener {

        @Override
        public void widgetSelected(SelectionEvent event) {
            Job jb = null;
            if (!((Button) event.getSource()).isDisposed()) {
                jb = (Job) ((Button) event.getSource()).getData("row.id");
            }
            int choice = MPPDBIDEDialogs.generateMessageDialog(MESSAGEDIALOGTYPE.QUESTION, true,
                    MessageConfigLoader.getProperty(IMessagesConstants.CANCEL_IMPORTEXPORT_CONSOLE),
                    MessageConfigLoader.getProperty(IMessagesConstants.CANCEL_IMPORTEXPORT_CONSOLE_MSG),
                    MessageConfigLoader.getProperty(IMessagesConstants.MPPDBIDE_DIA_BTN_YES),
                    MessageConfigLoader.getProperty(IMessagesConstants.MPPDBIDE_DIA_BTN_NO));
            if (choice == UIConstants.OK_ID) {
                if (jb != null) {
                    jb.cancel();
                    ProgressMonitorControl.this.updateProgressTable(Job.getJobManager());
                }
            }
        }

        @Override
        public void widgetDefaultSelected(SelectionEvent event) {
        }
    }

    /**
     *
     * Title: class
     *
     * Description: The Class ButtonPaintHelper. Paints the cancel icon on a
     * white background inside the button's client area.
     */
    private static final class ButtonPaintHelper implements PaintListener {
        @Override
        public void paintControl(PaintEvent event) {
            event.gc.setBackground(event.display.getSystemColor(SWT.COLOR_WHITE));
            event.gc.fillRectangle(event.x, event.y, event.width, event.height);
            event.gc.drawImage(IconUtility.getIconImage(IiconPath.ICO_BAR_CLOSETWO, this.getClass()), event.width / 3,
                    1);
        }
    }

    /**
     * Function disposecomponent: disposes the editor's hosted widget (if any)
     * and then the editor itself.
     *
     * @param editor the e
     */
    private void functionDisposecomponent(TableEditor editor) {
        if (editor != null) {
            if (editor.getEditor() != null) {
                editor.getEditor().dispose();
            }
            editor.dispose();
        }
    }

    /**
     * Sets the export import in progress.
     *
     * @param isExportImportInProgres the new export import in progress
     */
    public static void setExportImportInProgress(boolean isExportImportInProgres) {
        ProgressMonitorControl.isExportImportInProgress = isExportImportInProgres;
    }

    /**
     * The listener interface for receiving statusBarPaint events. The class
     * that is interested in processing a statusBarPaint event implements this
     * interface, and the object created with that class is registered with a
     * component using the component's <code>addStatusBarPaintListener<code>
     * method. When the statusBarPaint event occurs, that object's appropriate
     * method is invoked. Draws the job name centered on its progress bar.
     *
     * StatusBarPaintEvent
     */
    private final class StatusBarPaintListener implements PaintListener {
        private String message;
        private ProgressBar bar;

        /**
         * Instantiates a new status bar paint listener.
         *
         * @param message the message
         * @param bar the bar
         */
        private StatusBarPaintListener(String message, ProgressBar bar) {
            this.message = message;
            this.bar = bar;
        }

        @Override
        public void paintControl(PaintEvent event) {
            String string = message;
            Point point = bar.getSize();
            org.eclipse.swt.graphics.FontMetrics fontMetrics = event.gc.getFontMetrics();
            // Approximate the rendered text extent to center it on the bar.
            int width = fontMetrics.getAverageCharWidth() * string.length();
            int height = fontMetrics.getHeight();
            if (parent.getDisplay() != null) {
                event.gc.setForeground(parent.getDisplay().getSystemColor(SWT.COLOR_BLACK));
            }
            event.gc.drawString(string, (point.x - width) / 2, (point.y - height) / 2, true);
        }
    }

    /**
     * Pre destroy.
     */
    @PreDestroy
    public void preDestroy() {
        UIElement.getInstance().removePartFromStack(UIConstants.UI_PART_PROGRESSBAR_ID);
    }
}
HedgehogCode/javacpp-presets
tritonserver/src/gen/java/org/bytedeco/tritonserver/global/tritonserver.java
<reponame>HedgehogCode/javacpp-presets // Targeted by JavaCPP version 1.5.7-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tritonserver.global; import org.bytedeco.tritonserver.tritonserver.*; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; import static org.bytedeco.javacpp.presets.javacpp.*; import org.bytedeco.cuda.cudart.*; import static org.bytedeco.cuda.global.cudart.*; import org.bytedeco.cuda.cublas.*; import static org.bytedeco.cuda.global.cublas.*; import org.bytedeco.cuda.cudnn.*; import static org.bytedeco.cuda.global.cudnn.*; import org.bytedeco.cuda.nvrtc.*; import static org.bytedeco.cuda.global.nvrtc.*; import org.bytedeco.tensorrt.nvinfer.*; import static org.bytedeco.tensorrt.global.nvinfer.*; import org.bytedeco.tensorrt.nvinfer_plugin.*; import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; import org.bytedeco.tensorrt.nvonnxparser.*; import static org.bytedeco.tensorrt.global.nvonnxparser.*; import org.bytedeco.tensorrt.nvparsers.*; import static org.bytedeco.tensorrt.global.nvparsers.*; public class tritonserver extends org.bytedeco.tritonserver.presets.tritonserver { static { Loader.load(); } // Parsed from tritonserver.h // Copyright 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #pragma once /** \file */ // #include <stdbool.h> // #include <stddef.h> // #include <stdint.h> // #ifdef __cplusplus // #endif // #ifdef _COMPILING_TRITONSERVER // #if defined(_MSC_VER) // #define TRITONSERVER_DECLSPEC __declspec(dllexport) // #elif defined(__GNUC__) // #define TRITONSERVER_DECLSPEC __attribute__((__visibility__("default"))) // #else // #define TRITONSERVER_DECLSPEC // #endif // #else // #if defined(_MSC_VER) // #define TRITONSERVER_DECLSPEC __declspec(dllimport) // #else // #define TRITONSERVER_DECLSPEC // Targeting ../tritonserver/TRITONSERVER_Error.java // Targeting ../tritonserver/TRITONSERVER_InferenceRequest.java // Targeting ../tritonserver/TRITONSERVER_InferenceResponse.java // Targeting ../tritonserver/TRITONSERVER_InferenceTrace.java // Targeting ../tritonserver/TRITONSERVER_Message.java // Targeting ../tritonserver/TRITONSERVER_Metrics.java // Targeting ../tritonserver/TRITONSERVER_ResponseAllocator.java // Targeting ../tritonserver/TRITONSERVER_Server.java // Targeting ../tritonserver/TRITONSERVER_ServerOptions.java /** * TRITONSERVER API Version * * The TRITONSERVER API is versioned with major and minor version * numbers. 
Any change to the API that does not impact backwards * compatibility (for example, adding a non-required function) * increases the minor version number. Any change that breaks * backwards compatibility (for example, deleting or changing the * behavior of a function) increases the major version number. A * client should check that the API version used to compile the * client is compatible with the API version of the Triton shared * library that it is linking against. This is typically done by code * similar to the following which makes sure that the major versions * are equal and that the minor version of the Triton shared library * is >= the minor version used to build the client. * * uint32_t api_version_major, api_version_minor; * TRITONSERVER_ApiVersion(&api_version_major, &api_version_minor); * if ((api_version_major != TRITONSERVER_API_VERSION_MAJOR) || * (api_version_minor < TRITONSERVER_API_VERSION_MINOR)) { * return TRITONSERVER_ErrorNew( * TRITONSERVER_ERROR_UNSUPPORTED, * "triton server API version does not support this client"); * } * */ public static final int TRITONSERVER_API_VERSION_MAJOR = 1; /// public static final int TRITONSERVER_API_VERSION_MINOR = 4; /** Get the TRITONBACKEND API version supported by the Triton shared * library. This value can be compared against the * TRITONSERVER_API_VERSION_MAJOR and TRITONSERVER_API_VERSION_MINOR * used to build the client to ensure that Triton shared library is * compatible with the client. * * @param major Returns the TRITONSERVER API major version supported * by Triton. * @param minor Returns the TRITONSERVER API minor version supported * by Triton. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// public static native TRITONSERVER_Error TRITONSERVER_ApiVersion( @Cast("uint32_t*") IntPointer major, @Cast("uint32_t*") IntPointer minor); public static native TRITONSERVER_Error TRITONSERVER_ApiVersion( @Cast("uint32_t*") IntBuffer major, @Cast("uint32_t*") IntBuffer minor); public static native TRITONSERVER_Error TRITONSERVER_ApiVersion( @Cast("uint32_t*") int[] major, @Cast("uint32_t*") int[] minor); /** TRITONSERVER_DataType * * Tensor data types recognized by TRITONSERVER. * */ /** enum TRITONSERVER_DataType */ public static final int TRITONSERVER_TYPE_INVALID = 0, TRITONSERVER_TYPE_BOOL = 1, TRITONSERVER_TYPE_UINT8 = 2, TRITONSERVER_TYPE_UINT16 = 3, TRITONSERVER_TYPE_UINT32 = 4, TRITONSERVER_TYPE_UINT64 = 5, TRITONSERVER_TYPE_INT8 = 6, TRITONSERVER_TYPE_INT16 = 7, TRITONSERVER_TYPE_INT32 = 8, TRITONSERVER_TYPE_INT64 = 9, TRITONSERVER_TYPE_FP16 = 10, TRITONSERVER_TYPE_FP32 = 11, TRITONSERVER_TYPE_FP64 = 12, TRITONSERVER_TYPE_BYTES = 13; /** Get the string representation of a data type. The returned string * is not owned by the caller and so should not be modified or freed. * * @param datatype The data type. * @return The string representation of the data type. */ /// public static native String TRITONSERVER_DataTypeString( @Cast("TRITONSERVER_DataType") int datatype); /** Get the Triton datatype corresponding to a string representation * of a datatype. * * @param dtype The datatype string representation. * @return The Triton data type or TRITONSERVER_TYPE_INVALID if the * string does not represent a data type. */ /// public static native @Cast("TRITONSERVER_DataType") int TRITONSERVER_StringToDataType(String dtype); public static native @Cast("TRITONSERVER_DataType") int TRITONSERVER_StringToDataType(@Cast("const char*") BytePointer dtype); /** Get the size of a Triton datatype in bytes. Zero is returned for * TRITONSERVER_TYPE_BYTES because it have variable size. Zero is * returned for TRITONSERVER_TYPE_INVALID. * * @param dtype The datatype. 
* @return The size of the datatype. */ /// /// public static native @Cast("uint32_t") int TRITONSERVER_DataTypeByteSize(@Cast("TRITONSERVER_DataType") int datatype); /** TRITONSERVER_MemoryType * * Types of memory recognized by TRITONSERVER. * */ /** enum TRITONSERVER_MemoryType */ public static final int TRITONSERVER_MEMORY_CPU = 0, TRITONSERVER_MEMORY_CPU_PINNED = 1, TRITONSERVER_MEMORY_GPU = 2; /** Get the string representation of a memory type. The returned * string is not owned by the caller and so should not be modified or * freed. * * @param memtype The memory type. * @return The string representation of the memory type. */ /// /// public static native String TRITONSERVER_MemoryTypeString( @Cast("TRITONSERVER_MemoryType") int memtype); /** TRITONSERVER_ParameterType * * Types of parameters recognized by TRITONSERVER. * */ /** enum TRITONSERVER_ParameterType */ public static final int TRITONSERVER_PARAMETER_STRING = 0, TRITONSERVER_PARAMETER_INT = 1, TRITONSERVER_PARAMETER_BOOL = 2; /** Get the string representation of a parmeter type. The returned * string is not owned by the caller and so should not be modified or * freed. * * @param paramtype The parameter type. * @return The string representation of the parameter type. */ /// /// public static native String TRITONSERVER_ParameterTypeString( @Cast("TRITONSERVER_ParameterType") int paramtype); /** TRITONSERVER_InstanceGroupKind * * Kinds of instance groups recognized by TRITONSERVER. * */ /** enum TRITONSERVER_InstanceGroupKind */ public static final int TRITONSERVER_INSTANCEGROUPKIND_AUTO = 0, TRITONSERVER_INSTANCEGROUPKIND_CPU = 1, TRITONSERVER_INSTANCEGROUPKIND_GPU = 2, TRITONSERVER_INSTANCEGROUPKIND_MODEL = 3; /** Get the string representation of an instance-group kind. The * returned string is not owned by the caller and so should not be * modified or freed. * * @param kind The instance-group kind. * @return The string representation of the kind. 
*/ /// /// public static native String TRITONSERVER_InstanceGroupKindString( @Cast("TRITONSERVER_InstanceGroupKind") int kind); /** TRITONSERVER_Logging * * Types/levels of logging. * */ /** enum TRITONSERVER_LogLevel */ public static final int TRITONSERVER_LOG_INFO = 0, TRITONSERVER_LOG_WARN = 1, TRITONSERVER_LOG_ERROR = 2, TRITONSERVER_LOG_VERBOSE = 3; /** Is a log level enabled? * * @param level The log level. * @return True if the log level is enabled, false if not enabled. */ /// public static native @Cast("bool") boolean TRITONSERVER_LogIsEnabled( @Cast("TRITONSERVER_LogLevel") int level); /** Log a message at a given log level if that level is enabled. * * @param level The log level. * @param filename The file name of the location of the log message. * @param line The line number of the log message. * @param msg The log message. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONSERVER_LogMessage( @Cast("TRITONSERVER_LogLevel") int level, String filename, int line, String msg); public static native TRITONSERVER_Error TRITONSERVER_LogMessage( @Cast("TRITONSERVER_LogLevel") int level, @Cast("const char*") BytePointer filename, int line, @Cast("const char*") BytePointer msg); /** TRITONSERVER_Error * * Errors are reported by a TRITONSERVER_Error object. A NULL * TRITONSERVER_Error indicates no error, a non-NULL TRITONSERVER_Error * indicates error and the code and message for the error can be * retrieved from the object. * * The caller takes ownership of a TRITONSERVER_Error object returned by * the API and must call TRITONSERVER_ErrorDelete to release the object. 
* <p> * The TRITONSERVER_Error error codes */ /** enum TRITONSERVER_Error_Code */ public static final int TRITONSERVER_ERROR_UNKNOWN = 0, TRITONSERVER_ERROR_INTERNAL = 1, TRITONSERVER_ERROR_NOT_FOUND = 2, TRITONSERVER_ERROR_INVALID_ARG = 3, TRITONSERVER_ERROR_UNAVAILABLE = 4, TRITONSERVER_ERROR_UNSUPPORTED = 5, TRITONSERVER_ERROR_ALREADY_EXISTS = 6; /** Create a new error object. The caller takes ownership of the * TRITONSERVER_Error object and must call TRITONSERVER_ErrorDelete to * release the object. * * @param code The error code. * @param msg The error message. * @return A new TRITONSERVER_Error object. */ /// public static native TRITONSERVER_Error TRITONSERVER_ErrorNew( @Cast("TRITONSERVER_Error_Code") int code, String msg); public static native TRITONSERVER_Error TRITONSERVER_ErrorNew( @Cast("TRITONSERVER_Error_Code") int code, @Cast("const char*") BytePointer msg); /** Delete an error object. * * @param error The error object. */ /// public static native void TRITONSERVER_ErrorDelete(TRITONSERVER_Error error); /** Get the error code. * * @param error The error object. * @return The error code. */ /// public static native @Cast("TRITONSERVER_Error_Code") int TRITONSERVER_ErrorCode(TRITONSERVER_Error error); /** Get the string representation of an error code. The returned * string is not owned by the caller and so should not be modified or * freed. The lifetime of the returned string extends only as long as * 'error' and must not be accessed once 'error' is deleted. * * @param error The error object. * @return The string representation of the error code. */ /// public static native String TRITONSERVER_ErrorCodeString( TRITONSERVER_Error error); /** Get the error message. The returned string is not owned by the * caller and so should not be modified or freed. The lifetime of the * returned string extends only as long as 'error' and must not be * accessed once 'error' is deleted. * * @param error The error object. * @return The error message. 
*/ /// /// /// public static native String TRITONSERVER_ErrorMessage( TRITONSERVER_Error error); // Targeting ../tritonserver/TRITONSERVER_ResponseAllocatorAllocFn_t.java // Targeting ../tritonserver/TRITONSERVER_ResponseAllocatorReleaseFn_t.java // Targeting ../tritonserver/TRITONSERVER_ResponseAllocatorStartFn_t.java /** Create a new response allocator object. * * The response allocator object is used by Triton to allocate * buffers to hold the output tensors in inference responses. Most * models generate a single response for each inference request * (TRITONSERVER_TXN_ONE_TO_ONE). For these models the order of * callbacks will be: * * TRITONSERVER_ServerInferAsync called * - start_fn : optional (and typically not required) * - alloc_fn : called once for each output tensor in response * TRITONSERVER_InferenceResponseDelete called * - release_fn: called once for each output tensor in response * * For models that generate multiple responses for each inference * request (TRITONSERVER_TXN_DECOUPLED), the start_fn callback can be * used to determine sets of alloc_fn callbacks that belong to the * same response: * * TRITONSERVER_ServerInferAsync called * - start_fn * - alloc_fn : called once for each output tensor in response * - start_fn * - alloc_fn : called once for each output tensor in response * ... * For each response, TRITONSERVER_InferenceResponseDelete called * - release_fn: called once for each output tensor in the response * * In all cases the start_fn, alloc_fn and release_fn callback * functions must be thread-safe. Typically making these functions * thread-safe does not require explicit locking. The recommended way * to implement these functions is to have each inference request * provide a 'response_allocator_userp' object that is unique to that * request with TRITONSERVER_InferenceRequestSetResponseCallback. The * callback functions then operate only on this unique state. 
Locking * is required only when the callback function needs to access state * that is shared across inference requests (for example, a common * allocation pool). * * @param allocator Returns the new response allocator object. * @param alloc_fn The function to call to allocate buffers for result * tensors. * @param release_fn The function to call when the server no longer * holds a reference to an allocated buffer. * @param start_fn The function to call to indicate that the * subsequent 'alloc_fn' calls are for a new response. This callback * is optional (use nullptr to indicate that it should not be * invoked). <p> * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ResponseAllocatorNew( @Cast("TRITONSERVER_ResponseAllocator**") PointerPointer allocator, TRITONSERVER_ResponseAllocatorAllocFn_t alloc_fn, TRITONSERVER_ResponseAllocatorReleaseFn_t release_fn, TRITONSERVER_ResponseAllocatorStartFn_t start_fn); public static native TRITONSERVER_Error TRITONSERVER_ResponseAllocatorNew( @ByPtrPtr TRITONSERVER_ResponseAllocator allocator, TRITONSERVER_ResponseAllocatorAllocFn_t alloc_fn, TRITONSERVER_ResponseAllocatorReleaseFn_t release_fn, TRITONSERVER_ResponseAllocatorStartFn_t start_fn); /** Delete a response allocator. * * @param allocator The response allocator object. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONSERVER_ResponseAllocatorDelete( TRITONSERVER_ResponseAllocator allocator); /** TRITONSERVER_Message * * Object representing a Triton Server message. * <p> * Create a new message object from serialized JSON string. * * @param message The message object. * @param base The base of the serialized JSON. * @param byte_size The size, in bytes, of the serialized message. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_MessageNewFromSerializedJson( @Cast("TRITONSERVER_Message**") PointerPointer message, String base, @Cast("size_t") long byte_size); public static native TRITONSERVER_Error TRITONSERVER_MessageNewFromSerializedJson( @ByPtrPtr TRITONSERVER_Message message, String base, @Cast("size_t") long byte_size); public static native TRITONSERVER_Error TRITONSERVER_MessageNewFromSerializedJson( @ByPtrPtr TRITONSERVER_Message message, @Cast("const char*") BytePointer base, @Cast("size_t") long byte_size); /** Delete a message object. * * @param message The message object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_MessageDelete( TRITONSERVER_Message message); /** Get the base and size of the buffer containing the serialized * message in JSON format. The buffer is owned by the * TRITONSERVER_Message object and should not be modified or freed by * the caller. The lifetime of the buffer extends only as long as * 'message' and must not be accessed once 'message' is deleted. * * @param message The message object. * @param base Returns the base of the serialized message. * @param byte_size Returns the size, in bytes, of the serialized * message. * @return a TRITONSERVER_Error indicating success or failure. 
*/
///
///
public static native TRITONSERVER_Error TRITONSERVER_MessageSerializeToJson(
    TRITONSERVER_Message message, @Cast("const char**") PointerPointer base,
    @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MessageSerializeToJson(
    TRITONSERVER_Message message, @Cast("const char**") @ByPtrPtr BytePointer base,
    @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MessageSerializeToJson(
    TRITONSERVER_Message message, @Cast("const char**") @ByPtrPtr ByteBuffer base,
    @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MessageSerializeToJson(
    TRITONSERVER_Message message, @Cast("const char**") @ByPtrPtr byte[] base,
    @Cast("size_t*") SizeTPointer byte_size);

/** TRITONSERVER_Metrics
 *
 * Object representing metrics.
 * <p>
 * Metric format types */
/** enum TRITONSERVER_MetricFormat */
public static final int
  TRITONSERVER_METRIC_PROMETHEUS = 0;

/** Delete a metrics object.
 *
 * @param metrics The metrics object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
///
///
public static native TRITONSERVER_Error TRITONSERVER_MetricsDelete(
    TRITONSERVER_Metrics metrics);

/** Get a buffer containing the metrics in the specified format. For
 * each format the buffer contains the following:
 *
 * TRITONSERVER_METRIC_PROMETHEUS: 'base' points to a single multiline
 * string (char*) that gives a text representation of the metrics in
 * prometheus format. 'byte_size' returns the length of the string
 * in bytes.
 *
 * The buffer is owned by the 'metrics' object and should not be
 * modified or freed by the caller. The lifetime of the buffer
 * extends only as long as 'metrics' and must not be accessed once
 * 'metrics' is deleted.
 *
 * @param metrics The metrics object.
 * @param format The format to use for the returned metrics.
 * @param base Returns a pointer to the base of the formatted
 * metrics, as described above.
 * @param byte_size Returns the size, in bytes, of the formatted
 * metrics.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
///
public static native TRITONSERVER_Error TRITONSERVER_MetricsFormatted(
    TRITONSERVER_Metrics metrics, @Cast("TRITONSERVER_MetricFormat") int format,
    @Cast("const char**") PointerPointer base, @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MetricsFormatted(
    TRITONSERVER_Metrics metrics, @Cast("TRITONSERVER_MetricFormat") int format,
    @Cast("const char**") @ByPtrPtr BytePointer base, @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MetricsFormatted(
    TRITONSERVER_Metrics metrics, @Cast("TRITONSERVER_MetricFormat") int format,
    @Cast("const char**") @ByPtrPtr ByteBuffer base, @Cast("size_t*") SizeTPointer byte_size);
public static native TRITONSERVER_Error TRITONSERVER_MetricsFormatted(
    TRITONSERVER_Metrics metrics, @Cast("TRITONSERVER_MetricFormat") int format,
    @Cast("const char**") @ByPtrPtr byte[] base, @Cast("size_t*") SizeTPointer byte_size);

/** TRITONSERVER_InferenceTrace
 *
 * Object that represents tracing for an inference request.
 * <p>
 * Trace levels */
/** enum TRITONSERVER_InferenceTraceLevel */
public static final int
  TRITONSERVER_TRACE_LEVEL_DISABLED = 0,
  TRITONSERVER_TRACE_LEVEL_MIN = 1,
  TRITONSERVER_TRACE_LEVEL_MAX = 2;

/** Get the string representation of a trace level. The returned
 * string is not owned by the caller and so should not be modified or
 * freed.
 *
 * @param level The trace level.
 * @return The string representation of the trace level. */
public static native String TRITONSERVER_InferenceTraceLevelString(
    @Cast("TRITONSERVER_InferenceTraceLevel") int level);

// Trace activities
/** enum TRITONSERVER_InferenceTraceActivity */
public static final int
  TRITONSERVER_TRACE_REQUEST_START = 0,
  TRITONSERVER_TRACE_QUEUE_START = 1,
  TRITONSERVER_TRACE_COMPUTE_START = 2,
  TRITONSERVER_TRACE_COMPUTE_INPUT_END = 3,
  TRITONSERVER_TRACE_COMPUTE_OUTPUT_START = 4,
  TRITONSERVER_TRACE_COMPUTE_END = 5,
  TRITONSERVER_TRACE_REQUEST_END = 6;

/** Get the string representation of a trace activity. The returned
 * string is not owned by the caller and so should not be modified or
 * freed.
 *
 * @param activity The trace activity.
 * @return The string representation of the trace activity. */
public static native String TRITONSERVER_InferenceTraceActivityString(
    @Cast("TRITONSERVER_InferenceTraceActivity") int activity);
// Targeting ../tritonserver/TRITONSERVER_InferenceTraceActivityFn_t.java


// Targeting ../tritonserver/TRITONSERVER_InferenceTraceReleaseFn_t.java


/** Create a new inference trace object. The caller takes ownership of
 * the TRITONSERVER_InferenceTrace object and must call
 * TRITONSERVER_InferenceTraceDelete to release the object.
 *
 * The activity callback function will be called to report activity
 * for 'trace' as well as for any child traces that are spawned by
 * 'trace', and so the activity callback must check the trace object
 * to determine specifically what activity is being reported.
 *
 * The release callback is called for both 'trace' and for any child
 * traces spawned by 'trace'.
 *
 * @param trace Returns the new inference trace object.
 * @param level The tracing level.
 * @param parent_id The parent trace id for this trace. A value of 0
 * indicates that there is no parent trace.
 * @param activity_fn The callback function where activity for the
 * trace is reported.
 * @param release_fn The callback function called when all activity
 * is complete for the trace.
 * @param trace_userp User-provided pointer that is delivered to
 * the activity and release callback functions.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceNew(
    @Cast("TRITONSERVER_InferenceTrace**") PointerPointer trace,
    @Cast("TRITONSERVER_InferenceTraceLevel") int level, @Cast("uint64_t") long parent_id,
    TRITONSERVER_InferenceTraceActivityFn_t activity_fn,
    TRITONSERVER_InferenceTraceReleaseFn_t release_fn, Pointer trace_userp);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceNew(
    @ByPtrPtr TRITONSERVER_InferenceTrace trace,
    @Cast("TRITONSERVER_InferenceTraceLevel") int level, @Cast("uint64_t") long parent_id,
    TRITONSERVER_InferenceTraceActivityFn_t activity_fn,
    TRITONSERVER_InferenceTraceReleaseFn_t release_fn, Pointer trace_userp);

/** Delete a trace object.
 *
 * @param trace The trace object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceDelete(
    TRITONSERVER_InferenceTrace trace);

/** Get the id associated with a trace. Every trace is assigned an id
 * that is unique across all traces created for a Triton server.
 *
 * @param trace The trace.
 * @param id Returns the id associated with the trace.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") LongPointer id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") LongBuffer id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") long[] id);

/** Get the parent id associated with a trace. The parent id indicates
 * a parent-child relationship between two traces. A parent id value
 * of 0 indicates that there is no parent trace.
*
 * @param trace The trace.
 * @param id Returns the parent id associated with the trace.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceParentId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") LongPointer parent_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceParentId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") LongBuffer parent_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceParentId(
    TRITONSERVER_InferenceTrace trace, @Cast("uint64_t*") long[] parent_id);

/** Get the name of the model associated with a trace. The caller does
 * not own the returned string and must not modify or delete it. The
 * lifetime of the returned string extends only as long as 'trace'.
 *
 * @param trace The trace.
 * @param model_name Returns the name of the model associated with
 * the trace.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelName(
    TRITONSERVER_InferenceTrace trace, @Cast("const char**") PointerPointer model_name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelName(
    TRITONSERVER_InferenceTrace trace, @Cast("const char**") @ByPtrPtr BytePointer model_name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelName(
    TRITONSERVER_InferenceTrace trace, @Cast("const char**") @ByPtrPtr ByteBuffer model_name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelName(
    TRITONSERVER_InferenceTrace trace, @Cast("const char**") @ByPtrPtr byte[] model_name);

/** Get the version of the model associated with a trace.
 *
 * @param trace The trace.
 * @param model_version Returns the version of the model associated
 * with the trace.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelVersion(
    TRITONSERVER_InferenceTrace trace, @Cast("int64_t*") LongPointer model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelVersion(
    TRITONSERVER_InferenceTrace trace, @Cast("int64_t*") LongBuffer model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceTraceModelVersion(
    TRITONSERVER_InferenceTrace trace, @Cast("int64_t*") long[] model_version);

/** TRITONSERVER_InferenceRequest
 *
 * Object representing an inference request. The inference request
 * provides the meta-data and input tensor values needed for an
 * inference and returns the inference result meta-data and output
 * tensors. An inference request object can be modified and reused
 * multiple times.
 * <p>
 * Inference request flags. The enum values must be power-of-2 values. */
/** enum TRITONSERVER_RequestFlag */
public static final int
  TRITONSERVER_REQUEST_FLAG_SEQUENCE_START = 1,
  TRITONSERVER_REQUEST_FLAG_SEQUENCE_END = 2;

/** Inference request release flags. The enum values must be
 * power-of-2 values. */
/** enum TRITONSERVER_RequestReleaseFlag */
public static final int
  TRITONSERVER_REQUEST_RELEASE_ALL = 1;

/** Inference response complete flags. The enum values must be
 * power-of-2 values. */
/** enum TRITONSERVER_ResponseCompleteFlag */
public static final int
  TRITONSERVER_RESPONSE_COMPLETE_FINAL = 1;
// Targeting ../tritonserver/TRITONSERVER_InferenceRequestReleaseFn_t.java


// Targeting ../tritonserver/TRITONSERVER_InferenceResponseCompleteFn_t.java


/** Create a new inference request object.
 *
 * @param inference_request Returns the new request object.
 * @param server the inference server object.
 * @param model_name The name of the model to use for the request.
 * @param model_version The version of the model to use for the
 * request. If -1 then the server will choose a version based on the
 * model's policy.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestNew(
    @Cast("TRITONSERVER_InferenceRequest**") PointerPointer inference_request,
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestNew(
    @ByPtrPtr TRITONSERVER_InferenceRequest inference_request,
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestNew(
    @ByPtrPtr TRITONSERVER_InferenceRequest inference_request,
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version);

/** Delete an inference request object.
 *
 * @param inference_request The request object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestDelete(
    TRITONSERVER_InferenceRequest inference_request);

/** Get the ID for a request. The returned ID is owned by
 * 'inference_request' and must not be modified or freed by the
 * caller.
 *
 * @param inference_request The request object.
 * @param id Returns the ID.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char**") PointerPointer id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char**") @ByPtrPtr BytePointer id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char**") @ByPtrPtr ByteBuffer id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char**") @ByPtrPtr byte[] id);

/** Set the ID for a request.
 *
 * @param inference_request The request object.
 * @param id The ID.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetId(
    TRITONSERVER_InferenceRequest inference_request, String id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer id);

/** Get the flag(s) associated with a request. On return 'flags' holds
 * a bitwise-or of all flag values, see TRITONSERVER_RequestFlag for
 * available flags.
 *
 * @param inference_request The request object.
 * @param flags Returns the flags.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestFlags(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") IntPointer flags);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestFlags(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") IntBuffer flags);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestFlags(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") int[] flags);

/** Set the flag(s) associated with a request.
* 'flags' should hold a
 * bitwise-or of all flag values, see TRITONSERVER_RequestFlag for
 * available flags.
 *
 * @param inference_request The request object.
 * @param flags The flags.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetFlags(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t") int flags);

/** Get the correlation ID of the inference request. Default is 0,
 * which indicates that the request has no correlation ID. The
 * correlation ID is used to indicate two or more inference requests
 * are related to each other. How this relationship is handled by the
 * inference server is determined by the model's scheduling
 * policy.
 *
 * @param inference_request The request object.
 * @param correlation_id Returns the correlation ID.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestCorrelationId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") LongPointer correlation_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestCorrelationId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") LongBuffer correlation_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestCorrelationId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") long[] correlation_id);

/** Set the correlation ID of the inference request. Default is 0, which
 * indicates that the request has no correlation ID. The correlation ID
 * is used to indicate two or more inference requests are related to
 * each other. How this relationship is handled by the inference
 * server is determined by the model's scheduling policy.
 *
 * @param inference_request The request object.
 * @param correlation_id The correlation ID.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetCorrelationId(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t") long correlation_id);

/** Get the priority for a request. The default is 0 indicating that
 * the request does not specify a priority and so will use the
 * model's default priority.
 *
 * @param inference_request The request object.
 * @param priority Returns the priority level.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestPriority(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") IntPointer priority);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestPriority(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") IntBuffer priority);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestPriority(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t*") int[] priority);

/** Set the priority for a request. The default is 0 indicating that
 * the request does not specify a priority and so will use the
 * model's default priority.
 *
 * @param inference_request The request object.
 * @param priority The priority level.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetPriority(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint32_t") int priority);

/** Get the timeout for a request, in microseconds. The default is 0
 * which indicates that the request has no timeout.
 *
 * @param inference_request The request object.
 * @param timeout_us Returns the timeout, in microseconds.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestTimeoutMicroseconds(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") LongPointer timeout_us);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestTimeoutMicroseconds(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") LongBuffer timeout_us);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestTimeoutMicroseconds(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t*") long[] timeout_us);

/** Set the timeout for a request, in microseconds. The default is 0
 * which indicates that the request has no timeout.
 *
 * @param inference_request The request object.
 * @param timeout_us The timeout, in microseconds.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetTimeoutMicroseconds(
    TRITONSERVER_InferenceRequest inference_request, @Cast("uint64_t") long timeout_us);

/** Add an input to a request.
 *
 * @param inference_request The request object.
 * @param name The name of the input.
 * @param datatype The type of the input. Valid type names are BOOL,
 * UINT8, UINT16, UINT32, UINT64, INT8, INT16, INT32, INT64, FP16,
 * FP32, FP64, and BYTES.
 * @param shape The shape of the input.
 * @param dim_count The number of dimensions of 'shape'.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, String name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongPointer shape,
    @Cast("uint64_t") long dim_count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongBuffer shape,
    @Cast("uint64_t") long dim_count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, String name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") long[] shape,
    @Cast("uint64_t") long dim_count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongPointer shape,
    @Cast("uint64_t") long dim_count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, String name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongBuffer shape,
    @Cast("uint64_t") long dim_count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddInput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name,
    @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") long[] shape,
    @Cast("uint64_t") long dim_count);

/** Remove an input from a request.
 *
 * @param inference_request The request object.
 * @param name The name of the input.
 * @return a TRITONSERVER_Error indicating success or failure.
*/
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveInput(
    TRITONSERVER_InferenceRequest inference_request, String name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveInput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name);

/** Remove all inputs from a request.
 *
 * @param inference_request The request object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveAllInputs(
    TRITONSERVER_InferenceRequest inference_request);

/** Assign a buffer of data to an input. The buffer will be appended
 * to any existing buffers for that input. The 'inference_request'
 * object takes ownership of the buffer and so the caller should not
 * modify or free the buffer until that ownership is released by
 * 'inference_request' being deleted or by the input being removed
 * from 'inference_request'.
 *
 * @param inference_request The request object.
 * @param name The name of the input.
 * @param base The base address of the input data.
 * @param byte_size The size, in bytes, of the input data.
 * @param memory_type The memory type of the input data.
 * @param memory_type_id The memory type id of the input data.
 * @return a TRITONSERVER_Error indicating success or failure. */
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAppendInputData(
    TRITONSERVER_InferenceRequest inference_request, String name,
    @Const Pointer base, @Cast("size_t") long byte_size,
    @Cast("TRITONSERVER_MemoryType") int memory_type, @Cast("int64_t") long memory_type_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAppendInputData(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name,
    @Const Pointer base, @Cast("size_t") long byte_size,
    @Cast("TRITONSERVER_MemoryType") int memory_type, @Cast("int64_t") long memory_type_id);

/** Assign a buffer of data to an input for execution on all model instances
 * with the specified host policy. The buffer will be appended to any existing
 * buffers for that input on all devices with this host policy. The
 * 'inference_request' object takes ownership of the buffer and so the caller
 * should not modify or free the buffer until that ownership is released by
 * 'inference_request' being deleted or by the input being removed from
 * 'inference_request'. If the execution is scheduled on a device that does not
 * have an input buffer specified using this function, then the input buffer
 * specified with TRITONSERVER_InferenceRequestAppendInputData will be used so
 * a non-host policy specific version of data must be added using that API.
 * @param inference_request The request object.
 * @param name The name of the input.
 * @param base The base address of the input data.
 * @param byte_size The size, in bytes, of the input data.
 * @param memory_type The memory type of the input data.
 * @param memory_type_id The memory type id of the input data.
 * @param host_policy_name All model instances executing with this host_policy
 * will use this input buffer for execution.
 * @return a TRITONSERVER_Error indicating success or failure. */
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy(
    TRITONSERVER_InferenceRequest inference_request, String name,
    @Const Pointer base, @Cast("size_t") long byte_size,
    @Cast("TRITONSERVER_MemoryType") int memory_type, @Cast("int64_t") long memory_type_id,
    String host_policy_name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name,
    @Const Pointer base, @Cast("size_t") long byte_size,
    @Cast("TRITONSERVER_MemoryType") int memory_type, @Cast("int64_t") long memory_type_id,
    @Cast("const char*") BytePointer host_policy_name);

/** Clear all input data from an input, releasing ownership of the
 * buffer(s) that were appended to the input with
 * TRITONSERVER_InferenceRequestAppendInputData or
 * TRITONSERVER_InferenceRequestAppendInputDataWithHostPolicy
 * @param inference_request The request object.
 * @param name The name of the input. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveAllInputData(
    TRITONSERVER_InferenceRequest inference_request, String name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveAllInputData(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name);

/** Add an output request to an inference request.
 *
 * @param inference_request The request object.
 * @param name The name of the output.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddRequestedOutput(
    TRITONSERVER_InferenceRequest inference_request, String name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestAddRequestedOutput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name);

/** Remove an output request from an inference request.
 *
 * @param inference_request The request object.
 * @param name The name of the output.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveRequestedOutput(
    TRITONSERVER_InferenceRequest inference_request, String name);
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveRequestedOutput(
    TRITONSERVER_InferenceRequest inference_request, @Cast("const char*") BytePointer name);

/** Remove all output requests from an inference request.
 *
 * @param inference_request The request object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestRemoveAllRequestedOutputs(
    TRITONSERVER_InferenceRequest inference_request);

/** Set the release callback for an inference request. The release
 * callback is called by Triton to return ownership of the request
 * object.
 *
 * @param inference_request The request object.
 * @param request_release_fn The function called to return ownership
 * of the 'inference_request' object.
 * @param request_release_userp User-provided pointer that is
 * delivered to the 'request_release_fn' callback.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetReleaseCallback(
    TRITONSERVER_InferenceRequest inference_request,
    TRITONSERVER_InferenceRequestReleaseFn_t request_release_fn,
    Pointer request_release_userp);

/** Set the allocator and response callback for an inference
 * request. The allocator is used to allocate buffers for any output
 * tensors included in responses that are produced for this
 * request. The response callback is called to return response
 * objects representing responses produced for this request.
 *
 * @param inference_request The request object.
 * @param response_allocator The TRITONSERVER_ResponseAllocator to use
 * to allocate buffers to hold inference results.
* @param response_allocator_userp User-provided pointer that is
 * delivered to the response allocator's start and allocation functions.
 * @param response_fn The function called to deliver an inference
 * response for this request.
 * @param response_userp User-provided pointer that is delivered to
 * the 'response_fn' callback.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
///
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceRequestSetResponseCallback(
    TRITONSERVER_InferenceRequest inference_request,
    TRITONSERVER_ResponseAllocator response_allocator,
    Pointer response_allocator_userp,
    TRITONSERVER_InferenceResponseCompleteFn_t response_fn, Pointer response_userp);

/** TRITONSERVER_InferenceResponse
 *
 * Object representing an inference response. The inference response
 * provides the meta-data and output tensor values calculated by the
 * inference.
 * <p>
 * Delete an inference response object.
 *
 * @param inference_response The response object.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseDelete(
    TRITONSERVER_InferenceResponse inference_response);

/** Return the error status of an inference response. Return a
 * TRITONSERVER_Error object on failure, return nullptr on success.
 * The returned error object is owned by 'inference_response' and so
 * should not be deleted by the caller.
 *
 * @param inference_response The response object.
 * @return a TRITONSERVER_Error indicating the success or failure
 * status of the response. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseError(
    TRITONSERVER_InferenceResponse inference_response);

/** Get model used to produce a response. The caller does not own the
 * returned model name value and must not modify or delete it. The
 * lifetime of all returned values extends until 'inference_response'
 * is deleted.
 *
 * @param inference_response The response object.
 * @param model_name Returns the name of the model.
 * @param model_version Returns the version of the model.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseModel(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") PointerPointer model_name,
    @Cast("int64_t*") LongPointer model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseModel(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr BytePointer model_name,
    @Cast("int64_t*") LongPointer model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseModel(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr ByteBuffer model_name,
    @Cast("int64_t*") LongBuffer model_version);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseModel(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr byte[] model_name,
    @Cast("int64_t*") long[] model_version);

/** Get the ID of the request corresponding to a response. The caller
 * does not own the returned ID and must not modify or delete it. The
 * lifetime of all returned values extends until 'inference_response'
 * is deleted.
 *
 * @param inference_response The response object.
 * @param request_id Returns the ID of the request corresponding to
 * this response.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseId(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") PointerPointer request_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseId(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr BytePointer request_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseId(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr ByteBuffer request_id);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseId(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const char**") @ByPtrPtr byte[] request_id);

/** Get the number of parameters available in the response.
 *
 * @param inference_response The response object.
 * @param count Returns the number of parameters.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
///
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameterCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") IntPointer count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameterCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") IntBuffer count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameterCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") int[] count);

/** Get all information about a parameter. The caller does not own any
 * of the returned values and must not modify or delete them. The
 * lifetime of all returned values extends until 'inference_response'
 * is deleted.
 *
 * The 'vvalue' returns a void* pointer that must be cast
 * appropriately based on 'type'. For example:
 *
 *   void* vvalue;
 *   TRITONSERVER_ParameterType type;
 *   TRITONSERVER_InferenceResponseParameter(
 *       response, index, &name, &type, &vvalue);
 *   switch (type) {
 *     case TRITONSERVER_PARAMETER_BOOL:
 *       bool value = *(reinterpret_cast<bool*>(vvalue));
 *       ...
 *     case TRITONSERVER_PARAMETER_INT:
 *       int64_t value = *(reinterpret_cast<int64_t*>(vvalue));
 *       ...
 *     case TRITONSERVER_PARAMETER_STRING:
 *       const char* value = reinterpret_cast<const char*>(vvalue);
 *       ...
 *
 * @param inference_response The response object.
 * @param index The index of the parameter, must be 0 <= index <
 * count, where 'count' is the value returned by
 * TRITONSERVER_InferenceResponseParameterCount.
 * @param name Returns the name of the parameter.
 * @param type Returns the type of the parameter.
 * @param vvalue Returns a pointer to the parameter value.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameter(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index,
    @Cast("const char**") PointerPointer name, @Cast("TRITONSERVER_ParameterType*") IntPointer type,
    @Cast("const void**") PointerPointer vvalue);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameter(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index,
    @Cast("const char**") @ByPtrPtr BytePointer name, @Cast("TRITONSERVER_ParameterType*") IntPointer type,
    @Cast("const void**") @ByPtrPtr Pointer vvalue);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameter(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index,
    @Cast("const char**") @ByPtrPtr ByteBuffer name, @Cast("TRITONSERVER_ParameterType*") IntBuffer type,
    @Cast("const void**") @ByPtrPtr Pointer vvalue);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseParameter(
    TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index,
    @Cast("const char**") @ByPtrPtr byte[] name, @Cast("TRITONSERVER_ParameterType*") int[] type,
    @Cast("const void**") @ByPtrPtr Pointer vvalue);

/** Get the number of outputs available in the response.
 *
 * @param inference_response The response object.
 * @param count Returns the number of output tensors.
 * @return a TRITONSERVER_Error indicating success or failure. */
///
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") IntPointer count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") IntBuffer count);
public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputCount(
    TRITONSERVER_InferenceResponse inference_response, @Cast("uint32_t*") int[] count);

/** Get all information about an output tensor. The tensor data is
 * returned as the base pointer to the data and the size, in bytes,
 * of the data. The caller does not own any of the returned values
 * and must not modify or delete them. The lifetime of all returned
 * values extends until 'inference_response' is deleted.
 *
 * @param inference_response The response object.
 * @param index The index of the output tensor, must be 0 <= index <
 * count, where 'count' is the value returned by
 * TRITONSERVER_InferenceResponseOutputCount.
 * @param name Returns the name of the output.
 * @param datatype Returns the type of the output.
 * @param shape Returns the shape of the output.
 * @param dim_count Returns the number of dimensions of the returned
 * shape.
 * @param base Returns the tensor data for the output.
 * @param byte_size Returns the size, in bytes, of the data.
 * @param memory_type Returns the memory type of the data.
 * @param memory_type_id Returns the memory type id of the data.
* @param userp The user-specified value associated with the buffer * in TRITONSERVER_ResponseAllocatorAllocFn_t. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutput( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const char**") PointerPointer name, @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") PointerPointer shape, @Cast("uint64_t*") LongPointer dim_count, @Cast("const void**") PointerPointer base, @Cast("size_t*") SizeTPointer byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id, @Cast("void**") PointerPointer userp); public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutput( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer name, @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") @ByPtrPtr LongPointer shape, @Cast("uint64_t*") LongPointer dim_count, @Cast("const void**") @ByPtrPtr Pointer base, @Cast("size_t*") SizeTPointer byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id, @Cast("void**") @ByPtrPtr Pointer userp); public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutput( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer name, @Cast("TRITONSERVER_DataType*") IntBuffer datatype, @Cast("const int64_t**") @ByPtrPtr LongBuffer shape, @Cast("uint64_t*") LongBuffer dim_count, @Cast("const void**") @ByPtrPtr Pointer base, @Cast("size_t*") SizeTPointer byte_size, @Cast("TRITONSERVER_MemoryType*") IntBuffer memory_type, @Cast("int64_t*") LongBuffer memory_type_id, @Cast("void**") @ByPtrPtr Pointer userp); public static native TRITONSERVER_Error 
TRITONSERVER_InferenceResponseOutput( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] name, @Cast("TRITONSERVER_DataType*") int[] datatype, @Cast("const int64_t**") @ByPtrPtr long[] shape, @Cast("uint64_t*") long[] dim_count, @Cast("const void**") @ByPtrPtr Pointer base, @Cast("size_t*") SizeTPointer byte_size, @Cast("TRITONSERVER_MemoryType*") int[] memory_type, @Cast("int64_t*") long[] memory_type_id, @Cast("void**") @ByPtrPtr Pointer userp); /** Get a classification label associated with an output for a given * index. The caller does not own the returned label and must not * modify or delete it. The lifetime of all returned label extends * until 'inference_response' is deleted. * * @param inference_response The response object. * @param index The index of the output tensor, must be 0 <= index < * count, where 'count' is the value returned by * TRITONSERVER_InferenceResponseOutputCount. * @param class_index The index of the class. * @param name Returns the label corresponding to 'class_index' or * nullptr if no label. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputClassificationLabel( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const size_t") long class_index, @Cast("const char**") PointerPointer label); public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputClassificationLabel( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const size_t") long class_index, @Cast("const char**") @ByPtrPtr BytePointer label); public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputClassificationLabel( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const size_t") long class_index, @Cast("const char**") @ByPtrPtr ByteBuffer label); public static native TRITONSERVER_Error TRITONSERVER_InferenceResponseOutputClassificationLabel( TRITONSERVER_InferenceResponse inference_response, @Cast("const uint32_t") int index, @Cast("const size_t") long class_index, @Cast("const char**") @ByPtrPtr byte[] label); /** TRITONSERVER_ServerOptions * * Options to use when creating an inference server. * <p> * Model control modes */ /** enum TRITONSERVER_ModelControlMode */ public static final int TRITONSERVER_MODEL_CONTROL_NONE = 0, TRITONSERVER_MODEL_CONTROL_POLL = 1, TRITONSERVER_MODEL_CONTROL_EXPLICIT = 2; /** Rate limit modes */ /** enum TRITONSERVER_RateLimitMode */ public static final int TRITONSERVER_RATE_LIMIT_OFF = 0, TRITONSERVER_RATE_LIMIT_EXEC_COUNT = 1; /** Create a new server options object. The caller takes ownership of * the TRITONSERVER_ServerOptions object and must call * TRITONSERVER_ServerOptionsDelete to release the object. * * @param options Returns the new server options object. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsNew( @Cast("TRITONSERVER_ServerOptions**") PointerPointer options); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsNew( @ByPtrPtr TRITONSERVER_ServerOptions options); /** Delete a server options object. * * @param options The server options object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsDelete( TRITONSERVER_ServerOptions options); /** Set the textual ID for the server in a server options. The ID is a * name that identifies the server. * * @param options The server options object. * @param server_id The server identifier. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetServerId( TRITONSERVER_ServerOptions options, String server_id); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetServerId( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer server_id); /** Set the model repository path in a server options. The path must be * the full absolute path to the model repository. This function can be called * multiple times with different paths to set multiple model repositories. * Note that if a model is not unique across all model repositories * at any time, the model will not be available. * * @param options The server options object. * @param model_repository_path The full path to the model repository. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// /// /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetModelRepositoryPath( TRITONSERVER_ServerOptions options, String model_repository_path); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetModelRepositoryPath( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer model_repository_path); /** Set the model control mode in a server options. For each mode the models * will be managed as the following: * * TRITONSERVER_MODEL_CONTROL_NONE: the models in model repository will be * loaded on startup. After startup any changes to the model repository will * be ignored. Calling TRITONSERVER_ServerPollModelRepository will result in * an error. * * TRITONSERVER_MODEL_CONTROL_POLL: the models in model repository will be * loaded on startup. The model repository can be polled periodically using * TRITONSERVER_ServerPollModelRepository and the server will load, unload, * and update models according to changes in the model repository. * * TRITONSERVER_MODEL_CONTROL_EXPLICIT: the models in model repository will * not be loaded on startup. The corresponding model control APIs must be * called to load / unload a model in the model repository. * * @param options The server options object. * @param mode The mode to use for the model control. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetModelControlMode( TRITONSERVER_ServerOptions options, @Cast("TRITONSERVER_ModelControlMode") int mode); /** Set the model to be loaded at startup in a server options. The model must be * present in one, and only one, of the specified model repositories. * This function can be called multiple times with different model name * to set multiple startup models. * Note that it only takes effect on TRITONSERVER_MODEL_CONTROL_EXPLICIT mode. * * @param options The server options object. * @param model_name The name of the model to load on startup. 
* @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetStartupModel( TRITONSERVER_ServerOptions options, String model_name); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetStartupModel( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer model_name); /** Enable or disable strict model configuration handling in a server * options. * * @param options The server options object. * @param strict True to enable strict model configuration handling, * false to disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetStrictModelConfig( TRITONSERVER_ServerOptions options, @Cast("bool") boolean strict); /** Set the rate limit mode in a server options. * * TRITONSERVER_RATE_LIMIT_EXEC_COUNT: The rate limiting prioritizes the * inference execution using the number of times each instance has got a * chance to run. The execution gets to run only when its resource * constraints are satisfied. * * TRITONSERVER_RATE_LIMIT_OFF: The rate limiting is turned off and the * inference gets executed whenever an instance is available. * * @param options The server options object. * @param mode The mode to use for the rate limiting. By default, execution * count is used to determine the priorities. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetRateLimiterMode( TRITONSERVER_ServerOptions options, @Cast("TRITONSERVER_RateLimitMode") int mode); /** Add resource count for rate limiting. * * @param options The server options object. * @param resource_name The name of the resource. * @param resource_count The count of the resource. * @param device The device identifier for the resource. A value of -1 * indicates that the specified number of resources are available on every * device. 
The device value is ignored for a global resource. The server * will use the rate limiter configuration specified for instance groups * in model config to determine whether resource is global. In case of * conflicting resource type in different model configurations, server * will raise an appropriate error while loading model. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsAddRateLimiterResource( TRITONSERVER_ServerOptions options, String resource_name, @Cast("const size_t") long resource_count, int device); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsAddRateLimiterResource( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer resource_name, @Cast("const size_t") long resource_count, int device); /** Set the total pinned memory byte size that the server can allocate * in a server options. The pinned memory pool will be shared across * Triton itself and the backends that use * TRITONBACKEND_MemoryManager to allocate memory. * * @param options The server options object. * @param size The pinned memory pool byte size. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetPinnedMemoryPoolByteSize( TRITONSERVER_ServerOptions options, @Cast("uint64_t") long size); /** Set the total CUDA memory byte size that the server can allocate * on given GPU device in a server options. The pinned memory pool * will be shared across Triton itself and the backends that use * TRITONBACKEND_MemoryManager to allocate memory. * * @param options The server options object. * @param gpu_device The GPU device to allocate the memory pool. * @param size The CUDA memory pool byte size. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetCudaMemoryPoolByteSize( TRITONSERVER_ServerOptions options, int gpu_device, @Cast("uint64_t") long size); /** Set the minimum supported CUDA compute capability in a server * options. * * @param options The server options object. * @param cc The minimum CUDA compute capability. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetMinSupportedComputeCapability( TRITONSERVER_ServerOptions options, double cc); /** Enable or disable exit-on-error in a server options. * * @param options The server options object. * @param exit True to enable exiting on initialization error, false * to continue. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetExitOnError( TRITONSERVER_ServerOptions options, @Cast("bool") boolean exit); /** Enable or disable strict readiness handling in a server options. * * @param options The server options object. * @param strict True to enable strict readiness handling, false to * disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetStrictReadiness( TRITONSERVER_ServerOptions options, @Cast("bool") boolean strict); /** Set the exit timeout, in seconds, for the server in a server * options. * * @param options The server options object. * @param timeout The exit timeout, in seconds. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetExitTimeout( TRITONSERVER_ServerOptions options, @Cast("unsigned int") int timeout); /** Set the number of threads used in buffer manager in a server options. * * @param options The server options object. * @param thread_count The number of threads. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetBufferManagerThreadCount( TRITONSERVER_ServerOptions options, @Cast("unsigned int") int thread_count); /** Enable or disable info level logging. * * @param options The server options object. * @param log True to enable info logging, false to disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetLogInfo( TRITONSERVER_ServerOptions options, @Cast("bool") boolean log); /** Enable or disable warning level logging. * * @param options The server options object. * @param log True to enable warning logging, false to disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetLogWarn( TRITONSERVER_ServerOptions options, @Cast("bool") boolean log); /** Enable or disable error level logging. * * @param options The server options object. * @param log True to enable error logging, false to disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetLogError( TRITONSERVER_ServerOptions options, @Cast("bool") boolean log); /** Set verbose logging level. Level zero disables verbose logging. * * @param options The server options object. * @param level The verbose logging level. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetLogVerbose( TRITONSERVER_ServerOptions options, int level); /** Enable or disable metrics collection in a server options. * * @param options The server options object. * @param metrics True to enable metrics, false to disable. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetMetrics( TRITONSERVER_ServerOptions options, @Cast("bool") boolean metrics); /** Enable or disable GPU metrics collection in a server options. GPU * metrics are collected if both this option and * TRITONSERVER_ServerOptionsSetMetrics are true. * * @param options The server options object. * @param gpu_metrics True to enable GPU metrics, false to disable. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetGpuMetrics( TRITONSERVER_ServerOptions options, @Cast("bool") boolean gpu_metrics); /** Set the directory containing backend shared libraries. This * directory is searched last after the version and model directory * in the model repository when looking for the backend shared * library for a model. If the backend is named 'be' the directory * searched is 'backend_dir'/be/libtriton_be.so. * * @param options The server options object. * @param backend_dir The full path of the backend directory. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetBackendDirectory( TRITONSERVER_ServerOptions options, String backend_dir); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetBackendDirectory( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer backend_dir); /** Set the directory containing repository agent shared libraries. This * directory is searched when looking for the repository agent shared * library for a model. If the backend is named 'ra' the directory * searched is 'repoagent_dir'/ra/libtritonrepoagent_ra.so. * * @param options The server options object. * @param repoagent_dir The full path of the repository agent directory. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetRepoAgentDirectory( TRITONSERVER_ServerOptions options, String repoagent_dir); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetRepoAgentDirectory( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer repoagent_dir); /** Set a configuration setting for a named backend in a server * options. * * @param options The server options object. * @param backend_name The name of the backend. * @param setting The name of the setting. * @param value The setting value. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetBackendConfig( TRITONSERVER_ServerOptions options, String backend_name, String setting, String value); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetBackendConfig( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer backend_name, @Cast("const char*") BytePointer setting, @Cast("const char*") BytePointer value); /** Set a host policy setting for a given policy name in a server options. * * @param options The server options object. * @param policy_name The name of the policy. * @param setting The name of the setting. * @param value The setting value. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetHostPolicy( TRITONSERVER_ServerOptions options, String policy_name, String setting, String value); public static native TRITONSERVER_Error TRITONSERVER_ServerOptionsSetHostPolicy( TRITONSERVER_ServerOptions options, @Cast("const char*") BytePointer policy_name, @Cast("const char*") BytePointer setting, @Cast("const char*") BytePointer value); /** TRITONSERVER_Server * * An inference server. * <p> * Model batch flags. The enum values must be power-of-2 values. 
*/ /** enum TRITONSERVER_ModelBatchFlag */ public static final int TRITONSERVER_BATCH_UNKNOWN = 1, TRITONSERVER_BATCH_FIRST_DIM = 2; /** Model index flags. The enum values must be power-of-2 values. */ /** enum TRITONSERVER_ModelIndexFlag */ public static final int TRITONSERVER_INDEX_FLAG_READY = 1; /** Model transaction policy flags. The enum values must be * power-of-2 values. */ /** enum TRITONSERVER_ModelTxnPropertyFlag */ public static final int TRITONSERVER_TXN_ONE_TO_ONE = 1, TRITONSERVER_TXN_DECOUPLED = 2; /** Create a new server object. The caller takes ownership of the * TRITONSERVER_Server object and must call TRITONSERVER_ServerDelete * to release the object. * * @param server Returns the new inference server object. * @param options The inference server options object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerNew( @Cast("TRITONSERVER_Server**") PointerPointer server, TRITONSERVER_ServerOptions options); public static native TRITONSERVER_Error TRITONSERVER_ServerNew( @ByPtrPtr TRITONSERVER_Server server, TRITONSERVER_ServerOptions options); /** Delete a server object. If server is not already stopped it is * stopped before being deleted. * * @param server The inference server object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerDelete( TRITONSERVER_Server server); /** Stop a server object. A server can't be restarted once it is * stopped. * * @param server The inference server object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerStop( TRITONSERVER_Server server); /** Check the model repository for changes and update server state * based on those changes. * * @param server The inference server object. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONSERVER_ServerPollModelRepository(TRITONSERVER_Server server); /** Is the server live? * * @param server The inference server object. * @param live Returns true if server is live, false otherwise. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerIsLive( TRITONSERVER_Server server, @Cast("bool*") boolean[] live); public static native TRITONSERVER_Error TRITONSERVER_ServerIsLive( TRITONSERVER_Server server, @Cast("bool*") BoolPointer live); /** Is the server ready? * * @param server The inference server object. * @param ready Returns true if server is ready, false otherwise. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONSERVER_ServerIsReady( TRITONSERVER_Server server, @Cast("bool*") boolean[] ready); public static native TRITONSERVER_Error TRITONSERVER_ServerIsReady( TRITONSERVER_Server server, @Cast("bool*") BoolPointer ready); /** Is the model ready? * * @param server The inference server object. * @param model_name The name of the model to get readiness for. * @param model_version The version of the model to get readiness * for. If -1 then the server will choose a version based on the * model's policy. * @param ready Returns true if server is ready, false otherwise. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONSERVER_ServerModelIsReady( TRITONSERVER_Server server, String model_name, @Cast("const int64_t") long model_version, @Cast("bool*") boolean[] ready); public static native TRITONSERVER_Error TRITONSERVER_ServerModelIsReady( TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name, @Cast("const int64_t") long model_version, @Cast("bool*") BoolPointer ready); /** Get the batch properties of the model. 
The properties are * communicated by a flags value and an (optional) object returned by * 'voidp'. * * - TRITONSERVER_BATCH_UNKNOWN: Triton cannot determine the * batching properties of the model. This means that the model * does not support batching in any way that is useable by * Triton. The returned 'voidp' value is nullptr. * * - TRITONSERVER_BATCH_FIRST_DIM: The model supports batching * along the first dimension of every input and output * tensor. Triton schedulers that perform batching can * automatically batch inference requests along this dimension. * The returned 'voidp' value is nullptr. * * @param server The inference server object. * @param model_name The name of the model. * @param model_version The version of the model. If -1 then the * server will choose a version based on the model's policy. * @param flags Returns flags indicating the batch properties of the * model. * @param voidp If non-nullptr, returns a pointer specific to the * 'flags' value. * @return a TRITONSERVER_Error indicating success or failure. 
 */

///
///
///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer flags,
    @Cast("void**") PointerPointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntBuffer flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") int[] flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntBuffer flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelBatchProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") int[] flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);

/** Get the transaction policy of the model. The policy is
 *  communicated by a flags value.
 *
 *  - TRITONSERVER_TXN_ONE_TO_ONE: The model generates exactly
 *    one response per request.
 *
 *  - TRITONSERVER_TXN_DECOUPLED: The model may generate zero
 *    to many responses per request.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @param model_version The version of the model. If -1 then the
 *  server will choose a version based on the model's policy.
 *  @param txn_flags Returns flags indicating the transaction policy of the
 *  model.
 *  @param voidp If non-nullptr, returns a pointer specific to the 'txn_flags'
 *  value.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer txn_flags,
    @Cast("void**") PointerPointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntBuffer txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") int[] txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntPointer txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") IntBuffer txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelTransactionProperties(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("uint32_t*") int[] txn_flags,
    @Cast("void**") @ByPtrPtr Pointer voidp);

/** Get the metadata of the server as a TRITONSERVER_Message object.
 *  The caller takes ownership of the message object and must call
 *  TRITONSERVER_MessageDelete to release the object.
 *
 *  @param server The inference server object.
 *  @param server_metadata Returns the server metadata message.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerMetadata(
    TRITONSERVER_Server server, @Cast("TRITONSERVER_Message**") PointerPointer server_metadata);
public static native TRITONSERVER_Error TRITONSERVER_ServerMetadata(
    TRITONSERVER_Server server, @ByPtrPtr TRITONSERVER_Message server_metadata);

/** Get the metadata of a model as a TRITONSERVER_Message
 *  object. The caller takes ownership of the message object and must
 *  call TRITONSERVER_MessageDelete to release the object.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @param model_version The version of the model.
 *  If -1 then the server will choose a version based on the model's
 *  policy.
 *  @param model_metadata Returns the model metadata message.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelMetadata(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version,
    @Cast("TRITONSERVER_Message**") PointerPointer model_metadata);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelMetadata(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version,
    @ByPtrPtr TRITONSERVER_Message model_metadata);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelMetadata(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version,
    @ByPtrPtr TRITONSERVER_Message model_metadata);

/** Get the statistics of a model as a TRITONSERVER_Message
 *  object. The caller takes ownership of the object and must call
 *  TRITONSERVER_MessageDelete to release the object.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  If empty, then statistics for all available models will be returned,
 *  and the server will choose a version based on those models' policies.
 *  @param model_version The version of the model. If -1 then the
 *  server will choose a version based on the model's policy.
 *  @param model_stats Returns the model statistics message.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelStatistics(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version,
    @Cast("TRITONSERVER_Message**") PointerPointer model_stats);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelStatistics(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version,
    @ByPtrPtr TRITONSERVER_Message model_stats);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelStatistics(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version,
    @ByPtrPtr TRITONSERVER_Message model_stats);

/** Get the configuration of a model as a TRITONSERVER_Message object.
 *  The caller takes ownership of the message object and must call
 *  TRITONSERVER_MessageDelete to release the object.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @param model_version The version of the model. If -1 then the
 *  server will choose a version based on the model's policy.
 *  @param config_version The model configuration will be returned in
 *  a format matching this version. If the configuration cannot be
 *  represented in the requested version's format then an error will
 *  be returned. Currently only version 1 is supported.
 *  @param model_config Returns the model config message.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelConfig(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("const uint32_t") int config_version,
    @Cast("TRITONSERVER_Message**") PointerPointer model_config);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelConfig(
    TRITONSERVER_Server server, String model_name,
    @Cast("const int64_t") long model_version, @Cast("const uint32_t") int config_version,
    @ByPtrPtr TRITONSERVER_Message model_config);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelConfig(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name,
    @Cast("const int64_t") long model_version, @Cast("const uint32_t") int config_version,
    @ByPtrPtr TRITONSERVER_Message model_config);

/** Get the index of all unique models in the model repositories as a
 *  TRITONSERVER_Message object. The caller takes ownership of the
 *  message object and must call TRITONSERVER_MessageDelete to release
 *  the object.
 *
 *  If TRITONSERVER_INDEX_FLAG_READY is set in 'flags' only the models
 *  that are loaded into the server and ready for inferencing are
 *  returned.
 *
 *  @param server The inference server object.
 *  @param flags TRITONSERVER_ModelIndexFlag flags that control how to
 *  collect the index.
 *  @param model_index Return the model index message that holds the
 *  index of all models contained in the server's model repository(s).
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerModelIndex(
    TRITONSERVER_Server server, @Cast("uint32_t") int flags,
    @Cast("TRITONSERVER_Message**") PointerPointer model_index);
public static native TRITONSERVER_Error TRITONSERVER_ServerModelIndex(
    TRITONSERVER_Server server, @Cast("uint32_t") int flags,
    @ByPtrPtr TRITONSERVER_Message model_index);

/** Load the requested model or reload the model if it is already
 *  loaded. The function does not return until the model is loaded or
 *  fails to load. Returned error indicates if model loaded
 *  successfully or not.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerLoadModel(
    TRITONSERVER_Server server, String model_name);
public static native TRITONSERVER_Error TRITONSERVER_ServerLoadModel(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name);

/** Unload the requested model. Unloading a model that is not loaded
 *  on server has no effect and success code will be returned.
 *  The function does not wait for the requested model to be fully unloaded
 *  and success code will be returned.
 *  Returned error indicates if model unloaded successfully or not.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerUnloadModel(
    TRITONSERVER_Server server, String model_name);
public static native TRITONSERVER_Error TRITONSERVER_ServerUnloadModel(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name);

/** Unload the requested model, and also unload any dependent model that
 *  was loaded along with the requested model (for example, the models composing
 *  an ensemble). Unloading a model that is not loaded
 *  on server has no effect and success code will be returned.
 *  The function does not wait for the requested model and all dependent
 *  models to be fully unloaded and success code will be returned.
 *  Returned error indicates if model unloaded successfully or not.
 *
 *  @param server The inference server object.
 *  @param model_name The name of the model.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONSERVER_ServerUnloadModelAndDependents(
    TRITONSERVER_Server server, String model_name);
public static native TRITONSERVER_Error TRITONSERVER_ServerUnloadModelAndDependents(
    TRITONSERVER_Server server, @Cast("const char*") BytePointer model_name);

/** Get the current metrics for the server. The caller takes ownership
 *  of the metrics object and must call TRITONSERVER_MetricsDelete to
 *  release the object.
 *
 *  @param server The inference server object.
 *  @param metrics Returns the metrics.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
///
///
public static native TRITONSERVER_Error TRITONSERVER_ServerMetrics(
    TRITONSERVER_Server server, @Cast("TRITONSERVER_Metrics**") PointerPointer metrics);
public static native TRITONSERVER_Error TRITONSERVER_ServerMetrics(
    TRITONSERVER_Server server, @ByPtrPtr TRITONSERVER_Metrics metrics);

/** Perform inference using the meta-data and inputs supplied by the
 *  'inference_request'. If the function returns success, then the
 *  caller releases ownership of 'inference_request' and must not
 *  access it in any way after this call, until ownership is returned
 *  via the 'request_release_fn' callback registered in the request
 *  object with TRITONSERVER_InferenceRequestSetReleaseCallback.
 *
 *  The function unconditionally takes ownership of 'trace' and so the
 *  caller must not access it in any way after this call (except in
 *  the trace id callback) until ownership is returned via the trace's
 *  release_fn callback.
 *
 *  Responses produced for this request are returned using the
 *  allocator and callback registered with the request by
 *  TRITONSERVER_InferenceRequestSetResponseCallback.
 *
 *  @param server The inference server object.
 *  @param inference_request The request object.
 *  @param trace The trace object for this request, or nullptr if no
 *  tracing.
 *  @return a TRITONSERVER_Error indicating success or failure. */
public static native TRITONSERVER_Error TRITONSERVER_ServerInferAsync(
    TRITONSERVER_Server server,
    TRITONSERVER_InferenceRequest inference_request,
    TRITONSERVER_InferenceTrace trace);

// #ifdef __cplusplus
// #endif


// Parsed from tritonbackend.h

// Copyright 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// #pragma once

// #include <stddef.h>
// #include <stdint.h>

// #include "triton/core/tritonserver.h"

// #ifdef __cplusplus
// #endif

// #ifdef _COMPILING_TRITONBACKEND
// #if defined(_MSC_VER)
// #define TRITONBACKEND_DECLSPEC __declspec(dllexport)
// #define TRITONBACKEND_ISPEC __declspec(dllimport)
// #elif defined(__GNUC__)
// #define TRITONBACKEND_DECLSPEC __attribute__((__visibility__("default")))
// #define TRITONBACKEND_ISPEC
// #else
// #define TRITONBACKEND_DECLSPEC
// #define TRITONBACKEND_ISPEC
// #endif
// #else
// #if defined(_MSC_VER)
// #define TRITONBACKEND_DECLSPEC __declspec(dllimport)
// #define TRITONBACKEND_ISPEC __declspec(dllexport)
// #else
// #define TRITONBACKEND_DECLSPEC
// #define TRITONBACKEND_ISPEC
// Targeting ../tritonserver/TRITONBACKEND_MemoryManager.java


// Targeting ../tritonserver/TRITONBACKEND_Input.java


// Targeting ../tritonserver/TRITONBACKEND_Output.java


// Targeting ../tritonserver/TRITONBACKEND_Request.java


// Targeting ../tritonserver/TRITONBACKEND_ResponseFactory.java


// Targeting ../tritonserver/TRITONBACKEND_Response.java


// Targeting ../tritonserver/TRITONBACKEND_Backend.java


// Targeting ../tritonserver/TRITONBACKEND_Model.java


// Targeting ../tritonserver/TRITONBACKEND_ModelInstance.java


/**
 *  TRITONBACKEND API Version
 *
 *  The TRITONBACKEND API is versioned with major and minor version
 *  numbers. Any change to the API that does not impact backwards
 *  compatibility (for example, adding a non-required function)
 *  increases the minor version number. Any change that breaks
 *  backwards compatibility (for example, deleting or changing the
 *  behavior of a function) increases the major version number. A
 *  backend should check that the API version used to compile the
 *  backend is compatible with the API version of the Triton server
 *  that it is running in. This is typically done by code similar to
 *  the following which makes sure that the major versions are equal
 *  and that the minor version of Triton is >= the minor version used
 *  to build the backend.
 *
 *    uint32_t api_version_major, api_version_minor;
 *    TRITONBACKEND_ApiVersion(&api_version_major, &api_version_minor);
 *    if ((api_version_major != TRITONBACKEND_API_VERSION_MAJOR) ||
 *        (api_version_minor < TRITONBACKEND_API_VERSION_MINOR)) {
 *      return TRITONSERVER_ErrorNew(
 *        TRITONSERVER_ERROR_UNSUPPORTED,
 *        "triton backend API version does not support this backend");
 *    }
 *  */
public static final int TRITONBACKEND_API_VERSION_MAJOR = 1;

///
public static final int TRITONBACKEND_API_VERSION_MINOR = 5;

/** Get the TRITONBACKEND API version supported by Triton. This value
 *  can be compared against the TRITONBACKEND_API_VERSION_MAJOR and
 *  TRITONBACKEND_API_VERSION_MINOR used to build the backend to
 *  ensure that Triton is compatible with the backend.
 *
 *  @param major Returns the TRITONBACKEND API major version supported
 *  by Triton.
 *  @param minor Returns the TRITONBACKEND API minor version supported
 *  by Triton.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
///
///
public static native TRITONSERVER_Error TRITONBACKEND_ApiVersion(
    @Cast("uint32_t*") IntPointer major, @Cast("uint32_t*") IntPointer minor);
public static native TRITONSERVER_Error TRITONBACKEND_ApiVersion(
    @Cast("uint32_t*") IntBuffer major, @Cast("uint32_t*") IntBuffer minor);
public static native TRITONSERVER_Error TRITONBACKEND_ApiVersion(
    @Cast("uint32_t*") int[] major, @Cast("uint32_t*") int[] minor);

/** TRITONBACKEND_ArtifactType
 *
 *  The ways that the files that make up a backend or model are
 *  communicated to the backend.
 *
 *    TRITONBACKEND_ARTIFACT_FILESYSTEM: The model or backend
 *    artifacts are made available to Triton via a locally
 *    accessible filesystem. The backend can access these files
 *    using an appropriate system API.
 *  */
/** enum TRITONBACKEND_ArtifactType */
public static final int
  TRITONBACKEND_ARTIFACT_FILESYSTEM = 0;

/**
 *  TRITONBACKEND_MemoryManager
 *
 *  Object representing a memory manager that is capable of
 *  allocating and otherwise managing different memory types. For
 *  improved performance Triton maintains pools for GPU and CPU-pinned
 *  memory and the memory manager allows backends to access those
 *  pools.
 *  <p>
 *  Allocate a contiguous block of memory of a specific type using a
 *  memory manager. Two error codes have specific interpretations for
 *  this function:
 *
 *    TRITONSERVER_ERROR_UNSUPPORTED: Indicates that Triton is
 *      incapable of allocating the requested memory type and memory
 *      type ID. Requests for the memory type and ID will always fail
 *      no matter 'byte_size' of the request.
 *
 *    TRITONSERVER_ERROR_UNAVAILABLE: Indicates that Triton can
 *      allocate the memory type and ID but that currently it cannot
 *      allocate a contiguous block of memory of the requested
 *      'byte_size'.
 *
 *  @param manager The memory manager.
 *  @param buffer Returns the allocated memory.
 *  @param memory_type The type of memory to allocate.
 *  @param memory_type_id The ID associated with the memory type to
 *  allocate. For GPU memory this indicates the device ID of the GPU
 *  to allocate from.
 *  @param byte_size The size of memory to allocate, in bytes.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONBACKEND_MemoryManagerAllocate(
    TRITONBACKEND_MemoryManager manager, @Cast("void**") PointerPointer buffer,
    @Cast("const TRITONSERVER_MemoryType") int memory_type,
    @Cast("const int64_t") long memory_type_id, @Cast("const uint64_t") long byte_size);
public static native TRITONSERVER_Error TRITONBACKEND_MemoryManagerAllocate(
    TRITONBACKEND_MemoryManager manager, @Cast("void**") @ByPtrPtr Pointer buffer,
    @Cast("const TRITONSERVER_MemoryType") int memory_type,
    @Cast("const int64_t") long memory_type_id, @Cast("const uint64_t") long byte_size);

/** Free a buffer that was previously allocated with
 *  TRITONBACKEND_MemoryManagerAllocate. The call must provide the
 *  same values for 'memory_type' and 'memory_type_id' as were used
 *  when the buffer was allocated or else the behavior is undefined.
 *
 *  @param manager The memory manager.
 *  @param buffer The allocated memory buffer to free.
 *  @param memory_type The type of memory of the buffer.
 *  @param memory_type_id The ID associated with the memory type of
 *  the buffer.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
///
///
///
public static native TRITONSERVER_Error TRITONBACKEND_MemoryManagerFree(
    TRITONBACKEND_MemoryManager manager, Pointer buffer,
    @Cast("const TRITONSERVER_MemoryType") int memory_type,
    @Cast("const int64_t") long memory_type_id);

/**
 *  TRITONBACKEND_Input
 *
 *  Object representing an input tensor.
 *  <p>
 *  Get the name and properties of an input tensor. The returned
 *  strings and other properties are owned by the input, not the
 *  caller, and so should not be modified or freed.
 *
 *  @param input The input tensor.
 *  @param name If non-nullptr, returns the tensor name.
 *  @param datatype If non-nullptr, returns the tensor datatype.
 *  @param shape If non-nullptr, returns the tensor shape.
 *  @param dims_count If non-nullptr, returns the number of dimensions
 *  in the tensor shape.
 *  @param byte_size If non-nullptr, returns the size of the available
 *  data for the tensor, in bytes. This size reflects the actual data
 *  available, and does not necessarily match what is
 *  expected/required for the tensor given its shape and datatype. It
 *  is the responsibility of the backend to handle mismatches in these
 *  sizes appropriately.
 *  @param buffer_count If non-nullptr, returns the number of buffers
 *  holding the contents of the tensor. These buffers are accessed
 *  using TRITONBACKEND_InputBuffer.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONBACKEND_InputProperties(
    TRITONBACKEND_Input input, @Cast("const char**") PointerPointer name,
    @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") PointerPointer shape,
    @Cast("uint32_t*") IntPointer dims_count, @Cast("uint64_t*") LongPointer byte_size,
    @Cast("uint32_t*") IntPointer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputProperties(
    TRITONBACKEND_Input input, @Cast("const char**") @ByPtrPtr BytePointer name,
    @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") @ByPtrPtr LongPointer shape,
    @Cast("uint32_t*") IntPointer dims_count, @Cast("uint64_t*") LongPointer byte_size,
    @Cast("uint32_t*") IntPointer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputProperties(
    TRITONBACKEND_Input input, @Cast("const char**") @ByPtrPtr ByteBuffer name,
    @Cast("TRITONSERVER_DataType*") IntBuffer datatype, @Cast("const int64_t**") @ByPtrPtr LongBuffer shape,
    @Cast("uint32_t*") IntBuffer dims_count, @Cast("uint64_t*") LongBuffer byte_size,
    @Cast("uint32_t*") IntBuffer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputProperties(
    TRITONBACKEND_Input input, @Cast("const char**") @ByPtrPtr byte[] name,
    @Cast("TRITONSERVER_DataType*") int[] datatype, @Cast("const int64_t**") @ByPtrPtr long[] shape,
    @Cast("uint32_t*") int[] dims_count, @Cast("uint64_t*") long[] byte_size,
    @Cast("uint32_t*") int[] buffer_count);

/** Get the name and properties of an input tensor associated with a given
 *  host policy. If there are no input buffers for the specified host policy,
 *  the properties of the fallback input buffers are returned. The returned
 *  strings and other properties are owned by the input, not the caller, and so
 *  should not be modified or freed.
 *
 *  @param input The input tensor.
 *  @param host_policy_name The host policy name. Fallback input properties
 *  will be returned if nullptr is provided.
 *  @param name If non-nullptr, returns the tensor name.
 *  @param datatype If non-nullptr, returns the tensor datatype.
 *  @param shape If non-nullptr, returns the tensor shape.
 *  @param dims_count If non-nullptr, returns the number of dimensions
 *  in the tensor shape.
 *  @param byte_size If non-nullptr, returns the size of the available
 *  data for the tensor, in bytes. This size reflects the actual data
 *  available, and does not necessarily match what is
 *  expected/required for the tensor given its shape and datatype. It
 *  is the responsibility of the backend to handle mismatches in these
 *  sizes appropriately.
 *  @param buffer_count If non-nullptr, returns the number of buffers
 *  holding the contents of the tensor. These buffers are accessed
 *  using TRITONBACKEND_InputBufferForHostPolicy.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, String host_policy_name, @Cast("const char**") PointerPointer name,
    @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") PointerPointer shape,
    @Cast("uint32_t*") IntPointer dims_count, @Cast("uint64_t*") LongPointer byte_size,
    @Cast("uint32_t*") IntPointer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, String host_policy_name, @Cast("const char**") @ByPtrPtr BytePointer name,
    @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") @ByPtrPtr LongPointer shape,
    @Cast("uint32_t*") IntPointer dims_count, @Cast("uint64_t*") LongPointer byte_size,
    @Cast("uint32_t*") IntPointer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const char**") @ByPtrPtr ByteBuffer name,
    @Cast("TRITONSERVER_DataType*") IntBuffer datatype, @Cast("const int64_t**") @ByPtrPtr LongBuffer shape,
    @Cast("uint32_t*") IntBuffer dims_count, @Cast("uint64_t*") LongBuffer byte_size,
    @Cast("uint32_t*") IntBuffer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, String host_policy_name, @Cast("const char**") @ByPtrPtr byte[] name,
    @Cast("TRITONSERVER_DataType*") int[] datatype, @Cast("const int64_t**") @ByPtrPtr long[] shape,
    @Cast("uint32_t*") int[] dims_count, @Cast("uint64_t*") long[] byte_size,
    @Cast("uint32_t*") int[] buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const char**") @ByPtrPtr BytePointer name,
    @Cast("TRITONSERVER_DataType*") IntPointer datatype, @Cast("const int64_t**") @ByPtrPtr LongPointer shape,
    @Cast("uint32_t*") IntPointer dims_count, @Cast("uint64_t*") LongPointer byte_size,
    @Cast("uint32_t*") IntPointer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, String host_policy_name, @Cast("const char**") @ByPtrPtr ByteBuffer name,
    @Cast("TRITONSERVER_DataType*") IntBuffer datatype, @Cast("const int64_t**") @ByPtrPtr LongBuffer shape,
    @Cast("uint32_t*") IntBuffer dims_count, @Cast("uint64_t*") LongBuffer byte_size,
    @Cast("uint32_t*") IntBuffer buffer_count);
public static native TRITONSERVER_Error TRITONBACKEND_InputPropertiesForHostPolicy(
    TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const char**") @ByPtrPtr byte[] name,
    @Cast("TRITONSERVER_DataType*") int[] datatype, @Cast("const int64_t**") @ByPtrPtr long[] shape,
    @Cast("uint32_t*") int[] dims_count, @Cast("uint64_t*") long[] byte_size,
    @Cast("uint32_t*") int[] buffer_count);

/** Get a buffer holding (part of) the tensor data for an input. For a
 *  given input the number of buffers composing the input are found
 *  from 'buffer_count' returned by TRITONBACKEND_InputProperties. The
 *  returned buffer is owned by the input and so should not be
 *  modified or freed by the caller. The lifetime of the buffer
 *  matches that of the input and so the buffer should not be accessed
 *  after the input tensor object is released.
 *
 *  @param input The input tensor.
 *  @param index The index of the buffer. Must be 0 <= index <
 *  buffer_count, where buffer_count is the value returned by
 *  TRITONBACKEND_InputProperties.
 *  @param buffer Returns a pointer to a contiguous block of data for
 *  the named input.
 *  @param buffer_byte_size Returns the size, in bytes, of 'buffer'.
 *  @param memory_type Acts as both input and output. On input gives
 *  the buffer memory type preferred by the function caller. Returns
 *  the actual memory type of 'buffer'.
 *  @param memory_type_id Acts as both input and output. On input
 *  gives the buffer memory type id preferred by the function caller.
 *  Returns the actual memory type id of 'buffer'.
 *  @return a TRITONSERVER_Error indicating success or failure. */

///
public static native TRITONSERVER_Error TRITONBACKEND_InputBuffer(
    TRITONBACKEND_Input input, @Cast("const uint32_t") int index, @Cast("const void**") PointerPointer buffer,
    @Cast("uint64_t*") LongPointer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type,
    @Cast("int64_t*") LongPointer memory_type_id);
public static native TRITONSERVER_Error TRITONBACKEND_InputBuffer(
    TRITONBACKEND_Input input, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer,
    @Cast("uint64_t*") LongPointer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type,
    @Cast("int64_t*") LongPointer memory_type_id);
public static native TRITONSERVER_Error TRITONBACKEND_InputBuffer(
    TRITONBACKEND_Input input, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer,
    @Cast("uint64_t*") LongBuffer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntBuffer memory_type,
    @Cast("int64_t*") LongBuffer memory_type_id);
public static native TRITONSERVER_Error TRITONBACKEND_InputBuffer(
    TRITONBACKEND_Input input, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer,
    @Cast("uint64_t*") long[] buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") int[] memory_type,
    @Cast("int64_t*") long[] memory_type_id);

/** Get a buffer holding (part of) the tensor data for an input for a specific
 *  host policy. If there are no input buffers specified for this host policy,
 *  the fallback input buffer is returned.
 *  For a given input the number of buffers composing the input are found
 *  from 'buffer_count' returned by TRITONBACKEND_InputPropertiesForHostPolicy.
 *  The returned buffer is owned by the input and so should not be modified or
 *  freed by the caller.
The lifetime of the buffer matches that of the input * and so the buffer should not be accessed after the input tensor object is * released. * * @param input The input tensor. * @param host_policy_name The host policy name. Fallback input buffer * will be returned if nullptr is provided. * @param index The index of the buffer. Must be 0 <= index < * buffer_count, where buffer_count is the value returned by * TRITONBACKEND_InputPropertiesForHostPolicy. * @param buffer Returns a pointer to a contiguous block of data for * the named input. * @param buffer_byte_size Returns the size, in bytes, of 'buffer'. * @param memory_type Acts as both input and output. On input gives * the buffer memory type preferred by the function caller. Returns * the actual memory type of 'buffer'. * @param memory_type_id Acts as both input and output. On input * gives the buffer memory type id preferred by the function caller. * Returns the actual memory type id of 'buffer'. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, String host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") PointerPointer buffer, @Cast("uint64_t*") LongPointer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, String host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") LongPointer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") LongBuffer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntBuffer memory_type, @Cast("int64_t*") LongBuffer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, String host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") long[] buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") int[] memory_type, @Cast("int64_t*") long[] memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") LongPointer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input 
input, String host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") LongBuffer buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntBuffer memory_type, @Cast("int64_t*") LongBuffer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_InputBufferForHostPolicy( TRITONBACKEND_Input input, @Cast("const char*") BytePointer host_policy_name, @Cast("const uint32_t") int index, @Cast("const void**") @ByPtrPtr Pointer buffer, @Cast("uint64_t*") long[] buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") int[] memory_type, @Cast("int64_t*") long[] memory_type_id); /** * TRITONBACKEND_Output * * Object representing a response output tensor. * <p> * Get a buffer to use to hold the tensor data for the output. The * returned buffer is owned by the output and so should not be freed * by the caller. The caller can and should fill the buffer with the * output data for the tensor. The lifetime of the buffer matches * that of the output and so the buffer should not be accessed after * the output tensor object is released. * * @param buffer Returns a pointer to a buffer where the contents of * the output tensor should be placed. * @param buffer_byte_size The size, in bytes, of the buffer required * by the caller. * @param memory_type Acts as both input and output. On input gives * the buffer memory type preferred by the caller. Returns the * actual memory type of 'buffer'. * @param memory_type_id Acts as both input and output. On input * gives the buffer memory type id preferred by the caller. Returns * the actual memory type id of 'buffer'. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_OutputBuffer( TRITONBACKEND_Output output, @Cast("void**") PointerPointer buffer, @Cast("const uint64_t") long buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_OutputBuffer( TRITONBACKEND_Output output, @Cast("void**") @ByPtrPtr Pointer buffer, @Cast("const uint64_t") long buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntPointer memory_type, @Cast("int64_t*") LongPointer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_OutputBuffer( TRITONBACKEND_Output output, @Cast("void**") @ByPtrPtr Pointer buffer, @Cast("const uint64_t") long buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") IntBuffer memory_type, @Cast("int64_t*") LongBuffer memory_type_id); public static native TRITONSERVER_Error TRITONBACKEND_OutputBuffer( TRITONBACKEND_Output output, @Cast("void**") @ByPtrPtr Pointer buffer, @Cast("const uint64_t") long buffer_byte_size, @Cast("TRITONSERVER_MemoryType*") int[] memory_type, @Cast("int64_t*") long[] memory_type_id); /** * TRITONBACKEND_Request * * Object representing an inference request. * <p> * Get the ID of the request. Can be nullptr if request doesn't have * an ID. The returned string is owned by the request, not the * caller, and so should not be modified or freed. * * @param request The inference request. * @param id Returns the ID. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestId( TRITONBACKEND_Request request, @Cast("const char**") PointerPointer id); public static native TRITONSERVER_Error TRITONBACKEND_RequestId( TRITONBACKEND_Request request, @Cast("const char**") @ByPtrPtr BytePointer id); public static native TRITONSERVER_Error TRITONBACKEND_RequestId( TRITONBACKEND_Request request, @Cast("const char**") @ByPtrPtr ByteBuffer id); public static native TRITONSERVER_Error TRITONBACKEND_RequestId( TRITONBACKEND_Request request, @Cast("const char**") @ByPtrPtr byte[] id); /** Get the correlation ID of the request. Zero indicates that the * request does not have a correlation ID. * * @param request The inference request. * @param id Returns the correlation ID. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestCorrelationId( TRITONBACKEND_Request request, @Cast("uint64_t*") LongPointer id); public static native TRITONSERVER_Error TRITONBACKEND_RequestCorrelationId( TRITONBACKEND_Request request, @Cast("uint64_t*") LongBuffer id); public static native TRITONSERVER_Error TRITONBACKEND_RequestCorrelationId( TRITONBACKEND_Request request, @Cast("uint64_t*") long[] id); /** Get the number of input tensors specified in the request. * * @param request The inference request. * @param count Returns the number of input tensors. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestInputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") IntPointer count); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") IntBuffer count); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") int[] count); /** Get the name of an input tensor. 
The caller does not own * the returned string and must not modify or delete it. The lifetime * of the returned string extends only as long as 'request'. * * @param request The inference request. * @param index The index of the input tensor. Must be 0 <= index < * count, where count is the value returned by * TRITONBACKEND_RequestInputCount. * @param input_name Returns the name of the input tensor * corresponding to the index. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestInputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") PointerPointer input_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer input_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer input_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] input_name); /** Get a named request input. The lifetime of the returned input * object matches that of the request and so the input object should * not be accessed after the request object is released. * * @param request The inference request. * @param name The name of the input. * @param input Returns the input corresponding to the name. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// public static native TRITONSERVER_Error TRITONBACKEND_RequestInput( TRITONBACKEND_Request request, String name, @Cast("TRITONBACKEND_Input**") PointerPointer input); public static native TRITONSERVER_Error TRITONBACKEND_RequestInput( TRITONBACKEND_Request request, String name, @ByPtrPtr TRITONBACKEND_Input input); public static native TRITONSERVER_Error TRITONBACKEND_RequestInput( TRITONBACKEND_Request request, @Cast("const char*") BytePointer name, @ByPtrPtr TRITONBACKEND_Input input); /** Get a request input by index. The order of inputs in a given * request is not necessarily consistent with other requests, even if * the requests are in the same batch. As a result, you can not * assume that an index obtained from one request will point to the * same input in a different request. * * The lifetime of the returned input object matches that of the * request and so the input object should not be accessed after the * request object is released. * * @param request The inference request. * @param index The index of the input tensor. Must be 0 <= index < * count, where count is the value returned by * TRITONBACKEND_RequestInputCount. * @param input Returns the input corresponding to the index. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestInputByIndex( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("TRITONBACKEND_Input**") PointerPointer input); public static native TRITONSERVER_Error TRITONBACKEND_RequestInputByIndex( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @ByPtrPtr TRITONBACKEND_Input input); /** Get the number of output tensors requested to be returned in the * request. * * @param request The inference request. * @param count Returns the number of output tensors. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") IntPointer count); public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") IntBuffer count); public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputCount( TRITONBACKEND_Request request, @Cast("uint32_t*") int[] count); /** Get the name of a requested output tensor. The caller does not own * the returned string and must not modify or delete it. The lifetime * of the returned string extends only as long as 'request'. * * @param request The inference request. * @param index The index of the requested output tensor. Must be 0 * <= index < count, where count is the value returned by * TRITONBACKEND_RequestOutputCount. * @param output_name Returns the name of the requested output tensor * corresponding to the index. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") PointerPointer output_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer output_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer output_name); public static native TRITONSERVER_Error TRITONBACKEND_RequestOutputName( TRITONBACKEND_Request request, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] output_name); /** Release the request. The request should be released when it is no * longer needed by the backend. If this call returns with an error * (i.e. 
non-nullptr) then the request was not released and ownership * remains with the backend. If this call returns with success, the * 'request' object is no longer owned by the backend and must not be * used. Any tensor names, data types, shapes, input tensors, * etc. returned by TRITONBACKEND_Request* functions for this request * are no longer valid. If a persistent copy of that data is required * it must be created before calling this function. * * @param request The inference request. * @param release_flags Flags indicating what type of request release * should be performed. @see TRITONSERVER_RequestReleaseFlag. @see * TRITONSERVER_InferenceRequestReleaseFn_t. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_RequestRelease( TRITONBACKEND_Request request, @Cast("uint32_t") int release_flags); /** * TRITONBACKEND_ResponseFactory * * Object representing an inference response factory. Using a * response factory is not required; instead a response can be * generated directly from a TRITONBACKEND_Request object using * TRITONBACKEND_ResponseNew(). A response factory allows a request * to be released before all responses have been sent. Releasing a * request as early as possible releases all input tensor data and * therefore may be desirable in some cases. <p> * Create the response factory associated with a request. * * @param factory Returns the new response factory. * @param request The inference request. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseFactoryNew( @Cast("TRITONBACKEND_ResponseFactory**") PointerPointer factory, TRITONBACKEND_Request request); public static native TRITONSERVER_Error TRITONBACKEND_ResponseFactoryNew( @ByPtrPtr TRITONBACKEND_ResponseFactory factory, TRITONBACKEND_Request request); /** Destroy a response factory. * * @param factory The response factory. 
 * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseFactoryDelete( TRITONBACKEND_ResponseFactory factory); /** Send response flags without a corresponding response. * * @param factory The response factory. * @param send_flags Flags to send. @see * TRITONSERVER_ResponseCompleteFlag. @see * TRITONSERVER_InferenceResponseCompleteFn_t. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseFactorySendFlags( TRITONBACKEND_ResponseFactory factory, @Cast("const uint32_t") int send_flags); /** * TRITONBACKEND_Response * * Object representing an inference response. For a given request, * the backend must carefully manage the lifecycle of responses * generated for that request to ensure that the output tensor * buffers are allocated correctly. When a response is created with * TRITONBACKEND_ResponseNew or TRITONBACKEND_ResponseNewFromFactory, * all the outputs and corresponding buffers must be created for that * response using TRITONBACKEND_ResponseOutput and * TRITONBACKEND_OutputBuffer *before* another response is created * for the request. For a given response, outputs can be created in * any order but they must be created sequentially/synchronously (for * example, the backend cannot use multiple threads to simultaneously * add multiple outputs to a response). * * The above requirement applies only to responses being generated * for a given request. The backend may generate responses in * parallel on multiple threads as long as those responses are for * different requests. * * This order of response creation must be strictly followed. But, * once response(s) are created they do not need to be sent * immediately, nor do they need to be sent in the order they were * created. The backend may even delete a created response instead of * sending it by using TRITONBACKEND_ResponseDelete. 
<p> * Create a response for a request. * * @param response Returns the new response. * @param request The request. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseNew( @Cast("TRITONBACKEND_Response**") PointerPointer response, TRITONBACKEND_Request request); public static native TRITONSERVER_Error TRITONBACKEND_ResponseNew( @ByPtrPtr TRITONBACKEND_Response response, TRITONBACKEND_Request request); /** Create a response using a factory. * * @param response Returns the new response. * @param factory The response factory. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseNewFromFactory( @Cast("TRITONBACKEND_Response**") PointerPointer response, TRITONBACKEND_ResponseFactory factory); public static native TRITONSERVER_Error TRITONBACKEND_ResponseNewFromFactory( @ByPtrPtr TRITONBACKEND_Response response, TRITONBACKEND_ResponseFactory factory); /** Destroy a response. It is not necessary to delete a response if * TRITONBACKEND_ResponseSend is called as that function transfers * ownership of the response object to Triton. * * @param response The response. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseDelete( TRITONBACKEND_Response response); /** Set a string parameter in the response. * * @param response The response. * @param name The name of the parameter. * @param value The value of the parameter. * @return a TRITONSERVER_Error indicating success or failure. 
 */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetStringParameter( TRITONBACKEND_Response response, String name, String value); public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetStringParameter( TRITONBACKEND_Response response, @Cast("const char*") BytePointer name, @Cast("const char*") BytePointer value); /** Set an integer parameter in the response. * * @param response The response. * @param name The name of the parameter. * @param value The value of the parameter. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetIntParameter( TRITONBACKEND_Response response, String name, @Cast("const int64_t") long value); public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetIntParameter( TRITONBACKEND_Response response, @Cast("const char*") BytePointer name, @Cast("const int64_t") long value); /** Set a boolean parameter in the response. * * @param response The response. * @param name The name of the parameter. * @param value The value of the parameter. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetBoolParameter( TRITONBACKEND_Response response, String name, @Cast("const bool") boolean value); public static native TRITONSERVER_Error TRITONBACKEND_ResponseSetBoolParameter( TRITONBACKEND_Response response, @Cast("const char*") BytePointer name, @Cast("const bool") boolean value); /** Create an output tensor in the response. The lifetime of the * returned output tensor object matches that of the response and so * the output tensor object should not be accessed after the response * object is deleted. * * @param response The response. * @param output Returns the new response output. * @param name The name of the output tensor. * @param datatype The datatype of the output tensor. * @param shape The shape of the output tensor. 
* @param dims_count The number of dimensions in the output tensor * shape. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @Cast("TRITONBACKEND_Output**") PointerPointer output, String name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongPointer shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, String name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongPointer shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, @Cast("const char*") BytePointer name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongBuffer shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, String name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") long[] shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, @Cast("const char*") BytePointer name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongPointer shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, String name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") LongBuffer shape, @Cast("const uint32_t") int dims_count); public static native TRITONSERVER_Error 
TRITONBACKEND_ResponseOutput( TRITONBACKEND_Response response, @ByPtrPtr TRITONBACKEND_Output output, @Cast("const char*") BytePointer name, @Cast("const TRITONSERVER_DataType") int datatype, @Cast("const int64_t*") long[] shape, @Cast("const uint32_t") int dims_count); /** Send a response. Calling this function transfers ownership of the * response object to Triton. The caller must not access or delete * the response object after calling this function. * * @param response The response. * @param send_flags Flags associated with the response. @see * TRITONSERVER_ResponseCompleteFlag. @see * TRITONSERVER_InferenceResponseCompleteFn_t. * @param error The TRITONSERVER_Error to send if the response is an * error, or nullptr if the response is successful. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ResponseSend( TRITONBACKEND_Response response, @Cast("const uint32_t") int send_flags, TRITONSERVER_Error error); /** * TRITONBACKEND_Backend * * Object representing a backend. * <p> * TRITONBACKEND_ExecutionPolicy * * Types of execution policy that can be implemented by a backend. * * TRITONBACKEND_EXECUTION_BLOCKING: An instance of the model * blocks in TRITONBACKEND_ModelInstanceExecute until it is ready * to handle another inference. Upon returning from * TRITONBACKEND_ModelInstanceExecute, Triton may immediately * call TRITONBACKEND_ModelInstanceExecute for the same instance * to execute a new batch of requests. Thus, most backends using * this policy will not return from * TRITONBACKEND_ModelInstanceExecute until all responses have * been sent and all requests have been released. This is the * default execution policy. * * TRITONBACKEND_EXECUTION_DEVICE_BLOCKING: An instance, A, of the * model blocks in TRITONBACKEND_ModelInstanceExecute if the * device associated with the instance is unable to handle * another inference. 
Even if another instance, B, associated * with the device, is available and ready to perform an * inference, Triton will not invoke * TRITONBACKEND_ModelInstanceExecute for B until A returns from * TRITONBACKEND_ModelInstanceExecute. Triton will not be blocked * from calling TRITONBACKEND_ModelInstanceExecute for instance * C, which is associated with a different device than A and B, * even if A or B has not returned from * TRITONBACKEND_ModelInstanceExecute. This execution policy is * typically used by a backend that can cooperatively execute * multiple model instances on the same device. * */ /** enum TRITONBACKEND_ExecutionPolicy */ public static final int TRITONBACKEND_EXECUTION_BLOCKING = 0, TRITONBACKEND_EXECUTION_DEVICE_BLOCKING = 1; /** Get the name of the backend. The caller does not own the returned * string and must not modify or delete it. The lifetime of the * returned string extends only as long as 'backend'. * * @param backend The backend. * @param name Returns the name of the backend. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_BackendName( TRITONBACKEND_Backend backend, @Cast("const char**") PointerPointer name); public static native TRITONSERVER_Error TRITONBACKEND_BackendName( TRITONBACKEND_Backend backend, @Cast("const char**") @ByPtrPtr BytePointer name); public static native TRITONSERVER_Error TRITONBACKEND_BackendName( TRITONBACKEND_Backend backend, @Cast("const char**") @ByPtrPtr ByteBuffer name); public static native TRITONSERVER_Error TRITONBACKEND_BackendName( TRITONBACKEND_Backend backend, @Cast("const char**") @ByPtrPtr byte[] name); /** Get the backend configuration. The 'backend_config' message is * owned by Triton and should not be modified or freed by the caller. * * The backend configuration, as JSON, is: * * { * "cmdline" : { * "<setting>" : "<value>", * ... * } * } * * @param backend The backend. 
* @param backend_config Returns the backend configuration as a message. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_BackendConfig( TRITONBACKEND_Backend backend, @Cast("TRITONSERVER_Message**") PointerPointer backend_config); public static native TRITONSERVER_Error TRITONBACKEND_BackendConfig( TRITONBACKEND_Backend backend, @ByPtrPtr TRITONSERVER_Message backend_config); /** Get the execution policy for this backend. By default the * execution policy is TRITONBACKEND_EXECUTION_BLOCKING. * * @param backend The backend. * @param policy Returns the execution policy. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_BackendExecutionPolicy( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ExecutionPolicy*") IntPointer policy); public static native TRITONSERVER_Error TRITONBACKEND_BackendExecutionPolicy( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ExecutionPolicy*") IntBuffer policy); public static native TRITONSERVER_Error TRITONBACKEND_BackendExecutionPolicy( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ExecutionPolicy*") int[] policy); /** Set the execution policy for this backend. By default the * execution policy is TRITONBACKEND_EXECUTION_BLOCKING. Triton reads * the backend's execution policy after calling * TRITONBACKEND_Initialize, so to be recognized changes to the * execution policy must be made in TRITONBACKEND_Initialize. * * @param backend The backend. * @param policy The execution policy. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// public static native TRITONSERVER_Error TRITONBACKEND_BackendSetExecutionPolicy( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ExecutionPolicy") int policy); /** Get the location of the files that make up the backend * implementation. 
This location contains the backend shared library * and any other files located with the shared library. The * 'location' communicated depends on how the backend is being * communicated to Triton as indicated by 'artifact_type'. * * TRITONBACKEND_ARTIFACT_FILESYSTEM: The backend artifacts are * made available to Triton via the local filesystem. 'location' * returns the full path to the directory containing this * backend's artifacts. The returned string is owned by Triton, * not the caller, and so should not be modified or freed. * * @param backend The backend. * @param artifact_type Returns the artifact type for the backend. * @param location Returns the location. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_BackendArtifacts( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ArtifactType*") IntPointer artifact_type, @Cast("const char**") PointerPointer location); public static native TRITONSERVER_Error TRITONBACKEND_BackendArtifacts( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ArtifactType*") IntPointer artifact_type, @Cast("const char**") @ByPtrPtr BytePointer location); public static native TRITONSERVER_Error TRITONBACKEND_BackendArtifacts( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ArtifactType*") IntBuffer artifact_type, @Cast("const char**") @ByPtrPtr ByteBuffer location); public static native TRITONSERVER_Error TRITONBACKEND_BackendArtifacts( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_ArtifactType*") int[] artifact_type, @Cast("const char**") @ByPtrPtr byte[] location); /** Get the memory manager associated with a backend. * * @param backend The backend. * @param manager Returns the memory manager. * @return a TRITONSERVER_Error indicating success or failure.
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_BackendMemoryManager( TRITONBACKEND_Backend backend, @Cast("TRITONBACKEND_MemoryManager**") PointerPointer manager); public static native TRITONSERVER_Error TRITONBACKEND_BackendMemoryManager( TRITONBACKEND_Backend backend, @ByPtrPtr TRITONBACKEND_MemoryManager manager); /** Get the user-specified state associated with the backend. The * state is completely owned and managed by the backend. * * @param backend The backend. * @param state Returns the user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_BackendState( TRITONBACKEND_Backend backend, @Cast("void**") PointerPointer state); public static native TRITONSERVER_Error TRITONBACKEND_BackendState( TRITONBACKEND_Backend backend, @Cast("void**") @ByPtrPtr Pointer state); /** Set the user-specified state associated with the backend. The * state is completely owned and managed by the backend. * * @param backend The backend. * @param state The user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_BackendSetState( TRITONBACKEND_Backend backend, Pointer state); /** * TRITONBACKEND_Model * * Object representing a model implemented using the backend. * <p> * Get the name of the model. The returned string is owned by the * model object, not the caller, and so should not be modified or * freed. * * @param model The model. * @param name Returns the model name. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelName( TRITONBACKEND_Model model, @Cast("const char**") PointerPointer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelName( TRITONBACKEND_Model model, @Cast("const char**") @ByPtrPtr BytePointer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelName( TRITONBACKEND_Model model, @Cast("const char**") @ByPtrPtr ByteBuffer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelName( TRITONBACKEND_Model model, @Cast("const char**") @ByPtrPtr byte[] name); /** Get the version of the model. * * @param model The model. * @param version Returns the model version. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelVersion( TRITONBACKEND_Model model, @Cast("uint64_t*") LongPointer version); public static native TRITONSERVER_Error TRITONBACKEND_ModelVersion( TRITONBACKEND_Model model, @Cast("uint64_t*") LongBuffer version); public static native TRITONSERVER_Error TRITONBACKEND_ModelVersion( TRITONBACKEND_Model model, @Cast("uint64_t*") long[] version); /** Get the location of the files that make up the model. The * 'location' communicated depends on how the model is being * communicated to Triton as indicated by 'artifact_type'. * * TRITONBACKEND_ARTIFACT_FILESYSTEM: The model artifacts are made * available to Triton via the local filesystem. 'location' * returns the full path to the directory in the model repository * that contains this model's artifacts. The returned string is * owned by Triton, not the caller, and so should not be modified * or freed. * * @param model The model. * @param artifact_type Returns the artifact type for the model. * @param location Returns the location. * @return a TRITONSERVER_Error indicating success or failure.
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelRepository( TRITONBACKEND_Model model, @Cast("TRITONBACKEND_ArtifactType*") IntPointer artifact_type, @Cast("const char**") PointerPointer location); public static native TRITONSERVER_Error TRITONBACKEND_ModelRepository( TRITONBACKEND_Model model, @Cast("TRITONBACKEND_ArtifactType*") IntPointer artifact_type, @Cast("const char**") @ByPtrPtr BytePointer location); public static native TRITONSERVER_Error TRITONBACKEND_ModelRepository( TRITONBACKEND_Model model, @Cast("TRITONBACKEND_ArtifactType*") IntBuffer artifact_type, @Cast("const char**") @ByPtrPtr ByteBuffer location); public static native TRITONSERVER_Error TRITONBACKEND_ModelRepository( TRITONBACKEND_Model model, @Cast("TRITONBACKEND_ArtifactType*") int[] artifact_type, @Cast("const char**") @ByPtrPtr byte[] location); /** Get the model configuration. The caller takes ownership of the * message object and must call TRITONSERVER_MessageDelete to release * the object. The configuration is available via this call even * before the model is loaded and so can be used in * TRITONBACKEND_ModelInitialize. TRITONSERVER_ServerModelConfig * returns equivalent information but is not useable until after the * model loads. * * @param model The model. * @param config_version The model configuration will be returned in * a format matching this version. If the configuration cannot be * represented in the requested version's format then an error will * be returned. Currently only version 1 is supported. * @param model_config Returns the model configuration as a message. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelConfig( TRITONBACKEND_Model model, @Cast("const uint32_t") int config_version, @Cast("TRITONSERVER_Message**") PointerPointer model_config); public static native TRITONSERVER_Error TRITONBACKEND_ModelConfig( TRITONBACKEND_Model model, @Cast("const uint32_t") int config_version, @ByPtrPtr TRITONSERVER_Message model_config); /** Whether the backend should attempt to auto-complete the model configuration. * If true, the model should fill the inputs, outputs, and max batch size in * the model configuration if incomplete. If the model configuration is * changed, the new configuration must be reported to Triton using * TRITONBACKEND_ModelSetConfig. * * @param model The model. * @param auto_complete_config Returns whether the backend should auto-complete * the model configuration. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelAutoCompleteConfig( TRITONBACKEND_Model model, @Cast("bool*") boolean[] auto_complete_config); public static native TRITONSERVER_Error TRITONBACKEND_ModelAutoCompleteConfig( TRITONBACKEND_Model model, @Cast("bool*") BoolPointer auto_complete_config); /** Set the model configuration in Triton server. Only the inputs, outputs, * and max batch size can be changed. Any other changes to the model * configuration will be ignored by Triton. This function can only be called * from TRITONBACKEND_ModelInitialize, calling in any other context will result * in an error being returned. The function does not take ownership of the * message object and so the caller should call TRITONSERVER_MessageDelete to * release the object once the function returns. * * @param model The model. * @param config_version The format version of the model configuration. * If the configuration is not represented in the version's format * then an error will be returned. Currently only version 1 is supported. 
* @param model_config The updated model configuration as a message. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelSetConfig( TRITONBACKEND_Model model, @Cast("const uint32_t") int config_version, TRITONSERVER_Message model_config); /** Get the TRITONSERVER_Server object that this model is being served * by. * * @param model The model. * @param server Returns the server. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelServer( TRITONBACKEND_Model model, @Cast("TRITONSERVER_Server**") PointerPointer server); public static native TRITONSERVER_Error TRITONBACKEND_ModelServer( TRITONBACKEND_Model model, @ByPtrPtr TRITONSERVER_Server server); /** Get the backend used by the model. * * @param model The model. * @param backend Returns the backend object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelBackend( TRITONBACKEND_Model model, @Cast("TRITONBACKEND_Backend**") PointerPointer backend); public static native TRITONSERVER_Error TRITONBACKEND_ModelBackend( TRITONBACKEND_Model model, @ByPtrPtr TRITONBACKEND_Backend backend); /** Get the user-specified state associated with the model. The * state is completely owned and managed by the backend. * * @param model The model. * @param state Returns the user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelState( TRITONBACKEND_Model model, @Cast("void**") PointerPointer state); public static native TRITONSERVER_Error TRITONBACKEND_ModelState( TRITONBACKEND_Model model, @Cast("void**") @ByPtrPtr Pointer state); /** Set the user-specified state associated with the model. The * state is completely owned and managed by the backend. * * @param model The model.
* @param state The user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelSetState( TRITONBACKEND_Model model, Pointer state); /** * TRITONBACKEND_ModelInstance * * Object representing a model instance implemented using the * backend. * <p> * Get the name of the model instance. The returned string is owned by the * model object, not the caller, and so should not be modified or * freed. * * @param instance The model instance. * @param name Returns the instance name. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceName( TRITONBACKEND_ModelInstance instance, @Cast("const char**") PointerPointer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceName( TRITONBACKEND_ModelInstance instance, @Cast("const char**") @ByPtrPtr BytePointer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceName( TRITONBACKEND_ModelInstance instance, @Cast("const char**") @ByPtrPtr ByteBuffer name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceName( TRITONBACKEND_ModelInstance instance, @Cast("const char**") @ByPtrPtr byte[] name); /** Get the kind of the model instance. * * @param instance The model instance. * @param kind Returns the instance kind. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceKind( TRITONBACKEND_ModelInstance instance, @Cast("TRITONSERVER_InstanceGroupKind*") IntPointer kind); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceKind( TRITONBACKEND_ModelInstance instance, @Cast("TRITONSERVER_InstanceGroupKind*") IntBuffer kind); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceKind( TRITONBACKEND_ModelInstance instance, @Cast("TRITONSERVER_InstanceGroupKind*") int[] kind); /** Get the device ID of the model instance. * * @param instance The model instance. * @param device_id Returns the instance device ID. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceDeviceId( TRITONBACKEND_ModelInstance instance, IntPointer device_id); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceDeviceId( TRITONBACKEND_ModelInstance instance, IntBuffer device_id); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceDeviceId( TRITONBACKEND_ModelInstance instance, int[] device_id); /** Get the host policy setting. The 'host_policy' message is * owned by Triton and should not be modified or freed by the caller. * * The host policy setting, as JSON, is: * * { * "<host_policy>" : { * "<setting>" : "<value>", * ... * } * } * * @param instance The model instance. * @param host_policy Returns the host policy setting as a message. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceHostPolicy( TRITONBACKEND_ModelInstance instance, @Cast("TRITONSERVER_Message**") PointerPointer host_policy); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceHostPolicy( TRITONBACKEND_ModelInstance instance, @ByPtrPtr TRITONSERVER_Message host_policy); /** Whether the model instance is passive. * * @param instance The model instance. 
* @param is_passive Returns true if the instance is passive, false otherwise * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceIsPassive( TRITONBACKEND_ModelInstance instance, @Cast("bool*") boolean[] is_passive); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceIsPassive( TRITONBACKEND_ModelInstance instance, @Cast("bool*") BoolPointer is_passive); /** Get the number of optimization profiles to be loaded for the instance. * * @param instance The model instance. * @param count Returns the number of optimization profiles. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") IntPointer count); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") IntBuffer count); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") int[] count); /** Get the name of optimization profile. The caller does not own * the returned string and must not modify or delete it. The lifetime * of the returned string extends only as long as 'instance'. * * @param instance The model instance. * @param index The index of the optimization profile. Must be 0 * <= index < count, where count is the value returned by * TRITONBACKEND_ModelInstanceProfileCount. * @param profile_name Returns the name of the optimization profile * corresponding to the index. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileName( TRITONBACKEND_ModelInstance instance, @Cast("const uint32_t") int index, @Cast("const char**") PointerPointer profile_name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileName( TRITONBACKEND_ModelInstance instance, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer profile_name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileName( TRITONBACKEND_ModelInstance instance, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer profile_name); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceProfileName( TRITONBACKEND_ModelInstance instance, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] profile_name); /** Get the number of secondary devices configured for the instance. * * @param instance The model instance. * @param count Returns the number of secondary devices. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") IntPointer count); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") IntBuffer count); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceCount( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t*") int[] count); /** Get the properties of indexed secondary device. The returned * strings and other properties are owned by the instance, not the * caller, and so should not be modified or freed. * * @param instance The model instance. * @param index The index of the secondary device. Must be 0 * <= index < count, where count is the value returned by * TRITONBACKEND_ModelInstanceSecondaryDeviceCount. 
* @param kind Returns the kind of secondary device corresponding * to the index. * @param id Returns the id of secondary device corresponding to the index. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceProperties( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t") int index, @Cast("const char**") PointerPointer kind, @Cast("int64_t*") LongPointer id); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceProperties( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer kind, @Cast("int64_t*") LongPointer id); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceProperties( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer kind, @Cast("int64_t*") LongBuffer id); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSecondaryDeviceProperties( TRITONBACKEND_ModelInstance instance, @Cast("uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] kind, @Cast("int64_t*") long[] id); /** Get the model associated with a model instance. * * @param instance The model instance. * @param model Returns the model object. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceModel( TRITONBACKEND_ModelInstance instance, @Cast("TRITONBACKEND_Model**") PointerPointer model); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceModel( TRITONBACKEND_ModelInstance instance, @ByPtrPtr TRITONBACKEND_Model model); /** Get the user-specified state associated with the model * instance. The state is completely owned and managed by the * backend. * * @param instance The model instance. * @param state Returns the user state, or nullptr if no user state.
* @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceState( TRITONBACKEND_ModelInstance instance, @Cast("void**") PointerPointer state); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceState( TRITONBACKEND_ModelInstance instance, @Cast("void**") @ByPtrPtr Pointer state); /** Set the user-specified state associated with the model * instance. The state is completely owned and managed by the * backend. * * @param instance The model instance. * @param state The user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceSetState( TRITONBACKEND_ModelInstance instance, Pointer state); /** Record statistics for an inference request. * * Set 'success' true to indicate that the inference request * completed successfully. In this case all timestamps should be * non-zero values reported in nanoseconds and should be collected * using std::chrono::steady_clock::now().time_since_epoch() or the equivalent. * Set 'success' to false to indicate that the inference request failed * to complete successfully. In this case all timestamps values are * ignored. * * For consistency of measurement across different backends, the * timestamps should be collected at the following points during * TRITONBACKEND_ModelInstanceExecute. 
* * TRITONBACKEND_ModelInstanceExecute() * CAPTURE TIMESTAMP (exec_start_ns) * < process input tensors to prepare them for inference * execution, including copying the tensors to/from GPU if * necessary> * CAPTURE TIMESTAMP (compute_start_ns) * < perform inference computations to produce outputs > * CAPTURE TIMESTAMP (compute_end_ns) * < allocate output buffers and extract output tensors, including * copying the tensors to/from GPU if necessary> * CAPTURE TIMESTAMP (exec_end_ns) * return * * Note that these statistics are associated with a valid * TRITONBACKEND_Request object and so must be reported before the * request is released. For backends that release the request before * all response(s) are sent, these statistics cannot capture * information about the time required to produce the response. * * @param instance The model instance. * @param request The inference request that statistics are being * reported for. * @param success True if the inference request completed * successfully, false if it failed to complete. * @param exec_start_ns Timestamp for the start of execution. * @param compute_start_ns Timestamp for the start of execution * computations. * @param compute_end_ns Timestamp for the end of execution * computations. * @param exec_end_ns Timestamp for the end of execution. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceReportStatistics( TRITONBACKEND_ModelInstance instance, TRITONBACKEND_Request request, @Cast("const bool") boolean success, @Cast("const uint64_t") long exec_start_ns, @Cast("const uint64_t") long compute_start_ns, @Cast("const uint64_t") long compute_end_ns, @Cast("const uint64_t") long exec_end_ns); /** Record statistics for the execution of an entire batch of * inference requests.
* * All timestamps should be non-zero values reported in nanoseconds * and should be collected using * std::chrono::steady_clock::now().time_since_epoch() or the equivalent. * See TRITONBACKEND_ModelInstanceReportStatistics for more information about * the timestamps. * * 'batch_size' is the sum of the batch sizes for the individual * requests that were delivered together in the call to * TRITONBACKEND_ModelInstanceExecute. For example, if three requests * are passed to TRITONBACKEND_ModelInstanceExecute and those * requests have batch size 1, 2, and 3; then 'batch_size' should be * set to 6. * * @param instance The model instance. * @param batch_size Combined batch size of all the individual * requests executed in the batch. * @param exec_start_ns Timestamp for the start of execution. * @param compute_start_ns Timestamp for the start of execution * computations. * @param compute_end_ns Timestamp for the end of execution * computations. * @param exec_end_ns Timestamp for the end of execution. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceReportBatchStatistics( TRITONBACKEND_ModelInstance instance, @Cast("const uint64_t") long batch_size, @Cast("const uint64_t") long exec_start_ns, @Cast("const uint64_t") long compute_start_ns, @Cast("const uint64_t") long compute_end_ns, @Cast("const uint64_t") long exec_end_ns); /** * The following functions can be implemented by a backend. Functions * indicated as required must be implemented or the backend will fail * to load. * <p> * Initialize a backend. This function is optional, a backend is not * required to implement it. This function is called once when a * backend is loaded to allow the backend to initialize any state * associated with the backend. A backend has a single state that is * shared across all models that use the backend. * * @param backend The backend. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONBACKEND_Initialize( TRITONBACKEND_Backend backend); /** Finalize for a backend. This function is optional, a backend is * not required to implement it. This function is called once, just * before the backend is unloaded. All state associated with the * backend should be freed and any threads created for the backend * should be exited/joined before returning from this function. * * @param backend The backend. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_Finalize( TRITONBACKEND_Backend backend); /** Initialize for a model. This function is optional, a backend is * not required to implement it. This function is called once when a * model that uses the backend is loaded to allow the backend to * initialize any state associated with the model. The backend should * also examine the model configuration to determine if the * configuration is suitable for the backend. Any errors reported by * this function will prevent the model from loading. * * @param model The model. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInitialize( TRITONBACKEND_Model model); /** Finalize for a model. This function is optional, a backend is not * required to implement it. This function is called once for a * model, just before the model is unloaded from Triton. All state * associated with the model should be freed and any threads created * for the model should be exited/joined before returning from this * function. * * @param model The model. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelFinalize( TRITONBACKEND_Model model); /** Initialize for a model instance. This function is optional, a * backend is not required to implement it. 
This function is called * once when a model instance is created to allow the backend to * initialize any state associated with the instance. * * @param instance The model instance. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceInitialize( TRITONBACKEND_ModelInstance instance); /** Finalize for a model instance. This function is optional, a * backend is not required to implement it. This function is called * once for an instance, just before the corresponding model is * unloaded from Triton. All state associated with the instance * should be freed and any threads created for the instance should be * exited/joined before returning from this function. * * @param instance The model instance. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceFinalize( TRITONBACKEND_ModelInstance instance); /** Execute a batch of one or more requests on a model instance. This * function is required. Triton will not perform multiple * simultaneous calls to this function for a given model 'instance'; * however, there may be simultaneous calls for different model * instances (for the same or different models). * * If an error is returned the ownership of the request objects * remains with Triton and the backend must not retain references to * the request objects or access them in any way. * * If success is returned, ownership of the request objects is * transferred to the backend and it is then responsible for creating * responses and releasing the request objects. Note that even though * ownership of the request objects is transferred to the backend, the * ownership of the buffer holding request pointers is returned back * to Triton upon return from TRITONBACKEND_ModelInstanceExecute. 
If * any request objects need to be maintained beyond * TRITONBACKEND_ModelInstanceExecute, then the pointers must be copied * out of the array within TRITONBACKEND_ModelInstanceExecute. * * @param instance The model instance. * @param requests The requests. * @param request_count The number of requests in the batch. * @return a TRITONSERVER_Error indicating success or failure. */ public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceExecute( TRITONBACKEND_ModelInstance instance, @Cast("TRITONBACKEND_Request**") PointerPointer requests, @Cast("const uint32_t") int request_count); public static native TRITONSERVER_Error TRITONBACKEND_ModelInstanceExecute( TRITONBACKEND_ModelInstance instance, @ByPtrPtr TRITONBACKEND_Request requests, @Cast("const uint32_t") int request_count); // #ifdef __cplusplus // #endif // Parsed from tritonrepoagent.h // Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #pragma once // #include <stddef.h> // #include <stdint.h> // #include "triton/core/tritonserver.h" // #ifdef __cplusplus // #endif // #ifdef _COMPILING_TRITONREPOAGENT // #if defined(_MSC_VER) // #define TRITONREPOAGENT_DECLSPEC __declspec(dllexport) // #define TRITONREPOAGENT_ISPEC __declspec(dllimport) // #elif defined(__GNUC__) // #define TRITONREPOAGENT_DECLSPEC __attribute__((__visibility__("default"))) // #define TRITONREPOAGENT_ISPEC // #else // #define TRITONREPOAGENT_DECLSPEC // #define TRITONREPOAGENT_ISPEC // #endif // #else // #if defined(_MSC_VER) // #define TRITONREPOAGENT_DECLSPEC __declspec(dllimport) // #define TRITONREPOAGENT_ISPEC __declspec(dllexport) // #else // #define TRITONREPOAGENT_DECLSPEC // #define TRITONREPOAGENT_ISPEC // Targeting ../tritonserver/TRITONREPOAGENT_Agent.java // Targeting ../tritonserver/TRITONREPOAGENT_AgentModel.java /** * TRITONREPOAGENT API Version * * The TRITONREPOAGENT API is versioned with major and minor version * numbers. Any change to the API that does not impact backwards * compatibility (for example, adding a non-required function) * increases the minor version number. Any change that breaks * backwards compatibility (for example, deleting or changing the * behavior of a function) increases the major version number. A * repository agent should check that the API version used to compile * the agent is compatible with the API version of the Triton server * that it is running in. 
This is typically done by code similar to * the following which makes sure that the major versions are equal * and that the minor version of Triton is >= the minor version used * to build the agent. * * uint32_t api_version_major, api_version_minor; * TRITONREPOAGENT_ApiVersion(&api_version_major, &api_version_minor); * if ((api_version_major != TRITONREPOAGENT_API_VERSION_MAJOR) || * (api_version_minor < TRITONREPOAGENT_API_VERSION_MINOR)) { * return TRITONSERVER_ErrorNew( * TRITONSERVER_ERROR_UNSUPPORTED, * "triton repository agent API version does not support this agent"); * } * */ public static final int TRITONREPOAGENT_API_VERSION_MAJOR = 0; /// public static final int TRITONREPOAGENT_API_VERSION_MINOR = 1; /** Get the TRITONREPOAGENT API version supported by Triton. This * value can be compared against the * TRITONREPOAGENT_API_VERSION_MAJOR and * TRITONREPOAGENT_API_VERSION_MINOR used to build the agent to * ensure that Triton is compatible with the agent. * * @param major Returns the TRITONREPOAGENT API major version supported * by Triton. * @param minor Returns the TRITONREPOAGENT API minor version supported * by Triton. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// /// public static native TRITONSERVER_Error TRITONREPOAGENT_ApiVersion( @Cast("uint32_t*") IntPointer major, @Cast("uint32_t*") IntPointer minor); public static native TRITONSERVER_Error TRITONREPOAGENT_ApiVersion( @Cast("uint32_t*") IntBuffer major, @Cast("uint32_t*") IntBuffer minor); public static native TRITONSERVER_Error TRITONREPOAGENT_ApiVersion( @Cast("uint32_t*") int[] major, @Cast("uint32_t*") int[] minor); /** TRITONREPOAGENT_ArtifactType * * The ways that the files that make up a model's repository content * are communicated between Triton and the agent. * * TRITONREPOAGENT_ARTIFACT_FILESYSTEM: The model artifacts are * communicated to and from the repository agent via a locally * accessible filesystem. 
The agent can access these files using * an appropriate filesystem API. * * TRITONREPOAGENT_ARTIFACT_REMOTE_FILESYSTEM: The model artifacts are * communicated to and from the repository agent via a remote filesystem. * The remote filesystem path follows the same convention as is used for * repository paths, for example, "s3://" prefix indicates an S3 path. * */ /** enum TRITONREPOAGENT_ArtifactType */ public static final int TRITONREPOAGENT_ARTIFACT_FILESYSTEM = 0, TRITONREPOAGENT_ARTIFACT_REMOTE_FILESYSTEM = 1; /** TRITONREPOAGENT_ActionType * * Types of repository actions that can be handled by an agent. * The lifecycle of a TRITONREPOAGENT_AgentModel begins with a call to * TRITONREPOAGENT_ModelInitialize and ends with a call to * TRITONREPOAGENT_ModelFinalize. Between those calls the current lifecycle * state of the model is communicated by calls to TRITONREPOAGENT_ModelAction. * Possible lifecycles are: * * LOAD -> LOAD_COMPLETE -> UNLOAD -> UNLOAD_COMPLETE * LOAD -> LOAD_FAIL * * TRITONREPOAGENT_ACTION_LOAD: A model is being loaded. * * TRITONREPOAGENT_ACTION_LOAD_COMPLETE: The model load completed * successfully and the model is now loaded. * * TRITONREPOAGENT_ACTION_LOAD_FAIL: The model load did not complete * successfully. The model is not loaded. * * TRITONREPOAGENT_ACTION_UNLOAD: The model is being unloaded. * * TRITONREPOAGENT_ACTION_UNLOAD_COMPLETE: The model unload is complete. * */ /** enum TRITONREPOAGENT_ActionType */ public static final int TRITONREPOAGENT_ACTION_LOAD = 0, TRITONREPOAGENT_ACTION_LOAD_COMPLETE = 1, TRITONREPOAGENT_ACTION_LOAD_FAIL = 2, TRITONREPOAGENT_ACTION_UNLOAD = 3, TRITONREPOAGENT_ACTION_UNLOAD_COMPLETE = 4; /** Get the location of the files that make up the model. The * 'location' communicated depends on how the model is being * communicated to the agent as indicated by 'artifact_type'. * * TRITONREPOAGENT_ARTIFACT_FILESYSTEM: The model artifacts are * made available to the agent via the local * filesytem. 
'location' returns the full path to the directory * in the model repository that contains the model's * artifacts. The returned location string is owned by Triton, * not the caller, and so should not be modified or freed. The * contents of the directory are owned by Triton, not the agent, * and so the agent should not delete or modify the contents. Use * TRITONREPOAGENT_RepositoryAcquire to get a location that can be * used to modify the model repository contents. * * TRITONREPOAGENT_ARTIFACT_REMOTE_FILESYSTEM: The model artifacts are * made available to the agent via a remote filesystem. * 'location' returns the full path to the remote directory that contains * the model's artifacts. The returned location string is owned by Triton, * not the caller, and so should not be modified or freed. The contents of * the remote directory are owned by Triton, not the agent, * and so the agent should not delete or modify the contents. * Use TRITONREPOAGENT_ModelRepositoryLocationAcquire to get a location * that can be used to write updated model repository contents. * * @param agent The agent. * @param model The model. * @param artifact_type Returns the artifact type for the location. * @param path Returns the location. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocation( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("TRITONREPOAGENT_ArtifactType*") IntPointer artifact_type, @Cast("const char**") PointerPointer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocation( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("TRITONREPOAGENT_ArtifactType*") IntPointer artifact_type, @Cast("const char**") @ByPtrPtr BytePointer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocation( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("TRITONREPOAGENT_ArtifactType*") IntBuffer artifact_type, @Cast("const char**") @ByPtrPtr ByteBuffer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocation( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("TRITONREPOAGENT_ArtifactType*") int[] artifact_type, @Cast("const char**") @ByPtrPtr byte[] location); /** Acquire a location where the agent can produce a new version of * the model repository files. This is a convenience method to create * a temporary directory for the agent. The agent is responsible for * calling TRITONREPOAGENT_ModelRepositoryLocationDelete in * TRITONREPOAGENT_ModelFinalize to delete the location. Initially the * acquired location is empty. The 'location' communicated depends on * the requested 'artifact_type'. * * TRITONREPOAGENT_ARTIFACT_FILESYSTEM: The location is a directory * on the local filesystem. 'location' returns the full path to * an empty directory that the agent should populate with the * model's artifacts. The returned location string is owned by * Triton, not the agent, and so should not be modified or freed. * * @param agent The agent. * @param model The model. * @param artifact_type The artifact type for the location. * @param path Returns the location. 
* @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationAcquire( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const TRITONREPOAGENT_ArtifactType") int artifact_type, @Cast("const char**") PointerPointer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationAcquire( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const TRITONREPOAGENT_ArtifactType") int artifact_type, @Cast("const char**") @ByPtrPtr BytePointer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationAcquire( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const TRITONREPOAGENT_ArtifactType") int artifact_type, @Cast("const char**") @ByPtrPtr ByteBuffer location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationAcquire( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const TRITONREPOAGENT_ArtifactType") int artifact_type, @Cast("const char**") @ByPtrPtr byte[] location); /** Discard and release ownership of a previously acquired location * and its contents. The agent must not access or modify the location * or its contents after this call. * * @param agent The agent. * @param model The model. * @param path The location to release. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationRelease( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, String location); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelRepositoryLocationRelease( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const char*") BytePointer location); /** Inform Triton that the specified repository location should be used for * the model in place of the original model repository. 
This method can only be * called when TRITONREPOAGENT_ModelAction is invoked with * TRITONREPOAGENT_ACTION_LOAD. The 'location' * communicated depends on how the repository is being * communicated to Triton as indicated by 'artifact_type'. * * TRITONREPOAGENT_ARTIFACT_FILESYSTEM: The model artifacts are * made available to Triton via the local filesystem. 'location' returns * the full path to the directory. Ownership of the contents of the * returned directory is transferred to Triton and the agent must not * modify or free the contents until TRITONREPOAGENT_ModelFinalize. * The local filesystem directory can be created using * TRITONREPOAGENT_ModelRepositoryLocationAcquire or the agent can use * its own local filesystem API. * * TRITONREPOAGENT_ARTIFACT_REMOTE_FILESYSTEM: The model artifacts are * made available to Triton via a remote filesystem. 'location' returns * the full path to the remote filesystem directory. Ownership of the * contents of the returned directory is transferred to Triton and * the agent must not modify or free the contents until * TRITONREPOAGENT_ModelFinalize.
* @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameterCount( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("uint32_t*") IntPointer count); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameterCount( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("uint32_t*") IntBuffer count); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameterCount( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("uint32_t*") int[] count); /** Get a parameter name and value. The caller does not own the * returned strings and must not modify or delete them. * * @param agent The agent. * @param model The model. * @param index The index of the parameter. Must be 0 <= index < * count, where count is the value returned by * TRITONREPOAGENT_ModelParameterCount. * @param parameter_name Returns the name of the parameter. * @param parameter_value Returns the value of the parameter. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameter( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int index, @Cast("const char**") PointerPointer parameter_name, @Cast("const char**") PointerPointer parameter_value); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameter( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr BytePointer parameter_name, @Cast("const char**") @ByPtrPtr BytePointer parameter_value); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameter( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr ByteBuffer parameter_name, @Cast("const char**") @ByPtrPtr ByteBuffer parameter_value); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelParameter( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int index, @Cast("const char**") @ByPtrPtr byte[] parameter_name, @Cast("const char**") @ByPtrPtr byte[] parameter_value); /** Get the model configuration. The caller takes ownership of the * message object and must call TRITONSERVER_MessageDelete to release * the object. If the model repository does not contain a * config.pbtxt file then 'model_config' is returned as nullptr. * * @param agent The agent. * @param model The model. * @param config_version The model configuration will be returned in * a format matching this version. If the configuration cannot be * represented in the requested version's format then an error will * be returned. Currently only version 1 is supported. * @param model_config Returns the model configuration as a message. * @return a TRITONSERVER_Error indicating success or failure. 
*/ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelConfig( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int config_version, @Cast("TRITONSERVER_Message**") PointerPointer model_config); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelConfig( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const uint32_t") int config_version, @ByPtrPtr TRITONSERVER_Message model_config); /** Get the user-specified state associated with the model. * * @param model The agent model. * @param state Returns the user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelState( TRITONREPOAGENT_AgentModel model, @Cast("void**") PointerPointer state); public static native TRITONSERVER_Error TRITONREPOAGENT_ModelState( TRITONREPOAGENT_AgentModel model, @Cast("void**") @ByPtrPtr Pointer state); /** Set the user-specified state associated with the model. * * @param model The agent model. * @param state The user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelSetState( TRITONREPOAGENT_AgentModel model, Pointer state); /** Get the user-specified state associated with the agent. * * @param agent The agent. * @param state Returns the user state, or nullptr if no user state. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_State( TRITONREPOAGENT_Agent agent, @Cast("void**") PointerPointer state); public static native TRITONSERVER_Error TRITONREPOAGENT_State( TRITONREPOAGENT_Agent agent, @Cast("void**") @ByPtrPtr Pointer state); /** Set the user-specified state associated with the agent. * * @param agent The agent. * @param state The user state, or nullptr if no user state. 
* @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONREPOAGENT_SetState( TRITONREPOAGENT_Agent agent, Pointer state); /** * The following functions can be implemented by an agent. Functions * indicated as required must be implemented or the agent will fail * to load. * <p> * Initialize an agent. This function is optional. This function is * called once when an agent is loaded to allow the agent to * initialize any state associated with the agent. An agent has a * single state that is shared across all invocations of the agent. * * @param agent The agent. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_Initialize( TRITONREPOAGENT_Agent agent); /** Finalize for an agent. This function is optional. This function is * called once, just before the agent is unloaded. All state * associated with the agent should be freed and any threads created * for the agent should be exited/joined before returning from this * function. * * @param agent The agent. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_Finalize( TRITONREPOAGENT_Agent agent); /** Initialize a model associated with an agent. This function is optional. * This function is called once when an agent model's lifecycle begins to allow * the agent model to initialize any state associated with it. An agent model * has a single state that is shared across all the lifecycle of the agent * model. * * @param agent The agent to be associated with the model. * @param model The model. * @return a TRITONSERVER_Error indicating success or failure. */ /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelInitialize( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model); /** Finalize for a model. This function is optional. 
This function is * called once, just before the end of the agent model's lifecycle. All state * associated with the agent model should be freed and any threads created * for the agent model should be exited/joined before returning from this * function. If the model acquired a model location using * TRITONREPOAGENT_ModelRepositoryLocationAcquire, it must call * TRITONREPOAGENT_ModelRepositoryLocationRelease to release that location. * * @param agent The agent associated with the model. * @param model The model. * @return a TRITONSERVER_Error indicating success or failure. */ /// /// /// public static native TRITONSERVER_Error TRITONREPOAGENT_ModelFinalize( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model); /** Handle an action for a specified model. This function is * required. Triton will not perform multiple simultaneous calls to * this function for a given agent and model; however, there may be * simultaneous calls for the agent for different models. * * If the agent does not handle the action the agent should * immediately return success (nullptr). * * Any modification to the model's repository must be made when 'action_type' * is TRITONREPOAGENT_ACTION_LOAD. * To modify the model's repository the agent must either acquire a mutable * location via TRITONREPOAGENT_ModelRepositoryLocationAcquire * or its own managed location, report the location to Triton via * TRITONREPOAGENT_ModelRepositoryUpdate, and then return * success (nullptr). If the agent does not need to make any changes * to the model repository it should not call * TRITONREPOAGENT_ModelRepositoryUpdate and then return success. * To indicate that a model load should fail return a non-success status. * * @param agent The agent. * @param model The model that is the target of the action. * \action_type The type of action the agent should handle for the model. * @return a TRITONSERVER_Error indicating success or failure. 
*/ public static native TRITONSERVER_Error TRITONREPOAGENT_ModelAction( TRITONREPOAGENT_Agent agent, TRITONREPOAGENT_AgentModel model, @Cast("const TRITONREPOAGENT_ActionType") int action_type); // #ifdef __cplusplus // #endif }
cyberglot/effectfuljs
packages/cc/transform-minimal.js
module.exports = require("./transform").options({ profile: "defaultMinimal" });
connectim/iOS
Connect/Client/Login&Register/View/LocalUserInfoView.h
// // LocalUserInfoView.h // Connect // // Created by MoHuilin on 2016/12/6. // Copyright © 2016年 Connect - P2P Encrypted Instant Message. All rights reserved. // #import <UIKit/UIKit.h> @interface LocalUserInfoView : UIControl @property(nonatomic, strong) UILabel *userNameLabel; @property(nonatomic, strong) UIImageView *avatarImageView; + (instancetype)viewWithAccountInfo:(AccountInfo *)user; - (void)reloadWithUser:(AccountInfo *)user; // hide sow @property(nonatomic, assign) BOOL hidenArrowView; @end
kubeform/provider-ibm-api
apis/cos/v1alpha1/codec.go
<reponame>kubeform/provider-ibm-api /* Copyright AppsCode Inc. and Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by Kubeform. DO NOT EDIT. package v1alpha1 import ( "unsafe" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" ) func GetEncoder() map[string]jsoniter.ValEncoder { return map[string]jsoniter.ValEncoder{ jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecActivityTracking{}).Type1()): BucketSpecActivityTrackingCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecArchiveRule{}).Type1()): BucketSpecArchiveRuleCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecMetricsMonitoring{}).Type1()): BucketSpecMetricsMonitoringCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecObjectVersioning{}).Type1()): BucketSpecObjectVersioningCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecRetentionRule{}).Type1()): BucketSpecRetentionRuleCodec{}, } } func GetDecoder() map[string]jsoniter.ValDecoder { return map[string]jsoniter.ValDecoder{ jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecActivityTracking{}).Type1()): BucketSpecActivityTrackingCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecArchiveRule{}).Type1()): BucketSpecArchiveRuleCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecMetricsMonitoring{}).Type1()): BucketSpecMetricsMonitoringCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecObjectVersioning{}).Type1()): BucketSpecObjectVersioningCodec{}, jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecRetentionRule{}).Type1()): 
BucketSpecRetentionRuleCodec{}, } } func getEncodersWithout(typ string) map[string]jsoniter.ValEncoder { origMap := GetEncoder() delete(origMap, typ) return origMap } func getDecodersWithout(typ string) map[string]jsoniter.ValDecoder { origMap := GetDecoder() delete(origMap, typ) return origMap } // +k8s:deepcopy-gen=false type BucketSpecActivityTrackingCodec struct { } func (BucketSpecActivityTrackingCodec) IsEmpty(ptr unsafe.Pointer) bool { return (*BucketSpecActivityTracking)(ptr) == nil } func (BucketSpecActivityTrackingCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { obj := (*BucketSpecActivityTracking)(ptr) var objs []BucketSpecActivityTracking if obj != nil { objs = []BucketSpecActivityTracking{*obj} } jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeEncoders: getEncodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecActivityTracking{}).Type1())), }.Froze() byt, _ := jsonit.Marshal(objs) stream.Write(byt) } func (BucketSpecActivityTrackingCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.Skip() *(*BucketSpecActivityTracking)(ptr) = BucketSpecActivityTracking{} return case jsoniter.ArrayValue: objsByte := iter.SkipAndReturnBytes() if len(objsByte) > 0 { var objs []BucketSpecActivityTracking jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecActivityTracking{}).Type1())), }.Froze() jsonit.Unmarshal(objsByte, &objs) if len(objs) > 0 { *(*BucketSpecActivityTracking)(ptr) = objs[0] } else { *(*BucketSpecActivityTracking)(ptr) = BucketSpecActivityTracking{} } } else { *(*BucketSpecActivityTracking)(ptr) = BucketSpecActivityTracking{} } case jsoniter.ObjectValue: objByte := iter.SkipAndReturnBytes() if len(objByte) > 0 { var obj BucketSpecActivityTracking jsonit := jsoniter.Config{ 
EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecActivityTracking{}).Type1())), }.Froze() jsonit.Unmarshal(objByte, &obj) *(*BucketSpecActivityTracking)(ptr) = obj } else { *(*BucketSpecActivityTracking)(ptr) = BucketSpecActivityTracking{} } default: iter.ReportError("decode BucketSpecActivityTracking", "unexpected JSON type") } } // +k8s:deepcopy-gen=false type BucketSpecArchiveRuleCodec struct { } func (BucketSpecArchiveRuleCodec) IsEmpty(ptr unsafe.Pointer) bool { return (*BucketSpecArchiveRule)(ptr) == nil } func (BucketSpecArchiveRuleCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { obj := (*BucketSpecArchiveRule)(ptr) var objs []BucketSpecArchiveRule if obj != nil { objs = []BucketSpecArchiveRule{*obj} } jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeEncoders: getEncodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecArchiveRule{}).Type1())), }.Froze() byt, _ := jsonit.Marshal(objs) stream.Write(byt) } func (BucketSpecArchiveRuleCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.Skip() *(*BucketSpecArchiveRule)(ptr) = BucketSpecArchiveRule{} return case jsoniter.ArrayValue: objsByte := iter.SkipAndReturnBytes() if len(objsByte) > 0 { var objs []BucketSpecArchiveRule jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecArchiveRule{}).Type1())), }.Froze() jsonit.Unmarshal(objsByte, &objs) if len(objs) > 0 { *(*BucketSpecArchiveRule)(ptr) = objs[0] } else { *(*BucketSpecArchiveRule)(ptr) = BucketSpecArchiveRule{} } } else { *(*BucketSpecArchiveRule)(ptr) = BucketSpecArchiveRule{} } case jsoniter.ObjectValue: objByte := iter.SkipAndReturnBytes() if len(objByte) > 0 { var 
obj BucketSpecArchiveRule jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecArchiveRule{}).Type1())), }.Froze() jsonit.Unmarshal(objByte, &obj) *(*BucketSpecArchiveRule)(ptr) = obj } else { *(*BucketSpecArchiveRule)(ptr) = BucketSpecArchiveRule{} } default: iter.ReportError("decode BucketSpecArchiveRule", "unexpected JSON type") } } // +k8s:deepcopy-gen=false type BucketSpecMetricsMonitoringCodec struct { } func (BucketSpecMetricsMonitoringCodec) IsEmpty(ptr unsafe.Pointer) bool { return (*BucketSpecMetricsMonitoring)(ptr) == nil } func (BucketSpecMetricsMonitoringCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { obj := (*BucketSpecMetricsMonitoring)(ptr) var objs []BucketSpecMetricsMonitoring if obj != nil { objs = []BucketSpecMetricsMonitoring{*obj} } jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeEncoders: getEncodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecMetricsMonitoring{}).Type1())), }.Froze() byt, _ := jsonit.Marshal(objs) stream.Write(byt) } func (BucketSpecMetricsMonitoringCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.Skip() *(*BucketSpecMetricsMonitoring)(ptr) = BucketSpecMetricsMonitoring{} return case jsoniter.ArrayValue: objsByte := iter.SkipAndReturnBytes() if len(objsByte) > 0 { var objs []BucketSpecMetricsMonitoring jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecMetricsMonitoring{}).Type1())), }.Froze() jsonit.Unmarshal(objsByte, &objs) if len(objs) > 0 { *(*BucketSpecMetricsMonitoring)(ptr) = objs[0] } else { *(*BucketSpecMetricsMonitoring)(ptr) = BucketSpecMetricsMonitoring{} } } else { 
*(*BucketSpecMetricsMonitoring)(ptr) = BucketSpecMetricsMonitoring{} } case jsoniter.ObjectValue: objByte := iter.SkipAndReturnBytes() if len(objByte) > 0 { var obj BucketSpecMetricsMonitoring jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecMetricsMonitoring{}).Type1())), }.Froze() jsonit.Unmarshal(objByte, &obj) *(*BucketSpecMetricsMonitoring)(ptr) = obj } else { *(*BucketSpecMetricsMonitoring)(ptr) = BucketSpecMetricsMonitoring{} } default: iter.ReportError("decode BucketSpecMetricsMonitoring", "unexpected JSON type") } } // +k8s:deepcopy-gen=false type BucketSpecObjectVersioningCodec struct { } func (BucketSpecObjectVersioningCodec) IsEmpty(ptr unsafe.Pointer) bool { return (*BucketSpecObjectVersioning)(ptr) == nil } func (BucketSpecObjectVersioningCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { obj := (*BucketSpecObjectVersioning)(ptr) var objs []BucketSpecObjectVersioning if obj != nil { objs = []BucketSpecObjectVersioning{*obj} } jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeEncoders: getEncodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecObjectVersioning{}).Type1())), }.Froze() byt, _ := jsonit.Marshal(objs) stream.Write(byt) } func (BucketSpecObjectVersioningCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.Skip() *(*BucketSpecObjectVersioning)(ptr) = BucketSpecObjectVersioning{} return case jsoniter.ArrayValue: objsByte := iter.SkipAndReturnBytes() if len(objsByte) > 0 { var objs []BucketSpecObjectVersioning jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecObjectVersioning{}).Type1())), }.Froze() jsonit.Unmarshal(objsByte, 
&objs) if len(objs) > 0 { *(*BucketSpecObjectVersioning)(ptr) = objs[0] } else { *(*BucketSpecObjectVersioning)(ptr) = BucketSpecObjectVersioning{} } } else { *(*BucketSpecObjectVersioning)(ptr) = BucketSpecObjectVersioning{} } case jsoniter.ObjectValue: objByte := iter.SkipAndReturnBytes() if len(objByte) > 0 { var obj BucketSpecObjectVersioning jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecObjectVersioning{}).Type1())), }.Froze() jsonit.Unmarshal(objByte, &obj) *(*BucketSpecObjectVersioning)(ptr) = obj } else { *(*BucketSpecObjectVersioning)(ptr) = BucketSpecObjectVersioning{} } default: iter.ReportError("decode BucketSpecObjectVersioning", "unexpected JSON type") } } // +k8s:deepcopy-gen=false type BucketSpecRetentionRuleCodec struct { } func (BucketSpecRetentionRuleCodec) IsEmpty(ptr unsafe.Pointer) bool { return (*BucketSpecRetentionRule)(ptr) == nil } func (BucketSpecRetentionRuleCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) { obj := (*BucketSpecRetentionRule)(ptr) var objs []BucketSpecRetentionRule if obj != nil { objs = []BucketSpecRetentionRule{*obj} } jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeEncoders: getEncodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecRetentionRule{}).Type1())), }.Froze() byt, _ := jsonit.Marshal(objs) stream.Write(byt) } func (BucketSpecRetentionRuleCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { switch iter.WhatIsNext() { case jsoniter.NilValue: iter.Skip() *(*BucketSpecRetentionRule)(ptr) = BucketSpecRetentionRule{} return case jsoniter.ArrayValue: objsByte := iter.SkipAndReturnBytes() if len(objsByte) > 0 { var objs []BucketSpecRetentionRule jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: 
getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecRetentionRule{}).Type1())), }.Froze() jsonit.Unmarshal(objsByte, &objs) if len(objs) > 0 { *(*BucketSpecRetentionRule)(ptr) = objs[0] } else { *(*BucketSpecRetentionRule)(ptr) = BucketSpecRetentionRule{} } } else { *(*BucketSpecRetentionRule)(ptr) = BucketSpecRetentionRule{} } case jsoniter.ObjectValue: objByte := iter.SkipAndReturnBytes() if len(objByte) > 0 { var obj BucketSpecRetentionRule jsonit := jsoniter.Config{ EscapeHTML: true, SortMapKeys: true, ValidateJsonRawMessage: true, TagKey: "tf", TypeDecoders: getDecodersWithout(jsoniter.MustGetKind(reflect2.TypeOf(BucketSpecRetentionRule{}).Type1())), }.Froze() jsonit.Unmarshal(objByte, &obj) *(*BucketSpecRetentionRule)(ptr) = obj } else { *(*BucketSpecRetentionRule)(ptr) = BucketSpecRetentionRule{} } default: iter.ReportError("decode BucketSpecRetentionRule", "unexpected JSON type") } }
billcchung/splunk-operator
pkg/splunk/common/names.go
// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package common import "fmt" const ( // namespace scoped secret name namespaceScopedSecretNameTemplateStr = "splunk-%s-secret" // versionedSecretIdentifier based secret name versionedSecretNameTemplateStr = "%s-secret-v%s" // FirstVersion represents the first version of versioned secrets FirstVersion = "1" // SecretBytes used to generate Splunk secrets SecretBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" // HexBytes used to generate random hexadecimal strings (e.g. 
HEC tokens) HexBytes = "ABCDEF01234567890" // MinimumVersionedSecrets holds the minimum number of secrets to be held per version MinimumVersionedSecrets = 3 // IdxcSecret represents indexer cluster pass4Symmkey secret token IdxcSecret = "idxc_secret" ) // GetVersionedSecretName returns a versioned secret name func GetVersionedSecretName(versionedSecretIdentifier string, version string) string { return fmt.Sprintf(versionedSecretNameTemplateStr, versionedSecretIdentifier, version) } // GetNamespaceScopedSecretName gets namespace scoped secret name func GetNamespaceScopedSecretName(namespace string) string { return fmt.Sprintf(namespaceScopedSecretNameTemplateStr, namespace) } // GetSplunkSecretTokenTypes returns all types of Splunk secret tokens func GetSplunkSecretTokenTypes() []string { return []string{"hec_token", "password", "<PASSWORD>", "idxc_secret", "shc_secret"} } // GetLabelTypes returns a map of label types to strings func GetLabelTypes() map[string]string { // Assigning each type of label to string return map[string]string{"manager": "app.kubernetes.io/managed-by", "component": "app.kubernetes.io/component", "name": "app.kubernetes.io/name", "partof": "app.kubernetes.io/part-of", "instance": "app.kubernetes.io/instance", } }
TrilateralX/TrilateralLimeTriangle
Export/macos/obj/include/trilateral3/color/ColorHelper.h
// NOTE(review): machine-generated hxcpp binding header (produced by the Haxe
// 4.2.0-rc.1 C++ backend) declaring trilateral3::color::ColorHelper_obj.
// The class exposes only Dynamic function-valued fields (getBlack_, from_cymka_,
// from_argb_, toHexInt_, rgbConvert_, cymkConvert_, alphaChannel_, redChannel_,
// greenChannel_, blueChannel_) plus the standard hxcpp object plumbing
// (GC __Mark/__Visit, RTTI, __Field/__SetField accessors, placement new).
// Do not edit by hand — regenerate from the Haxe source instead.
<reponame>TrilateralX/TrilateralLimeTriangle // Generated by Haxe 4.2.0-rc.1+cb30bd580 #ifndef INCLUDED_trilateral3_color_ColorHelper #define INCLUDED_trilateral3_color_ColorHelper #ifndef HXCPP_H #include <hxcpp.h> #endif HX_DECLARE_CLASS2(trilateral3,color,ColorHelper) namespace trilateral3{ namespace color{ class HXCPP_CLASS_ATTRIBUTES ColorHelper_obj : public ::hx::Object { public: typedef ::hx::Object super; typedef ColorHelper_obj OBJ_; ColorHelper_obj(); public: enum { _hx_ClassId = 0x0a3663b1 }; void __construct(); inline void *operator new(size_t inSize, bool inContainer=true,const char *inName="trilateral3.color.ColorHelper") { return ::hx::Object::operator new(inSize,inContainer,inName); } inline void *operator new(size_t inSize, int extra) { return ::hx::Object::operator new(inSize+extra,true,"trilateral3.color.ColorHelper"); } static ::hx::ObjectPtr< ColorHelper_obj > __new(); static ::hx::ObjectPtr< ColorHelper_obj > __alloc(::hx::Ctx *_hx_ctx); static void * _hx_vtable; static Dynamic __CreateEmpty(); static Dynamic __Create(::hx::DynamicArray inArgs); //~ColorHelper_obj(); HX_DO_RTTI_ALL; ::hx::Val __Field(const ::String &inString, ::hx::PropertyAccess inCallProp); ::hx::Val __SetField(const ::String &inString,const ::hx::Val &inValue, ::hx::PropertyAccess inCallProp); static void __register(); void __Mark(HX_MARK_PARAMS); void __Visit(HX_VISIT_PARAMS); bool _hx_isInstanceOf(int inClassId); ::String __ToString() const { return HX_("ColorHelper",11,a7,a9,a2); } ::Dynamic getBlack_; Dynamic getBlack__dyn() { return getBlack_;} ::Dynamic from_cymka_; Dynamic from_cymka__dyn() { return from_cymka_;} ::Dynamic from_argb_; Dynamic from_argb__dyn() { return from_argb_;} ::Dynamic toHexInt_; Dynamic toHexInt__dyn() { return toHexInt_;} ::Dynamic rgbConvert_; Dynamic rgbConvert__dyn() { return rgbConvert_;} ::Dynamic cymkConvert_; Dynamic cymkConvert__dyn() { return cymkConvert_;} ::Dynamic alphaChannel_; Dynamic alphaChannel__dyn() { return alphaChannel_;}
::Dynamic redChannel_; Dynamic redChannel__dyn() { return redChannel_;} ::Dynamic greenChannel_; Dynamic greenChannel__dyn() { return greenChannel_;} ::Dynamic blueChannel_; Dynamic blueChannel__dyn() { return blueChannel_;} }; } // end namespace trilateral3 } // end namespace color #endif /* INCLUDED_trilateral3_color_ColorHelper */
xtoples/LiquidPractice
src/main/java/dev/liquidnetwork/liquidpractice/tournament/command/TournamentLeaveCommand.java
package dev.liquidnetwork.liquidpractice.tournament.command; import dev.liquidnetwork.liquidpractice.party.Party; import dev.liquidnetwork.liquidpractice.profile.Profile; import dev.liquidnetwork.liquidpractice.tournament.Tournament; import dev.liquidnetwork.liquidpractice.util.command.command.CommandMeta; import org.bukkit.ChatColor; import org.bukkit.entity.Player; @CommandMeta(label = "tournament leave") public class TournamentLeaveCommand { public void execute(Player player) { if (Tournament.CURRENT_TOURNAMENT == null || Tournament.CURRENT_TOURNAMENT.hasStarted()) { player.sendMessage(ChatColor.RED + "There isn't a Tournament you can leave"); return; } Party party = Profile.getByUuid(player.getUniqueId()).getParty(); if (party == null) { player.sendMessage("You aren't currently in a Tournament"); return; } if (!Tournament.CURRENT_TOURNAMENT.isParticipating(player)) { player.sendMessage("You aren't currently in a Tournament"); return; } if (!party.isLeader(player.getUniqueId())) { player.sendMessage(ChatColor.RED + "&cOnly Leaders can do this"); return; } Tournament.CURRENT_TOURNAMENT.leave(party); } }
x403368945/common-mvc-parent
demo-service/src/main/java/com/ccx/demo/DemoServiceApplication.java
package com.ccx.demo;

import org.springframework.boot.SpringApplication;

/**
 * spring-boot specific entry point: only the spring-boot packaging needs this file.
 * <pre>
 * Reference configuration:
 * https://docs.spring.io/spring-boot/docs/2.1.5.RELEASE/reference/htmlsingle/
 * </pre>
 *
 * @author 谢长春 2019/1/21
 */
public class DemoServiceApplication {
    public static void main(String[] args) {
        // NOTE(review): this boots DemoMainApplication (declared elsewhere in the
        // project) rather than this class — presumably intentional, since this
        // class only hosts main() for the spring-boot packaging; confirm.
        SpringApplication.run(DemoMainApplication.class, args);
    }

//    @Bean
//    public CommandLineRunner commandLineRunner(ApplicationContext context) {
//        return args -> {
//            System.out.println("打印所有bean:");
//            Stream.of(context.getBeanDefinitionNames()).sorted().forEach(System.out::println);
//        };
//    }
}
codartX/ZCL
zcl/zcl_cluster.c
#include <string.h>
#include "zcl_cluster.h"

/*
 * zcl_cluster_find_attr - look up an attribute in a cluster's attribute list.
 *
 * Scans cluster->attr_list from index 0 until a false/NULL terminator entry is
 * reached, returning a pointer to the first entry whose id equals attr_id, or
 * NULL when no entry matches.
 *
 * NOTE(review): the loop condition uses `cluster->attr_list[i]` as a truth
 * value while the body accesses the same element with struct member syntax
 * (`.id`). Both only compile together for specific typedefs of zcl_attr_t in
 * zcl_cluster.h — verify against the header; if attr_list is a plain array of
 * structs this needs an explicit length or sentinel-id terminator instead.
 *
 * NOTE(review): the index is uint8_t, so lists longer than 255 entries would
 * wrap — confirm the maximum attribute count.
 */
zcl_attr_t *zcl_cluster_find_attr(zcl_cluster_t *cluster, uint16_t attr_id)
{
    uint8_t i = 0;

    while (cluster->attr_list[i]) {
        if (cluster->attr_list[i].id == attr_id) {
            return &(cluster->attr_list[i]);
        }
        i++;
    }
    return NULL;
}
chachabooboo/king-phisher
king_phisher/server/letsencrypt.py
<filename>king_phisher/server/letsencrypt.py #!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/letsencrypt.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#

import collections
import logging
import os
import re

from king_phisher import startup
from king_phisher.server.database import storage as db_storage

logger = logging.getLogger('KingPhisher.LetsEncrypt')

LETS_ENCRYPT_DEFAULT_DATA_PATH = '/etc/letsencrypt'
"""The default path at which Let's Encrypt data is stored."""

# Matches certbot "live" directory names: a hostname, optionally followed by a
# "-N" suffix that certbot appends to avoid naming conflicts.
_HOSTNAME_DIRECTORY_REGEX = re.compile(r'^(?P<hostname>[a-z0-9][a-z0-9-]*(\.[a-z0-9-]+)*\.[a-z]+)(-(?P<index>\d+))?$', re.IGNORECASE)
# Persistent key-value store of per-hostname SNI configurations.
_sni_hostnames = db_storage.KeyValueStorage(namespace='server.ssl.sni.hostnames', order_by='key')

SNIHostnameConfiguration = collections.namedtuple('SNIHostnameConfiguration', ('certfile', 'keyfile', 'enabled'))
"""
The information for a certificate used by the server's SSL Server Name
Indicator (SNI) extension.

.. py:attribute:: certfile

    The path to the SSL certificate file on disk to use for the hostname.

.. py:attribute:: keyfile

    The path to the SSL key file on disk to use for the hostname.

.. py:attribute:: enabled

    Whether or not this configuration is set to be loaded by the server.
"""

def _check_files(*file_paths):
    # Every path must exist as a regular file and be readable.
    return all(os.path.isfile(file_path) and os.access(file_path, os.R_OK) for file_path in file_paths)

def _get_files(directory, hostname):
    # Resolve the certbot live directory for *hostname* and return the
    # (certfile, keyfile) paths, with None for any file that is missing or
    # unreadable.
    if os.path.isdir(os.path.join(directory, hostname)):
        directory = os.path.join(directory, hostname)
    else:
        # certbot will append digits to the end of a directory to avoid naming conflicts, so find the highest index
        index_str = None
        for subdirectory in os.listdir(directory):
            match = _HOSTNAME_DIRECTORY_REGEX.match(subdirectory)
            if match is None or match.group('hostname') != hostname or not match.group('index'):
                continue
            if index_str is None or int(match.group('index')) > int(index_str):
                index_str = match.group('index')
        if index_str is None:
            return None, None
        directory = os.path.join(directory, hostname + '-' + index_str)
    cert_path = os.path.join(directory, 'fullchain.pem')
    if not _check_files(cert_path):
        cert_path = None
    key_path = os.path.join(directory, 'privkey.pem')
    if not _check_files(key_path):
        key_path = None
    return cert_path, key_path

def _run_certbot(args, bin_path=None):
    # Execute the certbot binary with *args* and return the completed process
    # object from startup.run_process.
    bin_path = bin_path or get_certbot_bin_path()
    if bin_path is None:
        # Bug fix: this previously *returned* a FileNotFoundError instance
        # instead of raising it, causing callers (e.g. certbot_issue accessing
        # proc.status) to fail later with an AttributeError.
        raise FileNotFoundError('the certbot binary could not be found')
    args = (bin_path,) + tuple(args)
    return startup.run_process(args)

def _sync_hostnames(unified_directory):
    # Scan the unified letsencrypt "live" directory and register an SNI
    # configuration for every hostname that has usable data but is not yet
    # present in the persistent store.
    directory = os.path.join(unified_directory, 'etc', 'live')
    if not os.path.isdir(directory):
        logger.warning('can not enumerate available letsencrypt data (directory not found)')
        return
    if not os.access(directory, os.R_OK | os.X_OK):
        logger.warning('can not enumerate available letsencrypt data (invalid permissions)')
        return
    for subdirectory in os.listdir(directory):
        match = _HOSTNAME_DIRECTORY_REGEX.match(subdirectory)
        if match is None:
            continue
        hostname = match.group('hostname')
        if hostname in _sni_hostnames:
            continue
        certfile, keyfile = _get_files(directory, match.group('hostname'))
        if not (certfile and keyfile):
            continue
        set_sni_hostname(hostname, certfile, keyfile)

def certbot_issue(webroot, hostname, bin_path=None, unified_directory=None):
    """
    Issue a certificate using Let's Encrypt's ``certbot`` utility. This function
    wraps the ``certbot`` binary and configures the parameters as appropriate.
    By default, the resulting certificate will be placed under
    :py:data:`.LETS_ENCRYPT_DEFAULT_DATA_PATH`, however if *unified_directory*
    is used then it will be under ``$unified_directory/etc``.

    :param str webroot: The webroot to use while requesting the certificate.
    :param str hostname: The hostname of the certificate to request.
    :param str bin_path: The optional path to the ``certbot`` binary. If not
        specified, then it will be searched for utilizing
        :py:func:`~king_phisher.startup.which`.
    :param str unified_directory: A single directory under which all the Let's
        Encrypt data should be stored. This is useful when not running the
        utility as root.
    :raises FileNotFoundError: If the ``certbot`` binary can not be found.
    :return: The exit status of the ``certbot`` utility.
    :rtype: int
    """
    args = ['certonly']
    if unified_directory:
        args.extend(['--config-dir', os.path.join(unified_directory, 'etc')])
        args.extend(['--logs-dir', os.path.join(unified_directory, 'log')])
        args.extend(['--work-dir', os.path.join(unified_directory, 'lib')])
    args.extend(['--webroot', '--webroot-path', webroot, '-d', hostname])
    proc = _run_certbot(args, bin_path=bin_path)
    return proc.status

def get_certbot_bin_path(config=None):
    """
    Get the path to Let's Encrypt's ``certbot`` command line utility. If the
    path is found, it is verified to be both a file and executable. If the
    path verification fails, ``None`` is returned.

    .. versionadded:: 1.14.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: The path to the certbot binary.
    :rtype: str
    """
    if config:
        letsencrypt_config = config.get_if_exists('server.letsencrypt', {})
    else:
        letsencrypt_config = {}
    # An explicitly configured path takes precedence over a PATH search.
    bin_path = letsencrypt_config.get('certbot_path') or startup.which('certbot')
    if bin_path is None:
        return None
    if not os.path.isfile(bin_path):
        return None
    if not os.access(bin_path, os.R_OK | os.X_OK):
        return None
    return bin_path

def get_sni_hostname_config(hostname, config=None):
    """
    Search for and return the SNI configuration for the specified *hostname*.
    This method will first check to see if the entry exists in the database
    before searching the Let's Encrypt data directory (if ``data_path`` is
    present in the server configuration). If no configuration data is found, or
    the data file paths appear invalid, ``None`` is returned.

    :param str hostname: The hostname to retrieve the configuration for.
    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: The SNI configuration for the hostname if it was found.
    :rtype: :py:class:`.SNIHostnameConfiguration`
    """
    unified_directory = config.get_if_exists('server.letsencrypt.data_path') if config else None
    if unified_directory:
        _sync_hostnames(unified_directory)
    sni_config = _sni_hostnames.get(hostname)
    if not sni_config:
        return None
    # Stale entries whose files have since disappeared are treated as missing.
    if not _check_files(sni_config['certfile'], sni_config['keyfile']):
        return None
    return SNIHostnameConfiguration(**sni_config)

def get_sni_hostnames(config=None, check_files=True):
    """
    Retrieve all the hostnames for which a valid SNI configuration can be
    retrieved. These are the hostnames for which SNI can be enabled. If
    *check_files* is enabled, the data files will be checked to ensure that
    they exist and are readable, else the configuration will be omitted.

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :param bool check_files: Whether or not to check the referenced data files.
    :return: A dictionary, keyed by hostnames with values of
        :py:class:`.SNIHostnameConfiguration` instances.
    :rtype: dict
    """
    unified_directory = config.get_if_exists('server.letsencrypt.data_path') if config else None
    if unified_directory:
        _sync_hostnames(unified_directory)
    hostnames = collections.OrderedDict()
    for hostname, sni_config in _sni_hostnames.items():
        if check_files and not _check_files(sni_config['certfile'], sni_config['keyfile']):
            continue
        hostnames[hostname] = SNIHostnameConfiguration(**sni_config)
    return hostnames

def set_sni_hostname(hostname, certfile, keyfile, enabled=False):
    """
    Set the SNI configuration for the specified *hostname*. This information
    can then later be retrieved with either :py:func:`get_sni_hostname_config`
    or :py:func:`get_sni_hostnames`.

    :param str hostname: The hostname associated with the configuration data.
    :param str certfile: The path to the certificate file on disk.
    :param str keyfile: The path to the key file on disk.
    :param bool enabled: Whether or not this SNI configuration is loaded in the server.
    """
    # Paths are normalized to absolute so later _check_files calls do not
    # depend on the process working directory.
    _sni_hostnames[hostname] = {'certfile': os.path.abspath(certfile), 'keyfile': os.path.abspath(keyfile), 'enabled': enabled}
yuxijian/WxJava
weixin-java-mp/src/main/java/me/chanjar/weixin/mp/bean/WxMpMassPreviewMessage.java
package me.chanjar.weixin.mp.bean;

import lombok.Data;
import me.chanjar.weixin.common.api.WxConsts;
import me.chanjar.weixin.mp.util.json.WxMpGsonBuilder;

import java.io.Serializable;

/**
 * Mass-message preview request: carries the preview target (username or
 * openid), the message type and the content/media reference.
 *
 * @author miller
 */
@Data
public class WxMpMassPreviewMessage implements Serializable {
  private static final long serialVersionUID = 9095211638358424020L;

  // Preview recipient identified by WeChat username — presumably only one of
  // the two recipient fields is required per request; confirm against the API.
  private String toWxUserName;
  // Preview recipient identified by openid.
  private String toWxUserOpenid;
  /**
   * <pre>
   * Message type.
   * Use one of:
   * {@link WxConsts.MassMsgType#IMAGE}
   * {@link WxConsts.MassMsgType#MPNEWS}
   * {@link WxConsts.MassMsgType#TEXT}
   * {@link WxConsts.MassMsgType#MPVIDEO}
   * {@link WxConsts.MassMsgType#VOICE}
   * If msgtype does not match media_id, the API returns a "system busy" error.
   * </pre>
   */
  private String msgType;
  // Message body for TEXT messages.
  private String content;
  // Uploaded media id for media-based message types.
  private String mediaId;

  public WxMpMassPreviewMessage() {
    super();
  }

  /** Serializes this message with the shared MP Gson configuration. */
  public String toJson() {
    return WxMpGsonBuilder.create().toJson(this);
  }
}
eloemosynator/YbEasyCli
yb_chunk_dml_by_date_part.py
#!/usr/bin/env python3
"""
USAGE:
      chunk_dml_by_date_part.py [options]

PURPOSE:
      Create/execute DML chunked by a date/timestamp column.

OPTIONS:
      See the command line help message for all options.
      (chunk_dml_by_date_part.py --help)

Output:
      Chunked DML statements.
"""
import sys

from yb_common import ArgIntRange, StoredProc, Util


class chunk_dml_by_date_part(Util):
    """Issue the ybsql command used to create/execute DML chunked by
    date/timestamp column
    """
    # Util framework configuration: --help description, argument defaults and
    # the example printed by the usage text.
    config = {
        'description': 'Chunk DML by DATE/TIMESTAMP column.'
        , 'optional_args_single': []
        , 'default_args': {'pre_sql': '', 'post_sql': ''}
        , 'usage_example': {
            'cmd_line_args': '@$HOME/conn.args @$HOME/yb_chunk_dml_by_date_part.args --print_chunk_dml'
            , 'file_args': [
                Util.conn_args_file
                , {'$HOME/yb_chunk_dml_by_date_part.args': """--table dze_db1.dev.sales
--dml \"\"\"INSERT INTO sales_chunk_ordered
SELECT * FROM dze_db1.dev.sales
WHERE <chunk_where_clause>
ORDER BY sale_ts\"\"\"
--column 'sale_ts'
--date_part HOUR
--chunk_rows 100000000"""}]}}

    def execute(self):
        # Delegate the actual chunking to the yb_chunk_dml_by_date_part_p
        # stored procedure, mapping command line options to procedure args.
        self.cmd_results = StoredProc('yb_chunk_dml_by_date_part_p', self.db_conn).call_proc_as_anonymous_block(
            args = {
                'a_table' : self.args_handler.args.table
                , 'a_ts_column' : self.args_handler.args.column
                , 'a_date_part' : self.args_handler.args.date_part
                , 'a_dml' : self.args_handler.args.dml
                , 'a_min_chunk_size' : self.args_handler.args.chunk_rows
                , 'a_verbose' : ('TRUE' if self.args_handler.args.verbose_chunk_off else 'FALSE')
                , 'a_add_null_chunk' : ('TRUE' if self.args_handler.args.null_chunk_off else 'FALSE')
                , 'a_print_chunk_dml' : ('TRUE' if self.args_handler.args.print_chunk_dml else 'FALSE')
                , 'a_execute_chunk_dml' : ('TRUE' if self.args_handler.args.execute_chunk_dml else 'FALSE')}
            , pre_sql = self.args_handler.args.pre_sql
            , post_sql = self.args_handler.args.post_sql)

    def additional_args(self):
        # Register the chunking-specific command line options on top of the
        # connection options supplied by the Util base class.
        args_chunk_r_grp = self.args_handler.args_parser.add_argument_group(
            'required chunking arguments')
        args_chunk_r_grp.add_argument(
            "--table", required=True
            , help="table name, the name may be qualified if needed")
        args_chunk_r_grp.add_argument(
            "--dml", required=True
            , help="DML to perform in chunks, the DML"
                " must contain the string '<chunk_where_clause>' to properly facilitate the"
                " dynamic chunking filter")
        args_chunk_r_grp.add_argument(
            "--column", required=True
            , help="the column which is used to create chunks on the"
                " DML, the column must be a date/timestamp data type")
        args_chunk_r_grp.add_argument(
            "--date_part", required=True
            , choices=['MINUTE', 'HOUR', 'DAY', 'MONTH', 'YEAR']
            , help="create chunks down to the time unit selected")
        args_chunk_r_grp.add_argument(
            "--chunk_rows", dest="chunk_rows", required=True
            , type=ArgIntRange(1,9223372036854775807)
            , help="the minimum rows that each chunk should contain")

        args_chunk_o_grp = self.args_handler.args_parser.add_argument_group(
            'optional chunking arguments')
        # NOTE: these two flags use store_false, so *passing* the flag sets the
        # destination to False and the "..._off" naming reads inverted.
        args_chunk_o_grp.add_argument("--verbose_chunk_off", action="store_false"
            , help="don't print additional chunking details, defaults to FALSE")
        args_chunk_o_grp.add_argument("--null_chunk_off", action="store_false"
            , help="don't create a chunk where the chunking column is NULL, defaults to FALSE")
        args_chunk_o_grp.add_argument("--print_chunk_dml", action="store_true"
            , help="print the chunked DML, defaults to FALSE")
        args_chunk_o_grp.add_argument("--execute_chunk_dml", action="store_true"
            , help="execute the chunked DML, defaults to FALSE")
        args_chunk_o_grp.add_argument("--pre_sql", default=''
            , help="SQL to run before the chunking DML, only runs if execute_chunk_dml is set")
        args_chunk_o_grp.add_argument("--post_sql", default=''
            , help="SQL to run after the chunking DML, only runs if execute_chunk_dml is set")

    def additional_args_process(self):
        # Validate the placeholder and discard pre/post SQL when the chunked
        # DML is not actually going to be executed.
        if '<chunk_where_clause>' not in self.args_handler.args.dml:
            self.args_handler.args_parser.error("DML must contain the string '<chunk_where_clause>'")
        if not self.args_handler.args.execute_chunk_dml:
            self.args_handler.args.pre_sql = ''
            self.args_handler.args.post_sql = ''


def main():
    # Entry point: build the utility, run the chunking and propagate the
    # stored procedure's exit code.
    cdml = chunk_dml_by_date_part()

    sys.stdout.write('-- Running DML chunking.\n')
    cdml.execute()
    cdml.cmd_results.write(tail='-- Completed DML chunking.\n')

    exit(cdml.cmd_results.exit_code)


if __name__ == "__main__":
    main()
gza/beats
vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go
// Package msgtype provides Kerberos 5 message type assigned numbers.
package msgtype

// KRB message type IDs.
// NOTE(review): the values match the Kerberos V5 assigned numbers — see
// RFC 4120, section 7.5.7 for the authoritative list.
const (
	KRB_AS_REQ     = 10 // Request for initial authentication
	KRB_AS_REP     = 11 // Response to KRB_AS_REQ request
	KRB_TGS_REQ    = 12 // Request for authentication based on TGT
	KRB_TGS_REP    = 13 // Response to KRB_TGS_REQ request
	KRB_AP_REQ     = 14 // Application request to server
	KRB_AP_REP     = 15 // Response to KRB_AP_REQ_MUTUAL
	KRB_RESERVED16 = 16 // Reserved for user-to-user krb_tgt_request
	KRB_RESERVED17 = 17 // Reserved for user-to-user krb_tgt_reply
	KRB_SAFE       = 20 // Safe (checksummed) application message
	KRB_PRIV       = 21 // Private (encrypted) application message
	KRB_CRED       = 22 // Private (encrypted) message to forward credentials
	KRB_ERROR      = 30 // Error response
)
jquacinella/nexus
delta/service/src/test/scala/ch/epfl/bluebrain/nexus/delta/service/realms/WellKnownResolverSpec.scala
// NOTE(review): behaviour spec for WellKnownResolver. It covers the happy path
// (valid OpenID configuration + JWKS documents yield a WellKnown with the
// expected issuer, grant types, keys and endpoints, including empty/absent
// grant_types_supported) and the rejection cases (unreachable documents,
// malformed issuer / jwks_uri / grant types / endpoints, missing required
// endpoints, bad JWKS responses and invalid or empty JWKS key sets). The
// companion object builds the fixture documents, including a freshly
// generated RSA public JWK.
package ch.epfl.bluebrain.nexus.delta.service.realms import akka.http.scaladsl.model.{HttpRequest, Uri} import ch.epfl.bluebrain.nexus.delta.sdk.model.realms.GrantType import ch.epfl.bluebrain.nexus.delta.sdk.model.realms.GrantType._ import ch.epfl.bluebrain.nexus.delta.sdk.model.realms.RealmRejection.{IllegalEndpointFormat, IllegalGrantTypeFormat, IllegalIssuerFormat, IllegalJwkFormat, IllegalJwksUriFormat, NoValidKeysFound, UnsuccessfulJwksResponse, UnsuccessfulOpenIdConfigResponse} import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClientError.HttpUnexpectedError import ch.epfl.bluebrain.nexus.testkit.{EitherValuable, IOValues, TestHelpers} import com.nimbusds.jose.jwk.gen.RSAKeyGenerator import io.circe.Json import io.circe.parser._ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import org.scalatest.{Inspectors, OptionValues} class WellKnownResolverSpec extends AnyWordSpecLike with IOValues with Inspectors with OptionValues with TestHelpers with Matchers { import WellKnownResolverSpec._ "A WellKnown" should { def resolveWellKnown(openIdConfig: Json, jwks: Json) = WellKnownResolver( ioFromMap( Map( openIdUri -> openIdConfig, jwksUri -> jwks ), (_: Uri) => HttpUnexpectedError(HttpRequest(), "Failed") ) )(openIdUri) def alwaysFail = WellKnownResolver( ioFromMap( Map.empty[Uri, Json], (_: Uri) => HttpUnexpectedError(HttpRequest(), "Failed") ) )(openIdUri) "be constructed correctly" when { "the openid config is valid" in { val wk = resolveWellKnown( validOpenIdConfig, validJwks ).accepted wk.issuer shouldEqual issuer wk.grantTypes shouldEqual grantTypes wk.keys shouldEqual Set(publicKeyJson) } "the openid contains empty grant_types" in { val wk = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("grant_types_supported" -> Json.arr())), validJwks ).accepted wk.grantTypes shouldEqual Set.empty[GrantType] } "the openid contains no grant_types" in { val wk = resolveWellKnown(
validOpenIdConfig.hcursor.downField("grant_types_supported").delete.top.value, validJwks ).accepted wk.grantTypes shouldEqual Set.empty[GrantType] } "the openid contains the expected endpoints" in { val wk = resolveWellKnown( fullOpenIdConfig, validJwks ).accepted wk.issuer shouldEqual issuer wk.grantTypes shouldEqual grantTypes wk.keys shouldEqual Set(publicKeyJson) wk.authorizationEndpoint shouldEqual authorizationUri wk.tokenEndpoint shouldEqual tokenUri wk.userInfoEndpoint shouldEqual userInfoUri wk.revocationEndpoint.value shouldEqual revocationUri wk.endSessionEndpoint.value shouldEqual endSessionUri } } "fail to construct" when { "the client records a bad response" in { val rej = alwaysFail.rejectedWith[UnsuccessfulOpenIdConfigResponse] rej.document shouldEqual openIdUri } "the openid contains an invalid issuer" in { val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("issuer" -> Json.fromString(" "))), validJwks ).rejectedWith[IllegalIssuerFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".issuer" } "the openid contains a issuer with an invalid type" in { val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("issuer" -> Json.fromInt(3))), validJwks ).rejectedWith[IllegalIssuerFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".issuer" } } "the openid contains an invalid jwks_uri" in { val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("jwks_uri" -> Json.fromString("asd"))), validJwks ).rejectedWith[IllegalJwksUriFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".jwks_uri" } "the openid contains a jwks_uri with an invalid type" in { val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("jwks_uri" -> Json.fromInt(3))), validJwks ).rejectedWith[IllegalJwksUriFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".jwks_uri" } "the openid contains a invalid grant_types" in { val rej = resolveWellKnown(
validOpenIdConfig.deepMerge(Json.obj("grant_types_supported" -> Json.fromString("incorrect"))), validJwks ).rejectedWith[IllegalGrantTypeFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".grant_types_supported" } "the openid contains no valid grant_types" in { val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("grant_types_supported" -> Json.arr(Json.fromString("incorrect")))), validJwks ).rejectedWith[IllegalGrantTypeFormat] rej.document shouldEqual openIdUri rej.location shouldEqual ".grant_types_supported[0]" } "the openid contains an incorrect endpoint" in { forAll( List( "authorization_endpoint", "token_endpoint", "userinfo_endpoint", "revocation_endpoint", "end_session_endpoint" ) ) { key => val rej = resolveWellKnown( fullOpenIdConfig.deepMerge(Json.obj(key -> Json.fromInt(3))), validJwks ).rejectedWith[IllegalEndpointFormat] rej.document shouldEqual openIdUri rej.location shouldEqual s".$key" } } "the openid does not contain required endpoints" in { forAll(List("authorization_endpoint", "token_endpoint", "userinfo_endpoint")) { key => val rej = resolveWellKnown( fullOpenIdConfig.hcursor.downField(key).delete.top.value, validJwks ).rejectedWith[IllegalEndpointFormat] rej.document shouldEqual openIdUri rej.location shouldEqual s".$key" } } "the client returns a bad response for the jwks document" in { val invalidJwksUri = Uri("https://localhost/invalid") val rej = resolveWellKnown( validOpenIdConfig.deepMerge(Json.obj("jwks_uri" -> Json.fromString(invalidJwksUri.toString()))), validJwks ).rejectedWith[UnsuccessfulJwksResponse] rej.document shouldEqual invalidJwksUri } "the jwks document has an incorrect format" in { val rej = resolveWellKnown( validOpenIdConfig, Json.obj() ).rejectedWith[IllegalJwkFormat] rej.document shouldEqual jwksUri } "the jwks document has no keys" in { val rej = resolveWellKnown( validOpenIdConfig, Json.obj("keys" -> Json.arr()) ).rejectedWith[NoValidKeysFound] rej.document shouldEqual jwksUri } "the
jwks document has incorrect keys" in { val rej = resolveWellKnown( validOpenIdConfig, Json.obj("keys" -> Json.arr(Json.fromString("incorrect"))) ).rejectedWith[NoValidKeysFound] rej.document shouldEqual jwksUri } } } object WellKnownResolverSpec extends EitherValuable { private val openIdUri = Uri("https://localhost/auth/realms/master/.well-known/openid-configuration") private val jwksUri = Uri("https://localhost/auth/realms/master/protocol/openid-connect/certs") private val issuer = "https://localhost/auth/realms/master" private val authorizationUri = Uri("https://localhost/auth") private val tokenUri = Uri("https://localhost/auth/token") private val userInfoUri = Uri("https://localhost/auth/userinfo") private val revocationUri = Uri("https://localhost/auth/revoke") private val endSessionUri = Uri("https://localhost/auth/logout") private val validOpenIdConfigString = s""" | { | "issuer": "$issuer", | "jwks_uri": "$jwksUri", | "grant_types_supported": [ | "authorization_code", | "implicit", | "refresh_token", | "password", | "client_credentials" | ], | "authorization_endpoint": "$authorizationUri", | "token_endpoint": "$tokenUri", | "userinfo_endpoint": "$userInfoUri" | } """.stripMargin private val validOpenIdConfig = parse(validOpenIdConfigString).rightValue private val fullOpenIdConfigString = s""" | { | "issuer": "$issuer", | "jwks_uri": "$jwksUri", | "grant_types_supported": [ | "authorization_code", | "implicit", | "refresh_token", | "password", | "client_credentials" | ], | "authorization_endpoint": "$authorizationUri", | "token_endpoint": "$tokenUri", | "userinfo_endpoint": "$userInfoUri", | "revocation_endpoint": "$revocationUri", | "end_session_endpoint": "$endSessionUri" | } """.stripMargin private val fullOpenIdConfig = parse(fullOpenIdConfigString).rightValue private val publicKey = new RSAKeyGenerator(2048) .keyID("123") .generate() .toPublicJWK .toJSONString private val publicKeyJson = parse(publicKey).rightValue private val validJwksString = s""" |
{ | "keys": [ | $publicKey | ] | } """.stripMargin private val validJwks = parse(validJwksString).rightValue private val grantTypes = Set(AuthorizationCode, Implicit, RefreshToken, Password, ClientCredentials) }
acionescu/commons
src/main/java/net/segoia/util/execution/test/ControlExecutionEntity.java
/**
 * commons - Various Java Utils
 * Copyright (C) 2009 <NAME> - https://github.com/acionescu
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.segoia.util.execution.test;

import net.segoia.util.execution.ExecutionEntity;

/**
 * Composite execution entity: runs a configured "main" {@link ExecutionEntity}
 * on the context's input, then feeds the (input, output) pair into a "control"
 * {@link ExecutionEntity} and returns the control entity's result.
 *
 * Type parameters: I — input type, O — main entity's output type, C — control
 * entity's (and this entity's) result type.
 *
 * NOTE(review): the entities are used as raw types and the results are
 * narrowed with unchecked casts to O and C; the configuration (declared
 * elsewhere) does not appear to carry the generic parameters, so a mismatch
 * only surfaces as a ClassCastException at runtime.
 */
public class ControlExecutionEntity<I,O,C> implements ContextExecutionEntity<ExecutionContext<I,ControlExecutionEntityConfiguration>, C>{

    public C execute(ExecutionContext<I, ControlExecutionEntityConfiguration> executionContext) throws Exception {
        I input = executionContext.getInput();
        // Pull the two collaborating entities and their configurations from
        // the context's configuration object.
        ExecutionEntity mainExecutionEntity = executionContext.getConfig().getMainExecutionEntity();
        ExecutionEntity controlExecutionEntity = executionContext.getConfig().getControlExecutionEntity();
        ConfigurationEntity mainEntityConf = executionContext.getConfig().getMainEntityConfiguration();
        ConfigurationEntity controlEntityConf = executionContext.getConfig().getControlEntityConfiguration();
        // Step 1: run the main entity on the raw input.
        O output = (O)mainExecutionEntity.execute(new ExecutionContext(input,mainEntityConf));
        // Step 2: let the control entity judge the (input, output) pair.
        C control = (C)controlExecutionEntity.execute(new ExecutionContext(new IOPair<I, O>(input, output),controlEntityConf));
        return control;
    }
}
gobuffalo/bufcli
cli/internal/plugins/packr/packager.go
package packr import ( "context" "fmt" "github.com/gobuffalo/buffalo-cli/v2/cli/cmds/build" "github.com/gobuffalo/packr/v2/jam" "github.com/gobuffalo/plugins" "github.com/gobuffalo/plugins/plugcmd" ) var _ build.BeforeBuilder = &Packager{} var _ build.Packager = &Packager{} var _ plugcmd.Namer = &Packager{} var _ plugins.Plugin = &Packager{} type Packager struct{} func (b *Packager) BeforeBuild(ctx context.Context, root string, args []string) error { return jam.Clean() } func (b *Packager) Package(ctx context.Context, root string, files []string) error { if len(files) > 0 { fmt.Printf("%s does not support additional files\n", b.PluginName()) for _, f := range files { fmt.Printf("\t> %s\n", f) } } err := jam.Pack(jam.PackOptions{ Roots: []string{root}, }) return plugins.Wrap(b, err) } func (b Packager) PluginName() string { return "packr" } func (b Packager) CmdName() string { return "packr" }
jturner65/ParticleSim
external/fltk-2.0.x-r5966/fluidOld/Fluid_Image.cxx
// // "$Id: Fluid_Image.cxx 5738 2007-03-12 18:07:45Z spitzak $" // // Pixmap label support for the Fast Light Tool Kit (FLTK). // // Copyright 1998-2006 by <NAME> and others. // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 // USA. // // Please report all bugs and problems to "<EMAIL>". // #include <fltk/run.h> #include <fltk/Widget.h> #include "FluidType.h" #include "Fluid_Image.h" #include <string.h> #ifdef _WIN32 # define strcasecmp(a,b) stricmp(a,b) # define strncasecmp(a,b,c) strnicmp(a,b,c) #endif #include <stdio.h> #include <errno.h> #include <ctype.h> #include <stdlib.h> #include <fltk/filename.h> //////////////////////////////////////////////////////////////// static uchar* store_datas_from_file(const char *filename, size_t &size) { uchar *d=0; FILE *fd = fopen(filename, "rb"); if(!fd) return 0; size_t c=512, cc=0, r; size = 0; do { c*=2; cc+=c; if(d) d=(uchar *) realloc(d, cc); else d=(uchar *) malloc(cc); r=fread(d+cc-c, 1, c, fd); size+=r; } while(r==c); if(size!=cc) d=(uchar *) realloc(d, size?size:1); return d; } //////////////////////////////////////////////////////////////// class generic_image : public Fluid_Image { protected: fltk::SharedImage *p; int *linelength; fltk::ImageType* filetype; public: generic_image(const char *name); ~generic_image(); virtual const fltk::Symbol* 
symbol() {return p;} virtual void write_static(); virtual void write_code(); static int test_file(char *buffer); }; int generic_image::test_file(char *buffer) { fltk::ImageType* ft = fltk::guess_image("", (uchar*)buffer); return ft->name != 0; } static int image_file_header_written; #define MAX_CLINESIZE 256 void generic_image::write_static() { uchar* d=0; if (!p) return; if(image_file_header_written != write_number) { write_c("\n#include <fltk/SharedImage.h>\n"); image_file_header_written = write_number; } if (inlined) { size_t l=0; if (filetype->name && !strcasecmp(filetype->name, "xpm")) { write_c("static const char *%s[] = {\n", unique_id(this, "datas", fltk::filename_name(name()), 0)); FILE* fp = fopen(name(), "rb"); if(fp) { indentation += 2; char s[MAX_CLINESIZE+1]; do { fgets(s, MAX_CLINESIZE+1, fp); } while (!feof(fp) && !strchr(s, '{')); while (!feof(fp) && fgets(s, MAX_CLINESIZE+1, fp)) { write_c(indent()); write_craw(s); // write_c(s); } indentation -= 2; fclose(fp); } } else { d = store_datas_from_file(name(), l); if(d) { #if 1 write_c("static const unsigned char %s[%d] = {\n", unique_id(this, "datas", fltk::filename_name(name()), 0), l); write_carray((const char*)d, l); write_c("};\n"); #else write_c("static const unsigned char %s[] =\n", unique_id(this, "datas", filename_name(name()), 0)); write_cstring((const char*)d, l); write_c(";\n"); #endif free(d); } } } } void generic_image::write_code() { if (!p) return; if (0 && inlined) { write_c("%so->image(%s%s", indent(), (filetype->name && !strcasecmp(filetype->name, "xpm")) ? 
"(const char*const *)" : "", unique_id(this, "datas", fltk::filename_name(name()), 0) ); } else { write_c("%so->image(fltk::SharedImage::get(\"%s\"", indent(), name()); } write_c("));\n"); } generic_image::generic_image(const char *name ) : Fluid_Image(name) { filetype = fltk::guess_image(fltk::SharedImage::get_filename(name)); p = filetype->get((char*) name, 0); inlined = 1; } generic_image::~generic_image() { } //////////////////////////////////////////////////////////////// #include <fltk/xbmImage.h> class bitmap_image : public Fluid_Image { fltk::xbmImage *p; public: ~bitmap_image(); bitmap_image(const char *name, FILE *); virtual const fltk::Symbol* symbol() {return p;} virtual void write_static(); virtual void write_code(); static int test_file(char *buffer); }; // bad test, always do this last! int bitmap_image::test_file(char *buffer) { return (strstr(buffer,"#define ") != 0); } static int bitmap_header_written; void bitmap_image::write_static() { if (!p) return; write_c("\n"); if (bitmap_header_written != write_number) { write_c("#include <fltk/xbmImage.h>\n"); bitmap_header_written = write_number; } int w = p->w(); int h = p->h(); int n = ((w+7)/8)*h; #if 1 // older one write_c("static const unsigned char %s[%d] = {\n", unique_id(this, "bits", fltk::filename_name(name()), 0), n); write_carray((const char*)(p->array), n); write_c("};\n"); #else // this seems to produce slightly shorter c++ files write_c("static const unsigned char %s[] =\n", unique_id(this, "bits", fltk::filename_name(name()), 0)); write_cstring((const char*)(p->array), n); write_c(";\n"); #endif write_c("static fltk::xbmImage %s(%s, %d, %d);\n", unique_id(this, "xbmImage", fltk::filename_name(name()), 0), unique_id(this, "bits", fltk::filename_name(name()), 0), w, h); } void bitmap_image::write_code() { if (!p) return; write_c("%so->image(%s);\n", indent(), unique_id(this, "xbmImage", fltk::filename_name(name()), 0)); } #define ns_width 16 #define ns_height 16 static unsigned char 
ns_bits[] = { 0x00, 0x00, 0x80, 0x01, 0xc0, 0x03, 0xe0, 0x07, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0x80, 0x01, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x01, 0x00, 0x00}; static fltk::xbmImage nosuch_bitmap(ns_bits, ns_width, ns_height); bitmap_image::bitmap_image(const char *name, FILE *f) : Fluid_Image(name) { p = &nosuch_bitmap; // if any problems with parse we exit with this if (!f) return; char buffer[1024]; char junk[1024]; int wh[2]; // width and height int i; for (i = 0; i<2; i++) { for (;;) { if (!fgets(buffer,1024,f)) return; int r = sscanf(buffer,"#define %s %d",junk,&wh[i]); if (r >= 2) break; } } // skip to data array: for (;;) { if (!fgets(buffer,1024,f)) return; if (!strncmp(buffer,"static ",7)) break; } int n = ((wh[0]+7)/8)*wh[1]; uchar *data = new uchar[n]; // read the data: i = 0; for (;i<n;) { if (!fgets(buffer,1024,f)) return; const char *a = buffer; while (*a && i<n) { int t; if (sscanf(a," 0x%x",&t)>0) data[i++] = t; while (*a && *a++ != ','); } } p = new fltk::xbmImage(data,wh[0],wh[1]); } bitmap_image::~bitmap_image() { if (p && p != &nosuch_bitmap) { delete p; } } //////////////////////////////////////////////////////////////// static Fluid_Image** images; // sorted list static int numimages; static int tablesize; Fluid_Image* Fluid_Image::find(const char *name) { if (!name || !*name) return 0; // first search to see if it exists already: int a = 0; int b = numimages; while (a < b) { int c = (a+b)/2; int i = strcmp(name,images[c]->name_); if (i < 0) b = c; else if (i > 0) a = c+1; else return images[c]; } // no, so now see if the file exists: Fluid_Image *ret = 0; const char* realname = fltk::SharedImage::get_filename(name); FILE *f = fopen(realname,"rb"); if (!f) { read_error("%s : %s", realname, strerror(errno)); } else { // now see if we can identify the type, by reading in some data // and asking all the types we know about: char buffer[1025]; fread(buffer, 1, 1024, f); rewind(f); buffer[1024] = 0; 
// null-terminate so strstr() works if (generic_image::test_file(buffer)) { ret = new generic_image(name); } else if (bitmap_image::test_file(buffer)) { ret = new bitmap_image(name,f); } else { ret = 0; read_error("%s : unrecognized image format", name); } fclose(f); } if (!ret) ret = new bitmap_image(name, 0); // make a new entry in the table: numimages++; if (numimages > tablesize) { tablesize = tablesize ? 2*tablesize : 16; images = (Fluid_Image**)realloc(images, tablesize*sizeof(Fluid_Image*)); } for (b = numimages-1; b > a; b--) images[b] = images[b-1]; images[a] = ret; return ret; } Fluid_Image::Fluid_Image(const char *name) { name_ = strdup(name); written = 0; refcount = 0; inlined=0; } void Fluid_Image::increment() { ++refcount; } void Fluid_Image::decrement() { --refcount; if (refcount > 0) return; delete this; } Fluid_Image::~Fluid_Image() { int a; for (a = 0; a<numimages; a++) if (images[a] == this) break; if(a<numimages) { numimages--; for (; a < numimages; a++) images[a] = images[a+1]; } free((void*)name_); } //////////////////////////////////////////////////////////////// #include <fltk/file_chooser.h> Fluid_Image *ui_find_image(Fluid_Image *old) { const char *name = fltk::file_chooser("Image", "*.{bm|xbm|xpm|gif|png|bmp|jpg|jpeg}", old ? old->name() : 0); Fluid_Image *ret = (name && *name) ? 
Fluid_Image::find(name) : 0; return ret; } //////////////////////////////////////////////////////////////// static int cancel, modal; void browse_dir_cb(); #include "image_file_panel.h" #include "image_file_panel.cxx" const char *images_dir = 0; extern void fix_images_dir(); void browse_dir_cb() { const char *f = fltk::file_chooser("Images directory","", images_dir_input->value()); if (f) images_dir_input->value(f); } void set_images_dir_cb(fltk::Widget *, void *) { if(!images_dir_window) make_images_dir_window(); images_dir_input->value(images_dir); images_dir_window->show(); cancel=0; modal=1; while(modal) fltk::wait(); if (!cancel) { images_dir = images_dir_input->value(); if (!*images_dir) images_dir = 0; fix_images_dir(); } } // // End of "$Id: Fluid_Image.cxx 5738 2007-03-12 18:07:45Z spitzak $". //
Yun-xi/xchat
xchat-common/src/main/java/com/xx/xchat/conf/MybatisPlusConfig.java
/*
 * Copyright (c) 2011-2020, baomidou (<EMAIL>).
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 * <p>
 * https://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.xx.xchat.conf;

import com.baomidou.mybatisplus.core.parser.ISqlParser;
import com.baomidou.mybatisplus.extension.parsers.BlockAttackSqlParser;
import com.baomidou.mybatisplus.extension.plugins.OptimisticLockerInterceptor;
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.ArrayList;
import java.util.List;

/**
 * Mybatis Plus configuration: registers the pagination and optimistic
 * locking interceptors for the xchat DAO layer.
 *
 * @author Caratacus
 * @since 2017/4/1
 */
@Configuration
@MapperScan(basePackages = "com.xx.xchat.dao")
public class MybatisPlusConfig {

    /**
     * Pagination interceptor. Its parser chain includes a blocking parser
     * that rejects dangerous full-table statements (e.g. DELETE/UPDATE
     * without a WHERE clause).
     */
    @Bean
    public PaginationInterceptor paginationInterceptor() {
        PaginationInterceptor interceptor = new PaginationInterceptor();
        // interceptor.setLimit(n) would cap the page size; the default is 500,
        // and a negative value such as -1 disables the cap.
        List<ISqlParser> parsers = new ArrayList<>();
        parsers.add(new BlockAttackSqlParser()); // block "attack" SQL statements
        interceptor.setSqlParserList(parsers);
        return interceptor;
    }

    /**
     * Optimistic locking interceptor.
     */
    @Bean
    public OptimisticLockerInterceptor optimisticLockerInterceptor() {
        return new OptimisticLockerInterceptor();
    }
}
hafeez3000/expo
ios/versioned-react-native/ABI17_0_0/Exponent/Modules/Api/ABI17_0_0EXFingerprint.h
// Copyright 2016-present 650 Industries. All rights reserved.

#import <ReactABI17_0_0/ABI17_0_0RCTBridgeModule.h>

// React Native bridge module for the versioned (ABI17_0_0) Expo runtime.
// NOTE(review): named for fingerprint/biometric functionality, but the
// exported methods are defined in the implementation file — confirm there.
@interface ABI17_0_0EXFingerprint : NSObject<ABI17_0_0RCTBridgeModule>

@end
dbflute/dbflute-core
dbflute-runtime/src/main/java/org/dbflute/exception/SelectedCountExceedMaxCountException.java
/*
 * Copyright 2014-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */
package org.dbflute.exception;

/**
 * The exception when selected count exceeds max count.
 * @author jflute
 */
public class SelectedCountExceedMaxCountException extends RuntimeException {

    /** The serial version UID for object serialization. (Default) */
    private static final long serialVersionUID = 1L;

    /** Selected count. */
    protected int _selectedCount;

    /** Max count. */
    protected int _maxCount;

    /**
     * Constructor.
     *
     * @param msg The message of the exception. (NotNull)
     * @param selectedCount Selected count.
     * @param maxCount Max count.
     */
    public SelectedCountExceedMaxCountException(String msg, int selectedCount, int maxCount) {
        super(msg);
        _selectedCount = selectedCount;
        _maxCount = maxCount;
    }

    /**
     * Get selected count.
     * @return Selected count.
     */
    public int getSelectedCount() {
        return _selectedCount;
    }

    /**
     * Get max count.
     * @return Max count.
     */
    public int getMaxCount() {
        return _maxCount;
    }
}
zst123/project-fimble_amazon-freertos
vendors/cypress/libraries/internal/command-console/iperf/rtos/AFR/rtos_config.h
/* config.h. Generated by hand for WICED. */

#ifndef CONFIG_H
#define CONFIG_H

/* ===================================================================
 * config.h
 *
 * config.h is derived from config.h.in -- do not edit config.h
 *
 * This contains variables that the configure script checks and
 * then defines or undefines. The source code checks for these
 * variables to know if certain features are present.
 * ===================================================================
 */

/* Define if threads exist (using pthreads or Win32 threads) */
/* #undef HAVE_THREAD */
/* #undef HAVE_POSIX_THREAD */
/* #undef HAVE_WIN32_THREAD */
/* #undef _REENTRANT */

/* Define if on OSF1 and need special extern "C" around some header files */
/* #undef SPECIAL_OSF1_EXTERN */

/* Define if the strings.h header file exists */
/* Route gettimeofday() calls to the Amazon FreeRTOS implementation. */
#define gettimeofday(tv, timezone) afr_gettimeofday(tv, timezone)

/* POSIX microsecond-count type, not supplied by every toolchain used here. */
typedef unsigned long useconds_t;

#define HAVE_STRINGS_H

/* NOTE(review): GCC >= 6 toolchains are assumed to declare struct timeval
 * themselves; only older/other compilers get this fallback — confirm. */
#if !(defined (__GNUC__) && (__GNUC__ >= 6))
struct timeval {
    long tv_sec; /* Seconds */
    long tv_usec; /* Microseconds */
};
#endif

/* ARM Compiler does not provide ssize_t. */
#if defined(__ARMCC_VERSION)
typedef signed int ssize_t; ///< Signed size type, usually encodes negative errors
#endif

/* Define if you have these functions. */
/* #define HAVE_SNPRINTF */
/* #undef HAVE_INET_PTON */
/* #undef HAVE_INET_NTOP */
/* #undef HAVE_GETTIMEOFDAY */
/* #undef HAVE_PTHREAD_CANCEL */
/* #undef HAVE_USLEEP */
/* #undef HAVE_QUAD_SUPPORT */
/* #undef HAVE_PRINTF_QD */

/* standard C++, which isn't always... */
/* #undef bool */

#endif /* CONFIG_H */
MaxWainer/framework
bukkit/command-implementation/src/main/java/dev/framework/bukkit/implementation/command/BukkitCommandManager.java
/*
 * MIT License
 *
 * Copyright (c) 2022 MaxWainer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
package dev.framework.bukkit.implementation.command;

import dev.framework.bukkit.implementation.bootstrap.AbstractBukkitBootstrap;
import dev.framework.commands.manager.AbstractCommandManager;
import java.util.List;
import net.kyori.adventure.platform.bukkit.BukkitAudiences;
import org.bukkit.command.Command;
import org.bukkit.command.CommandSender;
import org.jetbrains.annotations.NotNull;

/**
 * Command manager implementation that registers framework commands into
 * Bukkit's command map under the owning plugin's name.
 */
public final class BukkitCommandManager
    extends AbstractCommandManager<CommandSender, BukkitSenderFactory, Command> {

  private final String pluginName;

  public BukkitCommandManager(
      final @NotNull BukkitAudiences audiences,
      final @NotNull AbstractBukkitBootstrap bootstrap) {
    super(new BukkitSenderFactory(audiences));
    this.pluginName = bootstrap.getName();
  }

  @Override
  protected void registerHandle(final @NotNull Command handle) {
    Commands.injectBukkitCommand(pluginName, handle);
  }

  @Override
  protected Command wrapToInternal(
      final @NotNull String name,
      final @NotNull WrappedCommand<CommandSender> wrappedCommand) {
    // Adapt the framework command to a plain Bukkit Command.
    return new Command(name) {
      @Override
      public boolean execute(
          final CommandSender sender, final String label, final String[] arguments) {
        wrappedCommand.execute(sender, arguments);
        return false;
      }

      @Override
      public List<String> tabComplete(
          final CommandSender sender, final String alias, final String[] arguments)
          throws IllegalArgumentException {
        return wrappedCommand.suggestions(sender, arguments);
      }
    };
  }
}
gcbpay/radarj9
ripple-bouncycastle/src/main/java/org/ripple/bouncycastle/asn1/x509/AttCertValidityPeriod.java
<reponame>gcbpay/radarj9 package org.ripple.bouncycastle.asn1.x509; import org.ripple.bouncycastle.asn1.ASN1EncodableVector; import org.ripple.bouncycastle.asn1.ASN1GeneralizedTime; import org.ripple.bouncycastle.asn1.ASN1Object; import org.ripple.bouncycastle.asn1.ASN1Primitive; import org.ripple.bouncycastle.asn1.ASN1Sequence; import org.ripple.bouncycastle.asn1.DERSequence; public class AttCertValidityPeriod extends ASN1Object { ASN1GeneralizedTime notBeforeTime; ASN1GeneralizedTime notAfterTime; public static AttCertValidityPeriod getInstance( Object obj) { if (obj instanceof AttCertValidityPeriod) { return (AttCertValidityPeriod)obj; } else if (obj != null) { return new AttCertValidityPeriod(ASN1Sequence.getInstance(obj)); } return null; } private AttCertValidityPeriod( ASN1Sequence seq) { if (seq.size() != 2) { throw new IllegalArgumentException("Bad sequence size: " + seq.size()); } notBeforeTime = ASN1GeneralizedTime.getInstance(seq.getObjectAt(0)); notAfterTime = ASN1GeneralizedTime.getInstance(seq.getObjectAt(1)); } /** * @param notBeforeTime * @param notAfterTime */ public AttCertValidityPeriod( ASN1GeneralizedTime notBeforeTime, ASN1GeneralizedTime notAfterTime) { this.notBeforeTime = notBeforeTime; this.notAfterTime = notAfterTime; } public ASN1GeneralizedTime getNotBeforeTime() { return notBeforeTime; } public ASN1GeneralizedTime getNotAfterTime() { return notAfterTime; } /** * Produce an object suitable for an ASN1OutputStream. * <pre> * AttCertValidityPeriod ::= SEQUENCE { * notBeforeTime GeneralizedTime, * notAfterTime GeneralizedTime * } * </pre> */ public ASN1Primitive toASN1Primitive() { ASN1EncodableVector v = new ASN1EncodableVector(); v.add(notBeforeTime); v.add(notAfterTime); return new DERSequence(v); } }
QAQddbest/OJiJ
leetcode/src/main/java/ojij/common/id621/Solution.java
package ojij.common.id621; /** * 想法是: * 1. 用26长度的数组tasks,记录每个任务的数量 * 2. 用26长度的数组pending,记录每个任务最快那个时间点可以运行 * 遍历tasks: * time跳到最小的pending值那 * 对所有pending相同的任务,找到剩余时间最大的任务,执行并更新pending为n */ public class Solution { public int leastInterval(char[] tasks, int n) throws Exception { throw new Exception("太太太麻烦了吧...写不动了..."); } }
himdel/manageiq-ui-classic
app/helpers/hide_partial_helper.rb
module HidePartialHelper
  # True when the x-edit toolbar buttons should be hidden for this action.
  def hide_x_edit_buttons(action)
    'snap_vm' == action
  end
end
gdmuzzillo/samples
test/kafka/CakeConsumerTest.scala
package kafka

import scala.concurrent.duration._

import akka.actor.{ Actor, ActorSystem, Props }
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.{ ConsumerRecords, KafkaConsumerActor }
import cakesolutions.kafka.akka.KafkaConsumerActor.{ Confirm, Subscribe }
import org.apache.kafka.common.serialization.StringDeserializer

/**
 * Minimal cake-solutions Kafka consumer sample: spins up an actor system,
 * subscribes a consumer actor to "hitopic" and prints everything received.
 *
 * FIX: removed `import org.apache.kafka.common.network.Receive` — it was
 * dead and misleading: the `Receive` in `def receive` resolves to the
 * inherited `akka.actor.Actor.Receive` type member, never to that class.
 * Also removed the unused `KafkaConsumer.Conf` import (the code uses the
 * qualified `KafkaConsumer.Conf(...)` form).
 *
 * @author martinpaoletta
 */
object CakeConsumerTest extends App {
  implicit val system = ActorSystem("main")
  val receiver = system.actorOf(Props[ReceiverActor])
  receiver ! "prueba"
}

class ReceiverActor extends Actor {

  // Configuration for the KafkaConsumer
  val consumerConf = KafkaConsumer.Conf(
    new StringDeserializer,
    new StringDeserializer,
    bootstrapServers = "localhost:9092",
    groupId = "groupId",
    enableAutoCommit = false)

  // Configuration specific to the Async Consumer Actor
  val actorConf = KafkaConsumerActor.Conf(List("hitopic"), 1.seconds, 3.seconds)

  // Create the consumer actor, delivering records to this actor.
  val consumer = context.actorOf(
    KafkaConsumerActor.props(consumerConf, actorConf, self)
  )

  // Extractor for ensuring type safe cast of records
  val recordsExt = ConsumerRecords.extractor[String, String]

  consumer ! Subscribe()

  override def receive: Receive = {
    case string: String =>
      println(string)

    // Type safe cast of records to correct serialisation type
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets)
  }

  // Process the whole batch of received records.
  // Tuple: (optional record key, record value).
  def processRecords(records: Seq[(Option[String], String)]) = {
    println("processRecords: " + records.size)
    records.foreach(println)
  }
}
Maledictus/leechcraft
src/plugins/newlife/importers/firefox/firefoximporter.h
/********************************************************************** * LeechCraft - modular cross-platform feature rich internet client. * Copyright (C) 2010-2011 <NAME> * * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE or copy at https://www.boost.org/LICENSE_1_0.txt) **********************************************************************/ #pragma once #include "abstractimporter.h" #include "firefoxprofileselectpage.h" namespace LC { namespace NewLife { namespace Importers { class FirefoxImportPage; class FirefoxImporter : public AbstractImporter { FirefoxImportPage *ImportPage_; FirefoxProfileSelectPage *ProfileSelectPage_; public: FirefoxImporter (const ICoreProxy_ptr&, QWidget* = 0); QStringList GetNames () const; QStringList GetIcons () const; QList<QWizardPage*> GetWizardPages () const; }; } } }
Coderushnepal/PoojaShrestha
FinalProject/Node/src/seeds/2_category.js
<reponame>Coderushnepal/PoojaShrestha<filename>FinalProject/Node/src/seeds/2_category.js /** * Delete existing entries and seed values for `category`. * * @param {object} knex * @returns {Promise} */ export function seed(knex) { return knex('category') .del() .then(() => { return knex('category').insert([ { name: 'Entertainment', description: 'This is entertainment section', }, { name: 'Sports', description: 'This is sports section', }, ]); }); }
vttranlina/james-project
server/protocols/jmap-draft/src/main/java/org/apache/james/jmap/draft/model/deserialization/JmapRuleDTODeserializer.java
/****************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one   *
 * or more contributor license agreements.  See the NOTICE file *
 * distributed with this work for additional information        *
 * regarding copyright ownership.  The ASF licenses this file   *
 * to you under the Apache License, Version 2.0 (the            *
 * "License"); you may not use this file except in compliance   *
 * with the License.  You may obtain a copy of the License at   *
 *                                                              *
 *   http://www.apache.org/licenses/LICENSE-2.0                 *
 *                                                              *
 * Unless required by applicable law or agreed to in writing,   *
 * software distributed under the License is distributed on an  *
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY       *
 * KIND, either express or implied.  See the License for the    *
 * specific language governing permissions and limitations      *
 * under the License.                                           *
 ****************************************************************/
package org.apache.james.jmap.draft.model.deserialization;

import java.io.IOException;

import org.apache.commons.lang3.StringUtils;
import org.apache.james.jmap.draft.model.JmapRuleDTO;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;

/**
 * Deserializes a JmapRuleDTO, enforcing that the mandatory fields
 * `id`, `name`, `condition` and `action` are all present and non-null.
 */
public class JmapRuleDTODeserializer extends JsonDeserializer<JmapRuleDTO> {
    @Override
    public JmapRuleDTO deserialize(JsonParser jp, DeserializationContext deserializationContext) throws IOException {
        JsonNode node = jp.readValueAsTree();

        // FIX: JsonNode.get() returns Java null for an *absent* field, so the
        // previous `!xNode.isNull()` checks threw NullPointerException instead
        // of the intended IllegalArgumentException. Check for null explicitly.
        JsonNode idNode = node.get("id");
        Preconditions.checkArgument(idNode != null && !idNode.isNull(), "`id` is mandatory");
        Preconditions.checkArgument(StringUtils.isNotBlank(idNode.asText()), "`id` is mandatory");

        JsonNode nameNode = node.get("name");
        Preconditions.checkArgument(nameNode != null && !nameNode.isNull(), "`name` is mandatory");
        Preconditions.checkArgument(StringUtils.isNotBlank(nameNode.asText()), "`name` is mandatory");

        JsonNode conditionNode = node.get("condition");
        Preconditions.checkArgument(conditionNode != null && !conditionNode.isNull(), "`condition` is mandatory");
        JmapRuleDTO.ConditionDTO conditionDTO = jp.getCodec().treeToValue(conditionNode, JmapRuleDTO.ConditionDTO.class);

        JsonNode actionNode = node.get("action");
        Preconditions.checkArgument(actionNode != null && !actionNode.isNull(), "`action` is mandatory");
        JmapRuleDTO.ActionDTO actionDTO = jp.getCodec().treeToValue(actionNode, JmapRuleDTO.ActionDTO.class);

        // Reuse the already-fetched nodes instead of calling node.get() again.
        return new JmapRuleDTO(idNode.asText(), nameNode.asText(), conditionDTO, actionDTO);
    }
}
m3d/osgar_archive_2020
subt/ros/robot/src/globalmap_to_localmap.py
#!/usr/bin/env python
"""Republish a robot-centred square submap of the global costmap as a local map."""
import rospy
import sys
import pdb
from nav_msgs.msg import *
from std_msgs.msg import *
from sensor_msgs.msg import *
import cv2
import numpy as np
import math
import tf

lastMap = None          # latest OccupancyGrid from /map_in
SUBMAP_RADIUS = 600     # submap half-size, in grid cells
lastPose = None         # latest position from /odom


def mapCallback(costmap):
    """Cache the latest global costmap."""
    global lastMap
    lastMap = costmap


def odomCallback(odom):
    """Cache the latest odometry position."""
    global lastPose
    lastPose = odom.pose.pose.position


def updateMap():
    """Cut a SUBMAP_RADIUS-cell submap around the robot and publish it."""
    global map_pub, lastMap, lastPose, transformListener
    now = rospy.Time.now()
    try:
        transformListener.waitForTransform("map", "base_link", now, rospy.Duration(4.0))
        (trans, rot) = transformListener.lookupTransform("map", "base_link", now)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; best-effort skip of this cycle is kept.
        print("map_to_scan: transform missed")
        return

    pose = trans
    rotationAngle = tf.transformations.euler_from_quaternion(rot)[2]  # NOTE: currently unused

    mapArray = np.reshape(np.array(lastMap.data, dtype=np.int8),
                          (lastMap.info.height, lastMap.info.width))
    # enlarge array so that the subMap works well also at the border of the global map
    mapArray = np.pad(mapArray, pad_width=SUBMAP_RADIUS, mode='constant', constant_values=-1)

    # robot position in (padded) grid coordinates
    mapY = int((pose[0] - lastMap.info.origin.position.x) / lastMap.info.resolution) + SUBMAP_RADIUS
    mapX = int((pose[1] - lastMap.info.origin.position.y) / lastMap.info.resolution) + SUBMAP_RADIUS

    subMap = mapArray[mapX - SUBMAP_RADIUS:mapX + SUBMAP_RADIUS,
                      mapY - SUBMAP_RADIUS:mapY + SUBMAP_RADIUS]
    # saturate strong obstacle evidence to the standard "occupied" value
    subMap = np.where(subMap > 70, 100, subMap)

    newMap = OccupancyGrid()
    newMap.header.stamp = rospy.Time.now()
    newMap.header.frame_id = 'odom'
    newMap.info.resolution = lastMap.info.resolution
    newMap.info.width = subMap.shape[0]
    newMap.info.height = subMap.shape[1]
    newMap.info.origin.position.x = lastPose.x - subMap.shape[0] / 2 * lastMap.info.resolution
    newMap.info.origin.position.y = lastPose.y - subMap.shape[1] / 2 * lastMap.info.resolution
    newMap.info.origin.orientation.w = 1.0
    newMap.data = subMap.flatten().tolist()
    map_pub.publish(newMap)


if __name__ == '__main__':
    try:
        rospy.init_node('map_to_scan', anonymous=True)
        # FIX: explicit queue_size (omitting it is deprecated in rospy)
        map_pub = rospy.Publisher('/map_out', OccupancyGrid, queue_size=1)
        rospy.Subscriber('/map_in', OccupancyGrid, mapCallback)
        rospy.Subscriber('/odom', Odometry, odomCallback, queue_size=15)
        transformListener = tf.TransformListener()
        r = rospy.Rate(20)  # 20 Hz (FIX: comment previously claimed 5 Hz)
        while not rospy.is_shutdown():
            if lastMap:
                updateMap()
            r.sleep()
    except rospy.ROSInterruptException:
        pass
shreyasvj25/turicreate
deps/src/boost_1_65_1/libs/spirit/test/lex/matlib.h
// Copyright (c) 2001-2009 <NAME>
// Copyright (c) 2009 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef MATLIB_H_05102009
#define MATLIB_H_05102009

#include <boost/spirit/include/lex.hpp>
#include <vector>
#include <string>

// Semantic action functor: when the token it is attached to matches,
// switch the lexer into the state name supplied at construction time.
struct set_lexer_state
{
    std::string state;  // target lexer state name ("INITIAL", "A", "B", ...)
    set_lexer_state(const std::string &a):state(a){}
    template <class Iterator,class Context>
    void operator () (Iterator const&, Iterator const&
      , BOOST_SCOPED_ENUM(boost::spirit::lex::pass_flags)&, std::size_t
      , Context &ctx) const
    {
        ctx.set_state_name(state.c_str());
    }
};

// Semantic action functor: parse the matched token text as a double
// (via std::atof) and append it to the bound output vector.
struct store_double
{
    std::vector<double> &out;  // destination for parsed values
    store_double(std::vector<double> &a):out(a){}
    template <class Iterator,class LexerContext>
    void operator () (Iterator const& start, Iterator const& end
      , BOOST_SCOPED_ENUM(boost::spirit::lex::pass_flags)&, std::size_t
      , LexerContext &) const
    {
        std::string work(start, end);
        out.push_back(std::atof(work.c_str()));
    }
private:
    // silence MSVC warning C4512: assignment operator could not be generated
    store_double& operator= (store_double const&);
};

// Semantic action functor: finish the current row on ']' — move the
// accumulated row into the matrix (leaving `row` empty via swap) and
// return the lexer to state "A".
struct add_row
{
    std::vector<std::vector<double> > &matrix;  // completed rows
    std::vector<double> &row;                   // row being accumulated
    add_row(std::vector<std::vector<double> > &a,std::vector<double> &b)
        :matrix(a),row(b)
    {}
    template <class Iterator,class Context>
    void operator () (Iterator const&, Iterator const&
      , BOOST_SCOPED_ENUM(boost::spirit::lex::pass_flags)&, std::size_t
      , Context &ctx) const
    {
        matrix.push_back(std::vector<double>());
        matrix.back().swap(row);
        ctx.set_state_name("A");
    }
private:
    // silence MSVC warning C4512: assignment operator could not be generated
    add_row& operator= (add_row const&);
};

// Lexer for a simple matrix literal syntax: [[1,2],[3,4]].
// State machine (driven by the semantic actions above):
//   INITIAL: '[' opens the matrix           -> state "A"
//   A:       '[' opens a row                -> state "B"
//            ',' separates rows
//            ']' closes the matrix          -> state "INITIAL"
//   B:       number appended to `row`
//            ',' separates numbers
//            ']' closes the row (add_row)   -> state "A"
template <class Lexer>
struct matlib_tokens : boost::spirit::lex::lexer<Lexer>
{
    matlib_tokens(std::vector<std::vector<double> > &a)
      : matrix(a)
    {
        typedef boost::spirit::lex::token_def<> token_def_;

        // Floating point literal: digits with optional fraction (REAL1)
        // or fraction only (REAL2), optional sign and exponent.
        this->self.add_pattern("REAL1", "[0-9]+(\\.[0-9]*)?");
        this->self.add_pattern("REAL2", "\\.[0-9]+");
        number = "[-+]?({REAL1}|{REAL2})([eE][-+]?[0-9]+)?";

        this->self =
            token_def_('[') [set_lexer_state("A")]
            ;

        this->self("A") =
            token_def_('[') [set_lexer_state("B")]
          | ','
          | token_def_(']') [set_lexer_state("INITIAL")]
            ;

        this->self("B") =
            number [store_double(row)]
          | ','
          | token_def_(']') [add_row(matrix,row)]
            ;
    }

    boost::spirit::lex::token_def<> number;       // number token definition
    std::vector<std::vector<double> > &matrix;    // caller-owned result matrix
    std::vector<double> row;                      // scratch row being built
};

#endif
hww/makerlisp
src/usbkey/mla_v2017_03_06/framework/usb/inc/usb_host.h
// DOM-IGNORE-BEGIN
/*******************************************************************************
Copyright 2015 Microchip Technology Inc. (www.microchip.com)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

To request to license the code under the MLA license
(www.microchip.com/mla_license), please contact <EMAIL>
*******************************************************************************/
//DOM-IGNORE-END

// NOTE(review): the guard macro uses a reserved identifier (leading double
// underscore); kept unchanged for compatibility with existing includes.
#ifndef __USBHOST_H__
#define __USBHOST_H__
//DOM-IGNORE-END

#include <limits.h>

// *****************************************************************************
// *****************************************************************************
// Section: Host Firmware Version
// *****************************************************************************
// *****************************************************************************

#define USB_HOST_FW_MAJOR_VER   1       // Firmware version, major release number.
#define USB_HOST_FW_MINOR_VER   0       // Firmware version, minor release number.
#define USB_HOST_FW_DOT_VER     0       // Firmware version, dot release number.
// *****************************************************************************
// *****************************************************************************
// Section: Set Default Configuration Constants
// *****************************************************************************
// *****************************************************************************
// Each constant below may be overridden by the application (usb_config.h)
// before including this header; the #ifndef guards supply the defaults.

#ifndef USB_NUM_BULK_NAKS
    #define USB_NUM_BULK_NAKS       10000   // Define how many NAK's are allowed
                                            // during a bulk transfer before erroring.
#endif

#ifndef USB_NUM_COMMAND_TRIES
    #define USB_NUM_COMMAND_TRIES   3       // During enumeration, define how many
                                            // times each command will be tried before
                                            // giving up and resetting the device.
#endif

#ifndef USB_NUM_CONTROL_NAKS
    #define USB_NUM_CONTROL_NAKS    20      // Define how many NAK's are allowed
                                            // during a control transfer before erroring.
#endif

#ifndef USB_NUM_ENUMERATION_TRIES
    #define USB_NUM_ENUMERATION_TRIES   3   // Define how many times the host will try
                                            // to enumerate the device before giving
                                            // up and setting the state to DETACHED.
#endif

#ifndef USB_NUM_INTERRUPT_NAKS
    #define USB_NUM_INTERRUPT_NAKS  3       // Define how many NAK's are allowed
                                            // during an interrupt OUT transfer before
                                            // erroring.  Interrupt IN transfers that
                                            // are NAK'd are terminated without error.
#endif

// USB_INITIAL_VBUS_CURRENT is compared against mA values divided by 2, so it
// is expressed in 2 mA units (as in USB configuration descriptors).
#ifndef USB_INITIAL_VBUS_CURRENT
    #error The application must define USB_INITIAL_VBUS_CURRENT as 100 mA for Host or 8-100 mA for OTG.
#endif

#if defined (USB_SUPPORT_HOST)
    #if defined (USB_SUPPORT_OTG)
        // OTG hosts may advertise anywhere from 8 mA to 100 mA.
        #if (USB_INITIAL_VBUS_CURRENT < 8/2) || (USB_INITIAL_VBUS_CURRENT > 100/2)
            #warning USB_INITIAL_VBUS_CURRENT is in violation of the USB specification.
        #endif
    #else
        // Plain embedded hosts must advertise exactly 100 mA.
        #if (USB_INITIAL_VBUS_CURRENT != 100/2)
            #warning USB_INITIAL_VBUS_CURRENT is in violation of the USB specification.
        #endif
    #endif
#endif

// *****************************************************************************
// *****************************************************************************
// Section: USB Constants
// *****************************************************************************
// *****************************************************************************

// Section: Values for USBHostIssueDeviceRequest(), dataDirection
#define USB_DEVICE_REQUEST_SET  0       // USBHostIssueDeviceRequest() will set information.
#define USB_DEVICE_REQUEST_GET  1       // USBHostIssueDeviceRequest() will get information.

// Section: Dummy Device ID's
#define USB_ROOT_HUB            255     // Invalid Device ID used to indicate the root hub.

// *****************************************************************************
// *****************************************************************************
// Section: USB Data Structures
// *****************************************************************************
// *****************************************************************************

// *****************************************************************************
/* Transfer Attributes

This structure describes the transfer attributes of an endpoint, as packed
into the endpoint descriptor's bmAttributes byte.
*/
typedef union
{
   uint8_t     val;                            // Whole byte view.
   struct
   {
       uint8_t bfTransferType          : 2;    // See USB_TRANSFER_TYPE_* for values.
       uint8_t bfSynchronizationType   : 2;    // For isochronous endpoints only.
       uint8_t bfUsageType             : 2;    // For isochronous endpoints only.
   };
} TRANSFER_ATTRIBUTES;

// *****************************************************************************
/* Host Transfer Information

This structure is used when the event handler is used to notify the
upper layer of transfer completion.
*/
typedef struct _HOST_TRANSFER_DATA
{
   uint32_t                dataCount;          // Count of bytes transferred.
   uint8_t                *pUserData;          // Pointer to transfer data.
   uint8_t                 bEndpointAddress;   // Transfer endpoint.
   uint8_t                 bErrorCode;         // Transfer error code.
   TRANSFER_ATTRIBUTES     bmAttributes;       // INTERNAL USE ONLY - Endpoint transfer attributes.
   uint8_t                 clientDriver;       // INTERNAL USE ONLY - Client driver index for sending the event.
} HOST_TRANSFER_DATA;

// *****************************************************************************
/* Isochronous Data Buffer

Isochronous data transfers are continuous, until they are explicitly
terminated.  The maximum transfer size is given in the endpoint descriptor,
but a single transfer may contain less data than the maximum.  Also, the
USB peripheral can store data to RAM in a linear fashion only.  Therefore,
we cannot use a simple circular buffer for the data.  Instead, the
application or client driver must allocate multiple independent data
buffers.  These buffers must be the maximum transfer size.  This structure
contains a pointer to an allocated buffer, plus the valid data length of
the buffer.
*/
typedef struct _ISOCHRONOUS_DATA_BUFFER
{
    uint8_t *pBuffer;                   // Data buffer pointer.
    uint16_t dataLength;                // Amount of valid data in the buffer.
    uint8_t  bfDataLengthValid : 1;     // dataLength value is valid.
} ISOCHRONOUS_DATA_BUFFER;

// *****************************************************************************
/* Isochronous Data

Isochronous data transfers are continuous, until they are explicitly
terminated.  This requires a tighter integration between the host layer and
the application layer to manage the streaming data.

If an application uses isochronous transfers, it must allocate one variable
of type ISOCHRONOUS_DATA for each concurrent transfer.  When the device
attaches, the client driver must inform the application layer of the
maximum transfer size.  At this point, the application must allocate space
for the data buffers, and set the data buffer pointers in this structure to
point to them.
*/
#if !defined( USB_MAX_ISOCHRONOUS_DATA_BUFFERS )
    #define USB_MAX_ISOCHRONOUS_DATA_BUFFERS    2
#endif
#if USB_MAX_ISOCHRONOUS_DATA_BUFFERS < 2
    #error At least two buffers must be defined for isochronous data.
#endif

typedef struct _ISOCHRONOUS_DATA
{
    uint8_t totalBuffers;           // Total number of buffers available.
    uint8_t currentBufferUSB;       // The current buffer the USB peripheral is accessing.
    uint8_t currentBufferUser;      // The current buffer the user is reading/writing.
    uint8_t *pDataUser;             // User pointer for accessing data.

    ISOCHRONOUS_DATA_BUFFER buffers[USB_MAX_ISOCHRONOUS_DATA_BUFFERS]; // Data buffer information.
} ISOCHRONOUS_DATA;

// *****************************************************************************
/* Targeted Peripheral List

This structure is used to define the devices that this host can support.
If the host is a USB Embedded Host or Dual Role Device that does not
support OTG, the TPL may contain both specific devices and generic classes.
If the host supports OTG, then the TPL may contain ONLY specific devices.

An entry matches either by VID/PID or by class/subclass/protocol; the two
anonymous structs below overlay the same 32-bit `device` field, and the
`flags` bits select how the entry is interpreted.
*/
typedef struct _USB_TPL
{
    union
    {
        uint32_t val;                       // Whole 32-bit match value.
        struct
        {
            uint16_t idVendor;              // Vendor ID
            uint16_t idProduct;             // Product ID
        };
        struct
        {
            uint8_t bClass;                 // Class ID
            uint8_t bSubClass;              // SubClass ID
            uint8_t bProtocol;              // Protocol ID
        };
    } device;                               // Device identification (VID/PID or CL/SC/P view).
    uint8_t bConfiguration;                 // Initial device configuration
    uint8_t ClientDriver;                   // Index of client driver in the Client Driver table
    union
    {
        uint8_t val;                        // Whole flags byte.
        struct
        {
            uint8_t bfAllowHNP              : 1;    // Is HNP allowed?
            uint8_t bfIsClassDriver         : 1;    // Client driver is a class-level driver
            uint8_t bfSetConfiguration      : 1;    // bConfiguration is valid
            uint8_t bfIgnoreProtocol        : 1;    // Ignore protocol when matching a CL/SC/P entry
            uint8_t bfIgnoreSubClass        : 1;    // Ignore subclass when matching a CL/SC/P entry
            uint8_t bfIgnoreClass           : 1;    // Ignore class when matching a CL/SC/P entry
            uint8_t bfIgnorePID             : 1;    // Ignore PID when matching a VID/PID entry
            uint8_t bfEP0OnlyCustomDriver   : 1;    // Custom driver gets EP0 only
        };
    } flags;                                // Entry interpretation flags (see TPL_* masks).
} USB_TPL;

// Section: TPL Initializers
#define INIT_VID_PID(v,p)   {((v)|((p)<<16))}           // Set VID/PID support in the TPL.
#define INIT_CL_SC_P(c,s,p) {((c)|((s)<<8)|((p)<<16))}  // Set class support in the TPL (non-OTG only).

// Section: TPL Flags — bit masks for the USB_TPL `flags` field above.
#define TPL_ALLOW_HNP               0x01    // Bitmask for Host Negotiation Protocol.
#define TPL_CLASS_DRV               0x02    // Bitmask for class driver support.
#define TPL_SET_CONFIG              0x04    // Bitmask for setting the configuration.
#define TPL_IGNORE_PROTOCOL         0x08    // Bitmask for ignoring the protocol of a CL/SC/P driver
#define TPL_IGNORE_SUBCLASS         0x10    // Bitmask for ignoring the subclass of a CL/SC/P driver
#define TPL_IGNORE_CLASS            0x20    // Bitmask for ignoring the class of a CL/SC/P driver
#define TPL_IGNORE_PID              0x40    // Bitmask for ignoring the PID of a VID/PID driver
#define TPL_EP0_ONLY_CUSTOM_DRIVER  0x80    // Bitmask to let a custom driver gain EP0 only and
                                            // allow other interfaces to use standard drivers

// *****************************************************************************
// *****************************************************************************
// Section: USB Host - Client Driver Interface
// *****************************************************************************
// *****************************************************************************

/****************************************************************************
  Function:
    bool (*USB_CLIENT_EVENT_HANDLER) ( uint8_t address, USB_EVENT event,
                void *data, uint32_t size )

  Summary:
    This is a typedef to use when defining a client driver event handler.

  Description:
    This data type defines a pointer to a call-back function that must be
    implemented by a client driver if it needs to be aware of events on the
    USB.  When an event occurs, the Host layer will call the client driver
    via this pointer to handle the event.  Events are identified by the
    "event" parameter and may have associated data.  If the client driver
    was able to handle the event, it should return true.  If not (or if
    additional processing is required), it should return false.

  Precondition:
    The client must have been initialized.

  Parameters:
    uint8_t address    - Address of device where event occurred
    USB_EVENT event    - Identifies the event that occurred
    void *data         - Pointer to event-specific data
    uint32_t size      - Size of the event-specific data

  Return Values:
    true    - The event was handled
    false   - The event was not handled

  Remarks:
    The application may also implement an event handling routine if it
    requires knowledge of events.  To do so, it must implement a routine
    that matches this function signature and define the
    USB_HOST_APP_EVENT_HANDLER macro as the name of that function.
  ***************************************************************************/

typedef bool (*USB_CLIENT_EVENT_HANDLER) ( uint8_t address, USB_EVENT event, void *data, uint32_t size );


/****************************************************************************
  Function:
    bool (*USB_CLIENT_INIT) ( uint8_t address, uint32_t flags, uint8_t clientDriverID )

  Summary:
    This is a typedef to use when defining a client driver initialization
    handler.

  Description:
    This routine is a call out from the host layer to a USB client driver.
    It is called when the system has been configured as a USB host and a new
    device has been attached to the bus.  Its purpose is to initialize and
    activate the client driver.

  Precondition:
    The device has been configured.

  Parameters:
    uint8_t address        - Device's address on the bus
    uint32_t flags         - Initialization flags
    uint8_t clientDriverID - ID to send when issuing a Device Request via
                             USBHostIssueDeviceRequest() or
                             USBHostSetDeviceConfiguration().

  Return Values:
    true    - Successful
    false   - Not successful

  Remarks:
    There may be multiple client drivers.  If so, the USB host layer will
    call the initialize routine for each of the clients that are in the
    selected configuration.
  ***************************************************************************/

typedef bool (*USB_CLIENT_INIT)   ( uint8_t address, uint32_t flags, uint8_t clientDriverID );


/****************************************************************************
  Function:
    bool USB_HOST_APP_EVENT_HANDLER ( uint8_t address, USB_EVENT event,
            void *data, uint32_t size )

  Summary:
    This is a typedef to use when defining the application level events
    handler.

  Description:
    This function is implemented by the application.  The function name can
    be anything - the macro USB_HOST_APP_EVENT_HANDLER must be set in
    usb_config.h to the name of the application function.

    In the application layer, this function is responsible for handling all
    application-level events that are generated by the stack.  See the
    enumeration USB_EVENT for a complete list of all events that can occur.
    Note that some of these events are intended for client drivers
    (e.g. EVENT_TRANSFER), while some are intended for the application
    layer (e.g. EVENT_UNSUPPORTED_DEVICE).

    If the application can handle the event successfully, the function
    should return true.  For example, if the function receives the event
    EVENT_VBUS_REQUEST_POWER and the system can allocate that much power to
    an attached device, the function should return true.  If, however, the
    system cannot allocate that much power to an attached device, the
    function should return false.

  Precondition:
    None

  Parameters:
    uint8_t address    - Address of the USB device generating the event
    USB_EVENT event    - Event that occurred
    void *data         - Optional pointer to data for the event
    uint32_t size      - Size of the data pointed to by *data

  Return Values:
    true    - Event was processed successfully
    false   - Event was not processed successfully

  Remarks:
    If this function is not provided by the application, then all
    application events are assumed to function without error.
  ***************************************************************************/

#if defined( USB_HOST_APP_EVENT_HANDLER )
    bool USB_HOST_APP_EVENT_HANDLER ( uint8_t address, USB_EVENT event, void *data, uint32_t size );
#else
    // If the application does not provide an event handler, then we will
    // assume that all events function without error (except driver
    // override, which must default to "not handled").
    #define USB_HOST_APP_EVENT_HANDLER(a,e,d,s) ((e==EVENT_OVERRIDE_CLIENT_DRIVER_SELECTION)?false:true)
#endif


/****************************************************************************
  Function:
    bool USB_HOST_APP_DATA_EVENT_HANDLER ( uint8_t address, USB_EVENT event,
            void *data, uint32_t size )

  Summary:
    This is a typedef to use when defining the application level data events
    handler.

  Description:
    This function is implemented by the application.  The function name can
    be anything - the macro USB_HOST_APP_EVENT_HANDLER must be set in
    usb_config.h to the name of the application function.

    In the application layer, this function is responsible for handling all
    application-level data events that are generated by the stack.  See the
    enumeration USB_EVENT for a complete list of all events that can occur.
    Note that only data events, such as EVENT_DATA_ISOC_READ, will be passed
    to this event handler.

    If the application can handle the event successfully, the function
    should return true.

  Precondition:
    None

  Parameters:
    uint8_t address    - Address of the USB device generating the event
    USB_EVENT event    - Event that occurred
    void *data         - Optional pointer to data for the event
    uint32_t size      - Size of the data pointed to by *data

  Return Values:
    true    - Event was processed successfully
    false   - Event was not processed successfully

  Remarks:
    If this function is not provided by the application, then all
    application events are assumed to function without error.
  ***************************************************************************/

#if defined( USB_HOST_APP_DATA_EVENT_HANDLER )
    bool USB_HOST_APP_DATA_EVENT_HANDLER ( uint8_t address, USB_EVENT event, void *data, uint32_t size );
#else
    // If the application does not provide an event handler, then we will
    // assume that all events function without error.
    #define USB_HOST_APP_DATA_EVENT_HANDLER(a,e,d,s) true
#endif


// *****************************************************************************
/* Client Driver Table Structure

This structure is used to define an entry in the client-driver table.
Each entry provides the information that the Host layer needs to manage
a particular USB client driver, including pointers to the interface
routines that the Client Driver must implement.

NOTE(review): the struct layout depends on whether
USB_HOST_APP_DATA_EVENT_HANDLER is defined — all translation units must
agree on that macro or table entries will be misaligned.
*/
typedef struct _CLIENT_DRIVER_TABLE
{
    USB_CLIENT_INIT          Initialize;     // Initialization routine
    USB_CLIENT_EVENT_HANDLER EventHandler;   // Event routine
#ifdef USB_HOST_APP_DATA_EVENT_HANDLER
    USB_CLIENT_EVENT_HANDLER DataEventHandler; // Data Event routine
#endif
    uint32_t                 flags;          // Initialization flags
} CLIENT_DRIVER_TABLE;


// *****************************************************************************
// *****************************************************************************
// Section: USB Host - Device Information Hooks
// *****************************************************************************
// *****************************************************************************

extern uint8_t *pCurrentConfigurationDescriptor;    // Pointer to the current Configuration
                                                    // Descriptor of the attached device.
extern uint8_t *pDeviceDescriptor;                  // Pointer to the Device Descriptor
                                                    // of the attached device.
extern USB_TPL usbTPL[];                            // Application's Targeted Peripheral List.
extern CLIENT_DRIVER_TABLE usbClientDrvTable[];     // Application's client driver table.
// ***************************************************************************** // ***************************************************************************** // Section: Function Prototypes and Macro Functions // ***************************************************************************** // ***************************************************************************** /**************************************************************************** Function: uint8_t USBHostClearEndpointErrors( uint8_t deviceAddress, uint8_t endpoint ) Summary: This function clears an endpoint's internal error condition. Description: This function is called to clear the internal error condition of a device's endpoint. It should be called after the application has dealt with the error condition on the device. This routine clears internal status only; it does not interact with the device. Precondition: None Parameters: uint8_t deviceAddress - Address of device uint8_t endpoint - Endpoint to clear error condition Return Values: USB_SUCCESS - Errors cleared USB_UNKNOWN_DEVICE - Device not found USB_ENDPOINT_NOT_FOUND - Specified endpoint not found Remarks: None ***************************************************************************/ uint8_t USBHostClearEndpointErrors( uint8_t deviceAddress, uint8_t endpoint ); /**************************************************************************** Function: bool USBHostDeviceSpecificClientDriver( uint8_t deviceAddress ) Summary: This function indicates if the specified device has explicit client driver support specified in the TPL. Description: This function indicates if the specified device has explicit client driver support specified in the TPL. It is used in client drivers' USB_CLIENT_INIT routines to indicate that the client driver should be used even though the class, subclass, and protocol values may not match those normally required by the class. 
For example, some printing devices do not fulfill all of the requirements of the printer class, so their class, subclass, and protocol fields indicate a custom driver rather than the printer class. But the printer class driver can still be used, with minor limitations. Precondition: None Parameters: uint8_t deviceAddress - Address of device Return Values: true - This device is listed in the TPL by VID andPID, and has explicit client driver support. false - This device is not listed in the TPL by VID and PID. Remarks: This function is used so client drivers can allow certain devices to enumerate. For example, some printer devices indicate a custom class rather than the printer class, even though the device has only minor limitations from the full printer class. The printer client driver will fail to initialize the device if it does not indicate printer class support in its interface descriptor. The printer client driver could allow any device with an interface that matches the printer class endpoint configuration, but both printer and mass storage devices utilize one bulk IN and one bulk OUT endpoint. So a mass storage device would be erroneously initialized as a printer device. This function allows a client driver to know that the client driver support was specified explicitly in the TPL, so for this particular device only, the class, subclass, and protocol fields can be safely ignored. ***************************************************************************/ bool USBHostDeviceSpecificClientDriver( uint8_t deviceAddress ); /**************************************************************************** Function: uint8_t USBHostDeviceStatus( uint8_t deviceAddress ) Summary: This function returns the current status of a device. Description: This function returns the current status of a device. If the device is in a holding state due to an error, the error is returned. 
Precondition: None Parameters: uint8_t deviceAddress - Device address Return Values: USB_DEVICE_ATTACHED - Device is attached and running USB_DEVICE_DETACHED - No device is attached USB_DEVICE_ENUMERATING - Device is enumerating USB_HOLDING_OUT_OF_MEMORY - Not enough heap space available USB_HOLDING_UNSUPPORTED_DEVICE - Invalid configuration or unsupported class USB_HOLDING_UNSUPPORTED_HUB - Hubs are not supported USB_HOLDING_INVALID_CONFIGURATION - Invalid configuration requested USB_HOLDING_PROCESSING_CAPACITY - Processing requirement excessive USB_HOLDING_POWER_REQUIREMENT - Power requirement excessive USB_HOLDING_CLIENT_INIT_ERROR - Client driver failed to initialize USB_DEVICE_SUSPENDED - Device is suspended Other - Device is holding in an error state. The return value indicates the error. Remarks: None ***************************************************************************/ uint8_t USBHostDeviceStatus( uint8_t deviceAddress ); /**************************************************************************** Function: uint8_t * USBHostGetCurrentConfigurationDescriptor( uint8_t deviceAddress ) Description: This function returns a pointer to the current configuration descriptor of the requested device. Precondition: None Parameters: uint8_t deviceAddress - Address of device Returns: uint8_t * - Pointer to the Configuration Descriptor. Remarks: This will need to be expanded to a full function when multiple device support is added. ***************************************************************************/ #define USBHostGetCurrentConfigurationDescriptor( deviceAddress) ( pCurrentConfigurationDescriptor ) /**************************************************************************** Function: uint8_t * USBHostGetDeviceDescriptor( uint8_t deviceAddress ) Description: This function returns a pointer to the device descriptor of the requested device. 
Precondition: None Parameters: uint8_t deviceAddress - Address of device Returns: uint8_t * - Pointer to the Device Descriptor. Remarks: This will need to be expanded to a full function when multiple device support is added. ***************************************************************************/ #define USBHostGetDeviceDescriptor( deviceAddress ) ( pDeviceDescriptor ) /**************************************************************************** Function: uint8_t USBHostGetStringDescriptor ( uint8_t deviceAddress, uint8_t stringNumber, uint8_t LangID, uint8_t *stringDescriptor, uint8_t stringLength, uint8_t clientDriverID ) Summary: This routine initiates a request to obtains the requested string descriptor. Description: This routine initiates a request to obtains the requested string descriptor. If the request cannot be started, the routine returns an error. Otherwise, the request is started, and the requested string descriptor is stored in the designated location. Example Usage: <code> USBHostGetStringDescriptor( deviceAddress, stringDescriptorNum, LangID, stringDescriptorBuffer, sizeof(stringDescriptorBuffer), 0xFF ); while(1) { if(USBHostTransferIsComplete( deviceAddress , 0, &errorCode, &byteCount)) { if(errorCode) { //There was an error reading the string, bail out of loop } else { //String is located in specified buffer, do something with it. //The length of the string is both in the byteCount variable // as well as the first byte of the string itself } break; } USBTasks(); } </code> Precondition: None Parameters: deviceAddress - Address of the device stringNumber - Index of the desired string descriptor LangID - The Language ID of the string to read (should be 0 if trying to read the language ID list *stringDescriptor - Pointer to where to store the string. stringLength - Maximum length of the returned string. clientDriverID - Client driver to return the completion event to. Return Values: USB_SUCCESS - The request was started successfully. 
USB_UNKNOWN_DEVICE - Device not found USB_INVALID_STATE - We must be in a normal running state. USB_ENDPOINT_BUSY - The endpoint is currently processing a request. Remarks: The returned string descriptor will be in the exact format as obtained from the device. The length of the entire descriptor will be in the first byte, and the descriptor type will be in the second. The string itself is represented in UNICODE. Refer to the USB 2.0 Specification for more information about the format of string descriptors. ***************************************************************************/ #define USBHostGetStringDescriptor( deviceAddress, stringNumber, LangID, stringDescriptor, stringLength, clientDriverID ) \ USBHostIssueDeviceRequest( deviceAddress, USB_SETUP_DEVICE_TO_HOST | USB_SETUP_TYPE_STANDARD | USB_SETUP_RECIPIENT_DEVICE, \ USB_REQUEST_GET_DESCRIPTOR, (USB_DESCRIPTOR_STRING << 8) | stringNumber, \ LangID, stringLength, stringDescriptor, USB_DEVICE_REQUEST_GET, clientDriverID ) /**************************************************************************** Function: bool USBHostInit( unsigned long flags ) Summary: This function initializes the variables of the USB host stack. Description: This function initializes the variables of the USB host stack. It does not initialize the hardware. The peripheral itself is initialized in one of the state machine states. Therefore, USBHostTasks() should be called soon after this function. Precondition: None Parameters: flags - reserved Return Values: true - Initialization successful false - Could not allocate memory. Remarks: If the endpoint list is empty, an entry is created in the endpoint list for EP0. If the list is not empty, free all allocated memory other than the EP0 node. This allows the routine to be called multiple times by the application. 
***************************************************************************/ bool USBHostInit( unsigned long flags ); /**************************************************************************** Function: bool USBHostIsochronousBuffersCreate( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers, uint16_t bufferSize ) Description: This function initializes the isochronous data buffer information and allocates memory for each buffer. This function will not allocate memory if the buffer pointer is not NULL. Precondition: None Parameters: None Return Values: true - All buffers are allocated successfully. false - Not enough heap space to allocate all buffers - adjust the project to provide more heap space. Remarks: This function is available only if USB_SUPPORT_ISOCHRONOUS_TRANSFERS is defined in usb_config.h. ***************************************************************************/ #ifdef USB_SUPPORT_ISOCHRONOUS_TRANSFERS bool USBHostIsochronousBuffersCreate( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers, uint16_t bufferSize ); #endif /**************************************************************************** Function: void USBHostIsochronousBuffersDestroy( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers ) Description: This function releases all of the memory allocated for the isochronous data buffers. It also resets all other information about the buffers. Precondition: None Parameters: None Returns: None Remarks: This function is available only if USB_SUPPORT_ISOCHRONOUS_TRANSFERS is defined in usb_config.h. 
***************************************************************************/ #ifdef USB_SUPPORT_ISOCHRONOUS_TRANSFERS void USBHostIsochronousBuffersDestroy( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers ); #endif /**************************************************************************** Function: void USBHostIsochronousBuffersReset( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers ) Description: This function resets all the isochronous data buffers. It does not do anything with the space allocated for the buffers. Precondition: None Parameters: None Returns: None Remarks: This function is available only if USB_SUPPORT_ISOCHRONOUS_TRANSFERS is defined in usb_config.h. ***************************************************************************/ #ifdef USB_SUPPORT_ISOCHRONOUS_TRANSFERS void USBHostIsochronousBuffersReset( ISOCHRONOUS_DATA * isocData, uint8_t numberOfBuffers ); #endif /**************************************************************************** Function: uint8_t USBHostIssueDeviceRequest( uint8_t deviceAddress, uint8_t bmRequestType, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint16_t wLength, uint8_t *data, uint8_t dataDirection, uint8_t clientDriverID ) Summary: This function sends a standard device request to the attached device. Description: This function sends a standard device request to the attached device. The user must pass in the parameters of the device request. If there is input or output data associated with the request, a pointer to the data must be provided. The direction of the associated data (input or output) must also be indicated. This function does no special processing in regards to the request except for three requests. If SET INTERFACE is sent, then DTS is reset for all endpoints. If CLEAR FEATURE (ENDPOINT HALT) is sent, then DTS is reset for that endpoint. If SET CONFIGURATION is sent, the request is aborted with a failure. 
The function USBHostSetDeviceConfiguration() must be called to change the device configuration, since endpoint definitions may change. Precondition: The host state machine should be in the running state, and no reads or writes to EP0 should be in progress. Parameters: uint8_t deviceAddress - Device address uint8_t bmRequestType - The request type as defined by the USB specification. uint8_t bRequest - The request as defined by the USB specification. uint16_t wValue - The value for the request as defined by the USB specification. uint16_t wIndex - The index for the request as defined by the USB specification. uint16_t wLength - The data length for the request as defined by the USB specification. uint8_t *data - Pointer to the data for the request. uint8_t dataDirection - USB_DEVICE_REQUEST_SET or USB_DEVICE_REQUEST_GET uint8_t clientDriverID - Client driver to send the event to. Return Values: USB_SUCCESS - Request processing started USB_UNKNOWN_DEVICE - Device not found USB_INVALID_STATE - The host must be in a normal running state to do this request USB_ENDPOINT_BUSY - A read or write is already in progress USB_ILLEGAL_REQUEST - SET CONFIGURATION cannot be performed with this function. Remarks: DTS reset is done before the command is issued. ***************************************************************************/ uint8_t USBHostIssueDeviceRequest( uint8_t deviceAddress, uint8_t bmRequestType, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint16_t wLength, uint8_t *data, uint8_t dataDirection, uint8_t clientDriverID ); /**************************************************************************** Function: uint8_t USBHostRead( uint8_t deviceAddress, uint8_t endpoint, uint8_t *pData, uint32_t size ) Summary: This function initiates a read from the attached device. Description: This function initiates a read from the attached device. If the endpoint is isochronous, special conditions apply. 
The pData and size parameters have slightly different meanings, since multiple buffers are required. Once started, an isochronous transfer will continue with no upper layer intervention until USBHostTerminateTransfer() is called. The ISOCHRONOUS_DATA_BUFFERS structure should not be manipulated until the transfer is terminated. To clarify parameter usage and to simplify casting, use the macro USBHostReadIsochronous() when reading from an isochronous endpoint. Precondition: None Parameters: uint8_t deviceAddress - Device address uint8_t endpoint - Endpoint number uint8_t *pData - Pointer to where to store the data. If the endpoint is isochronous, this points to an ISOCHRONOUS_DATA_BUFFERS structure, with multiple data buffer pointers. uint32_t size - Number of data bytes to read. If the endpoint is isochronous, this is the number of data buffer pointers pointed to by pData. Return Values: USB_SUCCESS - Read started successfully. USB_UNKNOWN_DEVICE - Device with the specified address not found. USB_INVALID_STATE - We are not in a normal running state. USB_ENDPOINT_ILLEGAL_TYPE - Must use USBHostControlRead to read from a control endpoint. USB_ENDPOINT_ILLEGAL_DIRECTION - Must read from an IN endpoint. USB_ENDPOINT_STALLED - Endpoint is stalled. Must be cleared by the application. USB_ENDPOINT_ERROR - Endpoint has too many errors. Must be cleared by the application. USB_ENDPOINT_BUSY - A Read is already in progress. USB_ENDPOINT_NOT_FOUND - Invalid endpoint. Remarks: None ***************************************************************************/ uint8_t USBHostRead( uint8_t deviceAddress, uint8_t endpoint, uint8_t *data, uint32_t size ); /**************************************************************************** Function: uint8_t USBHostReadIsochronous( uint8_t deviceAddress, uint8_t endpoint, ISOCHRONOUS_DATA *pIsochronousData ) Summary: This function initiates a read from an isochronous endpoint on the attached device. 
Description: This function initiates a read from an isochronous endpoint on the attached device. If the endpoint is not isochronous, use USBHostRead(). Once started, an isochronous transfer will continue with no upper layer intervention until USBHostTerminateTransfer() is called. Precondition: None Parameters: uint8_t deviceAddress - Device address uint8_t endpoint - Endpoint number ISOCHRONOUS_DATA *pIsochronousData - Pointer to an ISOCHRONOUS_DATA structure, containing information for the application and the host driver for the isochronous transfer. Return Values: USB_SUCCESS - Read started successfully. USB_UNKNOWN_DEVICE - Device with the specified address not found. USB_INVALID_STATE - We are not in a normal running state. USB_ENDPOINT_ILLEGAL_TYPE - Must use USBHostControlRead to read from a control endpoint. USB_ENDPOINT_ILLEGAL_DIRECTION - Must read from an IN endpoint. USB_ENDPOINT_STALLED - Endpoint is stalled. Must be cleared by the application. USB_ENDPOINT_ERROR - Endpoint has too many errors. Must be cleared by the application. USB_ENDPOINT_BUSY - A Read is already in progress. USB_ENDPOINT_NOT_FOUND - Invalid endpoint. Remarks: None ***************************************************************************/ #define USBHostReadIsochronous( a, e, p ) USBHostRead( a, e, (uint8_t *)p, (uint32_t)0 ); /**************************************************************************** Function: uint8_t USBHostResetDevice( uint8_t deviceAddress ) Summary: This function resets an attached device. Description: This function places the device back in the RESET state, to issue RESET signaling. It can be called only if the state machine is not in the DETACHED state. 
  Precondition:
    None

  Parameters:
    uint8_t deviceAddress  - Device address

  Return Values:
    USB_SUCCESS         - Success
    USB_UNKNOWN_DEVICE  - Device not found
    USB_ILLEGAL_REQUEST - Device cannot RESUME unless it is suspended

  Remarks:
    In order to do a full clean-up, the state is set back to STATE_DETACHED
    rather than a reset state.  The ATTACH interrupt will automatically be
    triggered when the module is re-enabled, and the proper reset will be
    performed.
  ***************************************************************************/

uint8_t USBHostResetDevice( uint8_t deviceAddress );

/****************************************************************************
  Function:
    uint8_t USBHostResumeDevice( uint8_t deviceAddress )

  Summary:
    This function issues a RESUME to the attached device.

  Description:
    This function issues a RESUME to the attached device.  It can be called
    only if the state machine is in the suspend state.

  Precondition:
    None

  Parameters:
    uint8_t deviceAddress  - Device address

  Return Values:
    USB_SUCCESS         - Success
    USB_UNKNOWN_DEVICE  - Device not found
    USB_ILLEGAL_REQUEST - Device cannot RESUME unless it is suspended

  Remarks:
    None
  ***************************************************************************/

uint8_t USBHostResumeDevice( uint8_t deviceAddress );

/****************************************************************************
  Function:
    uint8_t USBHostSetDeviceConfiguration( uint8_t deviceAddress, uint8_t configuration )

  Summary:
    This function changes the device's configuration.

  Description:
    This function is used by the application to change the device's
    Configuration.  This function must be used instead of
    USBHostIssueDeviceRequest(), because the endpoint definitions may change.

    To see when the reconfiguration is complete, use the
    USBHostDeviceStatus() function.  If configuration is still in progress,
    this function will return USB_DEVICE_ENUMERATING.

  Precondition:
    The host state machine should be in the running state, and no reads or
    writes should be in progress.
  Parameters:
    uint8_t deviceAddress  - Device address
    uint8_t configuration  - Index of the new configuration

  Return Values:
    USB_SUCCESS         - Process of changing the configuration was started
                            successfully.
    USB_UNKNOWN_DEVICE  - Device not found
    USB_INVALID_STATE   - This function cannot be called during enumeration
                            or while performing a device request.
    USB_BUSY            - No IN or OUT transfers may be in progress.

  Example:
    <code>
    rc = USBHostSetDeviceConfiguration( attachedDevice, configuration );
    if (rc)
    {
        // Error - cannot set configuration.
    }
    else
    {
        while (USBHostDeviceStatus( attachedDevice ) == USB_DEVICE_ENUMERATING)
        {
            USBHostTasks();
        }
    }
    if (USBHostDeviceStatus( attachedDevice ) != USB_DEVICE_ATTACHED)
    {
        // Error - cannot set configuration.
    }
    </code>

  Remarks:
    If an invalid configuration is specified, this function cannot return
    an error.  Instead, the event USB_UNSUPPORTED_DEVICE will be sent to the
    application layer and the device will be placed in a holding state with
    a USB_HOLDING_UNSUPPORTED_DEVICE error returned by USBHostDeviceStatus().
  ***************************************************************************/

uint8_t USBHostSetDeviceConfiguration( uint8_t deviceAddress, uint8_t configuration );

/****************************************************************************
  Function:
    uint8_t USBHostSetNAKTimeout( uint8_t deviceAddress, uint8_t endpoint,
                uint16_t flags, uint16_t timeoutCount )

  Summary:
    This function specifies NAK timeout capability.

  Description:
    This function is used to set whether or not an endpoint on a device
    should time out a transaction based on the number of NAKs received, and
    if so, how many NAKs are allowed before the timeout.

  Precondition:
    None

  Parameters:
    uint8_t deviceAddress  - Device address
    uint8_t endpoint       - Endpoint number to configure
    uint16_t flags         - Bit 0:
                             * 0 = disable NAK timeout
                             * 1 = enable NAK timeout
    uint16_t timeoutCount  - Number of NAKs allowed before a timeout

  Return Values:
    USB_SUCCESS             - NAK timeout was configured successfully.
USB_UNKNOWN_DEVICE - Device not found. USB_ENDPOINT_NOT_FOUND - The specified endpoint was not found. Remarks: None ***************************************************************************/ uint8_t USBHostSetNAKTimeout( uint8_t deviceAddress, uint8_t endpoint, uint16_t flags, uint16_t timeoutCount ); /**************************************************************************** Function: void USBHostShutdown( void ) Description: This function turns off the USB module and frees all unnecessary memory. This routine can be called by the application layer to shut down all USB activity, which effectively detaches all devices. The event EVENT_DETACH will be sent to the client drivers for the attached device, and the event EVENT_VBUS_RELEASE_POWER will be sent to the application layer. Precondition: None Parameters: None - None Returns: None Remarks: None ***************************************************************************/ void USBHostShutdown( void ); /**************************************************************************** Function: uint8_t USBHostSuspendDevice( uint8_t deviceAddress ) Summary: This function suspends a device. Description: This function put a device into an IDLE state. It can only be called while the state machine is in normal running mode. After 3ms, the attached device should go into SUSPEND mode. Precondition: None Parameters: uint8_t deviceAddress - Device to suspend Return Values: USB_SUCCESS - Success USB_UNKNOWN_DEVICE - Device not found USB_ILLEGAL_REQUEST - Cannot suspend unless device is in normal run mode Remarks: None ****************************************************************************/ uint8_t USBHostSuspendDevice( uint8_t deviceAddress ); /**************************************************************************** Function: void USBHostTasks( void ) Summary: This function executes the host tasks for USB host operation. Description: This function executes the host tasks for USB host operation. 
    It must be executed on a regular basis to keep everything functioning.

    The primary purpose of this function is to handle device attach/detach
    and enumeration.  It does not handle USB packet transmission or
    reception; that must be done in the USB interrupt handler to ensure
    timely operation.

    This routine should be called on a regular basis, but there is no
    specific time requirement.  Devices will still be able to attach,
    enumerate, and detach, but the operations will occur more slowly as the
    calling interval increases.

  Precondition:
    USBHostInit() has been called.

  Parameters:
    None

  Returns:
    None

  Remarks:
    None
  ***************************************************************************/

void USBHostTasks( void );

/****************************************************************************
  Function:
    void USBHostTerminateTransfer( uint8_t deviceAddress, uint8_t endpoint )

  Summary:
    This function terminates the current transfer for the given endpoint.

  Description:
    This function terminates the current transfer for the given endpoint.
    It can be used to terminate reads or writes that the device is not
    responding to.  It is also the only way to terminate an isochronous
    transfer.

  Precondition:
    None

  Parameters:
    uint8_t deviceAddress  - Device address
    uint8_t endpoint       - Endpoint number

  Returns:
    None

  Remarks:
    None
  ***************************************************************************/

void USBHostTerminateTransfer( uint8_t deviceAddress, uint8_t endpoint );

/****************************************************************************
  Function:
    bool USBHostTransferIsComplete( uint8_t deviceAddress, uint8_t endpoint,
                        uint8_t *errorCode, uint32_t *byteCount )

  Summary:
    This function indicates whether or not the last endpoint transaction is
    complete.

  Description:
    This function indicates whether or not the last endpoint transaction is
    complete.  If it is complete, an error code and the number of bytes
    transferred are returned.

    For isochronous transfers, byteCount is not valid.
Instead, use the returned byte counts for each EVENT_TRANSFER event that was generated during the transfer. Precondition: None Parameters: uint8_t deviceAddress - Device address uint8_t endpoint - Endpoint number uint8_t *errorCode - Error code indicating the status of the transfer. Only valid if the transfer is complete. uint32_t *byteCount - The number of bytes sent or received. Invalid for isochronous transfers. Return Values: true - Transfer is complete. false - Transfer is not complete. Remarks: Possible values for errorCode are: * USB_SUCCESS - Transfer successful * USB_UNKNOWN_DEVICE - Device not attached * USB_ENDPOINT_STALLED - Endpoint STALL'd * USB_ENDPOINT_ERROR_ILLEGAL_PID - Illegal PID returned * USB_ENDPOINT_ERROR_BIT_STUFF * USB_ENDPOINT_ERROR_DMA * USB_ENDPOINT_ERROR_TIMEOUT * USB_ENDPOINT_ERROR_DATA_FIELD * USB_ENDPOINT_ERROR_CRC16 * USB_ENDPOINT_ERROR_END_OF_FRAME * USB_ENDPOINT_ERROR_PID_CHECK * USB_ENDPOINT_ERROR - Other error ***************************************************************************/ bool USBHostTransferIsComplete( uint8_t deviceAddress, uint8_t endpoint, uint8_t *errorCode, uint32_t *byteCount ); /**************************************************************************** Function: uint8_t USBHostVbusEvent( USB_EVENT vbusEvent, uint8_t hubAddress, uint8_t portNumber) Summary: This function handles Vbus events that are detected by the application. Description: This function handles Vbus events that are detected by the application. Since Vbus management is application dependent, the application is responsible for monitoring Vbus and detecting overcurrent conditions and removal of the overcurrent condition. If the application detects an overcurrent condition, it should call this function with the event EVENT_VBUS_OVERCURRENT with the address of the hub and port number that has the condition. 
    When a port returns to normal operation, the application should call
    this function with the event EVENT_VBUS_POWER_AVAILABLE so the stack
    knows that it can allow devices to attach to that port.

  Precondition:
    None

  Parameters:
    USB_EVENT vbusEvent  - Vbus event that occurred.  Valid events:
                           * EVENT_VBUS_OVERCURRENT
                           * EVENT_VBUS_POWER_AVAILABLE
    uint8_t hubAddress   - Address of the hub device (USB_ROOT_HUB for the
                           root hub)
    uint8_t portNumber   - Number of the physical port on the hub (0 - based)

  Return Values:
    USB_SUCCESS         - Event handled
    USB_ILLEGAL_REQUEST - Invalid event, hub, or port

  Remarks:
    None
  ***************************************************************************/

uint8_t USBHostVbusEvent(USB_EVENT vbusEvent, uint8_t hubAddress, uint8_t portNumber);

/****************************************************************************
  Function:
    uint8_t USBHostWrite( uint8_t deviceAddress, uint8_t endpoint,
                        uint8_t *data, uint32_t size )

  Summary:
    This function initiates a write to the attached device.

  Description:
    This function initiates a write to the attached device.  The data buffer
    pointed to by *data must remain valid during the entire time that the
    write is taking place; the data is not buffered by the stack.

    If the endpoint is isochronous, special conditions apply.  The pData and
    size parameters have slightly different meanings, since multiple buffers
    are required.  Once started, an isochronous transfer will continue with
    no upper layer intervention until USBHostTerminateTransfer() is called.
    The ISOCHRONOUS_DATA_BUFFERS structure should not be manipulated until
    the transfer is terminated.

    To clarify parameter usage and to simplify casting, use the macro
    USBHostWriteIsochronous() when writing to an isochronous endpoint.

  Precondition:
    None

  Parameters:
    uint8_t deviceAddress  - Device address
    uint8_t endpoint       - Endpoint number
    uint8_t *data          - Pointer to where the data is stored.
If the endpoint is isochronous, this points to an ISOCHRONOUS_DATA_BUFFERS structure, with multiple data buffer pointers. uint32_t size - Number of data bytes to send. If the endpoint is isochronous, this is the number of data buffer pointers pointed to by pData. Return Values: USB_SUCCESS - Write started successfully. USB_UNKNOWN_DEVICE - Device with the specified address not found. USB_INVALID_STATE - We are not in a normal running state. USB_ENDPOINT_ILLEGAL_TYPE - Must use USBHostControlWrite to write to a control endpoint. USB_ENDPOINT_ILLEGAL_DIRECTION - Must write to an OUT endpoint. USB_ENDPOINT_STALLED - Endpoint is stalled. Must be cleared by the application. USB_ENDPOINT_ERROR - Endpoint has too many errors. Must be cleared by the application. USB_ENDPOINT_BUSY - A Write is already in progress. USB_ENDPOINT_NOT_FOUND - Invalid endpoint. Remarks: None ***************************************************************************/ uint8_t USBHostWrite( uint8_t deviceAddress, uint8_t endpoint, uint8_t *data, uint32_t size ); /**************************************************************************** Function: uint8_t USBHostWriteIsochronous( uint8_t deviceAddress, uint8_t endpoint, ISOCHRONOUS_DATA *pIsochronousData ) Summary: This function initiates a write to an isochronous endpoint on the attached device. Description: This function initiates a write to an isochronous endpoint on the attached device. If the endpoint is not isochronous, use USBHostWrite(). Once started, an isochronous transfer will continue with no upper layer intervention until USBHostTerminateTransfer() is called. Precondition: None Parameters: uint8_t deviceAddress - Device address uint8_t endpoint - Endpoint number ISOCHRONOUS_DATA *pIsochronousData - Pointer to an ISOCHRONOUS_DATA structure, containing information for the application and the host driver for the isochronous transfer. Return Values: USB_SUCCESS - Write started successfully. 
USB_UNKNOWN_DEVICE - Device with the specified address not found. USB_INVALID_STATE - We are not in a normal running state. USB_ENDPOINT_ILLEGAL_TYPE - Must use USBHostControlWrite to write to a control endpoint. USB_ENDPOINT_ILLEGAL_DIRECTION - Must write to an OUT endpoint. USB_ENDPOINT_STALLED - Endpoint is stalled. Must be cleared by the application. USB_ENDPOINT_ERROR - Endpoint has too many errors. Must be cleared by the application. USB_ENDPOINT_BUSY - A Write is already in progress. USB_ENDPOINT_NOT_FOUND - Invalid endpoint. Remarks: None ***************************************************************************/ #define USBHostWriteIsochronous( a, e, p ) USBHostWrite( a, e, (uint8_t *)p, (uint32_t)0 ); /**************************************************************************** Function: void USB_HostInterruptHandler(void); Summary: This function handles the interrupts when the USB module is running in host mode. Description: This function handles the interrupts when the USB module is running in host mode. It will clear all USB based interrupts as applicable. It should only be called when the module is in host mode. Precondition: Should only be called when in host mode. Parameters: None Return Values: None ***************************************************************************/ void USB_HostInterruptHandler(void); #endif // ***************************************************************************** // EOF
faipaz/Algorithms
Optimization/LevmarAndroid/jni/Thirdparty/clapack/TESTING/MATGEN/zlakf2.c
/* zlakf2.f -- translated by f2c (version 20061008).
   You must link the resulting object file with libf2c:
	on Microsoft Windows system, link with libf2c.lib;
	on Linux or Unix systems, link with .../path/to/libf2c.a -lm
	or, if you install libf2c.a in a standard place, with -lf2c -lm
	-- in that order, at the end of the command line, as in
		cc *.o -lf2c -lm
   Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
	http://www.netlib.org/f2c/libf2c.zip
*/

#include "f2c.h"
#include "blaswrap.h"

/* Table of constant values */

/* Complex zero, passed (as both alpha and beta) to zlaset_ so that the
   output matrix Z is cleared before the nonzero blocks are written. */
static doublecomplex c_b1 = {0.,0.};

/*  ZLAKF2 -- LAPACK test routine (version 3.1)
    Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
    November 2006

    Purpose
    =======

    Form the 2*M*N by 2*M*N matrix

           Z = [ kron(In, A)  -kron(B', Im) ]
               [ kron(In, D)  -kron(E', Im) ],

    where In is the identity matrix of size n and X' is the transpose
    of X.  kron(X, Y) is the Kronecker product between the matrices X
    and Y.

    Arguments
    =========

    M       (input) INTEGER
            Size of matrix, must be >= 1.

    N       (input) INTEGER
            Size of matrix, must be >= 1.

    A       (input) COMPLEX*16, dimension ( LDA, M )
            The matrix A in the output matrix Z.

    LDA     (input) INTEGER
            The leading dimension of A, B, D, and E. ( LDA >= M+N )

    B       (input) COMPLEX*16, dimension ( LDA, N )
    D       (input) COMPLEX*16, dimension ( LDA, M )
    E       (input) COMPLEX*16, dimension ( LDA, N )
            The matrices used in forming the output matrix Z.

    Z       (output) COMPLEX*16, dimension ( LDZ, 2*M*N )
            The resultant Kronecker M*N*2 by M*N*2 matrix (see above.)

    LDZ     (input) INTEGER
            The leading dimension of Z. ( LDZ >= 2*M*N )

    ====================================================================  */
/* Subroutine */ int zlakf2_(integer *m, integer *n, doublecomplex *a,
	integer *lda, doublecomplex *b, doublecomplex *d__, doublecomplex *e,
	doublecomplex *z__, integer *ldz)
{
    /* System generated locals */
    integer a_dim1, a_offset, b_dim1, b_offset, d_dim1, d_offset, e_dim1,
	    e_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4, i__5;
    doublecomplex z__1;

    /* Local variables */
    integer i__, j, l, ik, jk, mn, mn2;
    extern /* Subroutine */ int zlaset_(char *, integer *, integer *,
	    doublecomplex *, doublecomplex *, doublecomplex *, integer *);

    /* Parameter adjustments.  f2c emits 1-based, column-major indexing:
       each array pointer is shifted back by (1 + leading_dimension) so
       that x[i + j*x_dim1] addresses the Fortran element x(i,j). */
    e_dim1 = *lda;
    e_offset = 1 + e_dim1;
    e -= e_offset;
    d_dim1 = *lda;
    d_offset = 1 + d_dim1;
    d__ -= d_offset;
    b_dim1 = *lda;
    b_offset = 1 + b_dim1;
    b -= b_offset;
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    z_dim1 = *ldz;
    z_offset = 1 + z_dim1;
    z__ -= z_offset;

    /* Function Body */

    /* Initialize Z to all zeros; only the block-diagonal (left half) and
       block-scaled-identity (right half) entries are filled in below. */
    mn = *m * *n;
    mn2 = mn << 1;
    zlaset_("Full", &mn2, &mn2, &c_b1, &c_b1, &z__[z_offset], ldz);

    /* Left half: copy A (rows 1..mn) and D (rows mn+1..2mn) onto each of
       the n diagonal m-by-m blocks, i.e. kron(In, A) over kron(In, D).
       ik tracks the top-left corner of the current diagonal block. */
    ik = 1;
    i__1 = *n;
    for (l = 1; l <= i__1; ++l) {

	/* form kron(In, A) */
	i__2 = *m;
	for (i__ = 1; i__ <= i__2; ++i__) {
	    i__3 = *m;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = ik + i__ - 1 + (ik + j - 1) * z_dim1;
		i__5 = i__ + j * a_dim1;
		z__[i__4].r = a[i__5].r, z__[i__4].i = a[i__5].i;
	    }
	}

	/* form kron(In, D) */
	i__2 = *m;
	for (i__ = 1; i__ <= i__2; ++i__) {
	    i__3 = *m;
	    for (j = 1; j <= i__3; ++j) {
		i__4 = ik + mn + i__ - 1 + (ik + j - 1) * z_dim1;
		i__5 = i__ + j * d_dim1;
		z__[i__4].r = d__[i__5].r, z__[i__4].i = d__[i__5].i;
	    }
	}

	ik += *m;
    }

    /* Right half (columns mn+1..2mn): block (l,j) is -b(j,l)*Im on top and
       -e(j,l)*Im below -- the (j,l) indexing of b/e realizes the transpose
       in -kron(B', Im) and -kron(E', Im).  jk tracks the block column. */
    ik = 1;
    i__1 = *n;
    for (l = 1; l <= i__1; ++l) {
	jk = mn + 1;

	i__2 = *n;
	for (j = 1; j <= i__2; ++j) {

	    /* form -kron(B', Im) */
	    i__3 = *m;
	    for (i__ = 1; i__ <= i__3; ++i__) {
		i__4 = ik + i__ - 1 + (jk + i__ - 1) * z_dim1;
		i__5 = j + l * b_dim1;
		z__1.r = -b[i__5].r, z__1.i = -b[i__5].i;
		z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
	    }

	    /* form -kron(E', Im) */
	    i__3 = *m;
	    for (i__ = 1; i__ <= i__3; ++i__) {
		i__4 = ik + mn + i__ - 1 + (jk + i__ - 1) * z_dim1;
		i__5 = j + l * e_dim1;
		z__1.r = -e[i__5].r, z__1.i = -e[i__5].i;
		z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
	    }

	    jk += *m;
	}

	ik += *m;
    }

    return 0;

/*     End of ZLAKF2 */

} /* zlakf2_ */
Milxnor/Nacro
Nacro/SDK/FN_RewardInfoButtonWidget_classes.hpp
#pragma once

// Fortnite (1.8) SDK
// Auto-generated reflection wrapper for the Blueprint-generated widget class
// RewardInfoButtonWidget.RewardInfoButtonWidget_C.  Member offsets below are
// fixed by the game binary -- do not reorder, insert, or resize fields.

#ifdef _MSC_VER
#pragma pack(push, 0x8)
#endif

namespace SDK
{
//---------------------------------------------------------------------------
//Classes
//---------------------------------------------------------------------------

// WidgetBlueprintGeneratedClass RewardInfoButtonWidget.RewardInfoButtonWidget_C
// 0x0048 (0x0908 - 0x08C0)
class URewardInfoButtonWidget_C : public UFortRewardInfoButton
{
public:
	// Frame storage for the Blueprint VM ("Ubergraph") of this widget.
	struct FPointerToUberGraphFrame UberGraphFrame; // 0x08C0(0x0008) (Transient, DuplicateTransient)
	// Bound sub-widget -- presumably the row holding the item detail
	// widgets (name inferred; confirm against the widget tree).
	class UHorizontalBox* ItemDetails; // 0x08C8(0x0008) (BlueprintVisible, ExportObject, InstancedReference)
	// Bound sub-widget -- text block for the item info line (name inferred).
	class UCommonTextBlock* ItemInfo; // 0x08D0(0x0008) (BlueprintVisible, ExportObject, InstancedReference)
	// Display name text exposed to the Blueprint.
	struct FText DisplayNameText; // 0x08D8(0x0018) (Edit, BlueprintVisible, DisableEditOnInstance)
	// Quantity text exposed to the Blueprint.
	struct FText DisplayQuantity; // 0x08F0(0x0018) (Edit, BlueprintVisible, DisableEditOnInstance)

	// Returns the UClass for this Blueprint-generated class, looked up
	// once by its full object name and cached in a function-local static.
	static UClass* StaticClass()
	{
		static auto ptr = UObject::FindClass("WidgetBlueprintGeneratedClass RewardInfoButtonWidget.RewardInfoButtonWidget_C");
		return ptr;
	}

	// Wrappers around the corresponding Blueprint functions (bodies are
	// generated elsewhere; they invoke the game's ProcessEvent path).
	void GetDisplayName(struct FText* DisplayName);
	void SetShowDescriptionBP(bool* bInShowDescription);
	void HandleDifferentItemOrQuantitySetBP();
	void ExecuteUbergraph_RewardInfoButtonWidget(int EntryPoint);

};

}

#ifdef _MSC_VER
#pragma pack(pop)
#endif
IsyFact/IsyFact-Vorlageanwendung
src/main/java/de/msg/terminfindung/core/datenpflege/Datenpflege.java
package de.msg.terminfindung.core.datenpflege;

import java.time.LocalDate;

import de.msg.terminfindung.common.exception.TerminfindungBusinessException;

/**
 * Interface of the "Datenpflege" (data maintenance) application component,
 * used for maintaining the existing appointment-poll data.
 *
 * @author msg systems ag, <NAME>
 */
public interface Datenpflege {

    /**
     * Deletes all appointment polls ("Terminfindungen") that have been closed
     * and whose appointments took place before the given cut-off date.
     *
     * @param stichtag the cut-off date
     * @return the number of deleted appointment polls.
     * @throws TerminfindungBusinessException if the date is invalid.
     */
    int loescheVergangeneTerminfindungen(LocalDate stichtag) throws TerminfindungBusinessException;
}
dk00/old-stuff
csie/08design-patterns/src/com/sun/tools/javac/processing/ServiceProxy.java
/*
 * Copyright 2006-2008 Sun Microsystems, Inc.  All rights reserved.
 * SUN PROPRIETARY/CONFIDENTIAL.  Use is subject to license terms.
 */

package com.sun.tools.javac.processing;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;

/**
 * Utility class to determine if a service can be found on the
 * path that might be used to create a class loader.
 *
 * <p><b>This is NOT part of any API supported by Sun Microsystems.
 * If you write code that depends on this, you do so at your own risk.
 * This code and its internal interfaces are subject to change or
 * deletion without notice.</b>
 */
// based on sun.misc.Service
class ServiceProxy {

    /** Error thrown when a provider-configuration file is malformed or unreadable. */
    static class ServiceConfigurationError extends Error {
        static final long serialVersionUID = 7732091036771098303L;
        ServiceConfigurationError(String msg) {
            super(msg);
        }
    }

    /** Directory (within a jar or classes tree) holding provider-configuration files. */
    private static final String prefix = "META-INF/services/";

    /** Throws a ServiceConfigurationError whose message is prefixed with the service name. */
    private static void fail(Class<?> service, String msg)
            throws ServiceConfigurationError {
        throw new ServiceConfigurationError(service.getName() + ": " + msg);
    }

    /** Throws a ServiceConfigurationError pointing at a specific file location. */
    private static void fail(Class<?> service, URL u, int line, String msg)
            throws ServiceConfigurationError {
        fail(service, u + ":" + line + ": " + msg);
    }

    /**
     * Parse the content of the given URL as a provider-configuration file.
     *
     * @param service
     *        The service class for which providers are being sought;
     *        used to construct error detail strings
     *
     * @param u
     *        The URL naming the configuration file to be parsed
     *
     * @return true if the name of a service provider is found
     *
     * @throws ServiceConfigurationError
     *         If an I/O error occurs while reading from the given URL, or
     *         if a configuration-file format error is detected
     */
    private static boolean parse(Class<?> service, URL u)
            throws ServiceConfigurationError {
        InputStream in = null;
        BufferedReader r = null;
        try {
            in = u.openStream();
            r = new BufferedReader(new InputStreamReader(in, "utf-8"));
            int lc = 1;       // current 1-based line number, for error messages
            String ln;
            while ((ln = r.readLine()) != null) {
                // Strip comments (everything after '#') and surrounding whitespace.
                int ci = ln.indexOf('#');
                if (ci >= 0) ln = ln.substring(0, ci);
                ln = ln.trim();
                int n = ln.length();
                if (n != 0) {
                    if ((ln.indexOf(' ') >= 0) || (ln.indexOf('\t') >= 0))
                        fail(service, u, lc, "Illegal configuration-file syntax");
                    // Validate that the entry is a well-formed qualified class name.
                    int cp = ln.codePointAt(0);
                    if (!Character.isJavaIdentifierStart(cp))
                        fail(service, u, lc, "Illegal provider-class name: " + ln);
                    for (int i = Character.charCount(cp); i < n; i += Character.charCount(cp)) {
                        cp = ln.codePointAt(i);
                        if (!Character.isJavaIdentifierPart(cp) && (cp != '.'))
                            fail(service, u, lc, "Illegal provider-class name: " + ln);
                    }
                    // One valid provider name is enough for hasService().
                    return true;
                }
                // BUGFIX: the line counter was never advanced, so every error
                // reported line 1; keep it in step with readLine().
                lc++;
            }
        } catch (FileNotFoundException x) {
            // No configuration file at this URL -- not an error.
            return false;
        } catch (IOException x) {
            // was fail(service, ": " + x), which produced "name: : exception"
            fail(service, x.toString());
        } finally {
            try {
                if (r != null) r.close();
            } catch (IOException y) {
                fail(service, y.toString());
            }
            try {
                if (in != null) in.close();
            } catch (IOException y) {
                fail(service, y.toString());
            }
        }
        return false;
    }

    /**
     * Return true if a description for at least one service is found in the
     * service configuration files in the given URLs.
     */
    public static boolean hasService(Class<?> service, URL[] urls)
            throws ServiceConfigurationError {
        for (URL url: urls) {
            try {
                String fullName = prefix + service.getName();
                URL u = new URL(url, fullName);
                boolean found = parse(service, u);
                if (found)
                    return true;
            } catch (MalformedURLException e) {
                // should not happen; ignore it if it does
            }
        }
        return false;
    }
}
operativeF/Kvasir
Lib/Chip/CM4/STMicro/STM32L4x2/SYSCFG.hpp
<reponame>operativeF/Kvasir<gh_stars>0 #pragma once
// NOTE(review): the "<reponame>...<gh_stars>" text above is a scrape/dataset
// artifact fused onto the first line, not valid C++ — confirm against upstream.
#include <Register/Utility.hpp>
// SVD-generated register map for the STM32L4x2 SYSCFG (system configuration
// controller) peripheral, base address 0x40010000. Each namespace is one
// register: `Addr` encodes <address, reserved-bit mask, reset value, width>
// and each FieldLocation names one bit-field within that register.
namespace Kvasir {
//System configuration controller
namespace SyscfgMemrmp{    ///<memory remap register
    using Addr = Register::Address<0x40010000,0xfffffef0,0x00000000,std::uint32_t>;
    ///Flash Bank mode selection
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> fbMode{};
    ///QUADSPI memory mapping swap
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> qfs{};
    ///Memory mapping selection
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,0),Register::ReadWriteAccess,unsigned> memMode{};
}
namespace SyscfgCfgr1{    ///<configuration register 1
    using Addr = Register::Address<0x40010004,0x0380fefe,0x00000000,std::uint32_t>;
    ///Floating Point Unit interrupts enable bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,26),Register::ReadWriteAccess,unsigned> fpuIe{};
    ///I2C3 Fast-mode Plus driving capability activation
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,unsigned> i2c3Fmp{};
    ///I2C2 Fast-mode Plus driving capability activation
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> i2c2Fmp{};
    ///I2C1 Fast-mode Plus driving capability activation
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,unsigned> i2c1Fmp{};
    ///Fast-mode Plus (Fm+) driving capability activation on PB9
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,unsigned> i2cPb9Fmp{};
    ///Fast-mode Plus (Fm+) driving capability activation on PB8
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,18),Register::ReadWriteAccess,unsigned> i2cPb8Fmp{};
    ///Fast-mode Plus (Fm+) driving capability activation on PB7
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(17,17),Register::ReadWriteAccess,unsigned> i2cPb7Fmp{};
    ///Fast-mode Plus (Fm+) driving capability activation on PB6
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,unsigned> i2cPb6Fmp{};
    ///I/O analog switch voltage booster enable
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> boosten{};
    ///Firewall disable
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> fwdis{};
}
// EXTICR1..EXTICR4 select the GPIO port routed to each EXTI line (3-bit
// selector per line, four lines per register).
namespace SyscfgExticr1{    ///<external interrupt configuration register 1
    using Addr = Register::Address<0x40010008,0xffff8888,0x00000000,std::uint32_t>;
    ///EXTI 3 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,12),Register::ReadWriteAccess,unsigned> exti3{};
    ///EXTI 2 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,unsigned> exti2{};
    ///EXTI 1 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,unsigned> exti1{};
    ///EXTI 0 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,0),Register::ReadWriteAccess,unsigned> exti0{};
}
namespace SyscfgExticr2{    ///<external interrupt configuration register 2
    using Addr = Register::Address<0x4001000c,0xffff8888,0x00000000,std::uint32_t>;
    ///EXTI 7 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,12),Register::ReadWriteAccess,unsigned> exti7{};
    ///EXTI 6 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,unsigned> exti6{};
    ///EXTI 5 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,unsigned> exti5{};
    ///EXTI 4 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,0),Register::ReadWriteAccess,unsigned> exti4{};
}
namespace SyscfgExticr3{    ///<external interrupt configuration register 3
    using Addr = Register::Address<0x40010010,0xffff8888,0x00000000,std::uint32_t>;
    ///EXTI 11 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,12),Register::ReadWriteAccess,unsigned> exti11{};
    ///EXTI 10 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,unsigned> exti10{};
    ///EXTI 9 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,unsigned> exti9{};
    ///EXTI 8 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,0),Register::ReadWriteAccess,unsigned> exti8{};
}
namespace SyscfgExticr4{    ///<external interrupt configuration register 4
    using Addr = Register::Address<0x40010014,0xffff8888,0x00000000,std::uint32_t>;
    ///EXTI15 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,12),Register::ReadWriteAccess,unsigned> exti15{};
    ///EXTI14 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,8),Register::ReadWriteAccess,unsigned> exti14{};
    ///EXTI13 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,4),Register::ReadWriteAccess,unsigned> exti13{};
    ///EXTI12 configuration bits
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,0),Register::ReadWriteAccess,unsigned> exti12{};
}
namespace SyscfgScsr{    ///<SCSR
    using Addr = Register::Address<0x40010018,0xfffffffc,0x00000000,std::uint32_t>;
    ///SRAM2 busy by erase operation
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> sram2bsy{};
    ///SRAM2 Erase
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> sram2er{};
}
namespace SyscfgCfgr2{    ///<CFGR2
    using Addr = Register::Address<0x4001001c,0xfffffef0,0x00000000,std::uint32_t>;
    ///SRAM2 parity error flag
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> spf{};
    ///ECC Lock
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::Access<Register::AccessType::writeOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> eccl{};
    ///PVD lock enable bit
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::Access<Register::AccessType::writeOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> pvdl{};
    ///SRAM2 parity lock bit
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::Access<Register::AccessType::writeOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> spl{};
    ///LOCKUP (Hardfault) output enable bit — original SVD text read "OCKUP";
    ///presumably truncated "LOCKUP", TODO confirm against the reference manual
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::Access<Register::AccessType::writeOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> cll{};
}
namespace SyscfgSwpr{    ///<SWPR
    using Addr = Register::Address<0x40010020,0x00000000,0x00000000,std::uint32_t>;
    // One write-protection bit per SRAM2 page: pNwp protects page N
    // (p30wp..p0wp follow the same pattern as p31wp below).
    ///SRAM2 page 31 write protection
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(31,31),Register::ReadWriteAccess,unsigned> p31wp{};
    ///P30WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(30,30),Register::ReadWriteAccess,unsigned> p30wp{};
    ///P29WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(29,29),Register::ReadWriteAccess,unsigned> p29wp{};
    ///P28WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(28,28),Register::ReadWriteAccess,unsigned> p28wp{};
    ///P27WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(27,27),Register::ReadWriteAccess,unsigned> p27wp{};
    ///P26WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(26,26),Register::ReadWriteAccess,unsigned> p26wp{};
    ///P25WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(25,25),Register::ReadWriteAccess,unsigned> p25wp{};
    ///P24WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(24,24),Register::ReadWriteAccess,unsigned> p24wp{};
    ///P23WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(23,23),Register::ReadWriteAccess,unsigned> p23wp{};
    ///P22WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(22,22),Register::ReadWriteAccess,unsigned> p22wp{};
    ///P21WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(21,21),Register::ReadWriteAccess,unsigned> p21wp{};
    ///P20WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(20,20),Register::ReadWriteAccess,unsigned> p20wp{};
    ///P19WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(19,19),Register::ReadWriteAccess,unsigned> p19wp{};
    ///P18WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(18,18),Register::ReadWriteAccess,unsigned> p18wp{};
    ///P17WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(17,17),Register::ReadWriteAccess,unsigned> p17wp{};
    ///P16WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(16,16),Register::ReadWriteAccess,unsigned> p16wp{};
    ///P15WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,unsigned> p15wp{};
    ///P14WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,14),Register::ReadWriteAccess,unsigned> p14wp{};
    ///P13WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(13,13),Register::ReadWriteAccess,unsigned> p13wp{};
    ///P12WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,12),Register::ReadWriteAccess,unsigned> p12wp{};
    ///P11WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(11,11),Register::ReadWriteAccess,unsigned> p11wp{};
    ///P10WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,10),Register::ReadWriteAccess,unsigned> p10wp{};
    ///P9WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(9,9),Register::ReadWriteAccess,unsigned> p9wp{};
    ///P8WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> p8wp{};
    ///P7WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> p7wp{};
    ///P6WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,6),Register::ReadWriteAccess,unsigned> p6wp{};
    ///P5WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,5),Register::ReadWriteAccess,unsigned> p5wp{};
    ///P4WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,4),Register::ReadWriteAccess,unsigned> p4wp{};
    ///P3WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> p3wp{};
    ///P2WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,unsigned> p2wp{};
    ///P1WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> p1wp{};
    ///P0WP
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> p0wp{};
}
namespace SyscfgSkr{    ///<SKR
    using Addr = Register::Address<0x40010024,0xffffff00,0x00000000,std::uint32_t>;
    ///SRAM2 write protection key for software erase
    constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::ReadWriteAccess,unsigned> key{};
}
}
hidirektor/AnimationLib
java/me/megamichiel/animationlib/util/pipeline/DoublePipeline.java
package me.megamichiel.animationlib.util.pipeline;

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.*;

/**
 * A push-based pipeline of primitive {@code double} values.
 * <p>
 * Values entered via {@link #accept(double)} are forwarded to every element
 * registered on this pipeline. Transformation methods ({@code map},
 * {@code filter}, ...) create a downstream pipeline and register a forwarding
 * element on this one; elements are removed when their predicate returns
 * {@code true} (see {@code AbstractPipeline}).
 */
public class DoublePipeline extends AbstractPipeline<DoublePredicate> {

    public DoublePipeline(PipelineContext ctx) {
        super(ctx);
    }

    /** Pushes a value into this pipeline, offering it to every registered element. */
    public void accept(double d) {
        _accept(p -> p.test(d));
    }

    /** Returns a downstream pipeline that only receives values matching {@code predicate}. */
    public DoublePipeline filter(DoublePredicate predicate) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        forEach(d -> {
            if (predicate.test(d)) pipeline.accept(d);
        });
        return pipeline;
    }

    /** Returns a downstream pipeline that only receives values NOT matching {@code predicate}. */
    public DoublePipeline exclude(DoublePredicate predicate) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        forEach(d -> {
            if (!predicate.test(d)) pipeline.accept(d);
        });
        return pipeline;
    }

    /** Returns a downstream pipeline receiving {@code mapper}'s result for each value. */
    public DoublePipeline map(DoubleUnaryOperator mapper) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        forEach(d -> pipeline.accept(mapper.applyAsDouble(d)));
        return pipeline;
    }

    /** Returns an object pipeline receiving {@code mapper}'s result for each value. */
    public <U> Pipeline<U> mapToObj(DoubleFunction<? extends U> mapper) {
        Pipeline<U> pipeline = new Pipeline<>(ctx);
        forEach(d -> pipeline.accept(mapper.apply(d)));
        return pipeline;
    }

    /** Returns an int pipeline receiving {@code mapper}'s result for each value. */
    public IntPipeline mapToInt(DoubleToIntFunction mapper) {
        IntPipeline pipeline = new IntPipeline(ctx);
        forEach(d -> pipeline.accept(mapper.applyAsInt(d)));
        return pipeline;
    }

    /** Returns a long pipeline receiving {@code mapper}'s result for each value. */
    public LongPipeline mapToLong(DoubleToLongFunction mapper) {
        LongPipeline pipeline = new LongPipeline(ctx);
        forEach(d -> pipeline.accept(mapper.applyAsLong(d)));
        return pipeline;
    }

    /** Flattens the pipeline produced by {@code mapper} for each value into one pipeline. */
    public DoublePipeline flatMap(DoubleFunction<? extends DoublePipeline> mapper) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        forEach(d -> mapper.apply(d).forEach(pipeline::accept));
        return pipeline;
    }

    /** Forwards values only while {@code supplier} is true; detaches once it turns false. */
    public DoublePipeline acceptWhile(BooleanSupplier supplier) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        add(e -> {
            if (supplier.getAsBoolean()) {
                pipeline.accept(e);
                return false;
            }
            return true; // returning true unregisters this element
        });
        return pipeline;
    }

    /** Forwards values until {@code supplier} turns true; then detaches. */
    public DoublePipeline acceptUntil(BooleanSupplier supplier) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        add(e -> {
            if (supplier.getAsBoolean()) return true;
            pipeline.accept(e);
            return false;
        });
        return pipeline;
    }

    /** Forwards values only before wall-clock {@code time} (epoch millis). */
    public DoublePipeline acceptWhileBefore(long time) {
        return acceptWhile(() -> System.currentTimeMillis() < time);
    }

    /** Forwards values until wall-clock {@code time} (epoch millis) is reached. */
    public DoublePipeline acceptUntil(long time) {
        return acceptUntil(() -> System.currentTimeMillis() >= time);
    }

    /** Ignores values until {@code supplier} turns true, then forwards permanently. */
    public DoublePipeline skipUntil(BooleanSupplier supplier) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        add(e -> {
            if (supplier.getAsBoolean()) {
                // Switch to unconditional forwarding and unregister this gate.
                forEach(pipeline::accept);
                return true;
            }
            return false;
        });
        return pipeline;
    }

    /** Ignores values until wall-clock {@code time} (epoch millis) is reached. */
    public DoublePipeline skipUntil(long time) {
        return skipUntil(() -> System.currentTimeMillis() >= time);
    }

    /** Ignores values while {@code supplier} is true, then forwards permanently. */
    public DoublePipeline skipWhile(BooleanSupplier supplier) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        add(e -> {
            if (supplier.getAsBoolean()) return false;
            forEach(pipeline::accept);
            return true;
        });
        return pipeline;
    }

    /** Forwards at most {@code maxSize} values. */
    public DoublePipeline limit(long maxSize) {
        AtomicLong l = new AtomicLong(maxSize);
        return acceptUntil(() -> l.decrementAndGet() < 0);
    }

    /** Ignores the first {@code n} values. */
    public DoublePipeline skip(long n) {
        AtomicLong l = new AtomicLong(n);
        return skipUntil(() -> l.decrementAndGet() < 0);
    }

    /** Registers {@code action} as an observer and returns this pipeline. */
    public DoublePipeline peek(DoubleConsumer action) {
        forEach(action);
        return this;
    }

    /** Re-posts each value through the context, optionally asynchronously. */
    public DoublePipeline post(boolean async) {
        DoublePipeline pipeline = new DoublePipeline(ctx);
        forEach(d -> ctx.post(() -> pipeline.accept(d), async));
        return pipeline;
    }

    /** Registers a permanent consumer (its element never unregisters itself). */
    public void forEach(DoubleConsumer action) {
        add(d -> {
            action.accept(d);
            return false;
        });
    }

    /** Returns an int pipeline receiving each value narrowed with a plain cast. */
    public IntPipeline asIntPipeline() {
        IntPipeline pipeline = new IntPipeline(ctx);
        forEach(d -> pipeline.accept((int) d));
        return pipeline;
    }

    /** Returns a long pipeline receiving each value narrowed with a plain cast. */
    public LongPipeline asLongPipeline() {
        LongPipeline pipeline = new LongPipeline(ctx);
        forEach(d -> pipeline.accept((long) d));
        return pipeline;
    }

    /** Returns an object pipeline of boxed values. */
    public Pipeline<Double> boxed() {
        // FIX: was Double::new — the Double(double) constructor is deprecated
        // (since Java 9); Double.valueOf is the recommended, behaviorally
        // equivalent boxing factory.
        return mapToObj(Double::valueOf);
    }
}
insoft-cloud/go-project-release-sample-
src/api-v3/builds/controller.go
// Package builds proxies Cloud Foundry V3 /v3/builds API calls.
package builds

import (
	"PAAS-TA-PORTAL-V3/config"
	"encoding/json"
	"github.com/gorilla/mux"
	"io/ioutil"
	"net/http"
	"net/url"
)

var uris = "builds"

// BuildPackHandleRequests registers all /v3/builds routes on the router.
func BuildPackHandleRequests(myRouter *mux.Router) {
	myRouter.HandleFunc("/v3/"+uris, createBuild).Methods("POST")
	myRouter.HandleFunc("/v3/"+uris+"/{guid}", getBuild).Methods("GET")
	myRouter.HandleFunc("/v3/"+uris, getBuilds).Methods("GET")
	myRouter.HandleFunc("/v3/apps/{guid}/"+uris, getBuildApps).Methods("GET")
	myRouter.HandleFunc("/v3/"+uris+"/{guid}", updateBuild).Methods("PATCH")
}

// @Description Permitted Roles 'Admin Space Developer'
// @Summary Create a build
// @Description
// @Tags Builds
// @Produce json
// @Security ApiKeyAuth
// @Param CreateBuild body CreateBuild true "Create Build"
// @Success 200 {object} Build
// @Failure 400,404 {object} config.Error
// @Failure 500 {object} config.Error
// @Failure default {object} config.Error
// @Router /builds [POST]
func createBuild(w http.ResponseWriter, r *http.Request) {
	var pBody CreateBuild
	vResultI, vResultB := config.Validation(r, &pBody)
	if !vResultB {
		json.NewEncoder(w).Encode(vResultI)
		return
	}
	// Forward the request body to the CF API. ("호출" = call)
	reqBody, _ := ioutil.ReadAll(r.Body)
	// FIX: was json.Unmarshal(reqBody, pBody) — Unmarshal requires a pointer;
	// passing the struct by value returns InvalidUnmarshalError and leaves
	// pBody unpopulated, so an empty payload was forwarded.
	json.Unmarshal(reqBody, &pBody)
	reqBody, _ = json.Marshal(pBody)
	rBody, rBodyResult := config.Curl("/v3/"+uris, reqBody, "POST", w, r)
	if rBodyResult {
		var final Build
		json.Unmarshal(rBody.([]byte), &final)
		json.NewEncoder(w).Encode(final)
	} else {
		json.NewEncoder(w).Encode(rBody)
	}
}

// @Description Permitted Roles 'Admin Admin Read-Only Global Auditor Space Auditor Space Developer Org Auditor'
// @Summary Get a build
// @Description
// @Tags Builds
// @Produce json
// @Security ApiKeyAuth
// @Param guid path string true "Build Guid"
// @Success 200 {object} Build
// @Failure 400,404 {object} config.Error
// @Failure 500 {object} config.Error
// @Failure default {object} config.Error
// @Router /builds/{guid} [GET]
func getBuild(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	guid := vars["guid"]
	rBody, rBodyResult := config.Curl("/v3/"+uris+"/"+guid, nil, "GET", w, r)
	if rBodyResult {
		var final Build
		json.Unmarshal(rBody.([]byte), &final)
		json.NewEncoder(w).Encode(final)
	} else {
		json.NewEncoder(w).Encode(rBody)
	}
}

// @Description Permitted Roles 'All'
// @Summary List builds
// @Description Retrieve all builds the user has access to.
// @Tags Builds
// @Produce json
// @Security ApiKeyAuth
// @Param states query []string false "Comma-delimited list of build states to filter by" collectionFormat(csv)
// @Param app_guids query []string false "Comma-delimited list of app guids to filter by" collectionFormat(csv)
// @Param package_guids query []string false "Comma-delimited list of package guids to filter by" collectionFormat(csv)
// @Param page query integer false "Page to display; valid values are integers >= 1"
// @Param per_page query integer false "Number of results per page; valid values are 1 through 5000"
// @Param order_by query string false "Value to sort by. Defaults to ascending; prepend with - to sort descending. Valid values are created_at, updated_at, name, state"
// @Param label_selector query string false "A query string containing a list of label selector requirements"
// @Param created_ats query string false "Timestamp to filter by. When filtering on equality, several comma-delimited timestamps may be passed. Also supports filtering with relational operators"
// @Param updated_ats query string false "Timestamp to filter by. When filtering on equality, several comma-delimited timestamps may be passed. Also supports filtering with relational operators"
// @Success 200 {object} BuildList
// @Failure 400,404 {object} config.Error
// @Failure 500 {object} config.Error
// @Failure default {object} config.Error
// @Router /builds [GET]
func getBuilds(w http.ResponseWriter, r *http.Request) {
	query, _ := url.QueryUnescape(r.URL.Query().Encode())
	rBody, rBodyResult := config.Curl("/v3/"+uris+"?"+query, nil, "GET", w, r)
	if rBodyResult {
		var final BuildList
		json.Unmarshal(rBody.([]byte), &final)
		json.NewEncoder(w).Encode(final)
	} else {
		json.NewEncoder(w).Encode(rBody)
	}
}

// @Description Permitted Roles 'Admin Admin Read-Only Global Auditor Space Auditor Space Developer Org Auditor'
// @Summary List builds
// @Description Retrieve all builds for the app.
// @Tags Builds
// @Produce json
// @Security ApiKeyAuth
// @Param states query []string false "Comma-delimited list of build states to filter by" collectionFormat(csv)
// @Param page query integer false "Page to display; valid values are integers >= 1"
// @Param per_page query integer false "Number of results per page; valid values are 1 through 5000"
// @Param order_by query string false "Value to sort by. Defaults to ascending; prepend with - to sort descending. Valid values are created_at, updated_at, name, state"
// @Param label_selector query string false "A query string containing a list of label selector requirements"
// @Param created_ats query string false "Timestamp to filter by. When filtering on equality, several comma-delimited timestamps may be passed. Also supports filtering with relational operators"
// @Param updated_ats query string false "Timestamp to filter by. When filtering on equality, several comma-delimited timestamps may be passed. Also supports filtering with relational operators"
// @Success 200 {object} BuildList
// @Failure 400,404 {object} config.Error
// @Failure 500 {object} config.Error
// @Failure default {object} config.Error
// @Router /apps/{guid}/builds [GET]
func getBuildApps(w http.ResponseWriter, r *http.Request) {
	query, _ := url.QueryUnescape(r.URL.Query().Encode())
	vars := mux.Vars(r)
	guid := vars["guid"]
	rBody, rBodyResult := config.Curl("/v3/apps/"+guid+"/"+uris+"?"+query, nil, "GET", w, r)
	if rBodyResult {
		var final BuildList
		json.Unmarshal(rBody.([]byte), &final)
		json.NewEncoder(w).Encode(final)
	} else {
		json.NewEncoder(w).Encode(rBody)
	}
}

// @Description Permitted Roles 'Admin Space Developer Build State Updater'
// @Summary Update a build
// @Description
// @Tags Builds
// @Produce json
// @Security ApiKeyAuth
// @Param UpdateBuild body UpdateBuild true "Update Build"
// @Success 200 {object} Build
// @Failure 400,404 {object} config.Error
// @Failure 500 {object} config.Error
// @Failure default {object} config.Error
// @Router /builds/{guid} [PATCH]
func updateBuild(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	guid := vars["guid"]
	var pBody UpdateBuild
	vResultI, vResultB := config.Validation(r, &pBody)
	if !vResultB {
		json.NewEncoder(w).Encode(vResultI)
		return
	}
	// Forward the request body to the CF API. ("호출" = call)
	reqBody, _ := ioutil.ReadAll(r.Body)
	// FIX: was json.Unmarshal(reqBody, pBody) — Unmarshal requires a pointer
	// (see createBuild); the struct was never populated before re-marshaling.
	json.Unmarshal(reqBody, &pBody)
	reqBody, _ = json.Marshal(pBody)
	rBody, rBodyResult := config.Curl("/v3/"+uris+"/"+guid, reqBody, "PATCH", w, r)
	if rBodyResult {
		var final Build
		json.Unmarshal(rBody.([]byte), &final)
		json.NewEncoder(w).Encode(final)
	} else {
		json.NewEncoder(w).Encode(rBody)
	}
}
pks5/ui5strap
www/lib/pks/ui5strap_dev/viewer/library.js
/*
 *
 * UI5Strap Viewer Library
 *
 * pks.ui5strap.viewer.library
 * (header previously said "Core Library" / pks.ui5strap.core.library;
 *  corrected to match the library declared by initLibrary below)
 *
 * @author <NAME> <<EMAIL>>
 *
 * Homepage: http://ui5strap.com
 *
 * Copyright (c) 2013-2014 <NAME> <<EMAIL>>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * Released under Apache2 license: http://www.apache.org/licenses/LICENSE-2.0.txt
 *
 */
sap.ui
		.define(
				[ 'jquery.sap.global', 'sap/ui/Device', 'sap/ui/core/library',
						'jquery.sap.mobile' ], function(jQuery, Device,
						coreLib, jqm) {

					"use strict";

					/*
					 * ---------------
					 *
					 * Declare Library
					 *
					 * ---------------
					 */

					/**
					 * The ui5strap viewer library.
					 *
					 * @namespace
					 * @name pks.ui5strap.viewer
					 * @author <NAME>
					 * @version 1.0.5-SNAPSHOT
					 * @public
					 */
					// (@version comment previously said 1.0.2-SNAPSHOT;
					//  aligned with the version string passed below)
					sap.ui.getCore().initLibrary(
							{
								name : "pks.ui5strap.viewer",
								version : "1.0.5-SNAPSHOT",
								dependencies : [ "pks.ui5strap.core" ],
								types : [],
								interfaces : ["pks.ui5strap.viewer.IApp",
										"pks.ui5strap.viewer.IRootComponent",
										"pks.ui5strap.viewer.IRootNavigator"],
								controls : ["pks.ui5strap.viewer.Console",
										"pks.ui5strap.viewer.Sandbox"],
								elements : []
							});

					// NOTE(review): relies on the global `pks` namespace
					// populated by initLibrary above.
					var ui5strapViewerLib = pks.ui5strap.viewer;

					// End of library
					return ui5strapViewerLib;
				});
Enzime/pipenv
pipenv/vendor/shellingham/__init__.py
import importlib
import os

from ._core import ShellDetectionFailure

__version__ = '1.2.3.dev0'


def detect_shell(pid=None, max_depth=6):
    """Detect the shell the given (or current) process runs under.

    Delegates to a platform-specific submodule named after ``os.name``
    (e.g. ``posix``/``nt``) and returns whatever its ``get_shell`` yields.

    :param pid: process to inspect; platform module's default when ``None``.
    :param max_depth: how far up the process tree the platform module may look.
    :raises RuntimeError: no implementation for this platform.
    :raises ShellDetectionFailure: the platform module found no shell.
    """
    platform = os.name
    try:
        impl_module = importlib.import_module('.' + platform, __name__)
    except ImportError:
        raise RuntimeError(
            'Shell detection not implemented for {0!r}'.format(platform),
        )
    try:
        shell_getter = impl_module.get_shell
    except AttributeError:
        raise RuntimeError('get_shell not implemented for {0!r}'.format(platform))
    detected = shell_getter(pid, max_depth=max_depth)
    if detected:
        return detected
    raise ShellDetectionFailure()
Thomas58/Serializer
src/org/jspace/io/test/xml/FileTests.java
<reponame>Thomas58/Serializer
package org.jspace.io.test.xml;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Locale;

import org.jspace.io.test.objects.Family;
import org.jspace.io.test.objects.Numbers;
import org.jspace.io.test.objects.Person;
import org.jspace.io.test.objects.Pet;
import org.jspace.io.test.objects.SpecialFamily;
import org.jspace.io.tools.ClassRegistry;
import org.jspace.io.xml.XMLSerializer;
import org.junit.Assert;
import org.junit.Test;
import org.junit.Before;

/**
 * File round-trip tests for XMLSerializer: each test serializes an object
 * graph to a file and deserializes it back, asserting equality.
 *
 * NOTE(review): directoryPath is a hardcoded absolute Windows path and
 * readOnlyTest=true means the tests only READ pre-existing fixture files —
 * they will fail on any machine without that directory. Consider the
 * commented-out Paths.get("") variant for portability (TODO confirm with
 * the fixture owner before changing).
 */
public class FileTests {
//	public String directoryPath = Paths.get("").toAbsolutePath().toString() + "/";
	public String directoryPath = "C:\\Users\\Tengux\\Documents\\Visual Studio 2017\\Projects\\XMLSerializer\\XMLSerializerTests\\";
	public String filename = "WorkingTest.file";
	// When true, serialization is skipped and tests deserialize existing fixtures only.
	public boolean readOnlyTest = true;
	public XMLSerializer serial;

	@Before
	public void setup(){
		serial = new XMLSerializer(Locale.US);
	}

	/** Round-trips a Family whose secret lives in a private field. */
	@Test
	public void testPrivateField() throws Exception {
		ArrayList<Person> members = new ArrayList<Person>();
		members.add(new Person("Miranda", 56));
		members.add(new Person("Greham", 54));
		members.add(new Person("Maria", 16));
		Family family = new Family(members);
		family.addSecret("Our little secret." + System.lineSeparator() + "In a file.");
		ClassRegistry.put(Family.class, "family");
		ClassRegistry.put(Person.class, "person");
		String filename = "TestPrivateField.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(family, output);
		}
		Family newFamily = new Family();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newFamily = (Family) serial.Deserialize(input);
		}
		Assert.assertEquals(family, newFamily);
		Assert.assertEquals(family.getSecret(), newFamily.getSecret());
	}

	/** Round-trips a SpecialFamily, exercising superclass field handling. */
	@Test
	public void testSuperClass() throws Exception {
		ArrayList<Pet> pets = new ArrayList<Pet>();
		pets.add(new Pet("Snowball", Pet.Type.Cat));
		pets.add(new Pet("Mickey", Pet.Type.Dog));
		ArrayList<Person> members = new ArrayList<Person>();
		members.add(new Person("Miranda", 56, pets.get(0)));
		members.add(new Person("Greham", 54, pets.get(1)));
		members.add(new Person("Maria", 16, pets.get(0)));
		SpecialFamily family = new SpecialFamily(members, pets);
		ClassRegistry.put(SpecialFamily.class, "specialfamily");
		ClassRegistry.put(Person.class, "person");
		ClassRegistry.put(Pet.class, "pet");
		String filename = "TestSuperClass.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(family, output);
		}
		SpecialFamily newFamily = new SpecialFamily();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newFamily = (SpecialFamily)serial.Deserialize(input);
		}
		Assert.assertEquals(family, newFamily);
	}

	/** Round-trips a Family with nested/shared Pet references. */
	@Test
	public void testAdvanced() throws Exception {
		ArrayList<Pet> pets = new ArrayList<Pet>();
		pets.add(new Pet("Snowball", Pet.Type.Cat));
		pets.add(new Pet("Mickey", Pet.Type.Dog));
		ArrayList<Person> members = new ArrayList<Person>();
		members.add(new Person("Miranda", 56, pets.get(0)));
		members.add(new Person("Greham", 54, pets.get(1)));
		members.add(new Person("Maria", 16, pets.get(0)));
		Family family = new Family(members, pets);
		ClassRegistry.put(Family.class, "family");
		ClassRegistry.put(Person.class, "person");
		ClassRegistry.put(Pet.class, "pet");
		String filename = "TestAdvanced.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(family, output);
		}
		Family newFamily = new Family();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newFamily = (Family)serial.Deserialize(input);
		}
		Assert.assertEquals(family, newFamily);
	}

	/** Round-trips a Family containing a list of complex objects. */
	@Test
	public void testAdvancedList() throws Exception {
		ArrayList<Person> members = new ArrayList<Person>();
		members.add(new Person("Miranda", 56));
		members.add(new Person("Greham", 54));
		members.add(new Person("Maria", 16));
		Family family = new Family(members);
		ClassRegistry.put(Family.class, "family");
		ClassRegistry.put(Person.class, "person");
		String filename = "TestAdvancedList.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(family, output);
		}
		Family newFamily = new Family();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newFamily = (Family)serial.Deserialize(input);
		}
		Assert.assertEquals(family, newFamily);
	}

	/** Round-trips an object holding a plain list of Integers. */
	@Test
	public void testSimpleList() throws Exception {
		Numbers numbers = new Numbers();
		ArrayList<Integer> list = numbers.list;
		list.add(1);
		list.add(2);
		list.add(3);
		list.add(4);
		list.add(5);
		ClassRegistry.put(numbers.getClass(), "numbers");
		String filename = "TestSimpleList.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(numbers, output);
		}
		Numbers newNumbers = new Numbers();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newNumbers = (Numbers)serial.Deserialize(input);
		}
		Assert.assertEquals(numbers, newNumbers);
	}

	/** Round-trips a single flat object. */
	@Test
	public void testSimple() throws Exception {
		Person maria = new Person("Maria", 16);
		ClassRegistry.put(Person.class, "person");
		String filename = "TestSimple.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(maria, output);
		}
		Person newMaria = new Person();
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newMaria = (Person) serial.Deserialize(input);
		}
		Assert.assertEquals(maria, newMaria);
	}

	/**
	 * Round-trips each primitive type in its own file. Note the narrowing
	 * conversions on read: the deserializer yields Integer for short/long and
	 * Double for float, so the tests convert via shortValue()/longValue()/
	 * floatValue().
	 */
	@Test
	public void testFilePrimitives() throws Exception {
		short sh = 21;
		int in = 22;
		long lo = 23;
		float fl = 24.5f;
		double dou = 25.5;
		boolean bo = true;
		char ch = 'c';
		String str = "string";

		String filename = "TestPrimitiveShort.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(sh, output);
		}
		short newsh = 0;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newsh = ((Integer) serial.Deserialize(input)).shortValue();
		}
		Assert.assertEquals(sh, newsh);

		filename = "TestPrimitiveInt.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(in, output);
		}
		int newin = 0;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newin = (int) serial.Deserialize(input);
		}
		Assert.assertEquals(in, newin);

		filename = "TestPrimitiveLong.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(lo, output);
		}
		long newlo = 0;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newlo = ((Integer) serial.Deserialize(input)).longValue();
		}
		Assert.assertEquals(lo, newlo);

		filename = "TestPrimitiveFloat.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(fl, output);
		}
		float newfl = 0.0f;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newfl = ((Double) serial.Deserialize(input)).floatValue();
		}
		Assert.assertEquals(fl, newfl, 0.01);

		filename = "TestPrimitiveDouble.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(dou, output);
		}
		double newdou = 0.0;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newdou = (double) serial.Deserialize(input);
		}
		Assert.assertEquals(dou, newdou, 0.01);

		filename = "TestPrimitiveBoolean.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(bo, output);
		}
		boolean newbo = false;
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newbo = (Boolean) serial.Deserialize(input);
		}
		Assert.assertEquals(bo, newbo);

		filename = "TestPrimitiveChar.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(ch, output);
		}
		char newch = 'a';
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newch = (Character) serial.Deserialize(input);
		}
		Assert.assertEquals(ch, newch);

		filename = "TestPrimitiveString.file";
		if (!readOnlyTest)
		try (FileOutputStream output = new FileOutputStream(directoryPath + filename)){
			serial.Serialize(str, output);
		}
		String newstr = "";
		try (FileInputStream input = new FileInputStream(directoryPath + filename)){
			newstr = (String) serial.Deserialize(input);
		}
		Assert.assertEquals(str, newstr);
	}
}
echisMOH/echisCommCareMOH-core
src/main/java/org/commcare/cases/query/queryset/CaseModelQuerySetMatcher.java
package org.commcare.cases.query.queryset;

import org.commcare.cases.instance.CaseInstanceTreeElement;
import org.javarosa.core.model.instance.TreeReference;
import org.javarosa.model.xform.XPathReference;
import org.javarosa.xpath.expr.XPathEqExpr;
import org.javarosa.xpath.expr.XPathExpression;
import org.javarosa.xpath.expr.XPathPathExpr;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Vector;

/**
 * Generates potential model query set lookups for references into the case database model.
 *
 * Chains entity lookups where relevant using model set transforms, which can be added dynamically.
 *
 * example:
 * [@case_id = current()/@case_id]
 *
 * can be directly returned and interpreted as an model query set lookup which gets the current
 * case without needing to compare string Id's, match on looked up values, etc.
 *
 * Created by ctsims on 2/6/2017.
 */
public class CaseModelQuerySetMatcher implements ModelQuerySetMatcher {

    // Expressions whose left-hand side identifies a case id (e.g. @case_id);
    // populated in the constructor from CaseInstanceTreeElement constants.
    private final Collection<XPathExpression> membershipIndexes;

    // Reference to the root of the casedb instance: instance('casedb')/casedb/case
    private TreeReference caseDbRoot;

    private Map<Integer, Integer> multiplicityMap;

    // Transforms tried in insertion order by getTransformedQuerySetLookup().
    private Vector<QuerySetTransform> querySetTransforms = new Vector<>();

    public CaseModelQuerySetMatcher(Map<Integer, Integer> multiplicityMap) {
        this("casedb", multiplicityMap);
    }

    private CaseModelQuerySetMatcher(String modelId, Map<Integer, Integer> multiplicityMap) {
        caseDbRoot = XPathReference.getPathExpr("instance('" + modelId + "')/casedb/case").getReference();
        //Later on we need this to refer to a real element at casedb, not a virtual one
        caseDbRoot.setMultiplicity(0, 0);
        this.multiplicityMap = multiplicityMap;
        membershipIndexes = new Vector<>();
        membershipIndexes.add(CaseInstanceTreeElement.CASE_ID_EXPR);
        membershipIndexes.add(CaseInstanceTreeElement.CASE_ID_EXPR_TWO);
        // Identity transform lets a bare case-id lookup pass through unchanged.
        addQuerySetTransform(new CaseIdentityQuerySetTransform());
    }

    public void addQuerySetTransform(QuerySetTransform transform) {
        this.querySetTransforms.add(transform);
    }

    /**
     * Matches predicates of the shape [case_id_expr = path], e.g.
     * [@case_id = current()/@case_id], and turns the right-hand path into a
     * query set lookup. Returns null when the predicate has any other shape.
     */
    @Override
    public QuerySetLookup getQueryLookupFromPredicate(XPathExpression expr) {
        if (expr instanceof XPathEqExpr && ((XPathEqExpr)expr).op == XPathEqExpr.EQ) {
            XPathEqExpr eq = ((XPathEqExpr)expr);
            if (membershipIndexes.contains(eq.a)) {
                if (eq.b instanceof XPathPathExpr) {
                    TreeReference ref = ((XPathPathExpr)eq.b).getReference();
                    return getQuerySetLookup(ref);
                }
            }
        }
        return null;
    }

    /**
     * Builds a query set lookup for a reference that either points into the
     * casedb (with exactly one predicate at the case level) or is a
     * current()-anchored reference. Returns null if the reference cannot be
     * expressed as a model query set lookup.
     */
    @Override
    public QuerySetLookup getQuerySetLookup(TreeReference ref) {
        QuerySetLookup lookup;
        TreeReference remainder;
        if (caseDbRoot.isParentOf(ref, false)) {
            if (!ref.hasPredicates()) {
                return null;
            }
            // Only a single predicate on the case step is supported.
            List<XPathExpression> predicates = ref.getPredicate(caseDbRoot.size() - 1);
            if (predicates == null || predicates.size() > 1) {
                return null;
            }
            lookup = getQueryLookupFromPredicate(predicates.get(0));
            if (lookup == null) {
                return null;
            }
            // Steps after the case element are handled by the transforms below.
            remainder = ref.getRelativeReferenceAfter(caseDbRoot.size());
        } else if (isCurrentRef(ref)) {
            lookup = new CaseQuerySetLookup(caseDbRoot, multiplicityMap);
            remainder = ref.getRelativeReferenceAfter(0);
        } else {
            return null;
        }
        return getTransformedQuerySetLookup(lookup, remainder);
    }

    // First transform that accepts (lookup, remainder) wins; null if none do.
    private QuerySetLookup getTransformedQuerySetLookup(QuerySetLookup lookup,
                                                        TreeReference remainder) {
        for (QuerySetTransform transform : querySetTransforms) {
            QuerySetLookup retVal = transform.getTransformedLookup(lookup, remainder);
            if (retVal != null) {
                return retVal;
            }
        }
        return null;
    }

    private boolean isCurrentRef(TreeReference ref) {
        return ref.getContextType() == TreeReference.CONTEXT_ORIGINAL;
    }

    /**
     * A transform for the situation where the /@case_id step is taken relative to an existing
     * case model query set lookup.
     */
    private static class CaseIdentityQuerySetTransform implements QuerySetTransform {
        static TreeReference caseIdRef = CaseInstanceTreeElement.CASE_ID_EXPR.getReference();

        @Override
        public QuerySetLookup getTransformedLookup(QuerySetLookup incoming,
                                                   TreeReference relativeLookup) {
            // Looking up /@case_id of a case set is the set itself.
            if (caseIdRef.equals(relativeLookup)) {
                return incoming;
            } else {
                return null;
            }
        }
    }
}
a56z/8-kyu
Playing with cubes II objects.rb
<reponame>a56z/8-kyu<gh_stars>0 =begin Hey Codewarrior! You already implemented a Cube class, but now we need your help again! I'm talking about constructors. We don't have one. Let's code two: One taking an integer and one handling no given arguments! Also we got a problem with negative values. Correct the code so negative values will be switched to positive ones! The constructor taking no arguments should assign 0 to Cube's Side property. =end class Cube attr_accessor :side alias_method :get_side, :side alias_method :set_side, :side= def initialize(side = 0) @side = side end end
Zaiwen/PMRMP
src/extensionp/CreateExtension.java
package extensionp;

import java.io.File;
import java.io.IOException;
import java.sql.ResultSet;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import changecode.Utf8code;
import databaseaccess.Access;
import finalvariable.BasicPathVariable;

/**
 * Servlet that creates a new "extension" for a logged-in organization user:
 * it creates a per-user directory on disk, registers the extension in the
 * extensioninfo table, and redirects to the editor page on success (or to
 * createwrong.jsp with a session error message on any failure).
 */
public class CreateExtension extends HttpServlet{
    private static final long serialVersionUID = 1L;

    public CreateExtension() {
        super();
    }

    public void destroy() {
        super.destroy(); // Just puts "destroy" string in log
        // Put your code here
    }

    public void init(){
        // Put your code here
    }

    // GET is delegated to POST so both verbs behave the same.
    public void doGet(HttpServletRequest request, HttpServletResponse response) {
        doPost(request, response);
    }

    public void doPost(HttpServletRequest request, HttpServletResponse response) {
        try {
            // Require a logged-in organization user (stored in the session).
            String username = (String) request.getSession().getAttribute(
                    "ORGusername");
            if (username == null) {
                request.getSession().setAttribute("extensionerror", "请登陆后再进行操作!");
                response.sendRedirect("processManage/createwrong.jsp");
                return;
            } else {
                // Request parameters arrive in a different encoding and are
                // converted via the project's Utf8code helper.
                String filename = Utf8code.changeCode(request
                        .getParameter("extensionname"));
                filename = filename.trim();
                if (filename.length() < 1) {
                    request.getSession().setAttribute("extensionerror", "请输入extension文件名!");
                    response.sendRedirect("processManage/createwrong.jsp");
                    return;
                } else {
                    String processname =Utf8code.changeCode(request
                            .getParameter("processname"));
                    String processuser=Utf8code.changeCode(request.getParameter("processuser"));
                    String fileroot = BasicPathVariable.extProcessPath+ username;
                    File file = new File(fileroot);
                    if (!file.exists()) {// create a folder named after the user
                        file.mkdirs();
                    }
                    fileroot = fileroot + "//" + filename;
                    file = new File(fileroot);
                    if (!file.exists()) {// the extension directory can be created
                        file.mkdirs();
                    } else {// report an error if the file name already exists
                        request.getSession().setAttribute("extensionerror", "extension文件名已存在!");
                        response.sendRedirect("/BPEP/processManage/createwrong.jsp");
                        return;
                    }
                    // directory created successfully; now register it in the DB
                    Access ac = new Access();
                    ac.connDB("bpep");
                    // SECURITY NOTE(review): these SQL statements are built by
                    // concatenating request-derived values (filename,
                    // processname, processuser) — SQL injection risk. Should
                    // use parameterized queries (PreparedStatement) if the
                    // Access API allows it — TODO confirm.
                    String sqlextension = "select * from extensioninfo where name='"
                            + filename + "' AND user='" + username + "'";
                    ResultSet res;
                    res = ac.executeSelectSql(sqlextension);
                    // Only insert if no row exists yet for this (name, user).
                    if (!res.next()) {
                        sqlextension = "insert into extensioninfo values(null,'"
                                + filename + "','" + username +"','"
                                + processname+"','" + processuser+"')";
                        ac.executeUpdateSql(sqlextension);
                    }
                    ac.closeDB();
                    response.sendRedirect("/BPEP/processManage/org-editor.jsp?name="+filename+"&process="+processname+"&provider="+processuser);
                    return;
                }
            }
        } catch (Exception e) {
            // Any failure funnels to the generic error page with a message.
            request.getSession().setAttribute("extensionerror", "创建extension文件时出现异常!");
            try {
                response.sendRedirect("/BPEP/processManage/createwrong.jsp");
            } catch (IOException e1) {
                // TODO Auto-generated catch block
                return;
            }
            return;
        }
    }
}
OOO-MetaPrime/jprime
main-modules/meta/jprime-common-starter/src/main/java/mp/jprime/security/abac/json/beans/JsonAbacPolicyTarget.java
package mp.jprime.security.abac.json.beans; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import java.util.Collection; import java.util.Collections; /** * Условие - метаописание класса */ @JsonIgnoreProperties(ignoreUnknown = true) @JsonInclude(JsonInclude.Include.NON_NULL) public class JsonAbacPolicyTarget { private Collection<String> jpClasses; public JsonAbacPolicyTarget() { } private JsonAbacPolicyTarget(Collection<String> jpClasses) { this.jpClasses = Collections.unmodifiableCollection(jpClasses != null ? jpClasses : Collections.emptyList()); } /** * Значение * * @return Значение */ public Collection<String> getJpClasses() { return jpClasses; } public void setJpClasses(Collection<String> jpClasses) { this.jpClasses = jpClasses; } public static JsonAbacPolicyTarget from(Collection<String> jpClasses) { return new JsonAbacPolicyTarget(jpClasses); } }
wenwei8268/Alink
python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorapproxnearestneighborpredictbatchop.py
<filename>python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorapproxnearestneighborpredictbatchop.py import unittest from pyalink.alink import * import numpy as np import pandas as pd class TestVectorApproxNearestNeighborPredictBatchOp(unittest.TestCase): def test_vectorapproxnearestneighborpredictbatchop(self): df = pd.DataFrame([ [0, "0 0 0"], [1, "1 1 1"], [2, "2 2 2"] ]) inOp = BatchOperator.fromDataframe(df, schemaStr='id int, vec string') train = VectorApproxNearestNeighborTrainBatchOp().setIdCol("id").setSelectedCol("vec").linkFrom(inOp) predict = VectorApproxNearestNeighborPredictBatchOp().setSelectedCol("vec").setTopN(3).linkFrom(train, inOp) predict.print() pass
nakomis/incubator-brooklyn
brooklyn-server/utils/rt-felix/src/main/java/org/apache/brooklyn/rt/felix/ManifestHelper.java
<reponame>nakomis/incubator-brooklyn /* * Copyright 2015 The Apache Software Foundation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.brooklyn.rt.felix; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.util.List; import java.util.jar.Manifest; import javax.annotation.Nullable; import org.apache.brooklyn.util.collections.MutableList; import org.apache.brooklyn.util.stream.Streams; import org.apache.felix.framework.util.StringMap; import org.apache.felix.framework.util.manifestparser.ManifestParser; import org.osgi.framework.BundleException; import org.osgi.framework.Version; import org.osgi.framework.namespace.PackageNamespace; import org.osgi.framework.wiring.BundleCapability; /** * The class is not used, staying for future reference. * Remove after OSGi transition is completed. 
*/ public class ManifestHelper { private static ManifestParser parse; private Manifest manifest; private String source; private static final String WIRING_PACKAGE = PackageNamespace.PACKAGE_NAMESPACE; public static ManifestHelper forManifestContents(String contents) throws IOException, BundleException { ManifestHelper result = forManifest(Streams.newInputStreamWithContents(contents)); result.source = contents; return result; } public static ManifestHelper forManifest(URL url) throws IOException, BundleException { InputStream in = null; try { in = url.openStream(); return forManifest(in); } finally { if (in != null) { in.close(); } } } public static ManifestHelper forManifest(InputStream in) throws IOException, BundleException { return forManifest(new Manifest(in)); } public static ManifestHelper forManifest(Manifest manifest) throws BundleException { ManifestHelper result = new ManifestHelper(); result.manifest = manifest; parse = new ManifestParser(null, null, null, new StringMap(manifest.getMainAttributes())); return result; } public String getSymbolicName() { return parse.getSymbolicName(); } public Version getVersion() { return parse.getBundleVersion(); } public String getSymbolicNameVersion() { return getSymbolicName() + ":" + getVersion(); } public List<String> getExportedPackages() { MutableList<String> result = MutableList.of(); for (BundleCapability c : parse.getCapabilities()) { if (WIRING_PACKAGE.equals(c.getNamespace())) { result.add((String) c.getAttributes().get(WIRING_PACKAGE)); } } return result; } @Nullable public String getSource() { return source; } public Manifest getManifest() { return manifest; } }
OpenStack-mobile/summit-app-android
app/src/main/java/org/openstack/android/summit/modules/event_detail/business_logic/IEventDetailInteractor.java
package org.openstack.android.summit.modules.event_detail.business_logic; import org.openstack.android.summit.common.DTOs.EventDetailDTO; import org.openstack.android.summit.common.DTOs.FeedbackDTO; import org.openstack.android.summit.common.business_logic.IScheduleableInteractor; import java.util.List; import io.reactivex.Observable; /** * Created by <NAME> on 1/21/2016. */ public interface IEventDetailInteractor extends IScheduleableInteractor { EventDetailDTO getEventDetail(int eventId); FeedbackDTO getMyFeedbackForEvent(int eventId); Observable<List<FeedbackDTO>> getFeedbackForEvent(int eventId, int page, int objectsPerPage); Observable<Double> getAverageFeedbackForEvent(int eventId); }
tiagopereira/glue
glue/logger.py
from __future__ import absolute_import, division, print_function from logging import getLogger, basicConfig, NullHandler basicConfig() logger = getLogger("glue") # Default to Null unless we override this later logger.addHandler(NullHandler())
valkirilov/FMI-2014-2015
javascript/exercises/2014-11-19/routes/pages.js
var express = require('express'), router = express.Router(), promise = require('../promise'); module.exports = function() { router.get('/', function(req, res) { console.log('Here'); var promise = new Promise(); res.status(200).send({'ok': 'true'}); }); router.get('/test', function(req, res) { console.log('Here'); res.status(200).send({'ok': 'teste'}); }); return router; };
Yonah125/codeql
java/ql/test/library-tests/dataflow/switchexpr/TestSwitchExprStmtConsistency.java
public class TestSwitchExprStmtConsistency { static int f() { return 0; } public static void test(int x) { // Test that getRuleExpression() and getRuleStatement() behave alike for switch expressions and statements using arrow rules. switch(x) { case 1 -> f(); case 2 -> f(); default -> f(); } int result = switch(x) { case 1 -> f(); case 2 -> f(); default -> f(); }; } }
manuel-hegner/conquery
backend/src/test/java/com/bakdata/conquery/util/support/TestConquery.java
package com.bakdata.conquery.util.support;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.fail;

import java.io.File;
import java.net.ServerSocket;
import java.time.Duration;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import javax.validation.Validator;
import javax.ws.rs.client.Client;

import com.bakdata.conquery.Conquery;
import com.bakdata.conquery.commands.ShardNode;
import com.bakdata.conquery.commands.StandaloneCommand;
import com.bakdata.conquery.integration.IntegrationTests;
import com.bakdata.conquery.io.storage.MetaStorage;
import com.bakdata.conquery.models.auth.entities.User;
import com.bakdata.conquery.models.config.ConqueryConfig;
import com.bakdata.conquery.models.config.XodusStoreFactory;
import com.bakdata.conquery.models.datasets.Dataset;
import com.bakdata.conquery.models.execution.ExecutionState;
import com.bakdata.conquery.models.execution.ManagedExecution;
import com.bakdata.conquery.models.identifiable.ids.specific.DatasetId;
import com.bakdata.conquery.models.worker.DatasetRegistry;
import com.bakdata.conquery.models.worker.Namespace;
import com.bakdata.conquery.util.Wait;
import com.bakdata.conquery.util.io.Cloner;
import com.google.common.util.concurrent.Uninterruptibles;
import io.dropwizard.client.JerseyClientBuilder;
import io.dropwizard.jetty.ConnectorFactory;
import io.dropwizard.jetty.HttpConnectorFactory;
import io.dropwizard.server.DefaultServerFactory;
import io.dropwizard.testing.DropwizardTestSupport;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.glassfish.jersey.client.ClientProperties;
import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * Represents the test instance of Conquery.
 *
 * Boots a standalone Conquery (manager + 2 shard nodes) via Dropwizard for
 * integration tests, hands out per-dataset {@link StandaloneSupport} handles,
 * and tears everything down again.
 */
@Slf4j
@RequiredArgsConstructor
public class TestConquery {

	// Tracks how often each dataset name was requested, so repeated
	// getSupport("x") calls yield "x", "x[1]", "x[2]", ...
	private static final ConcurrentHashMap<String, Integer> NAME_COUNTS = new ConcurrentHashMap<>();

	private final File tmpDir;
	private final ConqueryConfig config;
	@Getter
	private StandaloneCommand standaloneCommand;
	@Getter
	private DropwizardTestSupport<ConqueryConfig> dropwizard;
	// Supports handed out during the current test; cleaned up in afterEach().
	private Set<StandaloneSupport> openSupports = new HashSet<>();
	@Getter
	private Client client;
	private AtomicBoolean started = new AtomicBoolean(false);

	/**
	 * Returns the extension context used by the beforeAll-callback.
	 *
	 * @return The context.
	 */
	@Getter
	private ExtensionContext beforeAllContext;

	// Initial user which is set before each test from the config.
	private User testUser;

	/**
	 * Points the config's Xodus storage at tmpDir, enables fail-on-error,
	 * sets 2 shard nodes, test logging, and an entity bucket size of 3.
	 */
	@SneakyThrows
	public static void configurePathsAndLogging(ConqueryConfig config, File tmpDir) {
		config.setFailOnError(true);
		XodusStoreFactory storageConfig = new XodusStoreFactory();
		storageConfig.setDirectory(tmpDir.toPath());
		config.setStorage(storageConfig);
		config.getStandalone().setNumberOfShardNodes(2);
		// configure logging
		config.setLoggingFactory(new TestLoggingFactory());
		config.getCluster().setEntityBucketSize(3);
	}

	/**
	 * Assigns free ephemeral ports to all admin/application connectors and to
	 * the cluster port, by briefly opening a ServerSocket on port 0.
	 */
	@SneakyThrows
	public static void configureRandomPorts(ConqueryConfig config) {
		// set random open ports
		for (ConnectorFactory con : CollectionUtils
				.union(
						((DefaultServerFactory) config.getServerFactory()).getAdminConnectors(),
						((DefaultServerFactory) config.getServerFactory()).getApplicationConnectors()
				)) {
			try (ServerSocket s = new ServerSocket(0)) {
				((HttpConnectorFactory) con).setPort(s.getLocalPort());
			}
		}
		try (ServerSocket s = new ServerSocket(0)) {
			config.getCluster().setPort(s.getLocalPort());
		}
	}

	/** Opens a support handle for an already existing dataset. */
	public synchronized StandaloneSupport openDataset(DatasetId datasetId) {
		try {
			log.info("loading dataset");
			return createSupport(datasetId, datasetId.getName());
		}
		catch (Exception e) {
			return fail("Failed to open dataset " + datasetId, e);
		}
	}

	/**
	 * Builds a StandaloneSupport for the dataset: creates (or reuses) a
	 * tmp_<name> working directory, clones the config, waits until every shard
	 * node has a worker for the namespace, and registers the support for
	 * cleanup.
	 */
	private synchronized StandaloneSupport createSupport(DatasetId datasetId, String name) {
		DatasetRegistry datasets = standaloneCommand.getManager().getDatasetRegistry();
		Namespace ns = datasets.get(datasetId);

		assertThat(datasets.getShardNodes()).hasSize(2);

		// make tmp subdir and change cfg accordingly
		File localTmpDir = new File(tmpDir, "tmp_" + name);

		if (!localTmpDir.exists()) {
			if(!localTmpDir.mkdir()) {
				throw new IllegalStateException("Could not create directory for Support");
			}
		}
		else {
			log.info("Reusing existing folder {} for Support", localTmpDir.getPath());
		}

		ConqueryConfig localCfg = Cloner.clone(config, Map.of(Validator.class, standaloneCommand.getManager().getEnvironment().getValidator()), IntegrationTests.MAPPER);

		StandaloneSupport support = new StandaloneSupport(
				this,
				ns,
				ns.getStorage().getDataset(),
				localTmpDir,
				localCfg,
				standaloneCommand.getManager().getAdmin().getAdminProcessor(),
				standaloneCommand.getManager().getAdmin().getAdminDatasetProcessor(),
				// Getting the User from AuthorizationConfig
				testUser
		);

		// Wait (max 5s, polling every 5ms) until each shard node registered a worker.
		Wait.builder()
			.total(Duration.ofSeconds(5))
			.stepTime(Duration.ofMillis(5))
			.build()
			.until(() -> ns.getWorkers().size() == ns.getNamespaces().getShardNodes().size());

		support.waitUntilWorkDone();
		openSupports.add(support);
		return support;
	}

	/**
	 * Creates a brand-new dataset (de-duplicating the name via NAME_COUNTS)
	 * and returns a support handle for it.
	 */
	public synchronized StandaloneSupport getSupport(String name) {
		try {
			log.info("Setting up dataset");
			int count = NAME_COUNTS.merge(name, 0, (a, b) -> a + 1);
			if (count > 0) {
				name += "[" + count + "]";
			}
			Dataset dataset = new Dataset(name);
			standaloneCommand.getManager().getAdmin().getAdminDatasetProcessor().addDataset(dataset);
			return createSupport(dataset.getId(), name);
		}
		catch (Exception e) {
			return fail("Failed to create a support for " + name, e);
		}
	}

	@SneakyThrows
	public synchronized void shutdown() {
		//stop dropwizard directly so ConquerySupport does not delete the tmp directory
		getDropwizard().after();
		openSupports.clear();
	}

	/** Boots the Dropwizard server and creates the HTTP test client. */
	public void beforeAll() throws Exception {
		log.info("Working in temporary directory {}", tmpDir);

		// define server
		dropwizard = new DropwizardTestSupport<ConqueryConfig>(TestBootstrappingConquery.class, config, app -> {
			standaloneCommand = new StandaloneCommand((Conquery) app);
			return standaloneCommand;
		});
		// start server
		dropwizard.before();

		// create HTTP client for api tests
		client = new JerseyClientBuilder(this.getDropwizard().getEnvironment())
				.withProperty(ClientProperties.CONNECT_TIMEOUT, 10000)
				.withProperty(ClientProperties.READ_TIMEOUT, 10000)
				.build("test client");
	}

	/** Closes the client, stops the server, and deletes the temp directory. */
	public void afterAll() throws Exception {
		client.close();
		dropwizard.after();
		FileUtils.deleteQuietly(tmpDir);
	}

	/** Removes all datasets opened during the test and clears meta storage. */
	public void afterEach() throws Exception {
		synchronized (openSupports) {
			for (StandaloneSupport openSupport : openSupports) {
				removeSupportDataset(openSupport);
			}
			openSupports.clear();
		}
		this.getStandaloneCommand().getManager().getStorage().clear();
		waitUntilWorkDone();
	}

	@SneakyThrows
	public void removeSupportDataset(StandaloneSupport support) {
		standaloneCommand.getManager().getDatasetRegistry().removeNamespace(support.getDataset().getId());
	}

	public void removeSupport(StandaloneSupport support) {
		synchronized (openSupports) {
			openSupports.remove(support);
			removeSupportDataset(support);
		}
	}

	/**
	 * Blocks until the system looks idle; samples the job queues several times
	 * so late-arriving jobs are not missed, warning every ~10s while busy.
	 */
	public void waitUntilWorkDone() {
		log.info("Waiting for jobs to finish");
		//sample multiple times from the job queues to make sure we are done with everything and don't miss late arrivals
		long started = System.nanoTime();
		for (int i = 0; i < 5; i++) {
			do {
				Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
				if(!isBusy()) {
					break;
				}
				if (Duration.ofNanos(System.nanoTime() - started).toSeconds() > 10) {
					started = System.nanoTime();
					log.warn("waiting for done work for a long time");
				}
			} while (true);
		}
		log.trace("all jobs finished");
	}

	// Busy if: the manager's job queue is working, any execution is RUNNING,
	// any namespace's job queue is working, or any shard node reports busy.
	private boolean isBusy() {
		boolean busy;
		busy = standaloneCommand.getManager().getJobManager().isSlowWorkerBusy();
		busy |= standaloneCommand.getManager()
								 .getStorage()
								 .getAllExecutions()
								 .stream()
								 .map(ManagedExecution::getState)
								 .anyMatch(ExecutionState.RUNNING::equals);

		for (Namespace namespace : standaloneCommand.getManager().getDatasetRegistry().getDatasets()) {
			busy |= namespace.getJobManager().isSlowWorkerBusy();
		}

		for (ShardNode slave : standaloneCommand.getShardNodes()) {
			busy |= slave.isBusy();
		}
		return busy;
	}

	/** (Re-)creates the initial test user from the config before each test. */
	public void beforeEach() {
		final MetaStorage storage = standaloneCommand.getManager().getStorage();
		testUser = standaloneCommand.getManager().getConfig().getAuthorizationRealms().getInitialUsers().get(0).createOrOverwriteUser(storage);
		storage.updateUser(testUser);
	}
}
cor-serpentis/server
src/main/java/gsg/threads/JobRunnerConfiguration.java
<filename>src/main/java/gsg/threads/JobRunnerConfiguration.java package gsg.threads; import java.util.concurrent.atomic.AtomicBoolean; /** * @author <NAME>, <EMAIL> * Created: 12.07.15 17:47 */ public class JobRunnerConfiguration { private boolean active = true; private long tickTime = 50; private boolean doLog = true; public boolean getActive() { return active; } public void setActive(boolean active) { this.active = active; } public long getTickTime() { return tickTime; } public void setTickTime(long tickTime) { this.tickTime = tickTime; } public boolean isDoLog() { return doLog; } public void setDoLog(boolean doLog) { this.doLog = doLog; } }
CoffeePerry/mercury-py
mercury/services/config.py
# coding=utf-8 from os import path class Config(object): def __init__(self, app=None): """Config constructor. :param app: Application in which to inject the development settings. """ self.app = app self.DEBUG = False # Flask self.SECRET_KEY = None # Flask-JWT-Extended self.JWT_SECRET_KEY = self.SECRET_KEY # DBs self.DATABASE_FOLDER = 'sqldb' self.DATABASE_FILENAME = path.join(self.app.instance_path, self.DATABASE_FOLDER, 'mercury.sqlite3') self.SQLALCHEMY_DATABASE_URI = f'sqlite:///{self.DATABASE_FILENAME}' self.SQLALCHEMY_TRACK_MODIFICATIONS = False self.MONGO_URI = None # Celery - for Tasks self.BROKER_URL = None self.CELERY_RESULT_BACKEND = self.MONGO_URI self.CELERY_LOGS_FOLDER = 'logs' self.CELERY_BEAT_FOLDER = 'celerybeat' self.CELERY_BEAT_CRONTAB_MINUTE = '*' # Every minute # Email self.MAIL_SERVER = None self.MAIL_PORT = None self.MAIL_USE_TLS = True self.MAIL_USERNAME = None self.MAIL_PASSWORD = <PASSWORD> self.MAIL_DEFAULT_SENDER = (None, None)
Md-Sabbir-Ahmed/EEE2110
Laboratory 5 [Function]/Problem06/problem06.cpp
<reponame>Md-Sabbir-Ahmed/EEE2110 /* NAME: <NAME> ID:20200105021 SECTION:A ---------------------- CHAPTER:05 PROBLEM:06 */ #include <iostream> using namespace std; // function to calculate power of x raised to p double power(double x,int p) { double result=1; // to calculate positive power if(p>0) { // multiply x, p times for(int i=0;i<p;i++) result*=x; } // to calculate negative power else if(p<0) { // multiply x, p times for(int i=0;i<-p;i++) result*=x; // take reciprocal of result result=1/result; } return result; } int main() { // test function cout<<"2^3: "<<power(2,3)<<endl; cout<<"3^2: "<<power(3,2)<<endl; cout<<"3^0: "<<power(3,0)<<endl; cout<<"2^-1: "<<power(2,-1)<<endl; cout<<"2^-2: "<<power(2,-2)<<endl; return 0; }
dbmdz/digitalcollections-cms
dc-cudami-server/dc-cudami-server-backend-jdbi/src/main/java/de/digitalcollections/cudami/server/backend/impl/database/migration/V11_05_00__DDL_DML_UrlAlias_replace_targetEntityType_with_targetIdentifiableObjectType.java
package de.digitalcollections.cudami.server.backend.impl.database.migration;

import com.github.openjson.JSONObject;
import de.digitalcollections.model.identifiable.IdentifiableObjectType;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.flywaydb.core.api.migration.BaseJavaMigration;
import org.flywaydb.core.api.migration.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.SingleConnectionDataSource;

/**
 * Flyway Java migration: fills url_aliases.target_identifiable_objecttype
 * from the identifiable_objecttype of each alias's target identifiable,
 * disabling the target_uuid trigger while the bulk update runs.
 */
@SuppressWarnings("checkstyle:typename")
public class V11_05_00__DDL_DML_UrlAlias_replace_targetEntityType_with_targetIdentifiableObjectType
    extends BaseJavaMigration {

  private static final Logger LOGGER =
      LoggerFactory.getLogger(
          V11_05_00__DDL_DML_UrlAlias_replace_targetEntityType_with_targetIdentifiableObjectType
              .class);

  private JdbcTemplate jdbcTemplate;

  @Override
  public void migrate(Context context) throws Exception {
    final SingleConnectionDataSource connectionDataSource =
        new SingleConnectionDataSource(context.getConnection(), true);

    // do setup
    jdbcTemplate = new JdbcTemplate(connectionDataSource);

    // select all needed data for migration: one row per distinct alias target
    String selectQuery =
        "SELECT identifiables.uuid AS target_uuid, identifiable_objecttype FROM identifiables INNER JOIN (SELECT DISTINCT target_uuid FROM url_aliases) url_aliases ON identifiables.uuid=url_aliases.target_uuid";
    List<Map<String, Object>> targetData = jdbcTemplate.queryForList(selectQuery);

    if (targetData.isEmpty()) {
      LOGGER.info("No migration necessary.");
      return;
    }

    LOGGER.info("Migrating UrlAliases for {} identifiables", targetData.size());

    try {
      // Disable all triggers for faster updates
      jdbcTemplate.execute("ALTER TABLE url_aliases DISABLE TRIGGER tr_url_aliases_target_uuid");

      // Do the actual migration
      targetData.forEach(
          i -> {
            // NOTE(review): parses the row by round-tripping Map#toString()
            // through JSONObject — this presumably relies on the map's
            // toString format and could break if a value ever contains
            // commas/braces; reading i.get("target_uuid") /
            // i.get("identifiable_objecttype") directly would be safer —
            // TODO confirm against the driver's returned types.
            JSONObject jsonObject = new JSONObject(i.toString());
            UUID targetUuid = UUID.fromString(jsonObject.getString("target_uuid"));
            IdentifiableObjectType targetIdentifiableObjectType =
                IdentifiableObjectType.valueOf(jsonObject.getString("identifiable_objecttype"));
            jdbcTemplate.update(
                "UPDATE url_aliases SET target_identifiable_objecttype = ? WHERE target_uuid = ?",
                targetIdentifiableObjectType.toString(),
                targetUuid);
          });
      LOGGER.info("Migration done");
    } finally {
      // Re-enable the triggers
      jdbcTemplate.execute("ALTER TABLE url_aliases ENABLE TRIGGER tr_url_aliases_target_uuid");
    }
  }
}
antropez/concord
src/invite.c
<reponame>antropez/concord<filename>src/invite.c #include <stdio.h> #include <stdlib.h> #include <string.h> #include "discord.h" #include "discord-internal.h" #include "discord-request.h" CCORDcode discord_get_invite(struct discord *client, char *invite_code, struct discord_get_invite *params, struct discord_ret_invite *ret) { struct discord_request req = { 0 }; struct sized_buffer body; char buf[1024]; CCORD_EXPECT(client, NOT_EMPTY_STR(invite_code), CCORD_BAD_PARAMETER, ""); CCORD_EXPECT(client, params != NULL, CCORD_BAD_PARAMETER, ""); body.size = discord_get_invite_to_json(buf, sizeof(buf), params); body.start = buf; DISCORD_REQ_INIT(req, discord_invite, ret); return discord_adapter_run(&client->adapter, &req, &body, HTTP_GET, "/invites/%s", invite_code); } CCORDcode discord_delete_invite(struct discord *client, char *invite_code, struct discord_ret_invite *ret) { struct discord_request req = { 0 }; CCORD_EXPECT(client, NOT_EMPTY_STR(invite_code), CCORD_BAD_PARAMETER, ""); DISCORD_REQ_INIT(req, discord_invite, ret); return discord_adapter_run(&client->adapter, &req, NULL, HTTP_DELETE, "/invites/%s", invite_code); }
RockHong/railscasts-episodes
episode-146/store/config/environments/development.rb
# Settings specified here will take precedence over those in config/environment.rb # In the development environment your application's code is reloaded on # every request. This slows down response time but is perfect for development # since you don't have to restart the webserver when you make code changes. config.cache_classes = false # Log error messages when you accidentally call methods on nil. config.whiny_nils = true # Show full error reports and disable caching config.action_controller.consider_all_requests_local = true config.action_view.debug_rjs = true config.action_controller.perform_caching = false # Don't care if the mailer can't send config.action_mailer.raise_delivery_errors = false config.after_initialize do ActiveMerchant::Billing::Base.mode = :test paypal_options = { :login => "seller_1229899173_biz_api1.railscasts.com", :password => "<PASSWORD>", :signature => "AGjv6SW.mTiKxtkm6L9DcSUCUgePAUDQ3L-kTdszkPG8mRfjaRZDYtSu" } ::STANDARD_GATEWAY = ActiveMerchant::Billing::PaypalGateway.new(paypal_options) ::EXPRESS_GATEWAY = ActiveMerchant::Billing::PaypalExpressGateway.new(paypal_options) end
yxcde/RTP_MIT_RECODED
src/scenes/karstenVeraMolnar/karstenVeraMolnar.cpp
#include "karstenVeraMolnar.h"

// Scene setup: metadata plus GUI parameters. Parameters are registered on the
// shared "parameters" group (not a local ofxPanel) so the host app can expose them.
void karstenVeraMolnar::setup(){
    setAuthor("<NAME>");
    setOriginalArtist("<NAME>");
    loadCode("scenes/karstenVeraMolnar/exampleCode.cpp");

    parameters.add(numSquares.set("numSquares", 5, 2, 20));
    parameters.add(offAmount.set("offAmount", 1.0, 0.0, 3.0));
    parameters.add(rotation.set("rotation", 0.0, 0.0, 360.0));
    parameters.add(margin.set("margin", 36, 0, 50));

    windowSize = dimensions.getWidth();
}

void karstenVeraMolnar::update(){
    // Re-seed every frame with a fixed seed so the per-square random offsets
    // stay stable from frame to frame.
    ofSeedRandom(1);
    squareSize = windowSize/numSquares;
}

void karstenVeraMolnar::draw(){
    ofSetRectMode(OF_RECTMODE_CENTER);

    ofPushMatrix();
    // flip the y axis (translate down, then mirror) before drawing the grid
    ofTranslate(0, windowSize);
    ofScale(1, -1);

    // white canvas
    ofSetColor(255);
    ofDrawRectangle(windowSize/2, windowSize/2, windowSize, windowSize);

    // translucent black squares on a numSquares x numSquares grid
    ofSetColor(0, 0, 0, 200);
    const float jitterRange = offAmount*(squareSize/numSquares);
    for(int col = 0; col < numSquares; col++){
        for(int row = 0; row < numSquares; row++){
            float cellX = ofMap(col, 0, numSquares-1, squareSize, (windowSize - squareSize));
            float cellY = ofMap(row, 0, numSquares-1, squareSize, (windowSize - squareSize));
            // one ofRandom call per cell, in the same order as before,
            // so the seeded sequence produces identical offsets
            float jitter = ofRandom(-jitterRange, jitterRange);

            ofPushMatrix();
            ofTranslate(cellX, cellY);
            ofRotateZDeg(rotation);
            ofDrawRectangle(jitter, jitter, squareSize-margin, squareSize-margin);
            ofPopMatrix();
        }
    }
    ofPopMatrix();
}
dcsaorg/DCSA-JIT-Notifications
src/main/java/org/dcsa/jit/notifications/service/TimestampNotificationMailService.java
package org.dcsa.jit.notifications.service;

import org.dcsa.core.events.model.Event;
import org.dcsa.core.events.model.TimestampDefinition;
import org.dcsa.jit.notifications.model.PendingEmailNotification;
import reactor.core.publisher.Flux;

/**
 * Service contract for producing e-mail notifications for a JIT event.
 */
public interface TimestampNotificationMailService {

    /**
     * Produces the e-mail notifications to be sent for the given event.
     *
     * @param event the event that triggered the notification
     * @param timestampDefinition the timestamp definition associated with the event
     * @return a stream of pending notifications — presumably one element per
     *         e-mail to be sent; NOTE(review): confirm against the implementation
     */
    Flux<PendingEmailNotification> sendEmailNotificationsForEvent(Event event,TimestampDefinition timestampDefinition);
}
schnaustin/cloud-sdk-go
pkg/auth/new_authwriter.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package auth import ( "errors" "net/http" "github.com/go-openapi/runtime" "github.com/elastic/cloud-sdk-go/pkg/multierror" ) // Writer wraps the runtime.ClientAuthInfoWriter interface adding a method // to Auth generic http.Request. type Writer interface { runtime.ClientAuthInfoWriter AuthRequest(req *http.Request) *http.Request } // Config to create e new AuthWriters type Config struct { APIKey string Password string Username string } // Validate ensures that the config is usable. func (c Config) Validate() error { var merr = multierror.NewPrefixed("authwriter") var emptyAPIKey = c.APIKey == "" var emptyUser = c.Username == "" var emptyPass = c.Password == "" var emptyCreds = emptyAPIKey && emptyUser && emptyPass if emptyCreds { merr = merr.Append( errors.New("one of apikey or username and password must be specified"), ) } var allCreds = !emptyAPIKey && (!emptyUser || !emptyPass) if allCreds { merr = merr.Append( errors.New("only one of of apikey or username and password can be specified"), ) } return merr.ErrorOrNil() } // NewAuthWriter creates a new instance of one of the implementations of Writer // *APIKey or *UserLogin. 
func NewAuthWriter(c Config) (Writer, error) { if err := c.Validate(); err != nil { return nil, err } if c.APIKey != "" { return NewAPIKey(c.APIKey) } return NewUserLogin(c.Username, c.Password) }
lirui-intel/hadoop
hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.recon.tasks;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Wrapper class to hold multiple OM DB update events.
 */
public class OMUpdateEventBatch {

  // Defensive copy of the supplied events; final since the batch is immutable
  // after construction.
  private final List<OMDBUpdateEvent> events;

  OMUpdateEventBatch(Collection<OMDBUpdateEvent> e) {
    events = new ArrayList<>(e);
  }

  /**
   * Get Sequence Number and timestamp of last event in this batch.
   * Returns a sentinel (-1, -1) EventInfo when the batch is empty.
   * @return Event Info instance.
   */
  OMDBUpdateEvent.EventInfo getLastEventInfo() {
    if (events.isEmpty()) {
      return new OMDBUpdateEvent.EventInfo(-1, -1);
    } else {
      return events.get(events.size() - 1).getEventInfo();
    }
  }

  /**
   * Return iterator to Event batch.
   * @return iterator
   */
  public Iterator<OMDBUpdateEvent> getIterator() {
    return events.iterator();
  }

  /**
   * Filter events based on Tables.
   * @param tables set of tables to filter on.
   * @return trimmed event batch containing only events whose table is in
   *         {@code tables}; the original batch is left unmodified.
   */
  public OMUpdateEventBatch filter(Collection<String> tables) {
    return new OMUpdateEventBatch(events
        .stream()
        .filter(e -> tables.contains(e.getTable()))
        .collect(Collectors.toList()));
  }
}
sksingh2546-pro/ethernetIp
cip-core/src/main/java/com/digitalpetri/enip/util/TimeoutCalculator.java
package com.digitalpetri.enip.util;

import java.time.Duration;

/**
 * Encodes a {@link Duration} into the EtherNet/IP "tick time / timeout ticks"
 * byte pair used by connection managers: the high byte carries the tick
 * exponent (timeout tick = 2^shifts ms) and the low byte the multiplier,
 * so the encoded timeout is {@code multiplier * 2^shifts} milliseconds.
 */
public class TimeoutCalculator {

    private static final int MIN_TIMEOUT = 1;
    // 255 * 2^15 — the largest timeout (in ms) the two-byte encoding can express.
    private static final int MAX_TIMEOUT = 8355840;

    /**
     * Calculates the encoded timeout bytes for the requested duration.
     *
     * <p>The duration is clamped to [1, 8355840] ms. When the timeout cannot
     * be represented exactly, it is rounded <em>up</em> so the effective
     * timeout is never shorter than requested.
     *
     * @param timeout the desired timeout duration.
     * @return {@code (shifts << 8) | multiplier}, tick exponent in the high
     *         byte and multiplier (1..255) in the low byte.
     */
    public static int calculateTimeoutBytes(Duration timeout) {
        // Clamp as a long BEFORE narrowing: the previous (int) cast of
        // toMillis() overflowed for durations > ~24.8 days, wrapping negative
        // and silently clamping to the MINIMUM timeout instead of the maximum.
        long millis = timeout.toMillis();
        int desiredTimeout = (int) Math.min(Math.max(millis, MIN_TIMEOUT), MAX_TIMEOUT);

        // Factor desiredTimeout into multiplier * 2^shifts with multiplier <= 255,
        // remembering whether any set bits were shifted out (precision loss).
        boolean precisionLost = false;
        int shifts = 0;
        int multiplier = desiredTimeout;

        while (multiplier > 255) {
            precisionLost |= (multiplier & 1) == 1;
            multiplier >>= 1;
            shifts += 1;
        }

        // Round up rather than silently shortening the timeout; the bump may
        // itself overflow the byte, requiring one more shift.
        if (precisionLost) {
            multiplier += 1;
            if (multiplier > 255) {
                multiplier >>= 1;
                shifts += 1;
            }
        }

        assert (shifts <= 15);

        // 1 << shifts replaces Math.pow(2, shifts): exact integer arithmetic,
        // no double round-trip.
        int tick = 1 << shifts;
        assert (tick >= 1 && tick <= 32768);
        assert (multiplier >= 1 && multiplier <= 255);

        return shifts << 8 | multiplier;
    }
}
unification-com/mainchain-cosmos
x/enterprise/legacy/v040/migrate.go
package v040 import ( v038 "github.com/unification-com/mainchain/x/enterprise/legacy/v038" v040 "github.com/unification-com/mainchain/x/enterprise/types" ) // convertDecision convert an old byte decision to an enum func convertDecision(oldDecision v038.PurchaseOrderStatus) v040.PurchaseOrderStatus { switch oldDecision { case v038.StatusNil: return v040.StatusNil case v038.StatusRaised: return v040.StatusRaised case v038.StatusAccepted: return v040.StatusAccepted case v038.StatusRejected: return v040.StatusRejected case v038.StatusCompleted: return v040.StatusCompleted default: return v040.StatusNil } } func Migrate(oldEnterpriseState v038.GenesisState) *v040.GenesisState { newPos := make(v040.EnterpriseUndPurchaseOrders, len(oldEnterpriseState.PurchaseOrders)) for i, oldPo := range oldEnterpriseState.PurchaseOrders { newDecisions := make(v040.PurchaseOrderDecisions, len(oldPo.Decisions)) for j, oldDecision := range oldPo.Decisions { newDecisions[j] = v040.PurchaseOrderDecision{ Signer: oldDecision.Signer.String(), Decision: convertDecision(oldDecision.Decision), DecisionTime: uint64(oldDecision.DecisionTime), } } newPos[i] = v040.EnterpriseUndPurchaseOrder{ Id: oldPo.PurchaseOrderID, Purchaser: oldPo.Purchaser.String(), Amount: oldPo.Amount, Status: convertDecision(oldPo.Status), RaiseTime: uint64(oldPo.RaisedTime), CompletionTime: uint64(oldPo.CompletionTime), Decisions: newDecisions, } } newLockedUnd := make(v040.LockedUnds, len(oldEnterpriseState.LockedUnds)) for i, oldLockedUnd := range oldEnterpriseState.LockedUnds { newLockedUnd[i] = v040.LockedUnd{ Owner: oldLockedUnd.Owner.String(), Amount: oldLockedUnd.Amount, } } newWhiteList := make(v040.Whitelists, len(oldEnterpriseState.Whitelist)) for i, oldWl := range oldEnterpriseState.Whitelist { newWhiteList[i] = oldWl.String() } return &v040.GenesisState{ Params: v040.Params{ EntSigners: oldEnterpriseState.Params.EntSigners, Denom: oldEnterpriseState.Params.Denom, MinAccepts: oldEnterpriseState.Params.MinAccepts, 
DecisionTimeLimit: oldEnterpriseState.Params.DecisionLimit, }, StartingPurchaseOrderId: oldEnterpriseState.StartingPurchaseOrderID, PurchaseOrders: newPos, LockedUnd: newLockedUnd, TotalLocked: oldEnterpriseState.TotalLocked, Whitelist: newWhiteList, } }
GamingGuyTyler/KAaNE-JE-
src/theBulb.java
/**
 * The Bulb
 *
 * Started work on 6/16/2019 12:44 AM
 *
 * Finished version 0.1 on 2:38 AM
 *
 * Version 0.1
 *
 * Swing solver for "The Bulb" module: the user enters the bulb's color,
 * transparency and light state, and the step methods below walk the module's
 * decision tree, printing each instruction ("Press I/O", "Screw", "Unscrew").
 * Steps 5/6/12/13 pause and ask the user a YES/NO question via the buttons.
 */
import javax.swing.*;
import java.awt.*;
import java.awt.event.*;
import java.io.*;
import java.util.*;

public class theBulb {

    static JFrame f;
    // current step of the decision tree (also selects which question is pending)
    private static int stage = 1;
    // whether the I button has been pressed at some point in the sequence
    private static boolean pressI = false;
    // last button pressed overall, button pressed at step 1, and at step 2/3
    private static char prevBtn;
    private static char step1Btn;
    private static char step23Btn;
    // accumulated instruction lines for the output area
    private static ArrayList<String> stepSystem;
    // user inputs: bulb color, see-through flag, light-on flag
    private static String color;
    private static boolean clear;
    private static boolean light;
    // edgework read from config.properties (car/ind/msa/snd/... indicator flags)
    private static Properties props;
    // YES/NO answer state for the interactive steps
    private static boolean answer;
    private static boolean answered = false;
    // property key ("sig"/"clr"/"frq"/"frk") to check later in step 11
    private static String remember;

    /** Builds the module window and wires up the OK / YES / NO buttons. */
    public static void module() {
        System.out.println("[THE BULB]");
        f = new JFrame("KAaNE [THE BULB]");
        ImageIcon icon = new ImageIcon("imgs/icons/The Bulb.png");
        f.setIconImage(icon.getImage());

        // Edgework
        File configFile = new File("config.properties");
        props = new Properties();
        try {
            FileReader reader = new FileReader(configFile);
            props.load(reader);
        } catch (Exception ex) {
            ex.printStackTrace(System.out);
        }

        // CB
        String[] colors = {"Blue","Red","Green","Yellow","White","Purple"};
        Arrays.sort(colors);
        JComboBox colorCB = new JComboBox(colors);
        colorCB.setBounds(5,5,60,20);

        // Label
        JLabel colorLabel = new JLabel("Color");
        colorLabel.setBounds(70,5,50,20);

        // CheckBox
        JCheckBox clearCB = new JCheckBox("See-Through?");
        clearCB.setBounds(5,25,120,20);
        JCheckBox lightCB = new JCheckBox("Light on?");
        lightCB.setBounds(5,45,100,20);

        // Buttons
        JButton button = new JButton("OK");
        button.setBounds(200,45,60,20);
        JButton yesBtn = new JButton("YES");
        yesBtn.setBounds(60,170,60,20);
        yesBtn.setEnabled(false);
        JButton noBtn = new JButton("NO");
        noBtn.setBounds(180,170,60,20);
        noBtn.setEnabled(false);

        // Output
        JTextArea output = new JTextArea("");
        output.setEditable(false);
        output.setLineWrap(true);
        output.setWrapStyleWord(true);
        JScrollPane sp = new JScrollPane(output);
        sp.setBounds(5,70,285,100);

        // Add
        f.add(colorCB);
        f.add(colorLabel);
        f.add(clearCB);
        f.add(lightCB);
        f.getContentPane().add(sp);
        f.add(button);
        f.add(yesBtn);
        f.add(noBtn);

        // Define JFrame
        f.setLayout(null);
        f.setSize(300,300);
        f.setResizable(false);
        Dimension dim = Toolkit.getDefaultToolkit().getScreenSize();
        f.setLocation(dim.width/2-f.getSize().width/2, dim.height/2-f.getSize().height/2);
        f.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
        f.setVisible(true);

        // OK: reset state, read inputs, run the decision tree from step 1;
        // enable YES/NO only when the tree stopped on a question stage.
        button.addActionListener((ActionEvent e) -> {
            output.setText("");
            stage = 1;
            pressI = false;
            answered = false;
            color = (String) colorCB.getItemAt(colorCB.getSelectedIndex());
            clear = clearCB.isSelected();
            light = lightCB.isSelected();
            stepSystem = step1();
            String[] arr = new String[stepSystem.size()];
            arr = stepSystem.toArray(arr);
            for (String a : arr) {
                output.append(a + "\n");
            }
            if (stage == 5 || stage == 6 || stage == 12 || stage == 13) {
                yesBtn.setEnabled(true);
                noBtn.setEnabled(true);
            }
        });

        // YES: answer the pending question and re-run the paused step.
        yesBtn.addActionListener((ActionEvent e) -> {
            answer = true;
            answered = true;
            ArrayList<String> blankArrList = new ArrayList<>();
            switch (stage) {
                case 5:
                    stepSystem = step5(blankArrList);
                    break;
                case 6:
                    stepSystem = step6(blankArrList);
                    break;
                case 12:
                    stepSystem = step12(blankArrList);
                    break;
                case 13:
                    stepSystem = step13(blankArrList);
                    break;
            }
            String[] arr = new String[stepSystem.size()];
            arr = stepSystem.toArray(arr);
            for (String a : arr) {
                output.append(a + "\n");
            }
        });

        // NO: same as YES but with a negative answer.
        noBtn.addActionListener((ActionEvent e) -> {
            answer = false;
            answered = true;
            ArrayList<String> blankArrList = new ArrayList<>();
            switch (stage) {
                case 5:
                    stepSystem = step5(blankArrList);
                    break;
                case 6:
                    stepSystem = step6(blankArrList);
                    break;
                case 12:
                    stepSystem = step12(blankArrList);
                    break;
                case 13:
                    stepSystem = step13(blankArrList);
                    break;
            }
            String[] arr = new String[stepSystem.size()];
            arr = stepSystem.toArray(arr);
            for (String a : arr) {
                output.append(a + "\n");
            }
        });
    }

    // Step 1: branch on light/clear state of the bulb.
    private static ArrayList<String> step1() {
        ArrayList<String> output = new ArrayList<>();
        System.out.println("Step 1");
        if (light && clear) {
            System.out.println("Press I ; Light + Clear");
            prevBtn = 'I';
            step1Btn = 'I';
            pressI = true;
            output.add("Press I");
            output = step2(output);
        } else if (light) {
            System.out.println("Press O ; Light");
            prevBtn = 'O';
            step1Btn = 'O';
            output.add("Press O");
            output = step3(output);
        } else {
            System.out.println("Unscrew ; No other conditions");
            output.add("Unscrew");
            output = step4(output);
        }
        return output;
    }

    // Step 2 (after pressing I at step 1): branch on color Red/White.
    private static ArrayList<String> step2(ArrayList<String> output) {
        System.out.println("Step 2");
        stage = 2;
        if (color.equals("Red")) {
            System.out.println("Press I, Unscrew ; Color Red");
            prevBtn = 'I';
            step23Btn = 'I';
            pressI = true;
            output.add("Press I");
            output.add("Unscrew");
            output = step5(output);
        } else if (color.equals("White")) {
            System.out.println("Press O, Unscrew ; Color White");
            prevBtn = 'O';
            step23Btn = 'O';
            output.add("Press O");
            output.add("Unscrew");
            output = step6(output);
        } else {
            System.out.println("Unscrew ; No other conditions");
            output.add("Unscrew");
            output = step7(output);
        }
        return output;
    }

    // Step 3 (after pressing O at step 1): branch on color Green/Purple.
    private static ArrayList<String> step3(ArrayList<String> output) {
        System.out.println("Step 3");
        stage = 3;
        if (color.equals("Green")) {
            System.out.println("Press I, Unscrew ; Color Green");
            prevBtn = 'I';
            step23Btn = 'I';
            pressI = true;
            output.add("Press I");
            output.add("Unscrew");
            output = step6(output);
        } else if (color.equals("Purple")) {
            System.out.println("Press O, Unscrew ; Color Purple");
            prevBtn = 'O';
            step23Btn = 'O';
            // NOTE(review): unlike every other branch, no "Press O"/"Unscrew"
            // lines are appended to the GUI output here (only logged) —
            // possible omission; confirm against the module manual.
            output = step5(output);
        } else {
            System.out.println("Unscrew ; No other conditions");
            output.add("Unscrew");
            output = step8(output);
        }
        return output;
    }

    // Step 4 (bulb unscrewed first): branch on CAR/IND/MSA/SND edgework flags.
    private static ArrayList<String> step4(ArrayList<String> output) {
        System.out.println("Step 4");
        stage = 4;
        int car = Integer.parseInt(props.getProperty("car"));
        int ind = Integer.parseInt(props.getProperty("ind"));
        int msa = Integer.parseInt(props.getProperty("msa"));
        int snd = Integer.parseInt(props.getProperty("snd"));
        if (car == 1 || ind == 1 || msa == 1 || snd == 1) {
            System.out.println("Press I ; CAR/IND/MSA/SND exists");
            prevBtn = 'I';
            pressI = true;
            output.add("Press I");
            output = step9(output);
        } else {
            System.out.println("Press O ; No other conditions");
            prevBtn = 'O';
            output.add("Press O");
            output = step10(output);
        }
        return output;
    }

    // Step 5 (interactive): first call asks the question; second call (after
    // YES/NO) emits the final button press + "Screw".
    private static ArrayList<String> step5(ArrayList<String> output) {
        stage = 5;
        if (!answered) {
            System.out.println("Step 5");
            System.out.println("Did light go off at step 1?");
            output.add("Did the light go off at the first instruction?");
        } else {
            if (answer) {
                System.out.println("Press prev btn, Screw ; Answer is YES");
                output.add("Press " + prevBtn);
                output.add("Screw");
            } else {
                System.out.println("Press unpressed btn, Screw ; Answer is NO");
                // NOTE(review): the log says "unpressed btn" but this assigns
                // 'I' when I *was* pressed (pressI == true), i.e. the same
                // button again — verify the intended polarity.
                if (pressI) {
                    prevBtn = 'I';
                } else {
                    prevBtn = 'O';
                }
                output.add("Press " + prevBtn);
                output.add("Screw");
            }
        }
        return output;
    }

    // Step 6 (interactive): YES -> repeat the step-1 button, NO -> repeat the
    // step-2/3 button; then "Screw".
    private static ArrayList<String> step6(ArrayList<String> output) {
        stage = 6;
        if (!answered) {
            System.out.println("Step 6");
            System.out.println("Did light go off when press I?");
            output.add("Did the light go off when pressing I?");
        } else {
            if (answer) {
                System.out.println("Press " + step1Btn + ", Screw ; Answer is YES");
                output.add("Press " + step1Btn);
                output.add("Screw");
            } else {
                System.out.println("Press " + step23Btn + ", Screw ; Answer is NO");
                output.add("Press " + step23Btn);
                output.add("Screw");
            }
        }
        return output;
    }

    // Step 7: branch on color; Green/Blue store an indicator key for step 11.
    private static ArrayList<String> step7(ArrayList<String> output) {
        System.out.println("Step 7");
        stage = 7;
        if (color.equals("Green")) {
            System.out.println("Press I, Rem SIG ; Color Green");
            remember = "sig";
            output.add("Press I");
            output = step11(output);
        } else if (color.equals("Purple")) {
            System.out.println("Press I, Screw ; Color Purple");
            output.add("Press I");
            output.add("Screw");
            output = step12(output);
        } else if (color.equals("Blue")) {
            System.out.println("Press O, Rem CLR ; Color Blue");
            remember = "clr";
            output.add("Press O");
            output = step11(output);
        } else {
            System.out.println("Press O, Screw ; No other conditions");
            output.add("Press O");
            output.add("Screw");
            output = step13(output);
        }
        return output;
    }

    // Step 8: branch on color; White/Yellow store an indicator key for step 11.
    private static ArrayList<String> step8(ArrayList<String> output) {
        System.out.println("Step 8");
        stage = 8;
        if (color.equals("White")) {
            System.out.println("Press I, Rem FRQ ; Color White");
            remember = "frq";
            output.add("Press I");
            output = step11(output);
        } else if (color.equals("Red")) {
            System.out.println("Press I, Screw ; Color Red");
            output.add("Press I");
            output.add("Screw");
            output = step13(output);
        } else if (color.equals("Yellow")) {
            System.out.println("Press O, Rem FRK ; Color Yellow");
            remember = "frk";
            output.add("Press O");
            output = step11(output);
        } else {
            System.out.println("Press O, Screw ; No other conditions");
            output.add("Press O");
            output.add("Screw");
            output = step12(output);
        }
        return output;
    }

    // Step 9 (after I at step 4): six-way color branch.
    private static ArrayList<String> step9(ArrayList<String> output) {
        System.out.println("Step 9");
        stage = 9;
        if (color.equals("Blue")) {
            System.out.println("Press I ; Color Blue");
            output.add("Press I");
            output = step14(output);
        } else if (color.equals("Green")) {
            System.out.println("Press I, Screw ; Color Green");
            output.add("Press I");
            output.add("Screw");
            output = step12(output);
        } else if (color.equals("Yellow")) {
            System.out.println("Press O ; Color Yellow");
            output.add("Press O");
            output = step15(output);
        } else if (color.equals("White")) {
            System.out.println("Press O, Screw ; Color White");
            output.add("Press O");
            output.add("Screw");
            output = step13(output);
        } else if (color.equals("Purple")) {
            System.out.println("Screw, Press I; Color Purple");
            output.add("Screw");
            output.add("Press I");
            output = step12(output);
        } else {
            System.out.println("Screw, Press O; No other conditions");
            output.add("Screw");
            output.add("Press O");
            output = step13(output);
        }
        return output;
    }

    // Step 10 (after O at step 4): six-way color branch (mirrors step 9).
    private static ArrayList<String> step10(ArrayList<String> output) {
        System.out.println("Step 10");
        stage = 10;
        if (color.equals("Purple")) {
            System.out.println("Press I ; Color Purple");
            output.add("Press I");
            output = step14(output);
        } else if (color.equals("Red")) {
            System.out.println("Press I, Screw ; Color Red");
            output.add("Press I");
            output.add("Screw");
            output = step13(output);
        } else if (color.equals("Blue")) {
            System.out.println("Press O ; Color Blue");
            output.add("Press O");
            output = step15(output);
        } else if (color.equals("Yellow")) {
            System.out.println("Press O, Screw ; Color Yellow");
            output.add("Press O");
            output.add("Screw");
            output = step12(output);
        } else if (color.equals("Green")) {
            System.out.println("Screw, Press I; Color Green");
            output.add("Screw");
            output.add("Press I");
            output = step13(output);
        } else {
            System.out.println("Screw, Press O; No other conditions");
            output.add("Screw");
            output.add("Press O");
            output = step12(output);
        }
        return output;
    }

    // Step 11: check the indicator remembered in step 7/8 against the edgework.
    private static ArrayList<String> step11(ArrayList<String> output) {
        System.out.println("Step 11");
        stage = 11;
        int indicator = Integer.parseInt(props.getProperty(remember));
        if (indicator == 1) {
            System.out.println("Press I, Screw ; Bomb has rem indicator");
            output.add("Press I");
            output.add("Screw");
        } else {
            System.out.println("Press O, Screw ; No other conditions");
            output.add("Press O");
            output.add("Screw");
        }
        return output;
    }

    // Step 12 (interactive): "Is the light on?" -> YES: I, NO: O.
    private static ArrayList<String> step12(ArrayList<String> output) {
        System.out.println("Step 12");
        stage = 12;
        if (!answered) {
            System.out.println("Is light on?");
            output.add("Is the light now on?");
        } else {
            if (answer) {
                System.out.println("Press I ; Answer is YES");
                output.add("Press I");
            } else {
                System.out.println("Press O ; Answer is NO");
                output.add("Press O");
            }
        }
        return output;
    }

    // Step 13 (interactive): same question as step 12, inverted buttons.
    private static ArrayList<String> step13(ArrayList<String> output) {
        System.out.println("Step 13");
        stage = 13;
        if (!answered) {
            System.out.println("Is light on?");
            output.add("Is the light now on?");
        } else {
            if (answer) {
                System.out.println("Press O ; Answer is YES");
                output.add("Press O");
            } else {
                System.out.println("Press I ; Answer is NO");
                output.add("Press I");
            }
        }
        return output;
    }

    // Step 14: opaque bulb -> I, otherwise O; then "Screw".
    private static ArrayList<String> step14(ArrayList<String> output) {
        System.out.println("Step 14");
        stage = 14;
        if (!clear) {
            System.out.println("Press I, Screw ; Opaque");
            output.add("Press I");
            output.add("Screw");
        } else {
            System.out.println("Press O, Screw ; No other conditions");
            output.add("Press O");
            output.add("Screw");
        }
        return output;
    }

    // Step 15: see-through bulb -> I, otherwise O; then "Screw".
    private static ArrayList<String> step15(ArrayList<String> output) {
        System.out.println("Step 15");
        stage = 15;
        if (clear) {
            System.out.println("Press I, Screw ; Clear");
            output.add("Press I");
            output.add("Screw");
        } else {
            System.out.println("Press O, Screw ; No other conditions");
            output.add("Press O");
            output.add("Screw");
        }
        return output;
    }
}