repo_name stringlengths 6 101 | path stringlengths 4 300 | text stringlengths 7 1.31M |
|---|---|---|
ballcat-projects/Payment-Platform | payment-sdk/src/main/java/live/lingting/sdk/request/MixRequest.java | package live.lingting.sdk.request;
import java.util.Map;
import live.lingting.sdk.domain.HttpProperties;
import live.lingting.sdk.exception.MixException;
import live.lingting.sdk.model.MixModel;
import live.lingting.sdk.response.MixResponse;
/**
* @author lingting 2021/6/7 17:22
*/
public interface MixRequest<M extends MixModel, R extends MixResponse<?>> {
/**
 * Gets the HTTP request path for this API call.
 * @return java.lang.String the request path
 * @author lingting 2021-06-07 20:48
 */
String getPath();
/**
 * Gets the base model data backing the request parameters.
 * @return M the request model
 * @author lingting 2021-06-07 19:34
 */
M getModel();
/**
 * Builds the request parameters from the model.
 * @return java.util.Map<java.lang.String,java.lang.String> the parameter map
 * @author lingting 2021-06-07 19:51
 * @exception MixException if the parameters fail validation
 */
Map<String, String> getParams() throws MixException;
/**
 * Gets the HTTP client configuration used for this request.
 * @return live.lingting.sdk.domain.HttpProperties
 * @author lingting 2021-06-07 20:43
 */
HttpProperties getProperties();
/**
 * Converts the raw response string into the typed response object.
 * @param resStr the raw response body returned by the server
 * @return R the parsed response
 * @author lingting 2021-06-07 21:38
 */
R convert(String resStr);
}
r4k0nb4k0n/Programming-Challenges | Baekjoon/6588.cpp | #include <cstdio>
#include <cstring>
#define SIZE 1000001
// is_Prime[i] is true iff i is prime, valid for 0 <= i < SIZE.
bool is_Prime[SIZE];

/* Sieve of Eratosthenes over [0, SIZE).
 * Fix: the original loop bounds were i*i<=SIZE+1 and j<=SIZE+1, which
 * write up to is_Prime[SIZE+1] -- two elements past the end of the array
 * (undefined behavior). Bounds are now strictly less than SIZE. */
void Eratos(){
    memset(is_Prime, true, SIZE);
    is_Prime[0] = is_Prime[1] = false;
    for(int i = 2; i * i < SIZE; i++)
        if(is_Prime[i])
            for(int j = i * i; j < SIZE; j += i)
                is_Prime[j] = false;
}
/* Reads even numbers from stdin until 0 (or end of input) and prints a
 * Goldbach partition n = p + q with p, q odd primes and p minimal.
 * Assumes each n < SIZE so the sieve lookups stay in bounds. */
int main(){
    int n;
    Eratos();
    // Fix: scanf returns EOF (-1, which is truthy) at end of input, so the
    // original `while(scanf(...) && n!=0)` spun forever on truncated input
    // that lacked the terminating 0. Require exactly one conversion.
    while(scanf("%d", &n) == 1 && n != 0){
        int i;
        for(i = 3; i <= n; i += 2){
            if(is_Prime[i] && is_Prime[n - i]){
                printf("%d = %d + %d\n", n, i, n - i);
                break;
            }
        }
        if(i > n)
            printf("Goldbach's conjecture is wrong.\n");
    }
    return 0;
}
|
leeboardtools/mimon | src/util/FileActions.js | import * as path from 'path';
import { promises as fsPromises } from 'fs';
/**
 * Determines if a file exists.
 * @async
 * @param {string} pathName The path name of the file to check for.
 * @returns {Promise<boolean>} <code>true</code> if pathName refers to an
 * existing regular file, <code>false</code> otherwise (empty path, missing
 * file, or any stat failure).
 */
export async function asyncFileExists(pathName) {
    if (!pathName) {
        return false;
    }
    try {
        const stat = await fsPromises.stat(pathName);
        return stat.isFile();
    }
    catch (e) {
        // Fix: the original only handled ENOENT and implicitly returned
        // undefined for other errors (EACCES, ENOTDIR, ...). Any stat
        // failure means the path is not usable as a file; always return
        // an explicit boolean.
        return false;
    }
}
/**
 * Replaces the base name of a path (name.ext) with another base name or the base
 * name from another path.
 * @param {string} pathName The path name whose base name is to be replaced.
 * @param {string} baseName The base name or path name whose base name is to be
 * used as the base name for pathName.
 * @returns {string} pathName with its base name swapped for baseName's.
 */
export function replaceBaseFileName(pathName, baseName) {
    const { base } = path.parse(baseName);
    const rebuilt = Object.assign(path.parse(pathName), { base });
    return path.format(rebuilt);
}
/**
 * Retrieves a path name given a file name that may just be a base name (name.ext),
 * or may have a path. If the file name is just a base name, the path in a reference
 * path name will be used.
 * @param {string} fileName The base file name or path name.
 * @param {string} refPathName The reference path name to use if fileName is just
 * a base file name. refPathName is presumed to include a base file name, that base
 * file name will be replaced.
 * @returns {string} The resolved full path name.
 */
export function getFullPathName(fileName, refPathName) {
    const { dir, base } = path.parse(fileName);
    // A non-empty dir means fileName already carries its own path.
    return dir ? fileName : replaceBaseFileName(refPathName, base);
}
/**
 * Interface for the file actions. File actions perform an action on a file, which
 * can be reverted until the action is finalized.
 * <p>
 * A set of file actions are normally created, and then passed to
 * {@link performFileActions}. performFileActions() handles calling
 * {@link FileAction#apply}, {@link FileAction#finalize}, and
 * {@link FileAction#revert} when appropriate.
 * <p>
 * Subclasses implement _applyMainAction(), _finalizeMainAction() and
 * _revertMainAction(); this base class handles backing up or stashing any
 * pre-existing file at the target path.
 * @interface
 */
export class FileAction {
// pathName is the full path name the action operates on.
constructor(pathName) {
this._pathName = pathName;
}
// Returns the full path name this action operates on.
getPathName() { return this._pathName; }
/**
 * Callback called when a backup file is created.
 * @callback FileAction~BackupCallback
 * @param {string} backupFileName The backupFileName arg that was passed to
 * the {@link FileAction#setBackupFileName} call.
 * @param {string} backupPathName The full path name of the created backup file.
 */
/**
 * Sets a backup file name. If a file with {@link FileAction#getPathName} exists
 * when the action is applied and a backup file name has been specified, the
 * existing file is renamed to the backup file name.
 * @param {string} backupFileName The optional file name for the backup file.
 * Only the base name (name.ext) is used.
 * @param {FileAction~BackupCallback} [callback] Optional callback function
 * that's called when the backup file is created.
 */
setBackupFileName(backupFileName, callback) {
this._backupFileName = backupFileName;
this._backupFileCallback = callback;
}
getBackupFileName() { return this._backupFileName; }
// If a file already exists at the target path, either rename it to the
// configured backup name, or (when _isDeleteExistingFile is set by a
// subclass) stash it under a unique "to_delete<N>_" name so it can be
// deleted at finalize or restored at revert.
async _applyForExistingFile() {
if (await asyncFileExists(this._pathName)) {
if (this._backupFileName) {
this._backupPathName = getFullPathName(this._backupFileName,
this._pathName);
this._backupAction = new RenameFileAction(this._pathName,
this._backupPathName);
await this._backupAction.apply();
if (this._backupFileCallback) {
this._backupFileCallback(this._backupFileName,
this._backupPathName);
}
}
else if (this._isDeleteExistingFile) {
const parts = path.parse(this._pathName);
const originalBase = parts.base;
let index = 1;
let toDeletePathName;
// Probe to_delete1_, to_delete2_, ... until an unused name is found.
do {
parts.base = 'to_delete' + index + '_' + originalBase;
++index;
toDeletePathName = path.format(parts);
} while (await asyncFileExists(toDeletePathName));
await fsPromises.rename(this._pathName, toDeletePathName);
this._toDeletePathName = toDeletePathName;
}
}
}
// Commits the pre-existing-file handling: keeps the backup, or actually
// deletes the stashed "to_delete" file.
async _finalizeForExistingFile() {
if (this._backupAction) {
await this._backupAction.finalize();
this._backupAction = undefined;
}
else if (this._toDeletePathName) {
await fsPromises.unlink(this._toDeletePathName);
this._toDeletePathName = undefined;
}
}
// Undoes the pre-existing-file handling: moves the backup or the stashed
// "to_delete" file back to the original path.
async _revertForExistingFile() {
if (this._backupAction) {
await this._backupAction.revert();
this._backupAction = undefined;
}
else if (this._toDeletePathName) {
await fsPromises.rename(this._toDeletePathName, this._pathName);
this._toDeletePathName = undefined;
}
}
/**
 * Performs the action. This is where the bulk of the action should be performed,
 * particularly the parts where errors are reasonably likely to occur.
 * The action is not finalized until {@link FileAction#finalize} is called.
 * Before then, the action can be reverted by calling {@link FileAction#revert}
 */
async apply() {
try {
await this._applyForExistingFile();
await this._applyMainAction();
}
catch (e) {
await this.revert();
throw e;
}
}
/**
 * Finalizes the action. After this is called the action normally cannot be
 * reverted.
 */
async finalize() {
try {
await this._finalizeMainAction();
await this._finalizeForExistingFile();
}
catch (e) {
await this.revert();
throw e;
}
}
/**
 * Reverts the action. This is normally called when an exception occurs before
 * finalization.
 * NOTE: Implementations must not throw exceptions.
 */
async revert() {
try {
await this._revertMainAction();
}
catch (e) {
// Ignore
}
try {
await this._revertForExistingFile();
}
catch (e) {
// Ignore
}
}
}
/**
 * File action that deletes a file. The file to be deleted is temporarily renamed
 * and is only deleted when finalized. If the file does not exist this does not do
 * anything.
 * @class
 */
export class DeleteFileAction extends FileAction {
    /**
     * @constructor
     * @param {string} pathName The full path name of the file to be deleted.
     */
    constructor(pathName) {
        super(pathName);
        // The base class does all of the work when this flag is set: it
        // stashes the existing file on apply, unlinks it on finalize, and
        // restores it on revert. No main action is needed here.
        this._isDeleteExistingFile = true;
    }

    async _applyMainAction() { /* handled entirely by the base class */ }
    async _finalizeMainAction() { /* handled entirely by the base class */ }
    async _revertMainAction() { /* handled entirely by the base class */ }
}
/**
 * File action for renaming a file. If the file does not exist nothing happens.
 * <p>
 * Note that {@link FileAction~setBackupFileName} has no effect on the original file.
 * @class
 */
export class RenameFileAction extends FileAction {
/**
 * @constructor
 * @param {string} oldPathName The full path name of the file to be renamed.
 * @param {string} newFileName The base file name or full path name of the
 * renamed file.
 */
constructor(oldPathName, newFileName) {
// NOTE(review): super() is called without a path name, so getPathName()
// returns undefined for rename actions. Confirm whether it should be the
// old or the new path before changing this.
super();
this._oldPathName = oldPathName;
this._newPathName = getFullPathName(newFileName, oldPathName);
}
getOldPathName() { return this._oldPathName; }
getNewPathName() { return this._newPathName; }
// Renames oldPathName to newPathName. If a file already occupies the new
// name it is stashed via a DeleteFileAction so the move can be undone.
async apply() {
if (await asyncFileExists(this._oldPathName)) {
if (await asyncFileExists(this._newPathName)) {
this._existingFileAction = new DeleteFileAction(this._newPathName);
await this._existingFileAction.apply();
}
await fsPromises.rename(this._oldPathName, this._newPathName);
}
}
// Commits the rename by letting the stashed overwritten file be deleted.
async finalize() {
if (this._existingFileAction) {
await this._existingFileAction.finalize();
this._existingFileAction = undefined;
}
}
// Moves the file back and restores any overwritten file. Must not throw.
async revert() {
try {
if (await asyncFileExists(this._newPathName)) {
await fsPromises.rename(this._newPathName, this._oldPathName);
}
}
catch (e) {
// Ignore
}
if (this._existingFileAction) {
await this._existingFileAction.revert();
this._existingFileAction = undefined;
}
}
}
/**
 * File action for replacing an existing file or creating a new file (i.e. what
 * happens when the original file doesn't exist). The replacement is done via a
 * callback function.
 * @class
 */
export class ReplaceFileAction extends FileAction {
    /**
     * Callback called by {@link ReplaceFileAction#apply},
     * {@link ReplaceFileAction#finalize}, or {@link ReplaceFileAction#revert} to
     * handle the actual creation of the file, extra finalization, or reversion.
     * For apply() this should handle creating, writing, and closing the file.
     * @callback ReplaceFileAction~Callback
     * @async
     * @param {string} pathName The full path name of the file to be created/written.
     * @returns {Promise} A promise should be returned, the callbacks are treated
     * as <code>async</code> functions.
     */

    /**
     * @typedef {Object} ReplaceFileAction~Callbacks
     * @property {ReplaceFileAction~Callback} [applyCallback] The callback to be
     * called during {@link ReplaceFileAction#apply}.
     * @property {ReplaceFileAction~Callback} [finalizeCallback] The callback to
     * be called during {@link ReplaceFileAction#finalize}.
     * @property {ReplaceFileAction~Callback} [revertCallback] The callback to be
     * called during {@link ReplaceFileAction#revert}.
     */

    /**
     * @constructor
     * @param {string} pathName The full path name of the file to be created.
     * @param {ReplaceFileAction~Callback|ReplaceFileAction~Callbacks} callbacks
     * Either a single {@link ReplaceFileAction~Callback}, in which
     * case it is the callback to be called during {@link ReplaceFileAction#apply},
     * or a {@link ReplaceFileAction~Callbacks} object containing the various callbacks.
     */
    constructor(pathName, callbacks) {
        super(pathName);
        // Any pre-existing file is stashed by the base class and deleted on
        // finalize (or restored on revert).
        this._isDeleteExistingFile = true;

        if (typeof callbacks === 'function') {
            this._applyCallback = callbacks;
        }
        else {
            this._applyCallback = callbacks.applyCallback;
            this._finalizeCallback = callbacks.finalizeCallback;
            this._revertCallback = callbacks.revertCallback;
        }
    }

    getApplyCallback() { return this._applyCallback; }
    getFinalizeCallback() { return this._finalizeCallback; }

    // Fix: the original returned this._revertCallabck (a typo), so this
    // getter always returned undefined even when a revert callback was set.
    getRevertCallback() { return this._revertCallback; }

    /**
     * Sets a file name used only when no regular backup was made (i.e. the
     * target file did not originally exist): an empty placeholder file is
     * created under this name after the apply callback runs.
     * @param {string} fileName The base or full file name for the placeholder.
     * @param {FileAction~BackupCallback} [callback] Optional callback invoked
     * after the placeholder file is created.
     */
    setNoFileBackupFileName(fileName, callback) {
        this._noFileBackupFileName = fileName;
        this._noFileBackupFileCallback = callback;
    }

    async _applyMainAction() {
        if (this._applyCallback) {
            await this._applyCallback(this._pathName);
        }

        // If no backup was taken (the file didn't exist before) but a
        // no-file backup name was configured, create an empty placeholder.
        if (this._noFileBackupFileName && !this._backupPathName) {
            if (await asyncFileExists(this._pathName)) {
                const noFileBackupPathName
                    = getFullPathName(this._noFileBackupFileName, this._pathName);
                this._noFileBackupAction = new ReplaceFileAction(noFileBackupPathName,
                    async (pathName) => { await fsPromises.writeFile(pathName, ''); });
                await this._noFileBackupAction.apply();

                if (this._noFileBackupFileCallback) {
                    this._noFileBackupFileCallback(this._noFileBackupFileName,
                        noFileBackupPathName);
                }
            }
        }
    }

    async _finalizeMainAction() {
        if (this._finalizeCallback) {
            await this._finalizeCallback(this._pathName);
        }

        if (this._noFileBackupAction) {
            await this._noFileBackupAction.finalize();
            this._noFileBackupAction = undefined;
        }
    }

    async _revertMainAction() {
        if (this._revertCallback) {
            try {
                await this._revertCallback(this._pathName);
            }
            catch (e) {
                // Reverts must not throw.
            }
        }

        try {
            if (await asyncFileExists(this._pathName)) {
                await fsPromises.unlink(this._pathName);
            }
        }
        catch (e) {
            // Reverts must not throw.
        }

        if (this._noFileBackupAction) {
            try {
                await this._noFileBackupAction.revert();
            }
            catch (e) {
                // Reverts must not throw.
            }
        }
    }
}
/**
 * File action that keeps an existing file. If {@FileAction~setBackupFileName} is
 * called with a file name before the action is applied, and the file exists,
 * the existing file will be copied to the backup file name (actually, the existing
 * file is renamed to the backup file name, and then a copy of the backup file is
 * made with the original name). If the file does not exist or no backup file name
 * has been set nothing happens.
 */
export class KeepFileAction extends FileAction {
    async _applyMainAction() {
        if (!this._backupPathName) {
            return;
        }
        // The base class renamed the original file to the backup name; copy
        // the backup contents back under the original name.
        await fsPromises.copyFile(this._backupPathName, this._pathName);
        this._copiedFileName = this._pathName;
    }

    async _finalizeMainAction() {
        this._copiedFileName = undefined;
    }

    async _revertMainAction() {
        if (!await asyncFileExists(this._copiedFileName)) {
            return;
        }
        try {
            await fsPromises.unlink(this._copiedFileName);
        }
        catch (e) {
            // Reverts must not throw.
        }
    }
}
/**
 * File action that creates a copy of a file.
 */
export class CopyFileAction extends FileAction {
    /**
     * @constructor
     * @param {string} originalFileName The name of the file to be copied.
     * @param {string} newFileName The name of the copy of originalFileName. One or
     * both of originalFileName and newFileName should contain a path.
     */
    constructor(originalFileName, newFileName) {
        // Each name may be a bare base name; resolve each against the other.
        super(getFullPathName(newFileName, originalFileName));
        this._originalPathName = getFullPathName(originalFileName, newFileName);
    }

    async _applyMainAction() {
        await fsPromises.copyFile(this._originalPathName, this._pathName);
        this._copiedFileName = this._pathName;
    }

    async _finalizeMainAction() {
    }

    async _revertMainAction() {
        if (!await asyncFileExists(this._copiedFileName)) {
            return;
        }
        try {
            await fsPromises.unlink(this._copiedFileName);
        }
        catch (e) {
            // Reverts must not throw.
        }
    }
}
/**
 * Applies and finalizes all the {@FileAction}s in an array of file actions,
 * calling {@link FileAction#revert} on exceptions.
 * <p>
 * {@link FileAction#apply} is called in the order of appearance of the file actions
 * in the list.
 * {@link FileAction#finalize} and {@link FileAction#revert} are called in the
 * reverse order of appearance in the list.
 * @param {FileAction[]} fileActions The array of file actions to be performed in order.
 */
export async function performFileActions(fileActions) {
    try {
        for (const action of fileActions) {
            await action.apply();
        }

        // Finalize (and, on failure, revert) in the reverse of apply order.
        for (let i = fileActions.length - 1; i >= 0; --i) {
            await fileActions[i].finalize();
        }
    }
    catch (err) {
        for (let i = fileActions.length - 1; i >= 0; --i) {
            try {
                await fileActions[i].revert();
            }
            catch (ignored) {
                // revert() implementations must not throw; ignore if one does.
            }
        }
        throw err;
    }
}
|
uwgraphics/PhysicsBasedModeling-Core | PhysBAM/Public_Library/PhysBAM_Geometry/Basic_Geometry/SEGMENT_2D.h | <filename>PhysBAM/Public_Library/PhysBAM_Geometry/Basic_Geometry/SEGMENT_2D.h
//#####################################################################
// Copyright 2003-2007, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Class SEGMENT_2D
//#####################################################################
#ifndef __SEGMENT_2D__
#define __SEGMENT_2D__
#include <PhysBAM_Tools/Arrays/ARRAYS_FORWARD.h>
#include <PhysBAM_Tools/Utilities/TYPE_UTILITIES.h>
#include <PhysBAM_Tools/Vectors/VECTOR_2D.h>
#include <PhysBAM_Geometry/Basic_Geometry/BASIC_GEOMETRY_FORWARD.h>
namespace PhysBAM{
template<class TV> class ORIENTED_BOX;
// 2D line segment between endpoints x1 and x2, with geometric queries used
// throughout PhysBAM (length, normals, barycentric coordinates, collision
// and interaction tests). Array-based overloads take a 2-element container
// of endpoints and are 1-indexed, matching PhysBAM array conventions.
template<class T>
class SEGMENT_2D
{
typedef VECTOR<T,2> TV;
public:
TV x1,x2;
// Default segment: unit segment from the origin along +x.
SEGMENT_2D()
:x1(0,0),x2(1,0)
{}
SEGMENT_2D(const TV& x1_input,const TV& x2_input)
:x1(x1_input),x2(x2_input)
{}
// Construct from a 2-element (1-indexed) array of endpoints.
template<class T_ARRAY>
explicit SEGMENT_2D(const T_ARRAY& X_input)
:x1(X_input(1)),x2(X_input(2))
{
STATIC_ASSERT(T_ARRAY::m==2);
}
T Length() const
{return (x2-x1).Magnitude();}
// "Size" of a 1-simplex is its length.
T Size() const
{return Length();}
template<class T_ARRAY>
static T Size(const T_ARRAY& X)
{STATIC_ASSERT(T_ARRAY::m==2);return (X(2)-X(1)).Magnitude();}
// A segment's size is always non-negative, so signed size == size.
template<class T_ARRAY>
static T Signed_Size(const T_ARRAY& X)
{return Size(X);}
TV Center() const
{return (T).5*(x1+x2);}
// Unit normal obtained by rotating the segment direction clockwise 90 degrees.
static TV Normal(const TV& x1,const TV& x2)
{return (x2-x1).Normalized().Rotate_Clockwise_90();}
TV Normal() const
{return SEGMENT_2D<T>::Normal(x1,x2);}
template<class T_ARRAY>
static TV Normal(const T_ARRAY& X)
{STATIC_ASSERT(T_ARRAY::m==2);return Normal(X(1),X(2));}
// Returns (1-t, t) where t is the projection parameter of location onto the
// segment line; degenerate (zero-length) segments map everything to x1.
static TV Barycentric_Coordinates(const TV& location,const TV& x1,const TV& x2)
{TV v=x2-x1;
T denominator=TV::Dot_Product(v,v);
if(denominator == 0) return TV(1,0); // x1 and x2 are a single point
else{
T t=TV::Dot_Product(location-x1,v)/denominator;
return TV(1-t,t);}}
// Same as Barycentric_Coordinates but with t clamped to [0,1], i.e. the
// weights of the closest point ON the segment.
static TV Clamped_Barycentric_Coordinates(const TV& location,const TV& x1,const TV& x2)
{TV v=x2-x1;
T denominator=TV::Dot_Product(v,v);
if(denominator == 0) return TV(1,0); // x1 and x2 are a single point
else{
T t=clamp(TV::Dot_Product(location-x1,v)/denominator,(T)0,(T)1);
return TV(1-t,t);}}
template<class T_ARRAY>
static TV Clamped_Barycentric_Coordinates(const TV& location,const T_ARRAY& X)
{STATIC_ASSERT(T_ARRAY::m==2);return Clamped_Barycentric_Coordinates(location,X(1),X(2));}
TV Sum_Barycentric_Coordinates(const SEGMENT_2D<T>& embedded_segment) const
{return Barycentric_Coordinates(embedded_segment.x1)+Barycentric_Coordinates(embedded_segment.x2);}
TV Barycentric_Coordinates(const TV& location) const
{return Barycentric_Coordinates(location,x1,x2);}
// Inverse of Barycentric_Coordinates: the point at parameter alpha along x1->x2.
static TV Point_From_Barycentric_Coordinates(const T alpha,const TV& x1,const TV& x2)
{return (x2-x1)*alpha+x1;}
template<class T_ARRAY>
static TV Point_From_Barycentric_Coordinates(const TV& weights,const T_ARRAY& X)
{STATIC_ASSERT(T_ARRAY::m==2);return weights.x*X(1)+weights.y*X(2);}
TV Point_From_Barycentric_Coordinates(const T alpha) const
{return (x2-x1)*alpha+x1;}
template<class T_ARRAY>
static TV Point_From_Barycentric_Coordinates(const T alpha,const T_ARRAY& X)
{STATIC_ASSERT(T_ARRAY::m==2);return Point_From_Barycentric_Coordinates(alpha,X(1),X(2));}
// Convenience wrappers that unpack per-vertex velocities from an indirect array.
bool Point_Face_Collision(const TV& x,const TV& v,const INDIRECT_ARRAY<ARRAY_VIEW<TV>,VECTOR<int,2>&> V_face,const T dt,const T collision_thickness,T& collision_time,TV& normal,
TV& weights,T& relative_speed,const bool exit_early=false) const
{return Point_Face_Collision(x,v,V_face(1),V_face(2),dt,collision_thickness,collision_time,normal,weights,relative_speed,exit_early);}
bool Point_Face_Interaction(const TV& x,const TV& v,const INDIRECT_ARRAY<ARRAY_VIEW<TV>,VECTOR<int,2>&> V_face,const T interaction_distance,T& distance,
TV& interaction_normal,TV& weights,T& relative_speed,const bool allow_negative_weights,const bool exit_early) const
{return Point_Face_Interaction(x,v,V_face(1),V_face(2),interaction_distance,distance,interaction_normal,weights,relative_speed,allow_negative_weights,exit_early);}
RANGE<TV> Bounding_Box() const
{return RANGE<TV>::Bounding_Box(x1,x2);}
// 1-indexed endpoint accessors: X(1) == x1, X(2) == x2.
const TV& X(const int i) const
{assert(1<=i && i<=2);
switch(i){
case 1: return x1;
case 2: return x2;}
PHYSBAM_FATAL_ERROR();}
TV& X(const int i)
{assert(1<=i && i<=2);
switch(i){
case 1: return x1;
case 2: return x2;}
PHYSBAM_FATAL_ERROR();}
//#####################################################################
// Declarations implemented out of line (see SEGMENT_2D.cpp).
bool Segment_Line_Intersection(const TV& point_on_line,const TV& normal_of_line,T &interpolation_fraction) const;
TV Closest_Point_On_Segment(const TV& point) const;
T Distance_From_Point_To_Segment(const TV& point) const;
TV Closest_Point_On_Line(const TV& point) const;
T Distance_From_Point_To_Line(const TV& point) const;
TV Shortest_Vector_Between_Segments(const SEGMENT_2D<T>& segment,T& a,T& b) const;
int Segment_Segment_Interaction(const SEGMENT_2D<T>& segment,const TV& v1,const TV& v2,const TV& v3,const TV& v4,
const T interaction_distance,T& distance,TV& normal,T& a,T& b,T& relative_speed,const T small_number=0) const;
// int Segment_Segment_Collision(const SEGMENT_2D<T>& segment,const TV& v1,const TV& v2,const TV& v3,const TV& v4,const T dt,
// const T collision_thickness,T& collision_time,TV& normal,T& a,T& b,T& relative_speed,const T small_number=0) const;
ORIENTED_BOX<TV> Thickened_Oriented_Box(const T thickness_over_two=0) const;
bool Inside(const TV& point,const T thickness_over_two=0) const;
bool Linear_Point_Inside_Segment(const TV& X,const T thickness_over_2) const;
static POINT_SIMPLEX_COLLISION_TYPE Robust_Point_Segment_Collision(const SEGMENT_2D<T>& initial_segment,const SEGMENT_2D<T>& final_segment,const TV& x,
const TV& final_x,const T dt,const T collision_thickness,T& collision_time,TV& normal,T& collision_alpha,T& relative_speed);
bool Point_Face_Collision(const TV& x,const TV& v,const TV& v1,const TV& v2,const T dt,const T collision_thickness,T& collision_time,TV& normal,TV& weights,T& relative_speed,
const bool exit_early=false) const;
bool Point_Face_Interaction(const TV& x,const T interaction_distance,const bool allow_negative_weights,T& distance) const;
void Point_Face_Interaction_Data(const TV& x,T& distance,TV& interaction_normal,TV& weights,const bool perform_attractions) const;
bool Point_Face_Interaction(const TV& x,const TV& v,const TV& v1,const TV& v2,const T interaction_distance,T& distance,
TV& interaction_normal,TV& weights,T& relative_speed,const bool allow_negative_weights,const bool exit_early) const;
void Clip_To_Box(const RANGE<TV>& box,ARRAY<SEGMENT_2D<T> >& clipped_simplices) const;
static void Cut_With_Hyperplane_And_Discard_Outside_Simplices(const SEGMENT_2D<T>& segment,const LINE_2D<T>& cutting_plane,ARRAY<SEGMENT_2D<T> >& negative_segments);
bool Clip_To_Box(const RANGE<TV>& box,T& a,T& b) const;
//#####################################################################
};
// Streams a segment as "x1, x2" (endpoint pair) for debugging output.
template<class T> std::ostream &operator<<(std::ostream &output,const SEGMENT_2D<T> &segment)
{output << segment.x1 << ", " << segment.x2;return output;}
}
#endif
|
hao-wang/Montage | js-test-suite/testsuite/8ba607bc18042b54f62da7c0c355b21b.js | <reponame>hao-wang/Montage<gh_stars>10-100
// Load the shared test-harness helpers for the JS shell.
load("201224b0d1c296b45befd2285e95dd42.js");
// This test needs off-main-thread (helper thread) support; skip otherwise.
if (helperThreadCount() === 0)
quit();
// Trigger a scheduled GC of zone "s1" from a worker thread.
evalInWorker(`schedulegc("s1");`);
|
yojiwatanabe/kibana | x-pack/test/functional/apps/maps/embeddable/embeddable_state.js | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import expect from '@kbn/expect';
// Functional test: a Maps embeddable on a dashboard must restore the map
// view (center/zoom) that was saved with the dashboard.
export default function ({ getPageObjects, getService }) {
const PageObjects = getPageObjects(['common', 'dashboard', 'maps']);
const kibanaServer = getService('kibanaServer');
const dashboardAddPanel = getService('dashboardAddPanel');
const DASHBOARD_NAME = 'verify_map_embeddable_state';
describe('embeddable state', () => {
before(async () => {
// Point the default index pattern at the test data set.
await kibanaServer.uiSettings.replace({
defaultIndex: 'c698b940-e149-11e8-a35a-370a8516603a',
});
// Build a dashboard with a map panel, set a known view, save it, then
// reload the saved dashboard so only persisted embeddable state remains.
await PageObjects.common.navigateToApp('dashboard');
await PageObjects.dashboard.clickNewDashboard();
await dashboardAddPanel.addEmbeddable('document example', 'map');
await PageObjects.maps.setView(0.0, 0.0, 10);
await PageObjects.dashboard.saveDashboard(DASHBOARD_NAME);
await PageObjects.dashboard.loadSavedDashboard(DASHBOARD_NAME);
});
it('should render map with center and zoom from embeddable state', async () => {
const { lat, lon, zoom } = await PageObjects.maps.getView();
// Rounded comparison tolerates small floating point drift in the view.
expect(Math.round(lat)).to.equal(0);
expect(Math.round(lon)).to.equal(0);
expect(Math.round(zoom)).to.equal(10);
});
});
}
|
bozdogan/bozdogan-in-uni | assignments/duplicate_detector/src/main/java/org/bozdogan/DuplicateDetector.java | <filename>assignments/duplicate_detector/src/main/java/org/bozdogan/DuplicateDetector.java<gh_stars>0
package org.bozdogan;
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import static org.bozdogan.util.Hashing.sha1FromFile;
import static org.bozdogan.util.Hashing.bytes2hex;
public class DuplicateDetector{

    /**
     * Recursively walks a directory tree, hashes every regular file with
     * SHA-1 and prints the groups of files whose contents are identical.
     *
     * @param args optional; {@code args[0]} is the root directory to scan.
     *             Defaults to the current working directory (the original
     *             hard-coded personal path made the tool unusable on any
     *             other machine).
     */
    public static void main(String[] args){
        // SHA-1 hex digest -> all files with that digest.
        // NOTE: every visited path is kept in memory, so a huge tree can
        // still exhaust the heap.
        Map<String, List<Path>> visitList = new HashMap<>();

        // Starting directory, now taken from the command line.
        Path start = Paths.get(args.length > 0 ? args[0] : ".");

        FileVisitor<Path> visitor = new SimpleFileVisitor<Path>(){
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException{
                try{
                    System.out.print(". "); // progress indicator
                    String sum = bytes2hex(sha1FromFile(file));
                    visitList.computeIfAbsent(sum, k -> new LinkedList<>()).add(file);
                } catch(FileSystemException e){
                    // Happens e.g. when another process holds the file open.
                    // (Returned from visitFile, SKIP_SUBTREE behaves like
                    // CONTINUE; the unreadable file is simply skipped.)
                    return FileVisitResult.SKIP_SUBTREE;
                }
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult visitFileFailed(Path file, IOException exc){
                // Report the failure and keep walking.
                if(exc instanceof AccessDeniedException)
                    System.out.println("Access Denied: "+exc.getMessage());
                else if(exc instanceof FileSystemException)
                    System.out.println("File System Issue: "+exc.getMessage());
                else
                    System.out.println("Some IO Error: "+exc.getMessage());
                return FileVisitResult.SKIP_SUBTREE;
            }
        };

        try{
            System.out.println("Processing...");
            Files.walkFileTree(start, visitor);
        } catch(IOException e){ e.printStackTrace(); }

        // Any digest owning more than one path is a duplicate group.
        System.out.println("\n\n -- Duplicate files: \n" +
                "-----------------------------------------");
        for(Map.Entry<String, List<Path>> e: visitList.entrySet()){
            if(e.getValue().size()>1){
                System.out.println(e.getKey()+":");
                for(Path path : e.getValue())
                    System.out.println(" "+path.toString());
            }
        }
    }
}
//bzdgn |
xpharry/leetcode_and_lintcode_battle | leetcode/cpp/159.cpp | <reponame>xpharry/leetcode_and_lintcode_battle
/*
 * Two Pointers (Sliding Window), Hash Table
 *
 */
// version 1: recommended version
// Sliding window with a frequency map: extend the window to the right and,
// whenever it holds more than two distinct characters, shrink it from the
// left until only two remain. The answer is the largest window seen.
class Solution {
public:
    int lengthOfLongestSubstringTwoDistinct(string s) {
        unordered_map<char, int> freq; // char -> count inside the window
        int best = 0;
        int left = 0;
        for (int right = 0; right < (int)s.size(); ++right) {
            ++freq[s[right]];
            while (freq.size() > 2) {
                if (--freq[s[left]] == 0) {
                    freq.erase(s[left]);
                }
                ++left;
            }
            best = max(best, right - left + 1);
        }
        return best;
    }
};
// version 2:
// Alternative sliding window: for each left index i, first extend `end` as
// far as possible while at most two distinct characters are held, then back
// out the overflowing third character before measuring the window.
class Solution {
public:
int lengthOfLongestSubstringTwoDistinct(string s) {
unordered_map<char, int> m; // char -> count inside the window [i, end)
int n = s.size();
int end = 0, max_len = 0;
for(int i = 0; i < n; i++) {
// Grow until a third distinct character slips in (or input ends).
while(end < n && m.size() <= 2) {
m[s[end]]++;
end++;
}
// If a third character was just added (its count is 1), remove it again.
if(m.size() == 3) {
end--;
m.erase(s[end]);
}
max_len = max(max_len, end - i);
// Slide the left edge: drop s[i] from the window.
m[s[i]]--;
if(m[s[i]] == 0) m.erase(s[i]);
}
return max_len;
}
};
// Conlusion:
//
// Reference:
// Grandyang: http://www.cnblogs.com/grandyang/p/5185561.html
// http://www.cnblogs.com/grandyang/p/5351347.html
|
nvitya/univio | device/gendev/board/AE5X-64/board_traces.cpp | <filename>device/gendev/board/AE5X-64/board_traces.cpp<gh_stars>0
/*
* file: board/AE5X-64/board_traces.cpp
* brief: Board specific stuff
* version: 1.00
* date: 2021-11-07
* authors: nvitya
*/
#include "board_pins.h"
// Configures the pin and peripheral used for console trace output on this board.
void board_traces_init()
{
// console (trace) UART: route port A pin 0 to alternate function D -- SERCOM0 pad 0
hwpinctrl.PinSetup(PORTNUM_A, 0, PINCFG_OUTPUT | PINCFG_AF_D); // SERCOM0[0]
// Initialize the trace UART on unit 1 -- TODO confirm which peripheral instance this selects
traceuart.Init(1);
}
|
bobexchen/interest | interest-server/src/main/java/com/interest/model/entity/PostCardEntity.java | <gh_stars>100-1000
package com.interest.model.entity;
import lombok.Data;
/**
 * Forum post card entity (Lombok {@code @Data} generates getters, setters,
 * equals/hashCode and toString).
 *
 * @author wanghuan
 */
@Data
public class PostCardEntity {
// Primary key.
private Integer id;
// Post title.
private String title;
// Post body content.
private String content;
// Id of the interest group/topic this post belongs to.
private Integer interestid;
// Creation timestamp -- NOTE(review): stored as a String; confirm the expected format.
private String createtime;
// Timestamp of the latest reply, also stored as a String.
private String replytime;
// Id of the authoring user.
private Integer userid;
}
|
shihab4t/Competitive-Programming | Online-Judges/HackerRank/Data-Structures/Linked-Lists/Insert_a_node_at_the_head_of_a_linked_list.c | <reponame>shihab4t/Competitive-Programming<filename>Online-Judges/HackerRank/Data-Structures/Linked-Lists/Insert_a_node_at_the_head_of_a_linked_list.c
#include <bits/stdc++.h>
using namespace std;
// One node of a singly linked list: a payload plus a link to its successor.
class SinglyLinkedListNode {
public:
    int data;
    SinglyLinkedListNode *next;

    // Constructs an isolated node holding node_data.
    SinglyLinkedListNode(int node_data) : data(node_data), next(nullptr) {}
};
class SinglyLinkedList {
public:
SinglyLinkedListNode *head;
SinglyLinkedListNode *tail;
SinglyLinkedList() {
this->head = nullptr;
this->tail = nullptr;
}
};
// Writes each node's data to fout, separated by sep (no trailing separator;
// writes nothing for an empty list).
void print_singly_linked_list(SinglyLinkedListNode* node, string sep, ofstream& fout) {
    for (; node; node = node->next) {
        fout << node->data;
        if (node->next) {
            fout << sep;
        }
    }
}
// Releases every node in the list.
// Fix: nodes are allocated with `new` (see insertNodeAtHead), so they must
// be released with `delete`; the original called free(), which is undefined
// behavior for new-allocated objects and skips the destructor.
void free_singly_linked_list(SinglyLinkedListNode* node) {
    while (node) {
        SinglyLinkedListNode* next = node->next;
        delete node;
        node = next;
    }
}
// Complete the insertNodeAtHead function below.

/*
 * For your reference:
 *
 * SinglyLinkedListNode {
 *     int data;
 *     SinglyLinkedListNode* next;
 * };
 *
 */

// Allocates a new node carrying `data` and links it in front of the current
// head. Returns the new head (also correct when llist is null/empty).
SinglyLinkedListNode* insertNodeAtHead(SinglyLinkedListNode* llist, int data) {
    SinglyLinkedListNode* newHead = new SinglyLinkedListNode(data);
    newHead->next = llist;
    return newHead;
}
// Reads a node count and that many values from stdin, builds the list by
// repeated head insertion (so the output order is the reverse of the input),
// and writes the list, one value per line, to the file named by OUTPUT_PATH.
int main()
{
ofstream fout(getenv("OUTPUT_PATH"));
SinglyLinkedList* llist = new SinglyLinkedList();
int llist_count;
cin >> llist_count;
cin.ignore(numeric_limits<streamsize>::max(), '\n');
for (int i = 0; i < llist_count; i++) {
int llist_item;
cin >> llist_item;
cin.ignore(numeric_limits<streamsize>::max(), '\n');
// Each insertion places the new value at the front of the list.
SinglyLinkedListNode* llist_head = insertNodeAtHead(llist->head, llist_item);
llist->head = llist_head;
}
print_singly_linked_list(llist->head, "\n", fout);
fout << "\n";
free_singly_linked_list(llist->head);
fout.close();
return 0;
}
|
DiracKeeko/js-financial-tools | src/index.js | <reponame>DiracKeeko/js-financial-tools
import * as calc from "./calc";
import * as display from "./display";
import * as util from "./util";
// Aggregate every helper exported by the calc, display and util modules into
// one flat default-export object. NOTE: a key exported by more than one
// module is silently overridden in spread order (calc < display < util).
export default {
...calc,
...display,
...util
}
BluTree/Ruken | Ruken/Source/Include/ECS/ComponentField.hpp | <gh_stars>1-10
/*
* MIT License
*
* Copyright (c) 2019 <NAME>, <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "Build/Namespace.hpp"
BEGIN_RUKEN_NAMESPACE
/**
* \brief Defines a field (or variable) for a component.
* In order to define a field you HAVE to inherit from this class.
* The name of the inheriting class will then be the way to refer to your field.
* Since this name might not be unique, it is recommended to put these classes in unique namespaces for each component to avoid interferences.
* \warning We strongly discourage the reuse of the field across multiple components even if this is possible.
* \warning A field can only be used once per component, doing otherwise will result in undefined behaviors
* \tparam TDataType Type of the field
*/
template <typename TDataType>
struct ComponentField
{
    // Convenience alias so generic code can recover the field's data type
    // as SomeField::Type without re-spelling the template argument.
    using Type = TDataType;
};
/**
* \brief Defines a component field
* \param in_field_name Name of the field class
* \param ... Type of the component field
*/
#define RUKEN_DEFINE_COMPONENT_FIELD(in_field_name, ...) struct in_field_name: ComponentField<__VA_ARGS__> {}
END_RUKEN_NAMESPACE |
kborkows/libiqxmlrpc | libiqxmlrpc/socket.cc | <gh_stars>10-100
// Libiqxmlrpc - an object-oriented XML-RPC solution.
// Copyright (C) 2011 <NAME>
#include <errno.h>
#include <boost/cerrno.hpp>
#include "socket.h"
#include "net_except.h"
#if _MSC_VER >= 1700
#include <ws2tcpip.h>
#endif
using namespace iqnet;
Socket::Socket()
{
  // Create a blocking IPv4 TCP stream socket; failure is fatal.
  if( (sock = socket( PF_INET, SOCK_STREAM, IPPROTO_TCP )) == -1 )
    throw network_error( "Socket::Socket" );

#ifndef WIN32
  {
    // Allow quick rebinding of the address after a restart.
    // Return value is deliberately ignored: best-effort tuning only.
    int enable = 1;
    setsockopt( sock, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable) );
  }
#endif //WIN32

#if defined(__APPLE__)
  {
    // macOS lacks MSG_NOSIGNAL (see IQXMLRPC_NOPIPE below), so suppress
    // SIGPIPE at the socket level instead. Best-effort as well.
    int enable = 1;
    setsockopt( sock, SOL_SOCKET, SO_NOSIGPIPE, &enable, sizeof(enable) );
  }
#endif
}
Socket::Socket( Socket::Handler h, const Inet_addr& addr ):
sock(h),
peer(addr)
{
}
void Socket::shutdown()
{
::shutdown( sock, 2 );
}
void Socket::close()
{
#ifdef WIN32
closesocket(sock);
#else
::close( sock );
#endif //WIN32
}
// Switch the socket's blocking mode.
void Socket::set_non_blocking( bool flag )
{
#ifdef WIN32
  // Windows: FIONBIO can both set (1) and clear (0) non-blocking mode.
  unsigned long f = flag ? 1 : 0;
  if( ioctlsocket(sock, FIONBIO, &f) != 0 )
    throw network_error( "Socket::set_non_blocking");
#else
  // NOTE(review): the POSIX branch only ever SETS non-blocking mode;
  // calling with flag == false is a silent no-op and never clears O_NDELAY.
  // Also F_SETFL with just O_NDELAY overwrites any other file status flags;
  // the usual idiom is F_GETFL then F_SETFL with the bit OR-ed in.
  // Confirm whether any caller relies on clearing the flag.
  if( !flag )
    return;

  if( fcntl( sock, F_SETFL, O_NDELAY ) == -1 )
    throw network_error( "Socket::set_non_blocking" );
#endif //WIN32
}
#if defined(MSG_NOSIGNAL)
#define IQXMLRPC_NOPIPE MSG_NOSIGNAL
#else
#define IQXMLRPC_NOPIPE 0
#endif
// Send up to "len" bytes. IQXMLRPC_NOPIPE expands to MSG_NOSIGNAL where
// available so a closed peer surfaces as an exception, not SIGPIPE.
size_t Socket::send( const char* data, size_t len )
{
  int ret = ::send( sock, data, static_cast<int>(len), IQXMLRPC_NOPIPE);

  if( ret == -1 )
    throw network_error( "Socket::send" );

  // Per send(2) this may be a partial count; callers must loop if they
  // need the whole buffer transmitted.
  return static_cast<size_t>(ret);
}
// Receive up to "len" bytes into "buf".
// Returns the number of bytes actually read; 0 indicates an orderly
// shutdown by the peer (per recv(2)).
size_t Socket::recv( char* buf, size_t len )
{
  int ret = ::recv( sock, buf, static_cast<int>(len), 0 );

  if( ret == -1 )
    throw network_error( "Socket::recv" );

  return static_cast<size_t>(ret);
}
// Send a final buffer, then half-close the sending direction.
void Socket::send_shutdown( const char* data, size_t len )
{
  send(data, len);
  // Enable SO_LINGER with a zero timeout so a later close() will not block.
  // NOTE(review): a zero linger interval typically causes an abortive close
  // (RST) that can discard untransmitted data -- confirm this is intended.
  const struct linger ling = {1, 0};
  ::setsockopt( sock, SOL_SOCKET, SO_LINGER, reinterpret_cast<const char*>(&ling), sizeof(ling) );
  // 1 == SHUT_WR: no more sends, reads remain possible.
  ::shutdown( sock, 1 );
}
void Socket::bind( const Inet_addr& addr )
{
const sockaddr* saddr = reinterpret_cast<const sockaddr*>(addr.get_sockaddr());
if( ::bind( sock, saddr, sizeof(sockaddr_in) ) == -1 )
throw network_error( "Socket::bind" );
}
void Socket::listen( unsigned blog )
{
if( ::listen( sock, blog ) == -1 )
throw network_error( "Socket::listen" );
}
// Accept one pending connection and wrap it, together with the peer's
// address, in a new Socket object.
Socket Socket::accept()
{
  sockaddr_in addr;
  socklen_t len = sizeof(sockaddr_in);
  Handler new_sock = ::accept( sock, reinterpret_cast<sockaddr*>(&addr), &len );

  // NOTE(review): on Windows a failed accept() returns INVALID_SOCKET and
  // SOCKET is unsigned, so the comparison with -1 relies on wraparound --
  // verify this compiles/behaves as intended on that platform.
  if( new_sock == -1 )
    throw network_error( "Socket::accept" );

  return Socket( new_sock, Inet_addr(addr) );
}
// Initiate a connection to "peer_addr".
// Returns true when the connection completed synchronously, false when the
// socket is non-blocking and the connect is still in progress (the caller
// must then wait for writability). Any other failure throws.
bool Socket::connect( const iqnet::Inet_addr& peer_addr )
{
  const sockaddr* saddr = reinterpret_cast<const sockaddr*>(peer_addr.get_sockaddr());
  int code = ::connect(sock, saddr, sizeof(sockaddr_in));

  bool wouldblock = false;
  if( code == -1 ) {
#ifndef WIN32
    // POSIX signals an in-progress non-blocking connect via EINPROGRESS...
    wouldblock = errno == EINPROGRESS;
#else
    // ...while Winsock uses WSAEWOULDBLOCK.
    wouldblock = get_last_error() == WSAEWOULDBLOCK;
#endif

    if (!wouldblock)
      throw network_error( "Socket::connect" );
  }

  // Remember the peer address in both the completed and in-progress cases.
  peer = peer_addr;
  return !wouldblock;
}
Inet_addr Socket::get_addr() const
{
sockaddr_in saddr;
socklen_t saddr_len = sizeof(saddr);
if (::getsockname(sock, reinterpret_cast<sockaddr*>(&saddr), &saddr_len) == -1)
throw network_error( "Socket::get_addr" );
return Inet_addr(reinterpret_cast<const sockaddr_in&>(saddr));
}
// Return the most recent error code for this socket.
int Socket::get_last_error()
{
  int err = 0;
#ifndef WIN32
  // POSIX: read the socket's pending error status via SO_ERROR
  // (reading it also clears the pending error).
  socklen_t int_sz = sizeof(err);
  ::getsockopt( sock, SOL_SOCKET, SO_ERROR, &err, &int_sz );
#else
  // Windows: WSAGetLastError() is the calling thread's last error code,
  // not a per-socket status.
  err=WSAGetLastError();
#endif
  return err;
}
|
opengauss-mirror/openGauss-graph | src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp | /* -------------------------------------------------------------------------
*
* hashdesc.cpp
* rmgr descriptor routines for access/hash/hash.cpp
*
* Portions Copyright (c) 2020 Huawei Technologies Co.,Ltd.
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/gausskernel/storage/access/rmgrdesc/hashdesc.cpp
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "knl/knl_variable.h"
#include "access/hash.h"
/*
 * hash_desc
 *    rmgr callback that would append a human-readable description of a
 *    hash-index WAL record to "buf". Intentionally a no-op stub in this
 *    build: no description output is produced for hash rmgr records.
 */
void hash_desc(StringInfo buf, XLogReaderState *record)
{
    /* nothing to do */
}
Jahhow/Camera-Roll-Android-App | app/src/main/java/us/koller/cameraroll/data/fileOperations/Move.java | <reponame>Jahhow/Camera-Roll-Android-App
package us.koller.cameraroll.data.fileOperations;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.os.Build;
import android.os.Environment;
import java.io.File;
import java.util.ArrayList;
import us.koller.cameraroll.R;
import us.koller.cameraroll.data.models.File_POJO;
/**
 * {@link FileOperation} that moves files and folders into a target directory.
 * <p>
 * Moves within internal storage are performed with a plain rename; when the
 * source or the target is on removable storage the move falls back to
 * copy-then-delete using a document-tree {@link Uri} for write access.
 * The paths of all successfully moved files are reported back through
 * {@link #getDoneIntent()} under {@link #MOVED_FILES_PATHS}.
 */
public class Move extends FileOperation {

    public static final String TAG = Move.class.getSimpleName();

    /** Intent extra key for the list of source paths that were successfully moved. */
    public static final String MOVED_FILES_PATHS = "MOVED_FILES_PATHS";

    // Collected during execute(); consumed by getDoneIntent().
    private ArrayList<String> movedFilePaths;

    @Override
    String getNotificationTitle() {
        return getString(R.string.move);
    }

    @Override
    public int getNotificationSmallIconRes() {
        return R.drawable.ic_folder_move_white;
    }

    @Override
    public void execute(Intent workIntent) {
        File_POJO[] files = getFiles(workIntent);
        File_POJO target = workIntent.getParcelableExtra(TARGET);
        movedFilePaths = new ArrayList<>();

        if (target == null) {
            return;
        }

        int successCount = 0;
        onProgress(successCount, files.length);

        // A target on removable storage forces the copy-and-delete path below.
        boolean movingOntoRemovableStorage = Util.isOnRemovableStorage(target.getPath());

        for (int i = files.length - 1; i >= 0; i--) {
            boolean movingFromRemovableStorage = Util.isOnRemovableStorage(files[i].getPath());
            boolean result;
            if (movingFromRemovableStorage || movingOntoRemovableStorage) {
                // Removable storage needs a document-tree Uri for write access.
                Uri treeUri = movingFromRemovableStorage
                        ? getTreeUri(workIntent, files[i].getPath())
                        : getTreeUri(workIntent, target.getPath());
                if (treeUri == null) {
                    // No permission for the removable volume; abort the batch.
                    return;
                }
                result = copyAndDeleteFiles(getApplicationContext(), treeUri,
                        files[i].getPath(), target.getPath());
            } else {
                result = moveFile(files[i].getPath(), target.getPath());
            }

            // Merged the original's two consecutive "if (result)" blocks.
            if (result) {
                movedFilePaths.add(files[i].getPath());
                successCount++;
            }
            onProgress(successCount, files.length);
        }

        if (successCount == 0) {
            onProgress(successCount, files.length);
        }
    }

    @Override
    public int getType() {
        return FileOperation.MOVE;
    }

    /**
     * Moves a single file via rename and queues both the old and the new
     * child paths for a media re-scan.
     *
     * @param path        source path
     * @param destination target directory path
     * @return true if the rename succeeded
     */
    private boolean moveFile(String path, String destination) {
        // Capture child paths before the move so the old locations get re-scanned.
        ArrayList<String> oldPaths = Util.getAllChildPaths(new ArrayList<>(), path);

        File file = new File(path);
        File dstFile = new File(destination, file.getName());
        if (file.equals(dstFile)) {
            // Source and destination are identical; nothing to do.
            return false;
        }

        boolean success = renameFile(file, dstFile);

        // Re-scan all affected paths, old and new.
        ArrayList<String> newPaths = Util.getAllChildPaths(new ArrayList<>(), dstFile.getPath());
        addPathsToScan(oldPaths);
        addPathsToScan(newPaths);

        return success;
    }

    /**
     * Copies the file/folder to the destination and, only on success, deletes
     * the source. Used whenever source or destination is on removable storage.
     */
    private boolean copyAndDeleteFiles(Context context, Uri treeUri,
                                       String path, String destination) {
        Copy copy = new Copy();
        boolean result;
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP &&
                Environment.isExternalStorageRemovable(new File(path))) {
            result = copy.copyFilesRecursively(context, null,
                    path, destination, true);
        } else {
            result = copy.copyFilesRecursively(context, treeUri,
                    path, destination, true);
        }
        addPathsToScan(copy.getPathsToScan());

        if (result) {
            // Delete the source only once the copy is known to have succeeded.
            Delete delete = new Delete();
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP
                    && Environment.isExternalStorageRemovable(new File(path))) {
                result = delete.deleteFileOnRemovableStorage(context, treeUri, path);
            } else {
                result = delete.deleteFile(path);
            }
            addPathsToScan(delete.getPathsToScan());
        }
        return result;
    }

    /** Thin wrapper around {@link File#renameTo(File)}. */
    private static boolean renameFile(File file, File newFile) {
        return file.renameTo(newFile);
    }

    @Override
    public Intent getDoneIntent() {
        Intent intent = super.getDoneIntent();
        intent.putExtra(MOVED_FILES_PATHS, movedFilePaths);
        return intent;
    }
}
|
test-wiz-sec/pulumi-azure-nextgen | sdk/go/azure/sql/v20170301preview/job.go | <reponame>test-wiz-sec/pulumi-azure-nextgen
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20170301preview
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// A job.
type Job struct {
pulumi.CustomResourceState
// User-defined description of the job.
Description pulumi.StringPtrOutput `pulumi:"description"`
// Resource name.
Name pulumi.StringOutput `pulumi:"name"`
// Schedule properties of the job.
Schedule JobScheduleResponsePtrOutput `pulumi:"schedule"`
// Resource type.
Type pulumi.StringOutput `pulumi:"type"`
// The job version number.
Version pulumi.IntOutput `pulumi:"version"`
}
// NewJob registers a new resource with the given unique name, arguments, and options.
func NewJob(ctx *pulumi.Context,
name string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {
if args == nil || args.JobAgentName == nil {
return nil, errors.New("missing required argument 'JobAgentName'")
}
if args == nil || args.JobName == nil {
return nil, errors.New("missing required argument 'JobName'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil || args.ServerName == nil {
return nil, errors.New("missing required argument 'ServerName'")
}
if args == nil {
args = &JobArgs{}
}
var resource Job
err := ctx.RegisterResource("azure-nextgen:sql/v20170301preview:Job", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetJob gets an existing Job resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetJob(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *JobState, opts ...pulumi.ResourceOption) (*Job, error) {
var resource Job
err := ctx.ReadResource("azure-nextgen:sql/v20170301preview:Job", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Job resources.
type jobState struct {
// User-defined description of the job.
Description *string `pulumi:"description"`
// Resource name.
Name *string `pulumi:"name"`
// Schedule properties of the job.
Schedule *JobScheduleResponse `pulumi:"schedule"`
// Resource type.
Type *string `pulumi:"type"`
// The job version number.
Version *int `pulumi:"version"`
}
type JobState struct {
// User-defined description of the job.
Description pulumi.StringPtrInput
// Resource name.
Name pulumi.StringPtrInput
// Schedule properties of the job.
Schedule JobScheduleResponsePtrInput
// Resource type.
Type pulumi.StringPtrInput
// The job version number.
Version pulumi.IntPtrInput
}
func (JobState) ElementType() reflect.Type {
return reflect.TypeOf((*jobState)(nil)).Elem()
}
type jobArgs struct {
// User-defined description of the job.
Description *string `pulumi:"description"`
// The name of the job agent.
JobAgentName string `pulumi:"jobAgentName"`
// The name of the job to get.
JobName string `pulumi:"jobName"`
// The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
ResourceGroupName string `pulumi:"resourceGroupName"`
// Schedule properties of the job.
Schedule *JobSchedule `pulumi:"schedule"`
// The name of the server.
ServerName string `pulumi:"serverName"`
}
// The set of arguments for constructing a Job resource.
type JobArgs struct {
// User-defined description of the job.
Description pulumi.StringPtrInput
// The name of the job agent.
JobAgentName pulumi.StringInput
// The name of the job to get.
JobName pulumi.StringInput
// The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
ResourceGroupName pulumi.StringInput
// Schedule properties of the job.
Schedule JobSchedulePtrInput
// The name of the server.
ServerName pulumi.StringInput
}
func (JobArgs) ElementType() reflect.Type {
return reflect.TypeOf((*jobArgs)(nil)).Elem()
}
|
timgates42/processing.py | mode/examples/Basics/Typography/FiveWaysOfWritingText/FiveWaysOfWritingText.pyde | # Demonstration of the 5 ways of calling text() in Python mode.
def setup():
    # The P3D renderer is required for the 3D text() calls (x, y, z) in draw().
    size(500, 500, P3D)
def draw():
    # White background, black fill for the text, no outlines.
    background(255)
    fill(0)
    noStroke()
    # text(string, x, y)
    text("Shillings", 10, 12)
    # text(num, x, y)
    text(12.3, 10, 24)
    # text(string, x, y, z): the z coordinate oscillates with time,
    # moving the word toward and away from the camera.
    text("Pence", 10, 36, 100 * cos(millis() / 300.0))
    # text(num, x, y, z)
    text(PI, 10, 48, 100 * sin(millis() / 300.0))
    # text(string, left, top, right, bottom): wraps the string inside
    # the given rectangle.
    text(
        "Shasta is a delicious, and sometimes underrated, "
        "soft drink. I love it, and I drink it virtually "
        "every day. Yep.\n\nIf you don't like Shasta, then "
        "I don't like you.", 10, 60, 80, 300)
    # Show the same box.
    stroke('#FF0000')
    noFill()
    rect(10, 60, 80, 300)
|
phylame/pmm | pbm/src/main/java/pmm/pbm/data/dao/iface/GenreDAO.java | <reponame>phylame/pmm
package pmm.pbm.data.dao.iface;
import java.util.List;
import org.springframework.stereotype.Repository;
import pmm.pbm.service.params.ListGenreDTO;
import pmm.pbm.service.results.GenreVO;
/**
 * Data-access interface for book genres.
 */
@Repository
public interface GenreDAO {

    /**
     * Lists genres matching the given criteria.
     *
     * @param dto filter/paging parameters for the lookup
     * @return the matching genres
     */
    List<GenreVO> getGenres(ListGenreDTO dto);
}
|
unratito/ceylon.language | runtime-js/jsint/OpenFunction/getParameterDeclaration.js | function (nm){
var pd=this.parameterDeclarations;
for (var i=0; i < pd.size; i++) {
if (nm.equals(pd[i].name))return pd[i];
}
return null;
}
|
wlchs/ews-javascript-api | js/MailboxSearch/SearchMailboxesParameters.js | <gh_stars>0
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var SearchPageDirection_1 = require("../Enumerations/SearchPageDirection");
var SearchResultType_1 = require("../Enumerations/SearchResultType");
var SortDirection_1 = require("../Enumerations/SortDirection");
/**
* Represents search mailbox parameters.
*
* @sealed
*/
var SearchMailboxesParameters = (function () {
function SearchMailboxesParameters() {
/**
* Search queries
*/
this.SearchQueries = null;
/**
* Result type
*/
this.ResultType = SearchResultType_1.SearchResultType.PreviewOnly;
/**
* Sort by property
*/
this.SortBy = null;
/**
* Sort direction
*/
this.SortOrder = SortDirection_1.SortDirection.Ascending;
/**
* Perform deduplication
*/
this.PerformDeduplication = false;
/**
* Page size
*/
this.PageSize = 0;
/**
* Search page direction
*/
this.PageDirection = SearchPageDirection_1.SearchPageDirection.Next;
/**
* Page item reference
*/
this.PageItemReference = null;
/**
* Preview item response shape
*/
this.PreviewItemResponseShape = null;
/**
* Query language
*/
this.Language = null;
}
return SearchMailboxesParameters;
}());
exports.SearchMailboxesParameters = SearchMailboxesParameters;
|
lechuongit/alibaba-cloud-sdk-go | services/sddp/struct_data_limit_list_inner.go | <gh_stars>1000+
package sddp
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DataLimitListInner is a nested struct in sddp response
type DataLimitListInner struct {
RegionId string `json:"RegionId" xml:"RegionId"`
LocalName string `json:"LocalName" xml:"LocalName"`
ParentId string `json:"ParentId" xml:"ParentId"`
Id int64 `json:"Id" xml:"Id"`
UserName string `json:"UserName" xml:"UserName"`
GmtCreate int64 `json:"GmtCreate" xml:"GmtCreate"`
Connector string `json:"Connector" xml:"Connector"`
CheckStatus int `json:"CheckStatus" xml:"CheckStatus"`
CheckStatusName string `json:"CheckStatusName" xml:"CheckStatusName"`
ResourceType int64 `json:"ResourceType" xml:"ResourceType"`
ResourceTypeCode string `json:"ResourceTypeCode" xml:"ResourceTypeCode"`
AuditStatus int `json:"AuditStatus" xml:"AuditStatus"`
LogStoreDay int `json:"LogStoreDay" xml:"LogStoreDay"`
Enable int `json:"Enable" xml:"Enable"`
AutoScan int `json:"AutoScan" xml:"AutoScan"`
EngineType string `json:"EngineType" xml:"EngineType"`
ProcessStatus int `json:"ProcessStatus" xml:"ProcessStatus"`
ProcessTotalCount int `json:"ProcessTotalCount" xml:"ProcessTotalCount"`
TotalCount int `json:"TotalCount" xml:"TotalCount"`
LastFinishedTime int64 `json:"LastFinishedTime" xml:"LastFinishedTime"`
ErrorCode string `json:"ErrorCode" xml:"ErrorCode"`
ErrorMessage string `json:"ErrorMessage" xml:"ErrorMessage"`
Port int `json:"Port" xml:"Port"`
DbVersion string `json:"DbVersion" xml:"DbVersion"`
SupportDatamask bool `json:"SupportDatamask" xml:"SupportDatamask"`
SupportScan bool `json:"SupportScan" xml:"SupportScan"`
SupportAudit bool `json:"SupportAudit" xml:"SupportAudit"`
DatamaskStatus int `json:"DatamaskStatus" xml:"DatamaskStatus"`
SamplingSize int `json:"SamplingSize" xml:"SamplingSize"`
NextStartTime int64 `json:"NextStartTime" xml:"NextStartTime"`
SupportOcr bool `json:"SupportOcr" xml:"SupportOcr"`
OcrStatus int `json:"OcrStatus" xml:"OcrStatus"`
AgentId string `json:"AgentId" xml:"AgentId"`
AgentState int `json:"AgentState" xml:"AgentState"`
SupportAgentInstall bool `json:"SupportAgentInstall" xml:"SupportAgentInstall"`
AutoCreateAccount bool `json:"AutoCreateAccount" xml:"AutoCreateAccount"`
EventStatus int `json:"EventStatus" xml:"EventStatus"`
SupportEvent bool `json:"SupportEvent" xml:"SupportEvent"`
}
|
emilyparkes/event-web | client/reducers/public-e/public-events.js | import {
RECEIVE_PUBLIC_EVENTS
// ,
// RECEIVE_PUBLIC_EVENT_BY_NAME
} from '../../actions/public-events'
const initialState = []
// Reducer for the list of public events.
// Only RECEIVE_PUBLIC_EVENTS replaces the state; every other action
// leaves the current state untouched.
const publicEvents = (state = initialState, action) =>
  action.type === RECEIVE_PUBLIC_EVENTS ? action.publicEvents : state
export default publicEvents
|
eengle/s2-geometry-library-java | tests/com/google/common/geometry/S2ClosestPointQueryTest.java | /*
* Copyright 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.geometry;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.common.geometry.S2ClosestPointQuery.Result;
import java.util.List;
/** Verifies {@link S2ClosestPointQuery}. */
@GwtCompatible
public class S2ClosestPointQueryTest extends GeometryTestCase {
/** The approximate radius of S2Cap from which query points are chosen. */
protected static final S1Angle QUERY_RADIUS = kmToAngle(10);
/**
* An approximate bound on the distance measurement error for "reasonable" distances (say, less
* than Pi/2) due to using S1ChordAngle.
*/
private static final double MAX_CHORD_ANGLE_ERROR = 1e-15;
  public void testNoPoints() {
    // Queries against an empty index must return no results rather than fail.
    S2PointIndex<Integer> index = new S2PointIndex<>();
    S2ClosestPointQuery<Integer> query = new S2ClosestPointQuery<>(index);
    assertNull(query.findClosestPoint(S2Point.X_POS));
    assertEquals(0, query.findClosestPoints(S2Point.X_POS).size());
  }
  public void testManyDuplicatePoints() {
    // All entries share the same point but carry distinct data values; the
    // query must report every one of them, not collapse duplicates.
    int numPoints = 10000;
    S2PointIndex<Integer> index = new S2PointIndex<>();
    for (int i = 0; i < numPoints; ++i) {
      index.add(S2Point.X_POS, i);
    }
    S2ClosestPointQuery<Integer> query = new S2ClosestPointQuery<>(index);
    assertEquals(numPoints, new PointTarget(S2Point.X_POS).findClosestPoints(query).size());
  }
@GwtIncompatible("Edge tests require Real.java, which is incompatible")
public void testPoints() {
int numIndexes = 10;
int numPoints = 1000;
int numQueries = 50;
for (PointFactory factory : PointFactory.values()) {
checkFactory(factory, numIndexes, numPoints, numQueries);
}
}
/**
* Check that result set "x" contains all the expected results from "y", and does not include any
* duplicate results.
*/
private static void checkResultSet(
List<Result<Integer>> x,
List<Result<Integer>> y,
int maxSize,
S1Angle maxDistance,
S1Angle maxError,
S1Angle maxPruningError,
String label) {
// Results should be sorted by distance.
assertTrue(Ordering.natural().reverse().isOrdered(x));
// Make sure there are no duplicate values.
assertEquals("Result set contains duplicates", Sets.newHashSet(x).size(), x.size());
// Result set X should contain all the items from U whose distance is less
// than "limit" computed below.
double limit = 0;
if (x.size() < maxSize) {
// Result set X was not limited by "max_size", so it should contain all
// the items up to "max_distance", except that a few items right near the
// distance limit may be missed because the distance measurements used for
// pruning S2Cells are not conservative.
limit = maxDistance.radians() - maxPruningError.radians();
} else if (!x.isEmpty()) {
// Result set X contains only the closest "max_size" items, to within a
// tolerance of "max_error + max_pruning_error".
limit =
x.get(x.size() - 1).distance().toAngle().radians()
- maxError.radians()
- maxPruningError.radians();
}
for (int i = 0; i < y.size(); ++i) {
Result<Integer> item = y.get(i);
if (item.distance().toAngle().radians() < limit) {
assertEquals(label + " " + item, 1, Iterables.frequency(x, item));
}
}
}
/**
* Compares two sets of "closest" items, where "expected" is computed via brute force (i.e.,
* considering every possible candidate) and "actual" is computed using a spatial data structure.
* Here "maxSize" is a bound on the maximum number of items, "maxDistance" is a limit on the
* distance to any item, and "maxError" is the maximum error allowed when selecting which items
* are closest.
*/
private static void compareResults(
List<Result<Integer>> expected,
List<Result<Integer>> actual,
int maxSize,
S1Angle maxDistance,
S1Angle maxError) {
S1Angle maxPruningError = S1Angle.radians(1e-15);
checkResultSet(actual, expected, maxSize, maxDistance, maxError, maxPruningError, "Missing");
checkResultSet(expected, actual, maxSize, maxDistance, maxError, S1Angle.ZERO, "Extra");
}
/** An abstract class that adds points to an S2PointIndex for benchmarking. */
protected enum PointFactory {
/**
* Generator for points regularly spaced along a circle. The circle is centered within the query
* cap and occupies 25% of its area, so that random query points have a 25% chance of being
* inside the circle.
*
* <p>Points along a circle are nearly the worst case for distance calculations, since many
* points are nearly equidistant from any query point that is not immediately adjacent to the
* circle.
*/
CIRCLE {
@Override
public List<S2Point> createPoints(
S2ClosestPointQueryTest helper, S2Cap queryCap, int numPoints) {
return S2Loop.makeRegularVertices(
queryCap.axis(), S1Angle.radians(0.5 * queryCap.angle().radians()), numPoints);
}
},
/** Generator for points of a fractal whose convex hull approximately matches the query cap. */
FRACTAL {
@Override
public List<S2Point> createPoints(
S2ClosestPointQueryTest helper, S2Cap queryCap, int numPoints) {
S2FractalBuilder builder = new S2FractalBuilder(helper.rand);
builder.setLevelForApproxMaxEdges(numPoints);
builder.setFractalDimension(1.5);
return builder.makeVertices(helper.getRandomFrameAt(queryCap.axis()), queryCap.angle());
}
},
/** Generator for points on a square grid that includes the entire query cap. */
GRID {
@Override
public List<S2Point> createPoints(
S2ClosestPointQueryTest helper, S2Cap queryCap, int numPoints) {
int sqrtNumPoints = (int) Math.ceil(Math.sqrt(numPoints));
Matrix3x3 frame = helper.getRandomFrameAt(queryCap.axis());
double radius = queryCap.angle().radians();
double spacing = 2 * radius / sqrtNumPoints;
List<S2Point> points = Lists.newArrayList();
for (int i = 0; i < sqrtNumPoints; ++i) {
for (int j = 0; j < sqrtNumPoints; ++j) {
points.add(
S2.fromFrame(
frame,
S2Point.normalize(
new S2Point(
Math.tan((i + 0.5) * spacing - radius),
Math.tan((j + 0.5) * spacing - radius),
1.0))));
}
}
return points;
}
};
/**
* Returns a list of approximately {@code numPoints} random points sampled from {@code queryCap}
* by some geometric strategy. Typically the indexed points will occupy some fraction of this
* cap.)
*/
protected abstract List<S2Point> createPoints(
S2ClosestPointQueryTest helper, S2Cap queryCap, int numPoints);
}
private interface Target {
/** Returns the distance from the target to the given point. */
S1Angle getDistance(S2Point x);
/** Returns the queue of results, after verifying both methods produce the same queue. */
List<Result<Integer>> findClosestPoints(S2ClosestPointQuery<Integer> query);
}
private static final class PointTarget implements Target {
final S2Point point;
PointTarget(S2Point point) {
this.point = point;
}
@Override
public S1Angle getDistance(S2Point x) {
return new S1Angle(x, point);
}
@Override
public List<Result<Integer>> findClosestPoints(S2ClosestPointQuery<Integer> query) {
// Fill 'x' with the query results.
List<Result<Integer>> x = Lists.newArrayList();
query.findClosestPoints(x, point);
// Let the query allocate a queue for us.
List<Result<Integer>> y = query.findClosestPoints(point);
// Verify the results are the same and return one of them.
assertEquals(x, y);
return y;
}
}
private static final class EdgeTarget implements Target {
final S2Point a;
final S2Point b;
EdgeTarget(S2Point a, S2Point b) {
this.a = a;
this.b = b;
}
@Override
public S1Angle getDistance(S2Point x) {
return S2EdgeUtil.getDistance(x, a, b);
}
@Override
public List<Result<Integer>> findClosestPoints(S2ClosestPointQuery<Integer> query) {
// Fill 'x' with the query results.
List<Result<Integer>> x = Lists.newArrayList();
query.findClosestPointsToEdge(x, a, b);
// Let the query allocate a queue for us.
List<Result<Integer>> y = query.findClosestPointsToEdge(a, b);
// Verify the results are the same and return one of them.
assertEquals(x, y);
return y;
}
}
/**
* Use "query" to find the closest point(s) to the given target, and extract the query results.
* Also verify that the results satisfy the search criteria.
*/
private static List<Result<Integer>> getClosestPoints(
Target target, S2ClosestPointQuery<Integer> query) {
List<Result<Integer>> actual = target.findClosestPoints(query);
assertTrue(actual.size() <= query.getMaxPoints());
if (query.getRegion() != null && query.getMaxDistance() == S1Angle.INFINITY) {
// We can predict exactly how many points should be returned.
assertEquals(Math.min(query.getMaxPoints(), query.index().numPoints()), actual.size());
}
for (Result<Integer> result : actual) {
// Check that query.distance() is approximately equal to the angle between point and target.
// They may be slightly different because query.distance() is computed using S1ChordAngle.
// Note that the error gets considerably larger (1e-7) as the angle approaches Pi.
S2Point p = result.entry().point();
S1Angle angle = result.distance().toAngle();
assertEquals(target.getDistance(p).radians(), angle.radians(), MAX_CHORD_ANGLE_ERROR);
// Check that the point satisfies the region() criteria.
if (query.getRegion() != null) {
assertTrue(query.getRegion().contains(p));
}
// Check that it satisfies the maxDistance() criteria.
assertTrue(angle.compareTo(query.getMaxDistance()) <= 0);
}
return actual;
}
private static void checkFindClosestPoints(Target target, S2ClosestPointQuery<Integer> query) {
query.useBruteForce(true);
List<Result<Integer>> expected = getClosestPoints(target, query);
query.useBruteForce(false);
List<Result<Integer>> actual = getClosestPoints(target, query);
compareResults(expected, actual, query.getMaxPoints(), query.getMaxDistance(), S1Angle.ZERO);
}
/** Validates a variety of random queries. */
private void checkFactory(PointFactory factory, int numIndexes, int numPoints, int numQueries) {
S2PointIndex<Integer> index = new S2PointIndex<>();
S2ClosestPointQuery<Integer> query = new S2ClosestPointQuery<>(index);
for (int i = 0; i < numIndexes; i++) {
// Generate a point set and index it.
S2Cap queryCap = S2Cap.fromAxisAngle(randomPoint(), QUERY_RADIUS);
index.reset();
addPoints(index, factory.createPoints(this, queryCap, numPoints));
query.reset();
for (int j = 0; j < numQueries; j++) {
query.setMaxPoints((int) uniform(1, 100));
if (oneIn(2)) {
query.setMaxDistance(randomAngle());
}
S2LatLngRect rect =
S2LatLngRect.fromCenterSize(
new S2LatLng(samplePoint(queryCap)), new S2LatLng(randomAngle(), randomAngle()));
if (oneIn(5)) {
query.setRegion(rect);
}
if (oneIn(2)) {
S2Point p = samplePoint(queryCap);
checkFindClosestPoints(new PointTarget(p), query);
} else {
S2Point a = samplePoint(queryCap);
S2Point b = samplePoint(S2Cap.fromAxisAngle(a, randomDecimilliAngle()));
checkFindClosestPoints(new EdgeTarget(a, b), query);
}
}
}
}
protected static void addPoints(S2PointIndex<Integer> index, List<S2Point> points) {
for (int i = 0; i < points.size(); i++) {
index.add(points.get(i), i);
}
}
  protected S1Angle randomDecimilliAngle() {
    // NOTE(review): Math.pow(1e-4, x) with x = a small random radian value
    // produces results close to 1 radian, which does not match the
    // "decimilli" (1e-4) name. Possibly 1e-4 * randomAngle().radians() or a
    // pow with a uniform(0,1) exponent was intended -- confirm against the
    // reference C++ S2 implementation before relying on this distribution.
    return S1Angle.radians(Math.pow(1e-4, randomAngle().radians()));
  }
private S1Angle randomAngle() {
return S1Angle.radians(uniform(0, QUERY_RADIUS.radians()));
}
}
|
genisysram/Chronicle-Core | src/main/java/net/openhft/chronicle/core/Maths.java | /*
* Copyright 2016-2020 chronicle.software
*
* https://chronicle.software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.openhft.chronicle.core;
import net.openhft.chronicle.core.util.StringUtils;
import org.jetbrains.annotations.NotNull;
import java.util.Arrays;
public enum Maths {
;
/**
* Numbers larger than this are whole numbers due to representation error.
*/
private static final double WHOLE_NUMBER = 1L << 52;
private static final int K0 = 0x6d0f27bd;
private static final int M0 = 0x5bc80bad;
private static final int M1 = 0xea7585d7;
private static final int M2 = 0x7a646e19;
private static final int M3 = 0x855dd4db;
private static final long[] TENS = new long[19];
private static final long[] FIVES = new long[28];
private static final String OUT_OF_RANGE = " out of range";
static {
TENS[0] = FIVES[0] = 1;
for (int i = 1; i < TENS.length; i++)
TENS[i] = 10 * TENS[i - 1];
for (int i = 1; i < FIVES.length; i++)
FIVES[i] = 5 * FIVES[i - 1];
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @param digits 0 to 18 digits of precision
* @return rounded value
*/
    public static double roundN(double d, int digits) {
        final long factor = roundingFactor(digits);
        // Magnitudes at or beyond 2^52 / factor are already whole numbers at this precision
        // (and would overflow the long cast), so they are returned unchanged.
        return Math.abs(d) < WHOLE_NUMBER / factor
                ? (double) (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
    }
    /** Returns 10^{@code digits}, the scaling factor for rounding to {@code digits} decimal places (0..18). */
    public static long roundingFactor(int digits) {
        return TENS[digits];
    }
    /**
     * Returns the scaling factor for a fractional digit count: the integer part selects a power
     * of ten and the first fractional digit selects an extra sub-decimal multiplier
     * (.0-.2 -&gt; x1, .3-.5 -&gt; x2, .6 -&gt; x4, .7-.8 -&gt; x5, .9 -&gt; x8, rounding to 1.0 -&gt; x10).
     */
    public static long roundingFactor(double digits) {
        int iDigits = (int) digits;
        long ten = TENS[iDigits];
        switch ((int) ((digits - iDigits) * 10 + 0.5)) {
            case 0:
            case 1:
            case 2:
                return ten;
            case 3:
            case 4:
            case 5:
                return 2 * ten;
            case 6:
                return 4 * ten;
            case 7:
            case 8:
                return 5 * ten;
            case 9:
                return 8 * ten;
            default:
                return 10 * ten;
        }
    }
    /** Rounds {@code d} up (towards +infinity) at {@code digits} decimal places, backing off one
     *  ulp first so values that are already exact at this precision are not bumped upwards. */
    public static double ceilN(double d, int digits) {
        final long factor = roundingFactor(digits);
        double ulp = Math.ulp(d);
        double ulp2 = ulp * factor;
        return Math.abs(d) < (double) Long.MAX_VALUE / factor && ulp2 < 1
                ? Math.ceil((d - ulp) * factor) / factor : d;
    }
    /** Rounds {@code d} down (towards -infinity) at {@code digits} decimal places, nudging up one
     *  ulp first so values that are already exact at this precision are not pulled downwards. */
    public static double floorN(double d, int digits) {
        final long factor = roundingFactor(digits);
        double ulp = Math.ulp(d);
        double ulp2 = ulp * factor;
        return Math.abs(d) < (double) Long.MAX_VALUE / factor && ulp2 < 1
                ? Math.floor((d + ulp) * factor) / factor : d;
    }
    /** Rounds half away from zero at a fractional digit count; see {@link #roundingFactor(double)}. */
    public static double roundN(double d, double digits) {
        final long factor = roundingFactor(digits);
        return Math.abs(d) < (double) Long.MAX_VALUE / factor
                ? (double) (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
    }
    /** ceilN for fractional digit counts; rounds with 8 extra guard digits before taking the ceiling. */
    public static double ceilN(double d, double digits) {
        final long factor = roundingFactor(digits + 8);
        final long factor2 = roundingFactor(digits);
        return Math.abs(d) < WHOLE_NUMBER / factor
                ? Math.ceil(Math.round(d * factor) / 1e8) / factor2 : d;
    }
    /** floorN for fractional digit counts; rounds with 8 extra guard digits before taking the floor. */
    public static double floorN(double d, double digits) {
        final long factor = roundingFactor(digits + 8);
        final long factor2 = roundingFactor(digits);
        return Math.abs(d) < WHOLE_NUMBER / factor
                ? Math.floor(Math.round(d * factor) / 1e8) / factor2 : d;
    }
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round1(double d) {
final double factor = 1e1;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round2(double d) {
final double factor = 1e2;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round3(double d) {
final double factor = 1e3;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round4(double d) {
final double factor = 1e4;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round5(double d) {
final double factor = 1e5;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round6(double d) {
final double factor = 1e6;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round7(double d) {
final double factor = 1e7;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
/**
* Performs a round which is accurate to within 1 ulp. i.e. for values very close to 0.5 it
* might be rounded up or down. This is a pragmatic choice for performance reasons as it is
* assumed you are not working on the edge of the precision of double.
*
* @param d value to round
* @return rounded value
*/
public static double round8(double d) {
final double factor = 1e8;
return Math.abs(d) < WHOLE_NUMBER / factor
? (long) (d < 0 ? d * factor - 0.5 : d * factor + 0.5) / factor : d;
}
    /** Returns the smallest power of two &gt;= {@code n}, at least {@code min}, capped at 2^30 for int. */
    public static int nextPower2(int n, int min) throws IllegalArgumentException {
        return (int) Math.min(1 << 30, nextPower2(n, (long) min));
    }
    /**
     * Returns the smallest power of two &gt;= {@code n}, at least {@code min} (which must itself be a
     * power of two). Results are capped at 2^62 instead of overflowing a signed long.
     *
     * @throws IllegalArgumentException if {@code min} is not a power of two
     */
    public static long nextPower2(long n, long min) throws IllegalArgumentException {
        if (!isPowerOf2(min))
            throw new IllegalArgumentException(min + " must be a power of 2");
        if (n < min) return min;
        if (isPowerOf2(n))
            return n;
        long i = min;
        while (i < n) {
            i *= 2;
            if (i <= 0) return 1L << 62; // doubled past 2^62 and wrapped; clamp instead of overflowing
        }
        return i;
    }
    /** True when exactly one bit is set; note this also accepts Long.MIN_VALUE (sign bit only). */
    public static boolean isPowerOf2(long n) {
        return Long.bitCount(n) == 1;
    }
    /** 32-bit hash of a CharSequence, folding the high half of {@link #hash64(CharSequence)} into the low. */
    public static int hash32(@NotNull CharSequence cs) {
        long h = hash64(cs);
        h ^= h >> 32;
        return (int) h;
    }
    /** 32-bit hash of a String, folding the high half of {@link #hash64(String)} into the low. */
    public static int hash32(@NotNull String s) {
        long h = hash64(s);
        h ^= h >> 32;
        return (int) h;
    }
    /** 32-bit hash of a StringBuilder, folding the high half of {@link #hash64(StringBuilder)} into the low. */
    public static int hash32(@NotNull StringBuilder s) {
        long h = hash64(s);
        h ^= h >> 32;
        return (int) h;
    }
    /** 32-bit hash of a long, folding the high half of {@link #hash64(long)} into the low. */
    public static int hash32(long l0) {
        long h = hash64(l0);
        h ^= h >> 32;
        return (int) h;
    }
    /** 64-bit polynomial hash of any CharSequence; Strings are delegated to the array-based fast path. */
    public static long hash64(@NotNull CharSequence cs) {
        if (cs instanceof String)
            return hash64((String) cs);
        long hash = 0;
        for (int i = 0, len = cs.length(); i < len; i++)
            hash = hash * 0x32246e3d + cs.charAt(i);
        return agitate(hash);
    }
    /**
     * 64-bit polynomial hash over the String's backing storage.
     * NOTE(review): on Java 9+ this hashes the raw backing byte[] (compact strings), so for
     * non-Latin-1 content the result can differ from the char-based pre-9 path -- confirm
     * cross-JVM-version stability is not required.
     */
    public static long hash64(@NotNull String s) {
        long hash = 0;
        if (Jvm.isJava9Plus()) {
            final byte[] bytes = StringUtils.extractBytes(s);
            for (int i = 0, len = s.length(); i < len; i++)
                hash = hash * 0x32246e3d + bytes[i];
        } else {
            final char[] chars = StringUtils.extractChars(s);
            for (int i = 0, len = s.length(); i < len; i++)
                hash = hash * 0x32246e3d + chars[i];
        }
        return agitate(hash);
    }
    /** 64-bit polynomial hash of a StringBuilder; same structure and caveats as {@link #hash64(String)}. */
    public static long hash64(@NotNull StringBuilder s) {
        long hash = 0;
        if (Jvm.isJava9Plus()) {
            final byte[] bytes = StringUtils.extractBytes(s);
            for (int i = 0, len = s.length(); i < len; i++)
                hash = hash * 0x32246e3d + bytes[i];
        } else {
            final char[] chars = StringUtils.extractChars(s);
            for (int i = 0, len = s.length(); i < len; i++)
                hash = hash * 0x32246e3d + chars[i];
        }
        return agitate(hash);
    }
    /**
     * Returns rounded down log<sub>2</sub>{@code num}, e. g.: {@code intLog2(1) == 0},
     * {@code intLog2(2) == 1}, {@code intLog2(7) == 2}, {@code intLog2(8) == 3}, etc.
     *
     * @throws IllegalArgumentException if the given number <= 0
     */
    public static int intLog2(long num) {
        if (num <= 0)
            throw new IllegalArgumentException("positive argument expected, " + num + " given");
        return 63 - Long.numberOfLeadingZeros(num);
    }
    /** Narrows to a signed byte, throwing if the value does not fit. */
    public static byte toInt8(long x) throws IllegalArgumentException {
        if ((byte) x == x)
            return (byte) x;
        throw new IllegalArgumentException("Byte " + x + OUT_OF_RANGE);
    }
    /** Narrows to a signed short, throwing if the value does not fit. */
    public static short toInt16(long x) throws IllegalArgumentException {
        if ((short) x == x)
            return (short) x;
        throw new IllegalArgumentException("Short " + x + OUT_OF_RANGE);
    }
    /** Narrows to a signed int, using {@code msg} as a {@link String#format} template on failure. */
    public static int toInt32(long x, @NotNull String msg) throws IllegalArgumentException {
        if ((int) x == x)
            return (int) x;
        throw new IllegalArgumentException(String.format(msg, x));
    }
    /** Narrows to a signed int, throwing if the value does not fit. */
    public static int toInt32(long x) throws IllegalArgumentException {
        if ((int) x == x)
            return (int) x;
        throw new IllegalArgumentException("Int " + x + OUT_OF_RANGE);
    }
    /** Checks the value fits in an unsigned 8-bit range (0..255), throwing otherwise. */
    public static short toUInt8(long x) throws IllegalArgumentException {
        if ((x & 0xFF) == x)
            return (short) x;
        throw new IllegalArgumentException("Unsigned Byte " + x + OUT_OF_RANGE);
    }
    /** Checks the value fits in an unsigned 16-bit range (0..65535), throwing otherwise. */
    public static int toUInt16(long x) throws IllegalArgumentException {
        if ((x & 0xFFFF) == x)
            return (int) x;
        throw new IllegalArgumentException("Unsigned Short " + x + OUT_OF_RANGE);
    }
    /** Checks the value fits in 31 bits (0..2^31-1), throwing otherwise. */
    public static int toUInt31(long x) throws IllegalArgumentException {
        if ((x & 0x7FFFFFFFL) == x)
            return (int) x;
        throw new IllegalArgumentException("Unsigned Int 31-bit " + x + OUT_OF_RANGE);
    }
    /** Checks the value fits in an unsigned 32-bit range, throwing otherwise. */
    public static long toUInt32(long x) throws IllegalArgumentException {
        if ((x & 0xFFFFFFFFL) == x)
            return x;
        throw new IllegalArgumentException("Unsigned Int " + x + OUT_OF_RANGE);
    }
    /** Final mixing step shared by the hash functions; spreads entropy across the 64 bits. */
    public static long agitate(long l) {
        l += l >>> 22;
        l ^= Long.rotateRight(l, 17);
        return l;
    }
    /**
     * A simple hashing algorithm for a 64-bit value
     *
     * @param l0 to hash
     * @return hash value.
     */
    public static long hash64(long l0) {
        int l0a = (int) (l0 >> 32);
        long h0 = l0 * M0 + l0a * M1;
        return agitate(h0);
    }
    /**
     * A simple hashing algorithm for a 128-bit value
     *
     * @param l0 to hash
     * @param l1 to hash
     * @return hash value.
     */
    public static long hash64(long l0, long l1) {
        int l0a = (int) (l0 >> 32);
        int l1a = (int) (l1 >> 32);
        long h0 = (l0 + l1a) * M0;
        long h1 = (l1 + l0a) * M1;
        return agitate(h0) ^ agitate(h1);
    }
/**
* Divide {@code dividend} by divisor, if division is not integral the result is rounded up.
* Examples: {@code divideRoundUp(10, 5) == 2}, {@code divideRoundUp(11, 5) == 3},
* {@code divideRoundUp(-10, 5) == -2}, {@code divideRoundUp(-11, 5) == -3}.
*
* @return the rounded up quotient
*/
public static long divideRoundUp(long dividend, long divisor) {
int sign = (dividend > 0 ? 1 : -1) * (divisor > 0 ? 1 : -1);
return sign * (Math.abs(dividend) + Math.abs(divisor) - 1) / Math.abs(divisor);
}
public static long tens(int decimalPlaces) {
return TENS[decimalPlaces];
}
public static int digits(long num) {
int index = Arrays.binarySearch(TENS, num);
return index < -1 ? -1 - index : index >= 0 ? index + 1 : 1;
}
public static long fives(int decimalPlaces) {
return FIVES[decimalPlaces];
}
public static boolean same(double a, double b) {
return Double.isNaN(a) ? Double.isNaN(b) : a == b;
}
public static boolean same(float a, float b) {
return Float.isNaN(a) ? Float.isNaN(b) : a == b;
}
public static int hash(Object o) {
return o == null ? 0 : o.hashCode();
}
public static int hash(Object o1, Object o2) {
return hash(o1) * M0 + hash(o2);
}
public static int hash(Object o1, Object o2, Object o3) {
return hash(o1, o2) * M0 + hash(o3);
}
public static int hash(Object o1, Object o2, Object o3, Object o4) {
return hash(o1, o2, o3) * M0 + hash(o4);
}
public static int hash(Object o1, Object o2, Object o3, Object o4, Object o5) {
return hash(o1, o2, o3, o4) * M0 + hash(o5);
}
}
|
cristianoperez/vraptor | vraptor-core/src/main/java/br/com/caelum/vraptor/ioc/spring/InjectionBeanPostProcessor.java | /***
* Copyright (c) 2009 Caelum - www.caelum.com.br/opensource
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package br.com.caelum.vraptor.ioc.spring;
import java.lang.reflect.Constructor;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor;
/**
* Enhances the default behavior from Spring, adding support to injection
* through not annotated constructor, if there is only one.
*
* @author <NAME>
*/
class InjectionBeanPostProcessor extends AutowiredAnnotationBeanPostProcessor {

	// in case we are required to change the injection annotation:
	// public InjectionBeanPostProcessor() {
	//     this.setAutowiredAnnotationType(In.class);
	// }

	/**
	 * Asks Spring for annotated candidate constructors first; when Spring finds
	 * none, falls back to the bean's single non-default constructor (if any).
	 */
	@Override
	@SuppressWarnings({ "unchecked", "rawtypes" })
	public Constructor[] determineCandidateConstructors(Class beanClass, String beanName) throws BeansException {
		Constructor[] fromSpring = super.determineCandidateConstructors(beanClass, beanName);
		if (fromSpring != null) {
			return fromSpring;
		}
		Constructor sole = soleNonDefaultConstructor(beanClass);
		return sole == null ? null : new Constructor[] { sole };
	}

	/**
	 * Returns the bean's only declared constructor when it is the single one and
	 * takes at least one argument; otherwise returns null.
	 */
	@SuppressWarnings({ "rawtypes" })
	private Constructor soleNonDefaultConstructor(Class beanClass) {
		Constructor[] declared = beanClass.getDeclaredConstructors();
		if (declared.length == 1 && declared[0].getParameterTypes().length > 0) {
			return declared[0];
		}
		return null;
	}
}
|
tkowark/repmine | app/models/owl_wrapper/datatype_property.rb | class DatatypeProperty
# domain is an owl_class object, range an RDF::Resource
attr_accessor :name, :range, :domain, :attribute_url
include RdfSerialization
def initialize(name, range, domain)
@name = name
@range = range.is_a?(RDF::Resource) ? range : RDF::Resource.new(range)
@domain = domain
end
def self.from_sample(name, domain, sample)
return self.new(name, RDF::Literal.new(sample).datatype, domain)
end
def self.from_url(url, range, domain)
name = url.split("/").last.split("#").last
prop = self.new(name, range, domain)
prop.attribute_url = url
return prop
end
def url
return attribute_url || domain.url + "/" + name
end
def rdf_statements
stmts = [
[resource, RDF.type, RDF::OWL.DatatypeProperty],
[resource, RDF::RDFS.range, range],
[resource, RDF::RDFS.label, RDF::Literal.new(name)]
]
stmts << [resource, RDF::RDFS.domain, domain.resource] unless domain.nil?
return stmts
end
def ==(other_object)
return self.url == other_object.url
end
end |
qovalenko/landgreen.github.io | physics/notes/electromagnetism/coulomb/charge1.js | const setup1 = function() {
var canvas = document.getElementById("charge1");
var ctx = canvas.getContext("2d");
canvas.width = document.getElementsByTagName("article")[0].clientWidth;
ctx.font = "30px Arial";
ctx.fillStyle = "#aaa";
ctx.textAlign = "center";
ctx.fillText("click to start simulation", canvas.width / 2, canvas.height / 2);
};
setup1();
// Main simulation: a 7x7 grid of protons and electrons driven by the Charge
// class. Keys 1-4 switch draw modes (particles, vector field, potential
// field, cloud chamber); mousedown repels charges; leaving the canvas pauses.
// NOTE(review): `canvas` and `ctx` are never declared in this scope (the local
// declarations below are commented out), so this relies on
// Charge.setCanvas(el) -- or something else -- exposing them globally; confirm.
function charges1(el) {
  el.onclick = null; //stops the function from running on button click
  Charge.setCanvas(el);
  // var canvas = el
  // var ctx = canvas.getContext("2d");
  //switch between draw modes
  let drawMode = 1;
  document.addEventListener("keypress", event => {
    if (!pause) {
      if (event.charCode === 49) {
        drawMode = 1; //particle
        el.style.background = "#fff";
        ctx.clearRect(0, 0, canvas.width, canvas.height);
      } else if (event.charCode === 50) {
        drawMode = 2; //particles + electric vector field
        el.style.background = "#fff";
        ctx.clearRect(0, 0, canvas.width, canvas.height);
      } else if (event.charCode === 51) {
        drawMode = 3; //electric potential scalar field
        el.style.background = "#fff";
        ctx.clearRect(0, 0, canvas.width, canvas.height);
      } else if (event.charCode === 52) {
        drawMode = 4; //cloud chamber
        el.style.background = "#000";
        ctx.clearRect(0, 0, canvas.width, canvas.height);
      }
    }
  });
  //___________________get mouse input___________________
  // Scale client coordinates into canvas coordinates before repelling.
  canvas.addEventListener("mousedown", function(event) {
    Charge.repulse(q, {
      x: (event.offsetX * canvas.width) / canvas.clientWidth,
      y: (event.offsetY * canvas.height) / canvas.clientHeight
    });
  });
  let pause = false;
  el.addEventListener("mouseleave", function() {
    pause = true;
  });
  el.addEventListener("mouseenter", function() {
    Charge.setCanvas(el);
    if (pause) requestAnimationFrame(cycle);
    pause = false;
  });
  const q = []; //holds the charges
  //spawn p before e to avoid a bug in the class method allPhysics
  const separation = 30;
  const len = 7;
  const offx = canvas.width / 2 - ((len - 1) * separation) / 2;
  const offy = canvas.height / 2 - ((len - 1) * separation) / 2;
  for (let i = 0; i < len; ++i) {
    for (let j = 0; j < len; ++j) {
      q[q.length] = new Charge("p", {
        x: i * separation + offx,
        y: j * separation + offy
      });
    }
  }
  for (let i = 0; i < len; ++i) {
    for (let j = 0; j < len; ++j) {
      q[q.length] = new Charge("e", {
        x: i * separation + offx,
        y: j * separation + offy
      });
    }
  }
  // Animation loop: step the physics, render per the current draw mode,
  // clamp charges to the canvas, and reschedule unless paused.
  function cycle() {
    Charge.physicsAll(q);
    //choose a draw mode
    if (drawMode === 1) {
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      Charge.drawAll(q);
    } else if (drawMode === 2) {
      ctx.clearRect(0, 0, canvas.width, canvas.height);
      Charge.vectorField(q);
      ctx.globalAlpha = 0.5;
      Charge.drawAll(q);
      ctx.globalAlpha = 1;
    } else if (drawMode === 3) {
      Charge.scalarField(q);
    } else if (drawMode === 4) {
      Charge.drawCloudChamber(q);
    }
    Charge.bounds(q);
    if (!pause) requestAnimationFrame(cycle);
  }
  requestAnimationFrame(cycle);
}
itcomusic/ot | errors.go | <reponame>itcomusic/ot
package ot
import (
"errors"
"fmt"
"regexp"
"github.com/itcomusic/ot/internal/client"
)
var (
	// regDuplicate matches the server's message for a node-name collision.
	regDuplicate = regexp.MustCompile(`^An item with the name '.*' already exists.$`)
	// ErrTokenExpire is returned when the session token is no longer valid.
	ErrTokenExpire = fmt.Errorf("ot: token expired")
)
// NodeRetrievalError wraps a DocMan.NodeRetrievalError response and records
// whether the failure was caused by a missing node.
type NodeRetrievalError struct {
	*client.OpError
	isNotFound bool
}
// NotFound reports whether the node could not be found.
func (re *NodeRetrievalError) NotFound() bool {
	return re.isNotFound
}
// DuplicateNameError wraps a response indicating an item-name collision.
type DuplicateNameError struct {
	*client.OpError
}
// errIn normalizes a raw response/error pair into this package's error types:
// nil on success, ErrTokenExpire on session expiry, typed DocMan errors for
// retrieval/duplicate failures, and a generic "ot:" error otherwise.
func errIn(r *client.Response, err error) error {
	if err != nil {
		return err
	}
	// -2147482645, -2147482644, -2147482643 login failed
	// -2147482642 session expire, when using token may be?
	// 903102 not found service
	// 903101 custom error
	switch r.Status {
	case 0:
		return nil
	case -2147482642:
		return ErrTokenExpire
	case -2147482645, -2147482644, -2147482643, 903102:
		return errors.New("ot: " + r.StatusMessage)
	default:
		switch r.StatusMessage {
		case "DocMan.NodeRetrievalError":
			desc, ecode := r.ErrMessage()
			// 662241287 is the DocMan error code for "node not found"
			// (simplified from a single-case switch).
			nfound := ecode == "662241287"
			return &NodeRetrievalError{OpError: &client.OpError{Service: r.Service, Err: errors.New(desc)}, isNotFound: nfound}
		case "DocMan.DuplicateName":
			return &DuplicateNameError{OpError: &client.OpError{Service: r.Service, Err: errors.New(r.Desc)}}
		case "DocMan.NodeCreationError": // why is it not a DocMan.Duplicate:(
			if regDuplicate.FindStringIndex(r.Desc) != nil {
				return &DuplicateNameError{OpError: &client.OpError{Service: r.Service, Err: errors.New(r.Desc)}}
			}
		}
		return errors.New("ot: " + r.Desc)
	}
}
|
nadiaschutz/jennifer_dewalt | app/controllers/globulator/page_controller.rb | class Globulator::PageController < ApplicationController
def index
@title = 'Globulator'
end
end
|
MobileDev418/react_redux_master | src/components/widgets/CodePlayground2/src/components/ViewTabs.js | <gh_stars>1-10
import styles from './ViewTabs.module.scss';
import React, { Component, PropTypes } from 'react';
import { Tab, Tabs, TabList, TabPanel } from 'react-tabs';
import CodeMirrorEditor from '../../../../helpers/codeeditor';
const RunButton = require('../../../../helpers/runCodeButton');
const JudgeButton = require('../../../../helpers/judgeCodeButton');
const DEFAULT_HEIGHT = 225;
export default class CodePlaygroundTabsView extends Component {
  constructor(props) {
    super(props);
    this.state = {
      // Index of the tab the user last selected; null means "use the default
      // tab derived from props.pane".
      selectedIndex: null,
    };
    // Bind handlers once so JSX can pass stable references.
    this.handleSelect = this.handleSelect.bind(this);
    this.handleRunClicked = this.handleRunClicked.bind(this);
  }
  // Kick off an initial run when the playground is configured to auto-run.
  componentDidMount() {
    if (this.props.autoRun) {
      this.props.onRunClicked();
    }
  }
  // Renders the compiled output in a sandboxed iframe (or an empty div until
  // there is compiled HTML). props.height may be a number or a "...px" string.
  getResultView() {
    if (this.props.compiledHtml) {
      const style = {
        width: '100%',
        height: '100%',
        border: 'none',
      };
      if (this.props.height) {
        style.height = this.props.height.toString().indexOf('px') === -1 ?
          `${this.props.height}px` : this.props.height;
      }
      return (
        <iframe srcDoc={this.props.compiledHtml} style={style}></iframe>
      );
    }
    return <div />;
  }
  // Returns the panel body for a tab type: 'result' shows the output iframe,
  // 'js'/'html'/'css' show a CodeMirror editor for the matching file.
  getTabContent(type) {
    if (type === 'result') {
      return this.getResultView();
    }
    // Render code tabs
    const language = type === 'js' ? 'javascript' : type;
    const { files, showLineNumbers, theme, onlyCodeChanged, onCodeChange,
      autoRun, readOnly, height, default_themes } = this.props;
    let highlightedLines = '';
    if (this.props.panelsHighlightedLines) {
      highlightedLines = this.props.panelsHighlightedLines[type];
    }
    // 'default' theme falls back to the configured RunJS default, when given.
    let codeTheme = theme;
    if (theme === 'default' && default_themes) {
      codeTheme = default_themes.RunJS;
    }
    const activeContent = {
      language,
      theme: codeTheme,
      highlightedLines,
      content: files[type],
      lineNumbers: showLineNumbers,
    };
    return (
      <div className={styles.codeContainer} style={{ height: height || DEFAULT_HEIGHT }}>
        <CodeMirrorEditor
          codeContent={activeContent}
          readOnly={readOnly}
          showRuler={false}
          onlyCodeChanged={onlyCodeChanged}
          onEditorChange={(value) => {
            onCodeChange(type, value, autoRun);
          }}
        />
      </div>
    );
  }
  // Builds the Tabs component: assembles the visible tabs (Output/JS/HTML/CSS
  // minus the hidden ones), picks the default tab from props.pane unless the
  // user already selected one, and appends Run/Judge buttons when applicable.
  getTabsToShow() {
    const tabsToShow = [];
    const { hideCss, hideJs, hideHtml, hideResult, pane, autoRun, exercise } = this.props;
    // `count` tracks the index of each appended tab so `defaultTab` can be
    // matched to props.pane.
    let count = 0;
    let defaultTab = 0;
    if (!hideResult) {
      tabsToShow.push({
        title: 'Output',
        type: 'result',
      });
      if (pane === 'result') {
        defaultTab = count;
      }
      ++count;
    }
    if (!hideJs) {
      tabsToShow.push({
        title: 'JavaScript',
        type: 'js',
      });
      if (pane === 'js') {
        defaultTab = count;
      }
      ++count;
    }
    if (!hideHtml) {
      tabsToShow.push({
        title: 'HTML',
        type: 'html',
      });
      if (pane === 'html') {
        defaultTab = count;
      }
      ++count;
    }
    if (!hideCss) {
      tabsToShow.push({
        title: 'CSS',
        type: 'css',
      });
      if (pane === 'css') {
        defaultTab = count;
      }
      ++count;
    }
    // A user selection overrides the pane-derived default.
    if (this.state.selectedIndex !== null) {
      defaultTab = this.state.selectedIndex;
    }
    return (
      <Tabs selectedIndex={defaultTab} onSelect={this.handleSelect}>
        <TabList>
          {
            tabsToShow.map((tab, index) => {
              return (
                <Tab key={index}>{tab.title}</Tab>
              );
            })
          }
          {
            (!autoRun || exercise) &&
            <span
              style={{
                float: 'right'
              }}
            >
              { !autoRun &&
                <span style={{ marginLeft: 10 }}>
                  <RunButton onClick={this.handleRunClicked} executionInProgress={false}/>
                </span>
              }
              {
                exercise &&
                <span style={{ marginLeft: 10 }}>
                  <JudgeButton onClick={this.props.onJudgeClicked} executionInProgress={false}/>
                </span>
              }
            </span>
          }
        </TabList>
        {
          tabsToShow.map((tab, index) => {
            return (
              <TabPanel key={index}>{this.getTabContent(tab.type)}</TabPanel>
            );
          })
        }
      </Tabs>
    );
  }
  // Remember the user's tab choice so it survives re-renders.
  handleSelect(index) {
    this.setState({
      selectedIndex: index,
    });
  }
handleRunClicked() {
let selectedIndex = this.state.selectedIndex;
if (!this.props.hideResult) {
selectedIndex = 0;
}
this.setState({
selectedIndex,
}, this.props.onRunClicked());
}
  render() {
    const { hideNav, hideResult } = this.props;
    // With the nav hidden, show only the output panel; otherwise the full tab bar.
    let compDisplay = null;
    if (hideNav) {
      compDisplay = this.getTabContent('result');
    } else {
      compDisplay = this.getTabsToShow();
    }
    return (
      <div>
        {compDisplay}
        {
          hideResult &&
          <div style={{ display: 'none' }}>{this.getResultView()}</div>
        }
      </div>
    );
  }
// Runtime prop contract for the playground tab view; compiledHtml and height
// are the only optional props.
CodePlaygroundTabsView.propTypes = {
  hideCss : PropTypes.bool.isRequired,
  hideHtml : PropTypes.bool.isRequired,
  hideJs : PropTypes.bool.isRequired,
  hideResult : PropTypes.bool.isRequired,
  hideNav : PropTypes.bool.isRequired,
  showLineNumbers : PropTypes.bool.isRequired,
  autoRun : PropTypes.bool.isRequired,
  onlyCodeChanged : PropTypes.bool.isRequired,
  onCodeChange : PropTypes.func.isRequired,
  onRunClicked : PropTypes.func.isRequired,
  onJudgeClicked : PropTypes.func.isRequired,
  pane : PropTypes.string.isRequired,
  theme : PropTypes.string.isRequired,
  readOnly : PropTypes.bool.isRequired,
  compiledHtml : PropTypes.string,
  height : PropTypes.string,
  files : PropTypes.objectOf(PropTypes.string).isRequired,
};
|
articuly/alipay-sdk-python-all | alipay/aop/api/domain/AlipayUserCertDocVehicleLicense.py | <reponame>articuly/alipay-sdk-python-all<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserCertDocVehicleLicense(object):
    """Vehicle-license certificate payload for Alipay user-cert APIs.

    Every field defaults to None; only truthy fields are exported by
    ``to_alipay_dict``. Rewritten to be table-driven: the original generated
    code repeated the same None-init / export / import pattern nine times.
    The public interface (properties, ``to_alipay_dict``,
    ``from_alipay_dict``) is unchanged.
    """

    # Serialized field names, in the order used by the original generated code.
    _FIELDS = (
        'encoded_img_main',
        'encoded_img_vice',
        'engine_no',
        'issue_date',
        'model',
        'owner',
        'plate_no',
        'register_date',
        'vin',
    )

    def __init__(self):
        # Every field starts unset so it is skipped on export.
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    @property
    def encoded_img_main(self):
        return self._encoded_img_main

    @encoded_img_main.setter
    def encoded_img_main(self, value):
        self._encoded_img_main = value

    @property
    def encoded_img_vice(self):
        return self._encoded_img_vice

    @encoded_img_vice.setter
    def encoded_img_vice(self, value):
        self._encoded_img_vice = value

    @property
    def engine_no(self):
        return self._engine_no

    @engine_no.setter
    def engine_no(self, value):
        self._engine_no = value

    @property
    def issue_date(self):
        return self._issue_date

    @issue_date.setter
    def issue_date(self, value):
        self._issue_date = value

    @property
    def model(self):
        return self._model

    @model.setter
    def model(self, value):
        self._model = value

    @property
    def owner(self):
        return self._owner

    @owner.setter
    def owner(self, value):
        self._owner = value

    @property
    def plate_no(self):
        return self._plate_no

    @plate_no.setter
    def plate_no(self, value):
        self._plate_no = value

    @property
    def register_date(self):
        return self._register_date

    @register_date.setter
    def register_date(self, value):
        self._register_date = value

    @property
    def vin(self):
        return self._vin

    @vin.setter
    def vin(self, value):
        self._vin = value

    def to_alipay_dict(self):
        """Export the set (truthy) fields as a plain dict, recursing into
        values that themselves provide ``to_alipay_dict``."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict, copying only known keys; returns
        None for falsy input (matching the original generated behavior)."""
        if not d:
            return None
        o = AlipayUserCertDocVehicleLicense()
        for field in AlipayUserCertDocVehicleLicense._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
zhuxiang/LeetCode-Python | src/48-RotateImage.py | class Solution(object):
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
"""
* clockwise rotate
* first reverse up to down, then swap the symmetry
* 1 2 3 7 8 9 7 4 1
* 4 5 6 => 4 5 6 => 8 5 2
* 7 8 9 1 2 3 9 6 3
"""
matrix.reverse()
n = len(matrix)
for i in xrange(n):
for j in xrange(i+1,n):
matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]
if __name__ == '__main__':
    # Manual smoke test: rotate a 3x3 matrix and show the result.
    matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    s = Solution()
    s.rotate(matrix)
    # print() works on Python 2 and 3; the original Python 2-only print
    # statement is a SyntaxError under Python 3.
    print(matrix)
|
boichuk-oleh/new-js-sdk | src/horizon/resources/generic_test_cases.spec.js | <gh_stars>10-100
import {
testRequestSignatureBase,
testGetRequestBase
} from '../../test_helpers/generic_test_cases.spec'
import { HorizonResponse } from '../response'
// Horizon-flavored wrapper around the generic request-signature test case:
// pre-binds the Horizon server and response class, forwarding the rest.
export function testRequestSignature (opts) {
  const { horizon, resourceGroup, method, args, path, params } = opts
  testRequestSignatureBase({
    server: horizon,
    responseClass: HorizonResponse,
    resourceGroup,
    method,
    args,
    path,
    params
  })
}
// Horizon-flavored wrapper around the generic GET-request test case:
// pre-binds the Horizon server and response class, forwarding the rest.
export function testGetRequest (opts) {
  const { title, horizon, resourceGroup, method, args, path, params } = opts
  testGetRequestBase({
    title,
    server: horizon,
    resourceGroup,
    method,
    args,
    path,
    params,
    responseClass: HorizonResponse
  })
}
|
suhao/asioexpress | source/AsioExpress/EventHandling/EventQueue.hpp | // Copyright <NAME> 2013
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#pragma once
#include <vector>
#include <set>
#include <limits>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include "AsioExpressError/Check.hpp"
#include "AsioExpressError/CallStack.hpp"
#include "AsioExpress/CompletionHandler.hpp"
#include "AsioExpress/ErrorCodes.hpp"
#include "AsioExpress/Timer/Timer.hpp"
#undef max
namespace AsioExpress {
///
/// This is a basic event synchronization primitive for ASIO applications.
/// The EventQueue allows you to decouple the event processor from the async
/// processes generating the events. The queue accepts a maximum size parameter
/// that will block the completion of AsyncAdd to prevent the event queue from
/// getting unreasonably large and providing feedback to the rest of the
/// system.
///
template<typename E>
class EventQueue
{
public:
  typedef E Event;
  typedef boost::shared_ptr<Event> EventPointer;
  typedef size_t SizeType;
  // Unbounded queue: effectively no cap on the number of queued events.
  EventQueue() :
    m_isShutDown(false),
    m_maxSize(std::numeric_limits<SizeType>::max())
  {
  }
  // Bounded queue; size must be positive (see the class comment above for how
  // m_maxSize gates the completion of AsyncAdd).
  EventQueue(SizeType size) :
    m_isShutDown(false),
    m_maxSize(size)
  {
    CHECK(size > 0);
  }
  ~EventQueue()
  {
  }
  ///
  /// Get the next event from the event queue. If no events are queued the
  /// async call waits for a new event to be added to the queue.
  ///
  /// @param event                The event data that will be received by the
  ///                             caller.
  /// @param waitTimer            A timer to indicate how long to wait to receive
  ///                             the event.
  /// @param completionHandler    The completion hander is called when the event
  ///                             is received, the wait time expires, or an error
  ///                             occurs.
  void AsyncWait(
    EventPointer const & event,
    TimerPointer const & waitTimer,
    CompletionHandler completionHandler);
  ///
  /// This method adds an event to the event queue.
  ///
  /// @param event                The event data that will be received by the
  ///                             caller.
  /// @param completionHandler    The completion hander is called when the event
  ///                             is queued or the queue has been canceled.
  ///
  void AsyncAdd(
    Event const & event,
    CompletionHandler completionHandler);
  ///
  /// This method cancels all pending operations on the event queue.
  ///
  void Cancel();
  ///
  /// This method cancels all pending operations on the event queue. If
  /// AsyncWait or AsyncAdd is called on a queue that has been shut down,
  /// an operation-aborted error is returned immediately.
  ///
  void ShutDown();
private:
  // Timer-expiry callback; presumably completes the matching waiter with a
  // timeout error (definition is further down the file -- confirm).
  void Timeout(
    Error error,
    TimerPointer timer);
  // A parked AsyncWait call: where to deliver the event, what to invoke, and
  // the timer bounding the wait.
  struct EventHandler
  {
    EventHandler(
      EventPointer const & event,
      CompletionHandler const & completionHandler,
      TimerPointer const & waitTimer) :
      event(event),
      completionHandler(completionHandler),
      timer(waitTimer)
    {
    }
    EventPointer event;
    CompletionHandler completionHandler;
    TimerPointer timer;
  };
  // An AsyncAdd whose completion is still pending, paired with its handler.
  struct WaitingEvent
  {
    WaitingEvent(
      Event const & event,
      CompletionHandler const & completionHandler) :
      waitingEvent(event),
      completionHandler(completionHandler)
    {
    }
    Event waitingEvent;
    CompletionHandler completionHandler;
  };
  typedef std::vector<EventHandler> EventHandlerList;
  typedef std::vector<Event> RegisteredEvents;
  typedef std::vector<WaitingEvent> WaitingEvents;
  EventHandlerList m_waitingEventHandlers;  // waiters with no event yet
  RegisteredEvents m_registeredEvents;      // queued events with no waiter yet
  WaitingEvents m_waitingEvents;            // adds withheld by the size cap
  bool m_isShutDown;
  SizeType m_maxSize;
};
template<typename Event>
void EventQueue<Event>::AsyncWait(
    EventPointer const & event,
    TimerPointer const & waitTimer,
    CompletionHandler completionHandler)
{
    // After ShutDown() every wait fails immediately.
    if ( m_isShutDown )
    {
        completionHandler(Error(boost::asio::error::operation_aborted));
        return;
    }

    // Debug check: the same event pointer or timer must not already be
    // parked in a wait — that would mean two concurrent waits share state.
    {
        typename EventHandlerList::iterator it = m_waitingEventHandlers.begin();
        typename EventHandlerList::iterator end = m_waitingEventHandlers.end();
        for (; it != end; ++it)
        {
            CHECK_MSG(it->event != event, "Outstanding wait for this event pointer in call to EventQueue::AsyncWait.");
            CHECK_MSG(it->timer != waitTimer, "Outstanding wait for this timer in call to EventQueue::AsyncWait.");
        }
    }

    // Fast path: an event is already queued, so deliver it synchronously.
    if (! m_registeredEvents.empty())
    {
        typename RegisteredEvents::iterator it = m_registeredEvents.begin();
        *event = *it;
        m_registeredEvents.erase(it);
        completionHandler(Error());
        // A queue slot just opened: promote one deferred AsyncAdd (if any)
        // into the queue and fire its completion handler now.
        if (! m_waitingEvents.empty())
        {
            typename WaitingEvents::iterator it = m_waitingEvents.begin();
            m_registeredEvents.push_back(it->waitingEvent);
            CompletionHandler handler(it->completionHandler);
            m_waitingEvents.erase(it);
            handler(Error());
        }
        return;
    }

    // Nothing queued: start the wait timer and park this handler until
    // AsyncAdd delivers an event or Timeout fires.
    //
    waitTimer->AsyncWait(boost::bind(&EventQueue::Timeout, this, _1, waitTimer));
    m_waitingEventHandlers.push_back(
        EventHandler(event, completionHandler, waitTimer));
}
template<typename Event>
void EventQueue<Event>::AsyncAdd(
    Event const & event,
    CompletionHandler completionHandler)
{
    // After ShutDown() every add fails immediately.
    if ( m_isShutDown )
    {
        completionHandler(Error(boost::asio::error::operation_aborted));
        return;
    }

    if(! m_waitingEventHandlers.empty())
    {
        // A consumer is already waiting: hand the event straight to it.
        // Copy the timer/handler out before erase() invalidates the
        // iterator, and stop the wait timer before completing anything.
        typename EventHandlerList::iterator it = m_waitingEventHandlers.begin();
        *(it->event) = event;
        TimerPointer timer = it->timer;
        CompletionHandler handler(it->completionHandler);
        m_waitingEventHandlers.erase(it);
        timer->Stop();
        handler(Error());
        completionHandler(Error());
        return;
    }

    if (m_registeredEvents.size() >= m_maxSize)
    {
        // Queue is full: park the event together with this completion
        // handler; AsyncWait will promote it when a slot opens.
        m_waitingEvents.push_back(WaitingEvent(event,completionHandler));
        return;
    }

    // No handler waiting and room available: queue the event.
    m_registeredEvents.push_back(event);
    completionHandler(Error());
}
template<typename Event>
void EventQueue<Event>::Cancel()
{
    // Stop the timer of every parked waiter; stopping a timer presumably
    // invokes its Timeout callback, which completes and removes the waiter
    // (confirm against the Timer implementation).
    // NOTE(review): deferred AsyncAdd completions in m_waitingEvents are
    // not canceled here — verify whether that is intentional.
    typedef typename EventHandlerList::size_type Index;
    for (Index n = 0; n < m_waitingEventHandlers.size(); ++n)
    {
        m_waitingEventHandlers[n].timer->Stop();
    }
}
template<typename Event>
void EventQueue<Event>::ShutDown()
{
    // Permanently disable the queue: all future AsyncWait/AsyncAdd calls
    // complete immediately with operation_aborted (checked at their entry),
    // then cancel the currently parked waiters.
    m_isShutDown = true;
    Cancel();
}
template<typename Event>
void EventQueue<Event>::Timeout(
    Error error,
    TimerPointer timer)
{
    // An expired timer reports no error; convert that to the queue's
    // timeout error code so the waiter can tell what happened.
    if (!error)
    {
        error = Error(ErrorCode::EventQueueTimeout);
    }

    // Find the waiter owning this timer, remove it from the parked list,
    // then complete it (handler is copied before erase invalidates it).
    typedef typename EventHandlerList::size_type Index;
    for (Index n = 0; n < m_waitingEventHandlers.size(); ++n)
    {
        if (m_waitingEventHandlers[n].timer == timer)
        {
            CompletionHandler handler(m_waitingEventHandlers[n].completionHandler);
            m_waitingEventHandlers.erase(m_waitingEventHandlers.begin() + n);
            handler(error);
            break;
        }
    }
}
} // namespace AsioExpress
|
qiuhere/Bench | core/src/main/java/site/ycsb/data_gen/Graph_gen/Graph_gen/glib-adv/hashgenericmp.h | <gh_stars>1000+
#include "bd.h"
#ifdef GLib_GLIBC
// Fallback implementation of the GCC atomic builtin: atomically add
// `incr` to *p and return the value *p held BEFORE the addition (the
// __sync_fetch_and_add contract).  XADD leaves exactly that original
// value in `result`.
//
// Fix: the previous version returned `result + 1`, which matched
// neither fetch-and-add (old value) nor add-and-fetch for incr != 1;
// callers such as AddKeyPar rely on the old value as a unique slot
// index, so the off-by-one would skip/alias slots.
inline unsigned int __sync_fetch_and_add_2(volatile unsigned int* p, unsigned int incr)
{
    unsigned int result;
    asm volatile("lock; xadd %0, %1" :
                 "=r"(result), "=m"(*p):
                 "0"(incr), "m"(*p) :
                 "memory");
    return result;
}
#endif
/////////////////////////////////////////////////
// Hash-Table (parallel variant).
// Open hashing: PortV holds bucket heads, KeyDatV holds key/dat slots
// chained through THKeyDat::Next; HashCd==-1 marks a free/deleted slot.
// The *Par methods use GCC __sync atomics plus per-bucket lock words in
// PortLockV (value -2 == locked); the table must be pre-sized via
// ResizePar() before parallel use since they never grow the storage.
template<class TKey, class TDat, class THashFunc = TDefaultHashFunc<TKey> >
class THashGenericMP{
public:
  //pthread_mutex_t lock;
  enum {HashPrimes=32};
  // Ascending table of primes used as bucket counts; see GetNextPrime().
  static const unsigned int HashPrimeT[HashPrimes];
public:
  typedef THashKeyDatI<TKey, TDat> TIter;
private:
  typedef THashKeyDat<TKey, TDat> THKeyDat;
  typedef TPair<TKey, TDat> TKeyDatP;
  TIntV PortV;            // bucket heads: first slot of each chain, -1 if empty
  TIntV PortLockV;        // per-bucket lock words for the *Par methods (-2 == held)
  TVec<THKeyDat> KeyDatV; // slot storage
  TBool AutoSizeP;        // grow PortV automatically when load factor > 2
  TInt FFreeKeyId, FreeKeys; // free-list head and count of free slots
private:
  // Orders KeyIds by key or by value for Sort().
  class THashKeyDatCmp {
  public:
    const THash<TKey, TDat, THashFunc>& Hash;
    bool CmpKey, Asc;
    THashKeyDatCmp(THash<TKey, TDat, THashFunc>& _Hash, const bool& _CmpKey, const bool& _Asc) :
      Hash(_Hash), CmpKey(_CmpKey), Asc(_Asc) { }
    bool operator () (const int& KeyId1, const int& KeyId2) const {
      if (CmpKey) {
        if (Asc) { return Hash.GetKey(KeyId1) < Hash.GetKey(KeyId2); }
        else { return Hash.GetKey(KeyId2) < Hash.GetKey(KeyId1); } }
      else {
        if (Asc) { return Hash[KeyId1] < Hash[KeyId2]; }
        else { return Hash[KeyId2] < Hash[KeyId1]; } } }
  };
private:
  // Checked slot access: asserts the slot is live (HashCd != -1).
  THKeyDat& GetHashKeyDat(const int& KeyId){
    THKeyDat& KeyDat=KeyDatV[KeyId];
    Assert(KeyDat.HashCd!=-1); return KeyDat;}
  const THKeyDat& GetHashKeyDat(const int& KeyId) const {
    const THKeyDat& KeyDat=KeyDatV[KeyId];
    Assert(KeyDat.HashCd!=-1); return KeyDat;}
  uint GetNextPrime(const uint& Val) const;
  void Resize();
public:
  THashGenericMP():
    PortV(), KeyDatV(),
    AutoSizeP(true), FFreeKeyId(-1), FreeKeys(0){
    //lock = PTHREAD_MUTEX_INITIALIZER;
  }
  THashGenericMP(const THashGenericMP& Hash):
    PortV(Hash.PortV), KeyDatV(Hash.KeyDatV), AutoSizeP(Hash.AutoSizeP),
    FFreeKeyId(Hash.FFreeKeyId), FreeKeys(Hash.FreeKeys) {
    //lock = PTHREAD_MUTEX_INITIALIZER;
  }
  explicit THashGenericMP(const int& ExpectVals, const bool& _AutoSizeP=false);
  // Binary deserialization constructor.
  explicit THashGenericMP(TSIn& SIn):
    PortV(SIn), KeyDatV(SIn),
    AutoSizeP(SIn), FFreeKeyId(SIn), FreeKeys(SIn){
    SIn.LoadCs();
    //lock = PTHREAD_MUTEX_INITIALIZER;
  }
  void Load(TSIn& SIn){
    PortV.Load(SIn); KeyDatV.Load(SIn);
    AutoSizeP=TBool(SIn); FFreeKeyId=TInt(SIn); FreeKeys=TInt(SIn);
    SIn.LoadCs();
    //lock = PTHREAD_MUTEX_INITIALIZER;
  }
  void Save(TSOut& SOut) const {
    PortV.Save(SOut); KeyDatV.Save(SOut);
    AutoSizeP.Save(SOut); FFreeKeyId.Save(SOut); FreeKeys.Save(SOut);
    SOut.SaveCs();
  }
  void LoadXml(const PXmlTok& XmlTok, const TStr& Nm="");
  void SaveXml(TSOut& SOut, const TStr& Nm);
  // Pre-sizes ports, slots and per-bucket locks; required before any of
  // the *Par methods are used (they never resize).
  void ResizePar(int);
  THashGenericMP& operator=(const THashGenericMP& Hash){
    if (this!=&Hash){
      PortV=Hash.PortV; KeyDatV=Hash.KeyDatV; AutoSizeP=Hash.AutoSizeP;
      FFreeKeyId=Hash.FFreeKeyId; FreeKeys=Hash.FreeKeys;}
    return *this;}
  bool operator==(const THashGenericMP& Hash) const; //J: now done the way it should be
  bool operator < (const THashGenericMP& Hash) const { Fail; return true; }
  const TDat& operator[](const int& KeyId) const {return GetHashKeyDat(KeyId).Dat;}
  TDat& operator[](const int& KeyId){return GetHashKeyDat(KeyId).Dat;}
  TDat& operator()(const TKey& Key){return AddDat(Key);}
  // Approximate memory footprint in bytes (sums key/dat footprints per slot).
  ::TSize GetMemUsed() const {
    // return PortV.GetMemUsed()+KeyDatV.GetMemUsed()+sizeof(bool)+2*sizeof(int);}
    int64 MemUsed = sizeof(bool)+2*sizeof(int);
    MemUsed += int64(PortV.Reserved()) * int64(sizeof(TInt));
    for (int KeyDatN = 0; KeyDatN < KeyDatV.Len(); KeyDatN++) {
      MemUsed += int64(2 * sizeof(TInt));
      MemUsed += int64(KeyDatV[KeyDatN].Key.GetMemUsed());
      MemUsed += int64(KeyDatV[KeyDatN].Dat.GetMemUsed());
    }
    return ::TSize(MemUsed);
  }
  // Iterator over live slots; skips deleted ones at the front if needed.
  TIter BegI() const {
    if (Len() == 0){return TIter(KeyDatV.EndI(), KeyDatV.EndI());}
    if (IsKeyIdEqKeyN()) { return TIter(KeyDatV.BegI(), KeyDatV.EndI());}
    int FKeyId=-1;  FNextKeyId(FKeyId);
    return TIter(KeyDatV.BegI()+FKeyId, KeyDatV.EndI()); }
  TIter EndI() const {return TIter(KeyDatV.EndI(), KeyDatV.EndI());}
  //TIter GetI(const int& KeyId) const {return TIter(&KeyDatV[KeyId], KeyDatV.EndI());}
  TIter GetI(const TKey& Key) const {return TIter(&KeyDatV[GetKeyId(Key)], KeyDatV.EndI());}
  // Allocate for ExpectVals entries; all buckets start empty (-1).
  void Gen(const int& ExpectVals){
    PortV.Gen(GetNextPrime(ExpectVals/2)); KeyDatV.Gen(ExpectVals, 0);
    FFreeKeyId=-1; FreeKeys=0; PortV.PutAll(TInt(-1));}
  void Clr(const bool& DoDel=true, const int& NoDelLim=-1, const bool& ResetDat=true);
  bool Empty() const {return Len()==0;}
  int Len() const {return KeyDatV.Len()-FreeKeys;}
  int GetPorts() const {return PortV.Len();}
  bool IsAutoSize() const {return AutoSizeP;}
  int GetMxKeyIds() const {return KeyDatV.Len();}
  int GetReservedKeyIds() const {return KeyDatV.Reserved();}
  // True when no slots are on the free list, i.e. KeyIds are dense.
  bool IsKeyIdEqKeyN() const {return FreeKeys==0;}
  int AddKey(const TKey& Key);
  int AddKeyPar(const TKey& Key);
  TDat& AddDatId(const TKey& Key){
    int KeyId=AddKey(Key); return KeyDatV[KeyId].Dat=KeyId;}
  TDat& AddDat(const TKey& Key){return KeyDatV[AddKey(Key)].Dat;}
  TDat& AddDat(const TKey& Key, const TDat& Dat){
    return KeyDatV[AddKey(Key)].Dat=Dat;}
  bool AddDatIfNotExist(const TKey& Key, const TDat& Dat);
  // Concurrent append of Dat to the value of Key.  TDat is assumed to be a
  // pair whose Val2 is a vector (e.g. (TInt, TIntV)) -- TODO confirm.
  // Protocol: CAS the bucket's lock word in PortLockV to -2; only the lock
  // holder may append a new slot (taken via atomic fetch-add on
  // FFreeKeyId).  A slot's Next word doubles as a per-slot lock (-2) while
  // Dat is mutated.  Returns 0 for a new key, else the key's Dat.Val1.
  int AddDatPar(const TKey& Key, const TInt& Dat) {
    //if ((KeyDatV.Len()>2*PortV.Len())||PortV.Empty()){
    //  Resize();
    //}
    const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
    const int HashCd=abs(THashFunc::GetSecHashCd(Key));
    int PrevKeyId=-1;
    int KeyId;
    int Ret;
    bool done = false;
    while(!done) {
      // Try to take the bucket lock; losers may still search the chain.
      bool port_lock = false;
      int old;
      int *ptr = &PortLockV[PortN].Val;
      old = PortLockV[PortN];
      if (old == -2) {
        port_lock = false;
      }
      else if (__sync_bool_compare_and_swap(ptr, old, -2)) {
        port_lock = true;
      }
      // Search the bucket chain for an existing key.
      KeyId = PortV[PortN];
      while ((KeyId!=-1) &&
             !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
        PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
      if (KeyId==-1) {
        // Key absent: only the bucket-lock holder may insert; otherwise retry.
        if (port_lock == false) continue;
        // Claim a fresh slot index atomically (table must be pre-sized).
        volatile unsigned int *p = (volatile unsigned int *)&FFreeKeyId.Val;
        KeyId = __sync_fetch_and_add(p, 1);
        //KeyId = __sync_fetch_and_add(&FFreeKeyId.Val, 1);
        KeyDatV[KeyId].Next=-1;
        KeyDatV[KeyId].HashCd=HashCd;
        KeyDatV[KeyId].Key=Key;
        // Lock the slot (Next := -2) while initializing Dat.
        int temp;
        int* pt = &KeyDatV[KeyId].Next.Val;
        while(true) {
          temp = KeyDatV[KeyId].Next;
          if (temp == -2) continue;
          if (__sync_bool_compare_and_swap(pt, temp, -2)) {
            KeyDatV[KeyId].Dat.Val1 = 0;
            KeyDatV[KeyId].Dat.Val2.Add(Dat);
            *pt = temp;
            done = true;
            Ret = 0;
            break;
          }
        }
        // Publish the slot on the chain, then release the bucket lock.
        if (PrevKeyId==-1){
          PortV[PortN] = KeyId;
        } else {
          KeyDatV[PrevKeyId].Next=KeyId;
        }
        *ptr = old;
      }
      else {
        // Key exists: lock the slot via its Next word, append, unlock.
        int temp, temp1;
        int* pt = &KeyDatV[KeyId].Next.Val;
        while(true) {
          temp = KeyDatV[KeyId].Next;
          temp1 = __sync_val_compare_and_swap(pt, temp, -2);
          if (temp1 == temp && temp1 != -2) {
            KeyDatV[KeyId].Dat.Val2.Add(Dat);
            *pt = temp;
            if (port_lock) *ptr = old;
            done = true;
            Ret = KeyDatV[KeyId].Dat.Val1;
            break;
          }
          else {
            usleep(20);
          }
        }
      }
    }
    return Ret;
  }
  void DelKey(const TKey& Key);
  bool DelIfKey(const TKey& Key){
    int KeyId; if (IsKey(Key, KeyId)){DelKeyId(KeyId); return true;} return false;}
  void DelKeyId(const int& KeyId){DelKey(GetKey(KeyId));}
  void DelKeyIdV(const TIntV& KeyIdV){
    for (int KeyIdN=0; KeyIdN<KeyIdV.Len(); KeyIdN++){DelKeyId(KeyIdV[KeyIdN]);}}
  void MarkDelKey(const TKey& Key); // marks the record as deleted - doesn't delete Dat (to avoid fragmentation)
  void MarkDelKeyId(const int& KeyId){MarkDelKey(GetKey(KeyId));}
  const TKey& GetKey(const int& KeyId) const { return GetHashKeyDat(KeyId).Key;}
  int GetKeyId(const TKey& Key) const;
  /// Get an index of a random element. If the hash table has many deleted keys, this may take a long time.
  int GetRndKeyId(TRnd& Rnd) const;
  /// Get an index of a random element. If the hash table has many deleted keys, defrag the hash table first (that's why the function is non-const).
  int GetRndKeyId(TRnd& Rnd, const double& EmptyFrac);
  bool IsKey(const TKey& Key) const {return GetKeyId(Key)!=-1;}
  bool IsKey(const TKey& Key, int& KeyId) const { KeyId=GetKeyId(Key); return KeyId!=-1;}
  bool IsKeyId(const int& KeyId) const {
    return (0<=KeyId)&&(KeyId<KeyDatV.Len())&&(KeyDatV[KeyId].HashCd!=-1);}
  const TDat& GetDat(const TKey& Key) const {return KeyDatV[GetKeyId(Key)].Dat;}
  TDat& GetDat(const TKey& Key){return KeyDatV[GetKeyId(Key)].Dat;}
  // TKeyDatP GetKeyDat(const int& KeyId) const {
  //   TKeyDat& KeyDat=GetHashKeyDat(KeyId);
  //   return TKeyDatP(KeyDat.Key, KeyDat.Dat);}
  void GetKeyDat(const int& KeyId, TKey& Key, TDat& Dat) const {
    const THKeyDat& KeyDat=GetHashKeyDat(KeyId);
    Key=KeyDat.Key; Dat=KeyDat.Dat;}
  bool IsKeyGetDat(const TKey& Key, TDat& Dat) const {int KeyId;
    if (IsKey(Key, KeyId)){Dat=GetHashKeyDat(KeyId).Dat; return true;}
    else {return false;}}
  // Live-slot iteration: start with FFirstKeyId() and advance with
  // FNextKeyId() until it returns false.
  int FFirstKeyId() const {return 0-1;}
  bool FNextKeyId(int& KeyId) const;
  void GetKeyV(TVec<TKey>& KeyV) const;
  void GetDatV(TVec<TDat>& DatV) const;
  void GetKeyDatPrV(TVec<TPair<TKey, TDat> >& KeyDatPrV) const;
  void GetDatKeyPrV(TVec<TPair<TDat, TKey> >& DatKeyPrV) const;
  void GetKeyDatKdV(TVec<TKeyDat<TKey, TDat> >& KeyDatKdV) const;
  void GetDatKeyKdV(TVec<TKeyDat<TDat, TKey> >& DatKeyKdV) const;
  void Swap(THashGenericMP& Hash);
  void Defrag();
  void Pack(){KeyDatV.Pack();}
  void Sort(const bool& CmpKey, const bool& Asc);
  void SortByKey(const bool& Asc=true) { Sort(true, Asc); }
  void SortByDat(const bool& Asc=true) { Sort(false, Asc); }
};
// Roughly doubling primes used as bucket-table sizes; must stay in
// ascending order because GetNextPrime() binary-searches this table.
template<class TKey, class TDat, class THashFunc>
const unsigned int THashGenericMP<TKey, TDat, THashFunc>::HashPrimeT[HashPrimes]={
  3ul, 5ul, 11ul, 23ul,
  53ul, 97ul, 193ul, 389ul, 769ul,
  1543ul, 3079ul, 6151ul, 12289ul, 24593ul,
  49157ul, 98317ul, 196613ul, 393241ul, 786433ul,
  1572869ul, 3145739ul, 6291469ul, 12582917ul, 25165843ul,
  50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul,
  1610612741ul, 3221225473ul, 4294967291ul
};
template<class TKey, class TDat, class THashFunc>
uint THashGenericMP<TKey, TDat, THashFunc>::GetNextPrime(const uint& Val) const {
  // Lower-bound binary search over HashPrimeT: return the smallest table
  // prime >= Val, or the largest prime if Val exceeds every entry.
  const uint* first = (const uint*)HashPrimeT;
  const uint* last = (const uint*)HashPrimeT + (int)HashPrimes;
  int count = (int)HashPrimes;
  while (count > 0) {
    const int step = count >> 1;
    const uint* mid = first + step;
    if (*mid < Val) {
      first = mid + 1;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first == last ? *(last - 1) : *first;
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::ResizePar(int sz){
  // First call: allocate ports, key slots and per-bucket lock words at the
  // caller-supplied capacity.  Later calls: grow the port table only when
  // auto-sizing is on and the load factor exceeds two; otherwise no-op.
  if (PortV.Len() == 0) {
    PortV.Gen(sz);
    KeyDatV.Gen(sz);
    PortLockV.Gen(sz);
  } else if (AutoSizeP && (KeyDatV.Len() > 2 * PortV.Len())) {
    PortV.Gen(GetNextPrime(PortV.Len() + 1));
  } else {
    return;
  }
  PortV.PutAll(TInt(-1));
  // Re-link every live key into its (possibly new) bucket chain.
  for (int Id = 0; Id < KeyDatV.Len(); Id++) {
    THKeyDat& KeyDat = KeyDatV[Id];
    if (KeyDat.HashCd == -1) { continue; }
    const int PortN = abs(THashFunc::GetPrimHashCd(KeyDat.Key) % PortV.Len());
    KeyDat.Next = PortV[PortN];
    PortV[PortN] = Id;
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::Resize(){
  // Serial variant of ResizePar: allocate a small initial port table, or
  // grow it when auto-sizing and the load factor exceeds two, then rehash.
  if (PortV.Len() == 0) {
    PortV.Gen(17);
  } else if (AutoSizeP && (KeyDatV.Len() > 2 * PortV.Len())) {
    PortV.Gen(GetNextPrime(PortV.Len() + 1));
  } else {
    return;
  }
  PortV.PutAll(TInt(-1));
  // Re-link every live key into its new bucket chain.
  for (int Id = 0; Id < KeyDatV.Len(); Id++) {
    THKeyDat& KeyDat = KeyDatV[Id];
    if (KeyDat.HashCd == -1) { continue; }
    const int PortN = abs(THashFunc::GetPrimHashCd(KeyDat.Key) % PortV.Len());
    KeyDat.Next = PortV[PortN];
    PortV[PortN] = Id;
  }
}
// Pre-sizing constructor: the port table holds about half the expected
// number of entries (rounded up to a table prime); all buckets start empty.
template<class TKey, class TDat, class THashFunc>
THashGenericMP<TKey, TDat, THashFunc>::THashGenericMP(const int& ExpectVals, const bool& _AutoSizeP):
  PortV(GetNextPrime(ExpectVals/2)), KeyDatV(ExpectVals, 0),
  AutoSizeP(_AutoSizeP), FFreeKeyId(-1), FreeKeys(0){
  PortV.PutAll(TInt(-1));
}
template<class TKey, class TDat, class THashFunc>
bool THashGenericMP<TKey, TDat, THashFunc>::operator==(const THashGenericMP& Hash) const {
  // Equal iff the tables have the same size and every (key, dat) pair of
  // *this is present with the same value in Hash.
  if (Len() != Hash.Len()) { return false; }
  int KeyId = FFirstKeyId();
  while (FNextKeyId(KeyId)) {
    const TKey& Key = GetKey(KeyId);
    if (!Hash.IsKey(Key) || GetDat(Key) != Hash.GetDat(Key)) { return false; }
  }
  return true;
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::Clr(const bool& DoDel, const int& NoDelLim, const bool& ResetDat){
  // DoDel releases the storage outright; otherwise the slots are kept
  // (subject to NoDelLim) and optionally reset to default key/dat pairs.
  if (DoDel) {
    PortV.Clr();
    KeyDatV.Clr();
  } else {
    PortV.PutAll(TInt(-1));
    KeyDatV.Clr(DoDel, NoDelLim);
    if (ResetDat) {
      KeyDatV.PutAll(THKeyDat());
    }
  }
  // Either way the free list becomes empty.
  FFreeKeyId = TInt(-1);
  FreeKeys = TInt(0);
}
template<class TKey, class TDat, class THashFunc>
int THashGenericMP<TKey, TDat, THashFunc>::AddKey(const TKey& Key){
  // Serial insert: returns the KeyId of Key, adding a slot if absent.
  // NOTE(review): unlike the stock THash, automatic resizing is disabled
  // here (commented out) -- callers must pre-size via Gen()/ResizePar().
  //if ((KeyDatV.Len()>2*PortV.Len())||PortV.Empty()){Resize();}
  const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
  const int HashCd=abs(THashFunc::GetSecHashCd(Key));
  // Walk the bucket chain looking for an existing match.
  int PrevKeyId=-1;
  int KeyId=PortV[PortN];
  while ((KeyId!=-1) &&
         !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
    PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
  if (KeyId==-1){
    // Not found: recycle a slot from the free list, else append a new one.
    if (FFreeKeyId==-1){
      KeyId=KeyDatV.Add(THKeyDat(-1, HashCd, Key));
    } else {
      KeyId=FFreeKeyId; FFreeKeyId=KeyDatV[FFreeKeyId].Next; FreeKeys--;
      //KeyDatV[KeyId]=TKeyDat(-1, HashCd, Key); // slow version
      KeyDatV[KeyId].Next=-1;
      KeyDatV[KeyId].HashCd=HashCd;
      KeyDatV[KeyId].Key=Key;
      //KeyDatV[KeyId].Dat=TDat(); // already empty
    }
    // Link the new slot at the position where the chain walk stopped.
    if (PrevKeyId==-1){
      PortV[PortN]=KeyId;
    } else {
      KeyDatV[PrevKeyId].Next=KeyId;
    }
  }
  return KeyId;
}
template<class TKey, class TDat, class THashFunc>
int THashGenericMP<TKey, TDat, THashFunc>::AddKeyPar(const TKey& Key){
  // Concurrent insert: returns the KeyId of Key, adding a slot if absent.
  // Protocol: try to take the bucket's lock word in PortLockV by CAS-ing
  // it to -2.  Threads that lose the race still scan the chain (the key
  // may already exist), but only the lock holder may append.  New slots
  // are claimed via an atomic fetch-and-add on FFreeKeyId, so KeyDatV
  // must be pre-sized (ResizePar) -- there is no bounds check here.
  //if ((KeyDatV.Len()>2*PortV.Len())||PortV.Empty()){
  //  Resize();
  //}
  const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
  const int HashCd=abs(THashFunc::GetSecHashCd(Key));
  int PrevKeyId=-1;
  int KeyId;
  bool done = false;
  while(!done) {
    // Attempt to acquire the bucket lock (-2 == held by someone).
    bool port_lock = false;
    int old;
    int *ptr = &PortLockV[PortN].Val;
    old = PortLockV[PortN];
    if (old == -2) {
      port_lock = false;
    }
    else if (__sync_bool_compare_and_swap(ptr, old, -2)) {
      port_lock = true;
    }
    // Search the bucket chain for an existing key.
    KeyId = PortV[PortN];
    while ((KeyId!=-1) &&
           !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
      PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
    if (KeyId==-1) {
      // Absent: only the lock holder may insert; otherwise retry the loop.
      if (port_lock == false) continue;
      // Claim a fresh slot index atomically.
      volatile unsigned int *p = (volatile unsigned int *)&FFreeKeyId.Val;
      KeyId = __sync_fetch_and_add(p, 1);
      //KeyId = __sync_fetch_and_add(&FFreeKeyId.Val, 1);
      KeyDatV[KeyId].Next=-1;
      KeyDatV[KeyId].HashCd=HashCd;
      KeyDatV[KeyId].Key=Key;
      // Publish the slot on the chain, then release the bucket lock.
      if (PrevKeyId==-1){
        PortV[PortN] = KeyId;
      } else {
        KeyDatV[PrevKeyId].Next=KeyId;
      }
      *ptr = old;
      done = true;
    }
    else {
      // Found an existing slot.  NOTE(review): the bucket lock is not
      // released on this path when port_lock is true -- confirm whether
      // callers rely on that or it is a leak.
      done = true;
    }
  }
  return KeyId;
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::DelKey(const TKey& Key){
  // Serial delete: unlink Key's slot from its bucket chain, push the slot
  // on the free list and reset it to default-constructed key/dat.
  // Asserts that the key exists.  NOTE(review): mutates FFreeKeyId
  // non-atomically -- do not mix with the *Par methods.
  IAssert(!PortV.Empty());
  const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
  const int HashCd=abs(THashFunc::GetSecHashCd(Key));
  int PrevKeyId=-1;
  int KeyId=PortV[PortN];
  while ((KeyId!=-1) &&
         !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
    PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
  //IAssertR(KeyId!=-1, Key.GetStr()); //J: some classes do not provide GetStr()?
  IAssert(KeyId!=-1); //J: some classes do not provide GetStr()?
  // Unlink the slot from the chain...
  if (PrevKeyId==-1){PortV[PortN]=KeyDatV[KeyId].Next;}
  else {KeyDatV[PrevKeyId].Next=KeyDatV[KeyId].Next;}
  // ...and recycle it on the free list, clearing its contents.
  KeyDatV[KeyId].Next=FFreeKeyId; FFreeKeyId=KeyId; FreeKeys++;
  KeyDatV[KeyId].HashCd=TInt(-1);
  KeyDatV[KeyId].Key=TKey();
  KeyDatV[KeyId].Dat=TDat();
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::MarkDelKey(const TKey& Key){
  // Same as DelKey except the last two steps: the slot's Key and Dat are
  // left in place (only HashCd is cleared), avoiding the cost of
  // destroying Dat and the resulting heap fragmentation.
  IAssert(!PortV.Empty());
  const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
  const int HashCd=abs(THashFunc::GetSecHashCd(Key));
  int PrevKeyId=-1;
  int KeyId=PortV[PortN];
  while ((KeyId!=-1) &&
         !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
    PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
  IAssertR(KeyId!=-1, Key.GetStr());
  // Unlink from the chain and recycle the slot on the free list.
  if (PrevKeyId==-1){PortV[PortN]=KeyDatV[KeyId].Next;}
  else {KeyDatV[PrevKeyId].Next=KeyDatV[KeyId].Next;}
  KeyDatV[KeyId].Next=FFreeKeyId; FFreeKeyId=KeyId; FreeKeys++;
  KeyDatV[KeyId].HashCd=TInt(-1);
}
template<class TKey, class TDat, class THashFunc>
int THashGenericMP<TKey, TDat, THashFunc>::GetRndKeyId(TRnd& Rnd) const {
  // Rejection-sample slot indices until a live (non-deleted) one is hit;
  // can loop for a long time when many slots are deleted.
  IAssert(!Empty());
  int KeyId;
  do {
    KeyId = abs(Rnd.GetUniDevInt(KeyDatV.Len()));
  } while (KeyDatV[KeyId].HashCd == -1);
  return KeyId;
}
// Returns a random live KeyId even when the table contains deleted keys;
// if the deleted fraction exceeds EmptyFrac the table is defragmented
// first (which is why this overload is non-const).
template<class TKey, class TDat, class THashFunc>
int THashGenericMP<TKey, TDat, THashFunc>::GetRndKeyId(TRnd& Rnd, const double& EmptyFrac) {
  IAssert(!Empty());
  if (FreeKeys / double(Len() + FreeKeys) > EmptyFrac) { Defrag(); }
  int KeyId;
  do {
    KeyId = Rnd.GetUniDevInt(KeyDatV.Len());
  } while (KeyDatV[KeyId].HashCd == -1);
  return KeyId;
}
template<class TKey, class TDat, class THashFunc>
int THashGenericMP<TKey, TDat, THashFunc>::GetKeyId(const TKey& Key) const {
  // Walk the collision chain of Key's bucket; -1 when the key is absent.
  if (PortV.Empty()) { return -1; }
  const int PortN = abs(THashFunc::GetPrimHashCd(Key) % PortV.Len());
  const int HashCd = abs(THashFunc::GetSecHashCd(Key));
  for (int KeyId = PortV[PortN]; KeyId != -1; KeyId = KeyDatV[KeyId].Next) {
    if ((KeyDatV[KeyId].HashCd == HashCd) && (KeyDatV[KeyId].Key == Key)) {
      return KeyId;
    }
  }
  return -1;
}
template<class TKey, class TDat, class THashFunc>
bool THashGenericMP<TKey, TDat, THashFunc>::FNextKeyId(int& KeyId) const {
  // Advance KeyId to the next live slot; false once past the last slot.
  for (KeyId++; KeyId < KeyDatV.Len(); KeyId++) {
    if (KeyDatV[KeyId].HashCd != -1) { return true; }
  }
  return false;
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetKeyV(TVec<TKey>& KeyV) const {
  // Collect every live key, in slot order.
  KeyV.Gen(Len(), 0);
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    KeyV.Add(GetKey(KeyId));
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetDatV(TVec<TDat>& DatV) const {
  // Collect the value of every live slot, in slot order.
  DatV.Gen(Len(), 0);
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    DatV.Add(GetHashKeyDat(KeyId).Dat);
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetKeyDatPrV(TVec<TPair<TKey, TDat> >& KeyDatPrV) const {
  // Collect (key, dat) pairs for every live slot, in slot order.
  KeyDatPrV.Gen(Len(), 0);
  TKey Key; TDat Dat;
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    GetKeyDat(KeyId, Key, Dat);
    KeyDatPrV.Add(TPair<TKey, TDat>(Key, Dat));
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetDatKeyPrV(TVec<TPair<TDat, TKey> >& DatKeyPrV) const {
  // Collect (dat, key) pairs for every live slot, in slot order.
  DatKeyPrV.Gen(Len(), 0);
  TKey Key; TDat Dat;
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    GetKeyDat(KeyId, Key, Dat);
    DatKeyPrV.Add(TPair<TDat, TKey>(Dat, Key));
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetKeyDatKdV(TVec<TKeyDat<TKey, TDat> >& KeyDatKdV) const {
  // Collect TKeyDat(key, dat) records for every live slot, in slot order.
  KeyDatKdV.Gen(Len(), 0);
  TKey Key; TDat Dat;
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    GetKeyDat(KeyId, Key, Dat);
    KeyDatKdV.Add(TKeyDat<TKey, TDat>(Key, Dat));
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::GetDatKeyKdV(TVec<TKeyDat<TDat, TKey> >& DatKeyKdV) const {
  // Collect TKeyDat(dat, key) records for every live slot, in slot order.
  DatKeyKdV.Gen(Len(), 0);
  TKey Key; TDat Dat;
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    GetKeyDat(KeyId, Key, Dat);
    DatKeyKdV.Add(TKeyDat<TDat, TKey>(Dat, Key));
  }
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::Swap(THashGenericMP& Hash) {
  // Exchange all internal state with Hash; no-op on self-swap.
  if (this == &Hash) { return; }
  PortV.Swap(Hash.PortV);
  KeyDatV.Swap(Hash.KeyDatV);
  ::Swap(AutoSizeP, Hash.AutoSizeP);
  ::Swap(FFreeKeyId, Hash.FFreeKeyId);
  ::Swap(FreeKeys, Hash.FreeKeys);
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::Defrag(){
  // Rebuild the table without deleted slots so that KeyIds become dense
  // (IsKeyIdEqKeyN() holds afterwards).  No-op when already dense.
  if (IsKeyIdEqKeyN()) { return; }
  THashGenericMP<TKey, TDat, THashFunc> Hash(PortV.Len());
  TKey Key; TDat Dat;
  for (int KeyId = FFirstKeyId(); FNextKeyId(KeyId); ) {
    GetKeyDat(KeyId, Key, Dat);
    Hash.AddDat(Key, Dat);
  }
  Pack();
  operator=(Hash);
  IAssert(IsKeyIdEqKeyN());
}
template<class TKey, class TDat, class THashFunc>
void THashGenericMP<TKey, TDat, THashFunc>::Sort(const bool& CmpKey, const bool& Asc) {
  // Reorders the slots in place by key (CmpKey) or by value, ascending or
  // descending, then rebuilds the bucket heads and chain links to match.
  // Requires a dense table (no deleted slots).
  IAssertR(IsKeyIdEqKeyN(), "THash::Sort only works when table has no deleted keys.");
  // TargV: desired final order of KeyIds.  MapV/StateV track where each
  // original slot currently lives while the permutation is applied.
  TIntV TargV(Len()), MapV(Len()), StateV(Len());
  for (int i = 0; i < TargV.Len(); i++) {
    TargV[i] = i; MapV[i] = i; StateV[i] = i;
  }
  // sort KeyIds
  THashKeyDatCmp HashCmp(*this, CmpKey, Asc);
  TargV.SortCmp(HashCmp);
  // Apply the permutation in place by swapping slots into position.
  THashKeyDat<TKey, TDat> Tmp;
  for (int i = 0; i < TargV.Len()-1; i++) {
    const int SrcPos = MapV[TargV[i]];
    const int Loc = i;
    // swap data
    Tmp = KeyDatV[SrcPos];
    KeyDatV[SrcPos] = KeyDatV[Loc];
    KeyDatV[Loc] = Tmp;
    // swap bookkeeping so MapV keeps pointing at current slot positions
    MapV[StateV[i]] = SrcPos;
    StateV.Swap(Loc, SrcPos);
  }
  // Rebuild MapV as old-KeyId -> new-KeyId, then rewrite every bucket
  // head and chain link to the new slot numbering.
  for (int i = 0; i < TargV.Len(); i++) {
    MapV[TargV[i]] = i; }
  for (int p = 0; p < PortV.Len(); p++) {
    if (PortV[p] != -1) {
      PortV[p] = MapV[PortV[p]]; } }
  for (int i = 0; i < KeyDatV.Len(); i++) {
    if (KeyDatV[i].Next != -1) {
      KeyDatV[i].Next = MapV[KeyDatV[i].Next]; }
  }
}
template<class TKey, class TDat, class THashFunc>
bool THashGenericMP<TKey, TDat, THashFunc>::AddDatIfNotExist(const TKey& Key, const TDat& Dat) {
  // Concurrent insert-if-absent: returns true when (Key, Dat) was
  // inserted, false when the key already existed (Dat left untouched).
  // Uses the same per-bucket lock protocol as AddKeyPar; a new slot's
  // Next word doubles as a slot lock (-2) while Dat is written.
  const int PortN=abs(THashFunc::GetPrimHashCd(Key)%PortV.Len());
  const int HashCd=abs(THashFunc::GetSecHashCd(Key));
  int PrevKeyId=-1;
  int KeyId;
  bool done = false;
  while(!done) {
    // Attempt to acquire the bucket lock (-2 == held by someone).
    bool port_lock = false;
    int old;
    int *ptr = &PortLockV[PortN].Val;
    old = PortLockV[PortN];
    if (old == -2) {
      port_lock = false;
    }
    else if (__sync_bool_compare_and_swap(ptr, old, -2)) {
      port_lock = true;
    }
    // Search the bucket chain for an existing key.
    KeyId = PortV[PortN];
    while ((KeyId!=-1) &&
           !((KeyDatV[KeyId].HashCd==HashCd) && (KeyDatV[KeyId].Key==Key))){
      PrevKeyId=KeyId; KeyId=KeyDatV[KeyId].Next;}
    if (KeyId==-1) {
      // Absent: only the lock holder may insert; otherwise retry.
      if (port_lock == false) continue;
      // Claim a fresh slot index atomically (table must be pre-sized).
      volatile unsigned int *p = (volatile unsigned int *)&FFreeKeyId.Val;
      KeyId = __sync_fetch_and_add(p, 1);
      //KeyId = __sync_fetch_and_add(&FFreeKeyId.Val, 1);
      KeyDatV[KeyId].Next=-1;
      KeyDatV[KeyId].HashCd=HashCd;
      KeyDatV[KeyId].Key=Key;
      // Lock the slot (Next := -2) while writing Dat, then restore Next.
      int temp;
      int* pt = &KeyDatV[KeyId].Next.Val;
      while(true) {
        temp = KeyDatV[KeyId].Next;
        if (temp == -2) continue;
        if (__sync_bool_compare_and_swap(pt, temp, -2)) {
          KeyDatV[KeyId].Dat = Dat;
          *pt = temp;
          done = true;
          break;
        }
      }
      // Publish the slot on the chain, then release the bucket lock.
      if (PrevKeyId==-1){
        PortV[PortN] = KeyId;
      } else {
        KeyDatV[PrevKeyId].Next=KeyId;
      }
      *ptr = old;
    }
    else {
      // Key exists: release the bucket lock if we took it; done == false
      // falls through as the "not inserted" return value.
      if (port_lock) *ptr = old;
      break;
    }
  }
  return done;
}
|
ut-osa/syncchar | linux-2.6.16-unmod/arch/x86_64/kernel/vsyscall.c | /*
* linux/arch/x86_64/kernel/vsyscall.c
*
* Copyright (C) 2001 <NAME> <<EMAIL>> SuSE
* Copyright 2003 <NAME>, SuSE Labs.
*
* Thanks to <EMAIL> for some useful hint.
* Special thanks to <NAME> for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
*
* vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
* at virtual address -10Mbyte+1024bytes etc... There are at max 4
* vsyscalls. One vsyscall can reserve more than 1 slot to avoid
* jumping out of line if necessary. We cannot add more with this
* mechanism because older kernels won't return -ENOSYS.
* If we want more than four we need a vDSO.
*
* Note: the concept clashes with user mode linux. If you use UML and
* want per guest time just set the kernel.vsyscall64 sysctl to 0.
*/
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
#include <asm/unistd.h>
/* Carry whole seconds out of the microsecond field so that
 * 0 <= tv_usec < 1000000 (for non-negative inputs). */
static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t carry = tv->tv_usec / 1000000;

	if (carry) {
		tv->tv_sec += carry;
		tv->tv_usec %= 1000000;
	}
}
/*
 * Read the wall-clock time entirely in user space: snapshot xtime under
 * the vsyscall seqlock and extrapolate the sub-jiffy remainder from the
 * TSC or the HPET, depending on __vxtime.mode.
 */
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			/* Clamp so time cannot appear to run backwards when
			   the TSC reads below the last recorded value. */
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	/* usec may exceed a second; fold the excess into tv_sec. */
	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
/* Copy the kernel's cached timezone; read without locking (see above). */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}
/* Fallback: issue the real gettimeofday system call.  The "vsysc2"
 * label marks the syscall instruction so vsyscall_sysctl_change() can
 * patch it (syscall <-> NOPs) when the sysctl is toggled. */
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}
/* Fallback: issue the real time(2) system call.  The "vsysc1" label
 * marks the syscall instruction for patching by vsyscall_sysctl_change(). */
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}
/* vsyscall slot 0: user-space gettimeofday.  Falls back to the real
 * system call when the kernel.vsyscall64 sysctl disables vsyscall time
 * reads (e.g. under UML; see the file header). */
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (unlikely(!__sysctl_vsyscall))
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}
/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
/* vsyscall slot 1: time(2) read straight from xtime whole seconds,
 * without the seqlock (see the caveat above). */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (unlikely(!__sysctl_vsyscall))
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}
/* vsyscall slot 2: reserved; always fails with -ENOSYS. */
long __vsyscall(2) venosys_0(void)
{
	return -ENOSYS;
}
/* vsyscall slot 3: reserved; always fails with -ENOSYS. */
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}
#ifdef CONFIG_SYSCTL
#define SYSCALL 0x050f	/* bytes 0f 05 = the x86-64 "syscall" opcode (stored little-endian) */
#define NOP2 0x9090	/* two one-byte NOPs */

/*
 * NOP out syscall in vsyscall page when not needed.
 */
/* proc handler for kernel.vsyscall64: patches the vsysc1/vsysc2
 * instruction sites.  Patching goes through ioremap() of the physical
 * address rather than the normal virtual mapping. */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
                                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 *map1, *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	/* vsyscall disabled: restore the real syscall instructions;
	 * enabled: NOP them out so the fast path is used. */
	if (!sysctl_vsyscall) {
		*map1 = SYSCALL;
		*map2 = SYSCALL;
	} else {
		*map1 = NOP2;
		*map2 = NOP2;
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}
/* No sysctl(2) binary-interface strategy is provided for vsyscall64;
 * only the /proc handler above works. */
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
                                   void __user *oldval, size_t __user *oldlenp,
                                   void __user *newval, size_t newlen,
                                   void **context)
{
	return -ENOSYS;
}
/* kernel.vsyscall64 (ctl_name 99), registered under a private "kernel"
 * sysctl root so it merges with the existing kernel.* namespace. */
static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};
#endif
/* Install the vsyscall page at its fixed virtual address using the
 * PAGE_KERNEL_VSYSCALL protection (presumably user-readable -- that is
 * what lets userland call into it). */
static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
/* Boot-time setup: sanity-check that the linked entry points landed at
 * their architecturally fixed addresses, map the vsyscall page, and
 * register the kernel.vsyscall64 sysctl. */
static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	return 0;
}

__initcall(vsyscall_init);
|
Kitware/super3d | tools/cam_and_homog_picker.cxx | /*ckwg +29
* Copyright 2014-2016 by Kitware, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither name of Kitware, Inc. nor the names of any contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <vnl/vnl_double_3x3.h>
#include <vpgl/vpgl_perspective_camera.h>
#include <vul/vul_arg.h>
#include <vul/vul_file.h>
int main(int argc, char *argv[])
{
vul_arg<std::string> homog_file("-h", "homog file", "");
vul_arg<std::string> camera_file("-c", "camera file", "");
vul_arg<std::string> nvm_file("-n", "n view match file", "");
vul_arg<std::string> frame_file("-i", "image list", "");
vul_arg<std::string> frames_str("-f", "frames to extract", "");
vul_arg_parse( argc, argv );
std::istringstream stream(frames_str());
unsigned int f;
std::vector<unsigned int> frames;
while (stream >> f)
{
frames.push_back(f);
}
if (homog_file.set())
{
std::vector<vnl_double_3x3> homogs;
std::ifstream infile(homog_file().c_str());
vnl_double_3x3 h;
while (infile >> h)
{
homogs.push_back(h);
}
std::ofstream outfile((homog_file() + ".pared").c_str());
for (unsigned int i = 0; i < frames.size(); i++)
{
outfile << std::setprecision(10) << i << "\n" << 0 << "\n" << homogs[frames[i]] << "\n\n";
}
}
//extract krtd files from cam file
if (camera_file.set() && frame_file.set())
{
vpgl_perspective_camera<double> cam;
std::vector<vpgl_perspective_camera<double> > cams;
std::ifstream cam_infile(camera_file().c_str());
unsigned int index;
while (cam_infile >> index >> cam)
{
std::cout << index << "\n";
vpgl_calibration_matrix<double> cal = cam.get_calibration();
cal.set_focal_length(cal.focal_length() * cal.x_scale());
cal.set_y_scale(cal.y_scale() / cal.x_scale());
cal.set_x_scale(1.0);
cam.set_calibration(cal);
cams.push_back(cam);
}
std::cout << cams.size();
std::string imgname;
std::vector<std::string> imgnames;
std::ifstream infile(frame_file().c_str());
while (infile >> imgname)
{
imgnames.push_back(imgname);
}
std::string directory = camera_file();
unsigned int found = directory.find_last_of("/\\");
directory = directory.substr(0, found);
for (unsigned int i = 0; i < frames.size(); i++)
{
std::string camname = imgnames[frames[i]];
unsigned int found = camname.find_last_of("/\\");
camname = camname.substr(found+1, camname.size() - 4 - found - 1);
std::cout << camname << "\n";
std::ofstream outfile((directory + "/" + camname + ".krtd").c_str());
outfile << std::setprecision(10) << cams[frames[i]] << "\n0\n";
outfile.close();
}
}
std::cout << nvm_file() << "\n";
if (nvm_file.set())
{
std::vector<vpgl_perspective_camera<double> > cams;
std::ifstream infile(nvm_file().c_str());
std::string temp;
infile >> temp >> temp;
double fx, cx, fy, cy, r;
infile >> fx >> cx >> fy >> cy >> r;
unsigned int numcams;
infile >> numcams;
std::cout << "num cameras: " << numcams << "\n";
std::string directory = nvm_file();
unsigned int found = directory.find_last_of("/\\");
directory = directory.substr(0, found);
for (unsigned int i = 0; i < numcams; i++)
{
std::string filename;
infile >> filename;
double f, w, x, y, z, camx, camy, camz, r, zero;
infile >> f >> w >> x >> y >> z >> camx >> camy >> camz >> r >> zero;
vpgl_perspective_camera<double> cam;
vpgl_calibration_matrix<double> K;
K.set_focal_length(f);
K.set_x_scale(1.0);
K.set_y_scale(fy/fx);
K.set_principal_point(vgl_point_2d<double>(cx, cy));
cam.set_rotation(vgl_rotation_3d<double>(vnl_quaternion<double>(x, y, z, w)));
cam.set_camera_center(vgl_point_3d<double>(camx, camy, camz));
cam.set_calibration(K);
std::string::size_type found = filename.find_last_of("/\\");
if (found != std::string::npos)
filename = filename.substr(found+1, filename.size() - found - 1);
filename = filename.substr(0, filename.size() - 4);
std::cout << "writing cam: " << directory + "/" + filename + ".krtd\n";
std::ofstream outfile((directory + "/" + filename + ".krtd").c_str());
outfile << std::setprecision(10) << cam << "\n0\n";
outfile.close();
}
unsigned int numpts;
infile >> numpts;
std::string line;
std::getline(infile, line);
std::ofstream plyfile((directory + "/model.ply").c_str());
plyfile << "ply\nformat ascii 1.0\nelement vertex " << numpts << "\n";
plyfile << "property float x\nproperty float y\nproperty float z\n";
plyfile << "end_header\n";
for (unsigned int i = 0; i < numpts; i++)
{
std::getline(infile, line);
std::istringstream sstr(line);
float x, y, z;
sstr >> x >> y >> z;
plyfile << x << " " << y << " " << z << "\n";
}
plyfile.close();
}
return 0;
}
|
vanch3d/DEMIST | SimulDoc.cpp | // SimulDoc.cpp : implementation of the CSimulDoc class
//
#include "stdafx.h"
#include "Simul.h"
#include "MainFrm.h"
#include "SimulDoc.h"
#include "Tools\MvDocTemplate.h"
#include <MSimulation\PreyPredModel.h>
#include <MInstruction\LearningUnit.h>
#include <Prefs\Pref.h>
#include "LearnerTrace.h"
#include "BPInfoDlg.h"
#include "Tools\ST_SplitterWnd.h"
#include <math.h>
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
#define NBDATA 500
/////////////////////////////////////////////////////////////////////////////
// CSimulDoc
// MFC runtime-class and message-map boilerplate: routes the dynamic range
// of "open External Representation" menu commands (ID_VIEW_EXTREP1..20)
// to the document, so each Learning Unit can expose up to 20 ERs.
IMPLEMENT_DYNCREATE(CSimulDoc, CDocument)
BEGIN_MESSAGE_MAP(CSimulDoc, CDocument)
	//{{AFX_MSG_MAP(CSimulDoc)
	ON_COMMAND_RANGE(ID_VIEW_EXTREP1,ID_VIEW_EXTREP20, OnCmdLauchER)
	ON_UPDATE_COMMAND_UI_RANGE( ID_VIEW_EXTREP1, ID_VIEW_EXTREP20, OnUpdateCmdLauchER )
	//}}AFX_MSG_MAP
END_MESSAGE_MAP()
/////////////////////////////////////////////////////////////////////////////
// CSimulDoc construction/destruction
CSimulDoc::CSimulDoc()
{
	// Fresh document: nothing run yet, no prediction pending, task idle.
	m_bFirstRun = TRUE;
	m_nPredMode = PRED_NONE;
	m_bTaskDone = TRUE;
	m_bTaskRead = TRUE;
	m_nModSelItem = 0;
	// Timers start at 0; -1 means "no breakpoint armed".
	m_currTimer = 0;
	m_runTimer = 0;
	m_nCurrBP = -1;
	m_nNextBP = -1;
	m_nRunMode = RUN_NONE;
	// MODELS (owned by the document, released in the destructor)
	m_pSimul = new CPreyPredModel();
	m_pInstruct = new CInstModel(this);
	//m_CurrModel = 0;
	// In mode 1 no Learning Unit is active yet (-1); otherwise start on #0.
	if (CPref::g_nMode==1)
		m_CurrInst = -1;
	else
		m_CurrInst = 0;
}
CSimulDoc::~CSimulDoc()
{
	// Release the owned models and the document-level input history.
	if (m_pSimul) delete m_pSimul;
	if (m_pInstruct) delete m_pInstruct;
	m_cUserInput.RemoveAll();
	// Leftover experiment: clearing the main frame's ER icons on close.
/*	CWnd *pWnd = AfxGetMainWnd();
	CMainFrame *pMainFrm = DYNAMIC_DOWNCAST( CMainFrame, pWnd);
	if (pMainFrm)
	{
		pMainFrm->OnSetMERIcons(NULL);
	}*/
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when a document is open.
/// \param lpszPathName Path of the DEMIST file to open.
/// \return TRUE if the document is open successfully, FALSE otherwise.
///
/// This function updates the simulation and the instructional models and calls
/// UpdateRestartSimul() to start the scenario.
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnOpenDocument(LPCTSTR lpszPathName)
{
	if (!CDocument::OnOpenDocument(lpszPathName))
		return FALSE;
	// TODO: Add your specialized creation code here
	// Let both models finish their post-deserialisation setup.
	m_pSimul->OnInitiaUpdate();
	m_pInstruct->OnInitiaUpdate();
	m_Model = "Prey and Predator model\r\n";
	SetPathName( lpszPathName );
	// Open the learner trace for this document, then reset the scenario
	// silently (no confirmation prompt).
	CTrace::WriteDocHeader(this);
	UpdateRestartSimul(FALSE);
	// Leftover experiment: auto-activating the initial LU in tutor mode.
/*	if (CPref::g_nMode)
	{
		//BOOL bOld = m_bAutoDelete;
		//m_bAutoDelete = FALSE;
		OnActivateInitLU();
		OnActivateCurrentLU(FALSE);
		//m_bAutoDelete = m_bAutoDelete;
	}*/
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when a new document is open.
/// \return TRUE if the document is created successfully, FALSE otherwise.
///
/// This function creates and initialises new empty simulation and instructional models.
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnNewDocument()
{
	if (!CDocument::OnNewDocument())
		return FALSE;
	// TODO: add reinitialization code here
	// Reset both models to their empty state and restore the default title.
	m_pSimul->OnNewModel();
	m_pInstruct->OnNewModel();
	m_Model = "Prey and Predator model\r\n";
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when a document is closed.
///
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::OnCloseDocument()
{
	// TODO: Add your specialized code here and/or call the base class
	//SaveModified();
	// Capture the path before the base class tears the document down, then
	// close the learner-trace entry for it.
	CString strDocName = GetPathName();
	CDocument::OnCloseDocument();
	CTrace::WriteDocEnd(strDocName);
}
/////////////////////////////////////////////////////////////////////////////
// CSimulDoc serialization
void CSimulDoc::Serialize(CArchive& ar)
{
	// Stream the model title first, then delegate to the simulation and
	// instructional models — the order must match between store and load.
	if (ar.IsStoring())
	{
		// TODO: add storing code here
		ar << m_Model;
	}
	else
	{
		// TODO: add loading code here
		ar >> m_Model;
	}
	m_pSimul->Serialize(ar);
	m_pInstruct->Serialize(ar);
}
/////////////////////////////////////////////////////////////////////////////
// CSimulDoc diagnostics
#ifdef _DEBUG
// Debug-only object validation; delegates to the framework check.
void CSimulDoc::AssertValid() const
{
	CDocument::AssertValid();
}
// Debug-only state dump; delegates to the framework implementation.
void CSimulDoc::Dump(CDumpContext& dc) const
{
	CDocument::Dump(dc);
}
#endif //_DEBUG
/////////////////////////////////////////////////////////////////////////////
// CSimulDoc commands
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the mathematical models are modified.
/// \param pSender Pointer to the view where the intervention took place.
///
/// This function simply dispatches the message (#DOC_UPDATE_ALLDATA) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateDoc(CView* pSender)
{
	// pSender is currently unused: all views, including the origin of the
	// change, are asked to refresh their data.
	UpdateAllViews(NULL,DOC_UPDATE_ALLDATA);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the simulation is ran or reviewed.
/// \param pSender Pointer to the view where the intervention took place.
///
/// This function simply dispatches the message (#DOC_UPDATE_TIMERDATA) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateTimerDoc(CView* pSender)
{
	// Running past a tested prediction clears the prediction state.
	if (m_nRunMode == RUN_RUN && m_nPredMode == PRED_TEST)
		m_nPredMode = PRED_NONE;
	// Leftover experiment: packaging time + run mode in a CTimerBreakPoint.
	//CTimerBreakPoint mInfo;
	//mInfo.m_nBPType = GetCurrTime();
	//mInfo.m_tBreakPt = m_nRunMode;
	//UpdateAllViews(NULL,DOC_UPDATE_TIMERDATA,&mInfo);
	// The current run mode is passed as the notification hint object.
	UpdateAllViews(NULL,DOC_UPDATE_TIMERDATA,(CObject*)&m_nRunMode);
//	UpdateAllViews(NULL,DOC_UPDATE_TIMERDATA);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the simulation performs a Map Relation
/// \param pSender Pointer to the view where the action took place
/// \param pTransInfo Pointer to the data structure detailling the relation.
///
/// This function updates the Current Time (#m_currTimer) and dispatches
/// the message (#TRANSLATION_MAPRELATION) to all the views in order to proceed
/// the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateMapRelation(CView* pSender, CTranslationInfo* pTransInfo)
{
	// Record which view initiated the relation so receivers can target it.
	if (pTransInfo && pSender)
		pTransInfo->m_pWndTarget = pSender;
	// A time of -1 means "keep the current time"; otherwise jump to it.
	if (pTransInfo && pTransInfo->m_nTime!=-1)
		m_currTimer = pTransInfo->m_nTime;
	UpdateAllViews(NULL,TRANSLATION_MAPRELATION,(CObject*)pTransInfo);
	CTrace::T_MAPRELATION(this,pTransInfo);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user (or the system) is restarting the simulation from scratch.
/// \param bAlert Specify if a confirmation message is proposed to the user (default=TRUE),
///
/// This function reinitialise the simulation by cleaning ALL the user's actions stored in the ERs
/// (CExternRepUnit::m_cUserInput) or in the document (#m_cUserInput), by re-assigning
/// the breakpoints (#m_nCurrBP and #m_nNextBP) and the Timer (#m_currTimer and #m_runTimer) and
/// by recalculating the mathematical models (CMdEquation::OnUpdate(CLearningUnit*,BOOL)).
/// It finally dispatches the message (#DOC_UPDATE_RESTARTSIMUL) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateRestartSimul(BOOL bAlert)
{
	CString mstr;
	mstr.LoadString(ERR_SIMULATION_RESTART);
	BOOL bOK = TRUE;
	// Optionally ask the user to confirm losing all inputs.
	if (bAlert)
		bOK = (AfxGetMainWnd()->MessageBox(mstr,NULL,MB_OKCANCEL|MB_ICONWARNING) == IDOK);
	if (bOK)
	{
		// Rewind time and forget the current/next breakpoints.
		m_currTimer = 0;
		m_runTimer = 0;
		m_nCurrBP = -1;
		m_nNextBP = -1;
		CLearningUnit *pLU = GetCurrentLU();
		if (pLU)
		{
			// Drop every user input stored per External Representation.
			for (int i=0;i<pLU->m_cMERSet.GetSize();i++)
			{
				CExternRepUnit* pExtRep = pLU->m_cMERSet.GetAt(i);
				if (pExtRep)
					pExtRep->m_cUserInput.RemoveAll();
			}
			pLU->CleanTimerBP();
			int nbBP = pLU->m_cTBreakSet.GetSize();
			if (nbBP)
			{
				//m_cSlider.SetCurrentBreakPoint(-1);
				// Arm the first breakpoint, except on the very first run of
				// a unit that defers its breakpoints (m_bFirstRun flags).
				CTimerBreakPoint *pBP = pLU->m_cTBreakSet.GetAt(0);
				BOOL bDoBP = !pLU->m_bFirstRun || (pLU->m_bFirstRun && !m_bFirstRun);
				if (pBP && bDoBP)
					m_nNextBP = pBP->m_tBreakPt;
			}
			else
				m_nNextBP = -1;
			// Recompute the mathematical model from its initial values.
			CMdEquation *pEqu = GetCurrentModel();
			if (pEqu)
				pEqu->OnUpdate(pLU,TRUE);
		}
		// Also drop the document-level input history, then notify views.
		m_cUserInput.RemoveAll();
		UpdateAllViews(NULL,DOC_UPDATE_RESTARTSIMUL);
	}
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user performs a Prediction.
/// \param pSender Pointer to the view where the action took place.
/// \param pData Pointer to the data structure detailling the action.
///
/// This function simply dispatches the message (#TRANSLATION_HYPOTHESIS) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateHypothesis(CView* pSender, CUserData *pData)
{
	// Enter prediction mode, trace the hypothesis, then broadcast it.
	m_nPredMode = PRED_HYPOT;
	CTrace::T_HYPOTHESIS(pSender,pData);
	UpdateAllViews(NULL,TRANSLATION_HYPOTHESIS,pData);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the simulation reaches a TEST breakpoint.
/// \param pSender Pointer to the view where the action took place
/// \param pData Pointer to the data structure summarising the action
///
/// This function simply dispatches the message (#TRANSLATION_TEST) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateTest(CView* pSender, CUserData *pData)
{
	// Enter test mode, trace it, and notify every view except the sender.
	m_nPredMode = PRED_TEST;
	CTrace::T_HYPO_TEST(pSender,pData);
	UpdateAllViews(pSender,TRANSLATION_TEST,pData);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the simulation reaches a breakpoint.
/// \param pSender Pointer to the view where the action took place
/// \param pData Pointer to the data structure summarising the action
///
/// This function simply dispatches the message (#DOC_UPDATE_BREAKPOINT) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdatebreakPoint(CView* pSender, CTimerBreakPoint *pData)
{
	CTrace::T_BREAKPOINT(pData);
	UpdateAllViews(pSender,DOC_UPDATE_BREAKPOINT,pData);
	// A TEST breakpoint additionally switches the document into test mode.
	if (pData->m_nBPType == BREAKP_TEST)
	{
		UpdateTest(NULL,NULL);
	}
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user acts on an ER, by doing either an Action or a Prediction.
/// \param pExtRep Pointer to the ER where the action took place
/// \param pData Pointer to the data structure detailling the action
///
/// It creates a CUserOutput variable based on the information stored in the CUserData structure
/// and stores it in the CExternRepUnit::m_cUserInput field of the ER.
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::UpdateERUserData(CExternRepUnit *pExtRep, CUserData *pData)
{
	if (!pExtRep) return FALSE;
	if (!pData) return FALSE;
	// Inputs are only accepted at an editable timer breakpoint; the
	// breakpoint type is copied into the data so views can interpret it.
	CTimerBreakPoint pBP;
	CLearningUnit *pLU = GetCurrentLU();
	BOOL bEditable = pLU->GetTimerBPAt(pData->m_nTime,pBP);
	if (!bEditable) return FALSE;
	pData->m_nType = pBP.m_nBPType;
	// Insert or merge the input into the ER's per-time history.  When an
	// entry already exists for this (experiment set, outcome) pair, its
	// previous value is kept in m_dOldValue so the change can be traced.
	// (Unused debug locals `nbD` removed.)
	CUserOutput pUserData;
	BOOL bGet = pExtRep->m_cUserInput.Lookup(pData->m_nTime,pUserData);
	if (!bGet)
	{
		pUserData.SetAt(pData->m_nExpSet,pData->m_nOutcome,*pData);
		pUserData.m_nTime = pData->m_nTime;
		pExtRep->m_cUserInput.SetAt(pData->m_nTime,pUserData);
	}
	else
	{
		CUserData pData2;
		BOOL bRes = pUserData.GetAt(pData->m_nExpSet,pData->m_nOutcome,pData2);
		if (bRes)
		{
			pData->m_dOldValue = pData2.m_dOldValue;
		}
		pUserData.SetAt(pData->m_nExpSet,pData->m_nOutcome,*pData);
		pExtRep->m_cUserInput.SetAt(pData->m_nTime,pUserData);
	}
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user acts on an ER, by doing either an Action or a Prediction.
/// \param pSender Pointer to the view where the action took place
/// \param pData Pointer to the data structure summarising the action
///
/// It creates a CUserOutput variable based on the information of the CUserData
/// structure, stores it in the #m_cUserInput field of the document and, depending on the nature
/// of the intervention, call either UpdateAction(CView*,CUserData*) or UpdateHypothesis(CView*,CUserData*)
/// to update all the views.
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::UpdateUserData(CView* pSender, CUserData *pData)
{
	// Guard against a null payload, matching UpdateERUserData().
	if (!pData) return FALSE;
	// Inputs are only accepted at an editable timer breakpoint; the
	// breakpoint type is copied into the data so views can interpret it.
	CTimerBreakPoint pBP;
	CLearningUnit *pLU = GetCurrentLU();
	BOOL bEditable = pLU->GetTimerBPAt(pData->m_nTime,pBP);
	if (!bEditable) return FALSE;
	pData->m_nType = pBP.m_nBPType;
	// Insert or merge the input into the document-level history keyed by
	// time step; keep the previous value in m_dOldValue when overwriting.
	// (Unused debug locals `nbD` and stale commented-out code removed.)
	CUserOutput pUserData;
	BOOL bGet = m_cUserInput.Lookup(pData->m_nTime,pUserData);
	if (!bGet)
	{
		pUserData.SetAt(pData->m_nExpSet,pData->m_nOutcome,*pData);
		pUserData.m_nTime = pData->m_nTime;
		m_cUserInput.SetAt(pData->m_nTime,pUserData);
	}
	else
	{
		CUserData pData2;
		BOOL bRes = pUserData.GetAt(pData->m_nExpSet,pData->m_nOutcome,pData2);
		if (bRes)
		{
			pData->m_dOldValue = pData2.m_dOldValue;
		}
		pUserData.SetAt(pData->m_nExpSet,pData->m_nOutcome,*pData);
		m_cUserInput.SetAt(pData->m_nTime,pUserData);
	}
	// Dispatch: an ACTION re-runs the model from the edited point, any
	// other editable breakpoint records a hypothesis.
	if (pBP.m_nBPType == BREAKP_ACTION) // ACTION
	{
		UpdateAction(pSender,pData);
	}
	else // HYPOTHESIS
	{
		UpdateHypothesis(pSender,pData);
	}
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user performs an Action.
/// \param pSender Pointer to the view where the action took place
/// \param pData Pointer to the data structure summarising the action
///
/// It uses the new value defined by the user's action (CUserData::m_dValue) to
/// recalculate the mathematical models (CMdEquation::OnUpdateFrom(CLearningUnit*,int,int,BOOL)),
/// then dispatches the message (#TRANSLATION_ACTION) to all the
/// views in order to proceed the relevant feedback.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateAction(CView* pSender, CUserData *pData)
{
	if (!pData) return;
//	if (pData->m_bUpdateNow)
	{
		CLearningUnit* pLU = GetCurrentLU();
		CMdEquation* pEqu = GetCurrentModel();
		// Write the user's new value into the targeted data series.
		CMdData *pModData= pLU->m_cDataPoints.GetAt(pData->m_nOutcome);
		// NOTE(review): mstr/nOldV/nNewVal look like debugging leftovers —
		// they are never used afterwards; confirm before removing.
		CString mstr=pModData->GetDataName(pLU->m_bShowAbbrev);
		double nOldV = pModData->GetAt(pData->m_nTime,pData->m_nExpSet);
		pModData->SetAt(pData->m_dValue,pData->m_nTime,pData->m_nExpSet);
		double nNewVal = pModData->GetAt(pData->m_nTime,pData->m_nExpSet);
		// Re-run the model from the edited experiment set / time onward.
		pEqu->OnUpdateFrom(pLU,pData->m_nExpSet,pData->m_nTime,FALSE);
		pModData->ResetMinMax(pData->m_nExpSet);
		CTrace::T_ACTION(pSender,pData);
	}
	//UpdateAllViews(pSender,DOC_UPDATE_ALLDATA);
	UpdateAllViews(NULL,TRANSLATION_ACTION,pData);
}
/////////////////////////////////////////////////////////////////////////////
/// Get the Learning Unit currently used by the framework.
/// \return A pointer to the relevant LU, NULL if none
/////////////////////////////////////////////////////////////////////////////
CLearningUnit* CSimulDoc::GetCurrentLU()
{
	// No unit selected (-1) or selection past the end -> no current LU.
	const int count = m_pInstruct->m_cLUSet.GetSize();
	const BOOL valid = (m_CurrInst != -1) && (m_CurrInst < count);
	return valid ? m_pInstruct->m_cLUSet.GetAt(m_CurrInst) : NULL;
}
/////////////////////////////////////////////////////////////////////////////
/// Get the Mathematical Model associated with the current Learning Unit.
/// \return A pointer to the model, NULL if none.
/////////////////////////////////////////////////////////////////////////////
CMdEquation* CSimulDoc::GetCurrentModel()
{
	CMdEquation* pEqu = NULL;
	CLearningUnit* pLU = GetCurrentLU();
	// GetCurrentLU() returns NULL when no unit is active (the constructor
	// sets m_CurrInst to -1 in mode 1); guard before dereferencing.
	if (pLU && pLU->m_nSimulID != -1 && pLU->m_nSimulID<m_pSimul->m_cEquSet.GetSize())
		pEqu = m_pSimul->m_cEquSet.GetAt(pLU->m_nSimulID);//GetEquation(m_CurrModel);
	return pEqu;
}
/////////////////////////////////////////////////////////////////////////////
/// Get the number of time-step associated with the current Learning Unit.
/// \return the number of time-step (CLearningUnit::GetMaxTimer()) defined in the current LU, 0 if none.
/////////////////////////////////////////////////////////////////////////////
int CSimulDoc::GetMaxTime()
{
CLearningUnit* pLU = GetCurrentLU();
if (pLU)
return pLU->GetMaxTimer();
else
return 0;
}
/////////////////////////////////////////////////////////////////////////////
/// Get the Current Time associated with the current Learning Unit.
/// \return the current time (as stored in the #m_currTimer data) .
/////////////////////////////////////////////////////////////////////////////
int CSimulDoc::GetCurrTime()
{
	// Simple accessor for the simulation's current time step.
	return m_currTimer;
}
/////////////////////////////////////////////////////////////////////////////
/// Get the Run Time associated with the current Learning Unit.
/// \return the run time (as stored in the #m_runTimer data) .
/////////////////////////////////////////////////////////////////////////////
int CSimulDoc::GetRunTime()
{
	// Simple accessor for the simulation's run-time counter.
	return m_runTimer;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the ER's menu and icons need to be updated.
///
/// This function calls the CLearningUnit::UpdateERMenu() to modify the content
/// of the View menu and CMainFrame::OnSetMERIcons() to modify the content of the
/// ER's toolbar.
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateViewMenu()
{
	// Rebuild the View menu entries for the current unit's ERs...
	CLearningUnit* pLU = GetCurrentLU();
	if (pLU)
	{
		pLU->UpdateERMenu(NULL);
	}
	// ...and refresh the ER toolbar icons on the main frame.  pLU may be
	// NULL here, which clears the icons.
	CWnd *pWnd = AfxGetMainWnd();
	CMainFrame *pMainFrm = DYNAMIC_DOWNCAST( CMainFrame, pWnd);
	if (pMainFrm)
	{
		pMainFrm->OnSetMERIcons(pLU);
	}
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the ER's menu and icons need to be updated.
///
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::UpdateViewMenuDrop(CWnd* pWnd,CRect rLoc)
{
	if (!pWnd) return;
	CLearningUnit* pLU = GetCurrentLU();
	if (!pLU) return;
	//pLU->UpdateERMenu(NULL);
	// Leftover experiment: positioning from a toolbar button rectangle.
//	CRect rc;
//	pWnd->SendMessage(TB_GETRECT, pnmtb->iItem, (LPARAM)&rc);
//	pWnd->ClientToScreen(&rc);
	//ClientToScreen(&point);
	// Show the unit's ER menu as a popup below rLoc.  GetERsMenu() hands
	// over a fresh menu object, so it is destroyed and freed here.
	CMenu *pMenu = NULL;
	pMenu = pLU->GetERsMenu();
	if (pMenu)
	{
		pMenu->TrackPopupMenu(TPM_LEFTALIGN|TPM_RIGHTBUTTON,rLoc.left, rLoc.bottom,pWnd,&rLoc);
		pMenu->DestroyMenu();
		delete pMenu;
	}
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the ER's menu and icons need to be updated.
///
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::OnUpdateCmdLauchER(CCmdUI* pCmdUI)
{
	// Disable the command unless the current Learning Unit handled the
	// UI update for this ER menu entry itself.
	CLearningUnit* pUnit = GetCurrentLU();
	const BOOL bHandled = (pUnit != NULL) && pUnit->UpdateLauchER(pCmdUI);
	if (!bHandled)
		pCmdUI->Enable(FALSE);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user activates an ER by using the menu or the icons
/// \param nID ID of the menu command activated by the user.
///
/////////////////////////////////////////////////////////////////////////////
void CSimulDoc::OnCmdLauchER(UINT nID )
{
	// TODO: Add your command handler code here
	// Forward the menu command id to the current LU, which maps it to an ER.
	CLearningUnit* pLU = GetCurrentLU();
	if (pLU) pLU->LauchER(nID);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user closes a view.
/// \param pView Pointer to the view closed by the user.
/// \return TRUE if the framework can destroy the view. FALSE otherwise.
///
/// This function calles the CLearningUnit::OnCloseER(CView) to clean all thie information
/// related to the External Representation and its view.
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnCloseER(CView *pView)
{
	// Without an active unit the view may always close; otherwise the
	// unit gets a chance to veto and to clean up its ER bookkeeping.
	CLearningUnit* pUnit = GetCurrentLU();
	if (pUnit == NULL)
		return TRUE;
	return pUnit->OnCloseER(pView);
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user requests the next LU in the scenario.
/// \param bMsg Specify if a confirmation message is presented to the user (Default=TRUE).
/// \return TRUE if the next LU can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnActivateNextLU(BOOL bMsg)
{
	CString mstr;
	mstr.LoadString(MSG_LU_ACT_NEXT);
	// Refuse when already on the last unit; optionally confirm with user.
	int nb = m_pInstruct->m_cLUSet.GetSize();
	if (m_CurrInst >= (nb-1)) return FALSE;
	if (bMsg && AfxGetMainWnd()->MessageBox(mstr,NULL,MB_YESNO)!=IDYES) return FALSE;
	CLearningUnit *pLU = GetCurrentLU();
	if (!pLU) return FALSE;
	// Close the current unit's ER views without destroying the document
	// (m_bAutoDelete is restored at the end of the function).
	BOOL bOld = m_bAutoDelete;
	m_bAutoDelete = FALSE;
	pLU->OnCloseAllER();
	// Advance to the next unit and reset the task/run state.
	m_CurrInst++;
	m_bTaskDone = TRUE;
	m_bTaskRead = TRUE;
	pLU = GetCurrentLU();
//	if (CPref::g_nMode)
//		pLU->m_bCanStart=FALSE;
	m_bFirstRun = TRUE;
	CTrace::T_STARTUNIT(m_CurrInst,pLU->m_sLUName);
	UpdateRestartSimul(FALSE);
	UpdateViewMenu();
	// Interactive activation also opens the unit's auto-start ERs.
	if (bMsg /*&& pLU->m_bCanStart*/) OnLaunchStartER();
	m_bAutoDelete = bOld;
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user requests the previous LU in the scenario.
/// \param bMsg Specify if a confirmation message is presented to the user (Default=TRUE).
/// \return TRUE if the previous LU can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnActivatePrevLU(BOOL bMsg)
{
	CString mstr;
	mstr.LoadString(MSG_LU_ACT_PREVIOUS);
	// Refuse when already on the first unit; optionally confirm with user.
	int nb = m_pInstruct->m_cLUSet.GetSize();
	if (!m_CurrInst) return FALSE;
	if (bMsg && AfxGetMainWnd()->MessageBox(mstr,NULL,MB_YESNO)!=IDYES) return FALSE;
	//CInstModel *pInst = pDoc->m_pInstruct;
	CLearningUnit *pLU = GetCurrentLU();
	if (!pLU) return FALSE;
	// Close the current unit's ER views without destroying the document.
	BOOL bOld = m_bAutoDelete;
	m_bAutoDelete = FALSE;
	pLU->OnCloseAllER();
	m_bAutoDelete = bOld;
	// Step back one unit and reset the task/run state.
	m_CurrInst--;
	m_bTaskDone = TRUE;
	m_bTaskRead = TRUE;
	pLU = GetCurrentLU();
	m_bFirstRun = TRUE;
//	if (CPref::g_nMode)
//		pLU->m_bCanStart=FALSE;
	CTrace::T_STARTUNIT(m_CurrInst,pLU->m_sLUName);
	UpdateRestartSimul(FALSE);
	UpdateViewMenu();
	// Interactive activation also opens the unit's auto-start ERs.
	if (bMsg) OnLaunchStartER();
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user requests the reinitialisation of the current LU.
/// \param bMsg Specify if a confirmation message is presented to the user (Default=TRUE).
/// \return TRUE if the LU can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnActivateCurrentLU(BOOL bMsg/*=TRUE*/)
{
	CString mstr;
	mstr.LoadString(MSG_LU_ACT_CURRENT);
	// Optionally confirm before restarting the unit from scratch.
	if (bMsg)
		if (AfxGetMainWnd()->MessageBox(mstr,NULL,MB_YESNO)!=IDYES)
			return FALSE;
	CLearningUnit *pLU = GetCurrentLU();
	if (!pLU) return FALSE;
	pLU->m_bCanStart = TRUE;
	// Close its ER views without destroying the document.
	BOOL bOld = m_bAutoDelete;
	m_bAutoDelete = FALSE;
	pLU->OnCloseAllER();
	m_bAutoDelete = bOld;
	// Reset run/task state, restart silently, and re-open auto-start ERs.
	m_bFirstRun = TRUE;
	CTrace::T_STARTUNIT(m_CurrInst,pLU->m_sLUName);
	m_bTaskDone = TRUE;
	m_bTaskRead = TRUE;
	UpdateRestartSimul(FALSE);
	UpdateViewMenu();
	OnLaunchStartER();
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the user requests the reinitialisation of the current LU.
/// \param bMsg Specify if a confirmation message is presented to the user (Default=TRUE).
/// \return TRUE if the LU can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnActivateInitLU()
{
	CString mstr;
	mstr.LoadString(MSG_LU_ACT_INIT);
	// Always ask before jumping back to the first unit of the scenario.
	if (AfxGetMainWnd()->MessageBox(mstr,NULL,MB_YESNO)!=IDYES) return FALSE;
	CLearningUnit *pLU = GetCurrentLU();
	if (pLU)
	{
		// Close the current unit's ER views without destroying the document.
		BOOL bOld = m_bAutoDelete;
		m_bAutoDelete = FALSE;
		pLU->OnCloseAllER();
		m_bAutoDelete = bOld;
	}
	// Select unit #0 and reset the task/run state.
	m_CurrInst = 0;
	pLU = GetCurrentLU();
//	if (CPref::g_nMode)
//		pLU->m_bCanStart = FALSE;
	m_bFirstRun = TRUE;
	CTrace::T_STARTUNIT(m_CurrInst,pLU->m_sLUName);
	m_bTaskDone = TRUE;
	m_bTaskRead = TRUE;
	UpdateRestartSimul(FALSE);
	UpdateViewMenu();
	// Note: auto-start ERs are deliberately NOT opened here.
	//OnLaunchStartER();
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the default ERs of the LU have to be displayed
/// \return TRUE if the views can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnLaunchStartER()
{
	// Open every External Representation of the current unit that is
	// flagged to auto-open when the unit starts.
	CLearningUnit *pUnit = GetCurrentLU();
	if (pUnit == NULL)
		return FALSE;
	const int count = pUnit->m_cMERSet.GetSize();
	for (int idx = 0; idx < count; idx++)
	{
		CExternRepUnit *pRep = pUnit->m_cMERSet.GetAt(idx);
		if (pRep != NULL && pRep->m_bOpenStart)
			pUnit->LauchER(pRep->m_CmDId);
	}
	return TRUE;
}
/////////////////////////////////////////////////////////////////////////////
/// Called by the framework when the default ERs of the LU have to be displayed
/// \return TRUE if the views can be initialised, FALSE otherwise.
///
/// This function
/////////////////////////////////////////////////////////////////////////////
BOOL CSimulDoc::OnShowTask()
{
	CString mstr(_T(""));
	// NOTE(review): bNew is set below but never read afterwards — confirm
	// whether it was meant to drive some behavior.
	BOOL bNew = FALSE;
	int nCT= m_currTimer;
	int nCBP = m_nCurrBP;
	int nNBP = m_nNextBP;
	// Advance the current breakpoint index to the pending "next" one.
	if (nCBP != nNBP)
	{
		nCBP = m_nCurrBP = nNBP;
		bNew = TRUE;
	}
	m_bTaskRead = TRUE;
	CTimerBreakPoint pBP;
	// NOTE(review): GetCurrentLU() is dereferenced without a null check, and
	// bEditable is never used — confirm both are intentional.
	BOOL bEditable = GetCurrentLU()->GetTimerBPAt(nCBP,pBP);
	// Pick the beep type, dialog title and icon from the breakpoint type.
	UINT nType = MB_OK;
	UINT nIcon=0;
	switch (pBP.m_nBPType) {
	case 1:
		nType = MB_ICONEXCLAMATION;
		//tr = _T("Action : \n\n");
		mstr = _T("Action");
		nIcon = IDB_BP_ACTION;
		break;
	case 2:
		nType = MB_ICONQUESTION;
		//tr = _T("Prediction : \n\n");
		mstr = _T("Prediction");
		nIcon = IDB_BP_HYPOT;
		break;
	case 3:
		nType = MB_ICONQUESTION;
		//tr = _T("Test : \n\n");
		mstr = _T("Test");
		nIcon = IDB_BP_TEST;
		break;
	case 0:
	default:
		nType = MB_ICONINFORMATION;
		//mstr = _T("Information : \n\n");
		mstr = _T("Information");
		nIcon = IDB_BP_INFO;
		break;
	}
	// Show the breakpoint description in a modal dialog, with a beep whose
	// sound matches the message-box type chosen above.
	CBPInfoDlg dlg;
	dlg.m_strTitle = mstr;
	dlg.m_nIcon = nIcon;
	dlg.m_strDesc = pBP.m_sDesc;
	::MessageBeep(nType);
	dlg.DoModal();
	//mstr += pBP.m_sDesc;
	// AfxMessageBox(mstr,nType);
	// If the user has not yet produced an answer for this timer value,
	// pre-register the time at which the answer is expected.
	CUserOutput pUserData;
	int nUser = m_cUserInput.GetCount();  // NOTE(review): unused — confirm.
	BOOL bGet = m_cUserInput.Lookup(nCT,pUserData);
	if (!bGet)
	{
		if (pBP.m_nBPType == BREAKP_ACTION || pBP.m_nBPType == BREAKP_HYPOT)
		{
			if (pBP.m_nBPType == BREAKP_ACTION)
				// Actions are answered at the current timer value.
				pUserData.m_nTime = nCT;
			else
			{
				// Predictions are answered at the NEXT breakpoint: find the
				// breakpoint matching the current time, then take the one
				// after it (i+1). -1 means "no follow-up breakpoint".
				CLearningUnit *pLU = GetCurrentLU();
				int nb = pLU->m_cTBreakSet.GetSize();
				int nKey = -1;
				for (int i=0;i<nb;i++)
				{
					CTimerBreakPoint* pTB = pLU->m_cTBreakSet.GetAt(i);
					if (!pTB) continue;
					if (pTB->m_tBreakPt == nCT)
					{
						nKey = i+1;
						break;
					}
				}
				if (nKey!=-1 && nKey<nb)
				{
					CTimerBreakPoint* pTB = pLU->m_cTBreakSet.GetAt(nKey);
					if (pTB)
						pUserData.m_nTime = pTB->m_tBreakPt;
					else
						pUserData.m_nTime = -1;
				}
				else
					pUserData.m_nTime = -1;
			}
			if (pUserData.m_nTime != -1)
				m_cUserInput.SetAt(nCT,pUserData);
		}
		// The task now awaits a user answer.
		m_bTaskDone = FALSE;
	}
	UpdatebreakPoint(NULL, &pBP);
	return TRUE;
}
BOOL CSimulDoc::OnCreateMultiPanes(CObject *pWndList,CCreateContext* pContext,CMDIChildWnd *pChildWnd)
{
	// Delegate multi-pane creation to the current learning unit.
	// Fixed: a DYNAMIC_DOWNCAST of pWndList to CPtrArray was computed here
	// but never used (the raw CObject* is forwarded); removed the dead local.
	CLearningUnit *pLU = GetCurrentLU();
	if (!pLU) return FALSE;
	pLU->OnCreateMultiPanes(pWndList,pContext,pChildWnd);
	return TRUE;
}
|
ja-pa/probe-engine | httpx/httplog/httplog.go | <filename>httpx/httplog/httplog.go
// Package httplog implements HTTP event logging. In OONI, we use this
// functionality to emit pleasant logging during normal operations.
package httplog
import (
"crypto/tls"
"net"
"net/http"
"strings"
"github.com/ooni/probe-engine/log"
"github.com/ooni/probe-engine/internal/tlsx"
)
// RoundTripLogger is a httptracex.Handler that logs HTTP round-trip
// events (DNS, connect, TLS, request/response headers).
type RoundTripLogger struct {
	// Logger is the logs emitter.
	Logger log.Logger

	// headers accumulates the request header fields recorded by
	// WroteHeaderField; it is reset in ConnectionReady and replayed
	// by WroteHeaders.
	headers http.Header
}
// DNSStart is called when name resolution starts; it logs the hostname
// that is being resolved.
func (rtl *RoundTripLogger) DNSStart(host string) {
	rtl.Logger.Debugf("dns: resolving %s", host)
}
// DNSDone is called once name resolution completes. On failure it logs
// the error; on success it logs the number of resolved addresses and
// then one line per address.
func (rtl *RoundTripLogger) DNSDone(addrs []net.IPAddr, err error) {
	if err != nil {
		rtl.Logger.Debugf("dns: error: %s", err.Error())
		return
	}
	rtl.Logger.Debugf("dns: got %d entries", len(addrs))
	for i := range addrs {
		rtl.Logger.Debugf("- %s", addrs[i].String())
	}
}
// ConnectStart is called when we start connecting; it logs the network
// (e.g. "tcp") and the target address being dialed.
func (rtl *RoundTripLogger) ConnectStart(network, addr string) {
	rtl.Logger.Debugf("connect: using %s, %s", network, addr)
}
// ConnectDone is called after the connect attempt; it logs either the
// connection error or the endpoint we connected to.
func (rtl *RoundTripLogger) ConnectDone(network, addr string, err error) {
	if err == nil {
		rtl.Logger.Debugf("connect: connected to %s, %s", network, addr)
		return
	}
	rtl.Logger.Debugf("connect: error: %s", err.Error())
}
// TLSHandshakeStart is called when the TLS handshake begins.
func (rtl *RoundTripLogger) TLSHandshakeStart() {
	rtl.Logger.Debug("tls: starting handshake")
}
// TLSHandshakeDone is called after the TLS handshake. It logs the error
// on failure; on success it logs the negotiated ALPN protocol and the
// TLS version (rendered by the tlsx helper).
func (rtl *RoundTripLogger) TLSHandshakeDone(
	state tls.ConnectionState, err error,
) {
	if err != nil {
		rtl.Logger.Debugf("tls: handshake error: %s", err.Error())
		return
	}
	rtl.Logger.Debug("tls: handshake OK")
	rtl.Logger.Debugf("- negotiated protocol: %s", state.NegotiatedProtocol)
	rtl.Logger.Debugf("- version: %s", tlsx.VersionString(state.Version))
}
// ConnectionReady is called when a connection is ready to be used. It
// logs the remote endpoint and resets the captured-headers map so the
// upcoming request starts from a clean slate.
func (rtl *RoundTripLogger) ConnectionReady(conn net.Conn) {
	rtl.Logger.Debugf(
		"http: connection to %s ready; sending request", conn.RemoteAddr(),
	)
	rtl.headers = make(http.Header, 16) // reset; 16 is just a size hint
}
// logSingleHeader emits one "prefix key: value" line; HTTP/2 header
// names are lowercased to match the wire representation.
func (rtl *RoundTripLogger) logSingleHeader(
	http2 bool, prefix, key, value string,
) {
	name := key
	if http2 {
		name = strings.ToLower(name)
	}
	rtl.Logger.Debugf("%s %s: %s", prefix, name, value)
}
// logHeaderVector emits one log line per value of a multi-valued header.
func (rtl *RoundTripLogger) logHeaderVector(
	http2 bool, prefix, key string, values []string,
) {
	for i := range values {
		rtl.logSingleHeader(http2, prefix, key, values[i])
	}
}
// WroteHeaderField is called as each request header field is written;
// the values are captured so WroteHeaders can replay them later.
func (rtl *RoundTripLogger) WroteHeaderField(key string, values []string) {
	for i := range values {
		rtl.headers.Add(key, values[i])
	}
}
// WroteHeaders is called when all headers are written. It dumps the
// headers captured by WroteHeaderField. HTTP/2 is detected by the
// presence of the ":method" pseudo header; pseudo headers are printed
// first in a fixed order, then the regular headers.
func (rtl *RoundTripLogger) WroteHeaders(request *http.Request) {
	http2 := rtl.headers.Get(":method") != ""
	if !http2 {
		// HTTP/1.1: synthesize the request line from the request itself.
		rtl.Logger.Debugf(
			"> %s %s HTTP/1.1", request.Method, request.URL.RequestURI(),
		)
	} else {
		for _, s := range []string{":method", ":scheme", ":authority", ":path"} {
			rtl.logSingleHeader(http2, ">", s, rtl.headers.Get(s))
		}
	}
	for key, values := range rtl.headers {
		if strings.HasPrefix(key, ":") {
			continue // pseudo headers were already printed above
		}
		rtl.logHeaderVector(http2, ">", key, values)
	}
	rtl.Logger.Debug(">")
}
// WroteRequest is called once the whole request has been written; it
// logs whether the write succeeded or failed.
func (rtl *RoundTripLogger) WroteRequest(err error) {
	if err == nil {
		rtl.Logger.Debugf("http: request sent; waiting for response")
		return
	}
	rtl.Logger.Debugf("http: sending request failed: %s", err.Error())
}
// GotFirstResponseByte is called when the first byte of the response
// arrives.
func (rtl *RoundTripLogger) GotFirstResponseByte() {
	rtl.Logger.Debugf("http: start receiving response")
}
// GotHeaders is called when the response headers are available. It
// prints the status line (HTTP/1.x style or a ":status" pseudo header
// for HTTP/2, detected via response.Proto) followed by every header.
func (rtl *RoundTripLogger) GotHeaders(response *http.Response) {
	http2 := response.Proto == "HTTP/2" || response.Proto == "HTTP/2.0"
	if !http2 {
		rtl.Logger.Debugf("< %s %s", response.Proto, response.Status)
	} else {
		rtl.Logger.Debugf("< :status: %d", response.StatusCode)
	}
	for key, values := range response.Header {
		rtl.logHeaderVector(http2, "<", key, values)
	}
	rtl.Logger.Debug("<")
}
|
charafau/TurboChat | app/src/main/java/com/nullpointerbay/turbochat/service/UserApiService.java | package com.nullpointerbay.turbochat.service;
import com.nullpointerbay.turbochat.model.User;
import io.reactivex.Flowable;
import retrofit2.http.GET;
import retrofit2.http.Query;
/**
 * Retrofit service definition for fetching user data from the chat backend.
 */
public interface UserApiService {

    /**
     * Fetches the user identified by the given nickname.
     *
     * @param nick the nickname, sent as the {@code nick} query parameter
     * @return a {@link Flowable} emitting the matching {@link User}
     */
    @GET("/user")
    Flowable<User> getUser(@Query("nick") String nick);
}
|
Philipeano/post-it | server/dist/controllers/userController.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _index = require('../models/index');
var _index2 = _interopRequireDefault(_index);
var _validator = require('../helpers/validator');
var _validator2 = _interopRequireDefault(_validator);
var _auth = require('../helpers/auth');
var _auth2 = _interopRequireDefault(_auth);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var reqPasswordHash = void 0;
var errorMessage = void 0;
/**
* @description: Defines controller for manipulating 'user' model
* @class
*/
var UserController = function () {
/**
* @description: Initializes instance with 'user' model as local property
* @constructor
*/
function UserController() {
_classCallCheck(this, UserController);
this.user = _index2.default.User;
}
/**
* @description: Registers a new user
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {Object} newUser
*/
_createClass(UserController, [{
key: 'signUpUser',
value: function () {
var _ref = _asyncToGenerator(regeneratorRuntime.mark(function _callee(req, res) {
var existingUser, newUser;
return regeneratorRuntime.wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
errorMessage = _validator2.default.checkEmpty([{ Username: req.body.username }, { 'Email Address': req.body.email }, { Password: <PASSWORD> }, { 'Password Retype': req.body.cPassword }]);
if (!(errorMessage.trim() !== '')) {
_context.next = 5;
break;
}
return _context.abrupt('return', res.status(400).json({ message: errorMessage }));
case 5:
if (_validator2.default.passwordsMatch(req.body.password, req.body.cPassword)) {
_context.next = 10;
break;
}
errorMessage = errorMessage + ' ' + _validator2.default.validationMessage;
return _context.abrupt('return', res.status(400).json({ message: errorMessage }));
case 10:
if (_validator2.default.isValidEmail(req.body.email)) {
_context.next = 15;
break;
}
errorMessage = errorMessage + ' ' + _validator2.default.validationMessage;
return _context.abrupt('return', res.status(400).json({ message: errorMessage }));
case 15:
if (_validator2.default.isValidPassword(req.body.password)) {
_context.next = 18;
break;
}
errorMessage = errorMessage + ' ' + _validator2.default.validationMessage;
return _context.abrupt('return', res.status(400).json({ message: errorMessage }));
case 18:
_context.prev = 18;
existingUser = void 0;
_context.next = 22;
return this.user.findOne({
where: { username: req.body.username } });
case 22:
existingUser = _context.sent;
if (!existingUser) {
_context.next = 25;
break;
}
return _context.abrupt('return', res.status(409).json({ message: 'Username is already in use!' }));
case 25:
_context.next = 27;
return this.user.findOne({
where: { email: req.body.email } });
case 27:
existingUser = _context.sent;
if (!existingUser) {
_context.next = 30;
break;
}
return _context.abrupt('return', res.status(409).json({ message: 'Email Address already exists!' }));
case 30:
reqPasswordHash = _validator2.default.generateHash(req.body.password);
_context.next = 33;
return this.user.sync();
case 33:
_context.next = 35;
return this.user.create({
username: req.body.username,
email: req.body.email,
password: <PASSWORD>
});
case 35:
newUser = _context.sent;
res.status(201).json({
message: 'You signed up successfully!',
user: _validator2.default.trimFields(newUser),
token: _auth2.default.generateToken({ userId: newUser.id })
});
_context.next = 42;
break;
case 39:
_context.prev = 39;
_context.t0 = _context['catch'](18);
res.status(500).json({ message: _context.t0.message });
case 42:
case 'end':
return _context.stop();
}
}
}, _callee, this, [[18, 39]]);
}));
function signUpUser(_x, _x2) {
return _ref.apply(this, arguments);
}
return signUpUser;
}()
/**
* @description: Logs in a user
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {Object} user
*/
}, {
key: 'signInUser',
value: function () {
var _ref2 = _asyncToGenerator(regeneratorRuntime.mark(function _callee2(req, res) {
var matchingUser;
return regeneratorRuntime.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
errorMessage = _validator2.default.checkEmpty([{ Username: req.body.username }, { Password: <PASSWORD> }]);
if (!(errorMessage.trim() !== '')) {
_context2.next = 3;
break;
}
return _context2.abrupt('return', res.status(400).json({ message: errorMessage }));
case 3:
_context2.prev = 3;
_context2.next = 6;
return this.user.findOne({
where: { username: req.body.username } });
case 6:
matchingUser = _context2.sent;
if (matchingUser) {
_context2.next = 9;
break;
}
return _context2.abrupt('return', res.status(400).json({ message: 'Username does not exist!' }));
case 9:
if (_validator2.default.verifyPassword(req.body.password, matchingUser.password)) {
_context2.next = 11;
break;
}
return _context2.abrupt('return', res.status(400).json({ message: 'Password is wrong!' }));
case 11:
res.status(200).json({
message: 'You signed in successfully!',
user: _validator2.default.trimFields(matchingUser),
token: _auth2.default.generateToken({ userId: matchingUser.id })
});
_context2.next = 17;
break;
case 14:
_context2.prev = 14;
_context2.t0 = _context2['catch'](3);
res.status(500).json({ message: _context2.t0.message });
case 17:
case 'end':
return _context2.stop();
}
}
}, _callee2, this, [[3, 14]]);
}));
function signInUser(_x3, _x4) {
return _ref2.apply(this, arguments);
}
return signInUser;
}()
/**
* @description: Logs out a user
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {void}
*/
}, {
key: 'signOutUser',
value: function signOutUser(req, res) {
if (req.headers.token) {
req.headers.token = undefined;
} else if (req.body.token) {
req.body.token = undefined;
}
res.status(200).json({
token: undefined,
message: 'You have been logged out.'
});
}
/**
* @description: Fetches all available users
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {Object} allUsers
*/
}, {
key: 'getAllUsers',
value: function () {
var _ref3 = _asyncToGenerator(regeneratorRuntime.mark(function _callee3(req, res) {
var allUsers;
return regeneratorRuntime.wrap(function _callee3$(_context3) {
while (1) {
switch (_context3.prev = _context3.next) {
case 0:
_context3.prev = 0;
_context3.next = 3;
return this.user.findAll({
attributes: ['id', 'username', 'email']
});
case 3:
allUsers = _context3.sent;
res.status(200).json({ 'Registered users': allUsers });
_context3.next = 10;
break;
case 7:
_context3.prev = 7;
_context3.t0 = _context3['catch'](0);
res.status(500).json({ message: _context3.t0.message });
case 10:
case 'end':
return _context3.stop();
}
}
}, _callee3, this, [[0, 7]]);
}));
function getAllUsers(_x5, _x6) {
return _ref3.apply(this, arguments);
}
return getAllUsers;
}()
/**
* @description: Fetches a user matching specified userKey
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {Object} matchingUser
*/
}, {
key: 'getUserByKey',
value: function () {
var _ref4 = _asyncToGenerator(regeneratorRuntime.mark(function _callee4(req, res) {
var matchingUser;
return regeneratorRuntime.wrap(function _callee4$(_context4) {
while (1) {
switch (_context4.prev = _context4.next) {
case 0:
errorMessage = _validator2.default.checkEmpty([{ 'User ID': req.params.userId }]);
if (!(errorMessage.trim() !== '')) {
_context4.next = 3;
break;
}
return _context4.abrupt('return', res.status(400).json({ message: errorMessage }));
case 3:
_context4.prev = 3;
_context4.next = 6;
return this.user.findOne({
attributes: ['id', 'username', 'email'],
where: { id: req.params.userId }
});
case 6:
matchingUser = _context4.sent;
if (matchingUser) {
_context4.next = 9;
break;
}
return _context4.abrupt('return', res.status(404).json({ message: 'Specified user does not exist!' }));
case 9:
res.status(200).json({ 'Specified user': matchingUser });
_context4.next = 15;
break;
case 12:
_context4.prev = 12;
_context4.t0 = _context4['catch'](3);
res.status(500).json({ message: _context4.t0.message });
case 15:
case 'end':
return _context4.stop();
}
}
}, _callee4, this, [[3, 12]]);
}));
function getUserByKey(_x7, _x8) {
return _ref4.apply(this, arguments);
}
return getUserByKey;
}()
/**
* @description: Deletes a user matching specified userKey
* @param {Object} req The incoming request from the client
* @param {Object} res The outgoing response from the server
* @return {Object} null
*/
}, {
key: 'deleteUser',
value: function () {
var _ref5 = _asyncToGenerator(regeneratorRuntime.mark(function _callee5(req, res) {
var matchingUser;
return regeneratorRuntime.wrap(function _callee5$(_context5) {
while (1) {
switch (_context5.prev = _context5.next) {
case 0:
errorMessage = _validator2.default.checkEmpty([{ 'User ID': req.params.userId }]);
if (!(errorMessage.trim() !== '')) {
_context5.next = 3;
break;
}
return _context5.abrupt('return', res.status(400).json({ message: errorMessage }));
case 3:
_context5.prev = 3;
_context5.next = 6;
return this.user.findOne({ where: { id: req.params.userId } });
case 6:
matchingUser = _context5.sent;
if (matchingUser) {
_context5.next = 9;
break;
}
return _context5.abrupt('return', res.status(404).json({ message: 'Specified user does not exist!' }));
case 9:
_context5.next = 11;
return this.user.destroy({ where: { id: req.params.userId } });
case 11:
res.status(200).json({ message: 'User deleted successfully!' });
_context5.next = 17;
break;
case 14:
_context5.prev = 14;
_context5.t0 = _context5['catch'](3);
res.status(500).json({ message: _context5.t0.message });
case 17:
case 'end':
return _context5.stop();
}
}
}, _callee5, this, [[3, 14]]);
}));
function deleteUser(_x9, _x10) {
return _ref5.apply(this, arguments);
}
return deleteUser;
}()
}]);
return UserController;
}();
exports.default = UserController; |
Iiqbal2000/microsite-logos-backend | src/config/db.config.js | <filename>src/config/db.config.js
require('dotenv').config();
const { NODE_ENV, NAME_DB_DEV, NAME_DB_PROD } = process.env;
module.exports = {
HOST: process.env.HOST_DB,
PORT: process.env.PORT_DB,
USER: process.env.USER_DB,
PASSWORD: <PASSWORD>,
DB: NODE_ENV === 'prod' ? NAME_DB_PROD : NAME_DB_DEV,
dialect: 'mysql',
pool: {
max: 5,
min: 0,
acquire: 30000,
idle: 10000,
},
};
|
tencentyun/cos-java-sdk-hadoop-v4 | src/main/java/com/qcloud/cos/request/UploadSliceFileRequest.java | <reponame>tencentyun/cos-java-sdk-hadoop-v4
package com.qcloud.cos.request;
import com.qcloud.cos.common_utils.CommonParamCheckUtils;
import com.qcloud.cos.exception.ParamException;
/**
 * Request describing a sliced (multipart) file upload to COS.
 *
 * <p>Extends {@link UploadFileRequest} with a configurable slice size.
 *
 * @author chengwu
 */
public class UploadSliceFileRequest extends UploadFileRequest {

	/** Default slice size: 1 MB. */
	private static final int DEFAULT_SLICE_SIZE = 1024 * 1024;

	/** Slice size in bytes; defaults to {@link #DEFAULT_SLICE_SIZE}. */
	private int sliceSize = DEFAULT_SLICE_SIZE;

	/**
	 * Builds a slice-upload request by copying the fields of a plain upload
	 * request, keeping the default slice size.
	 *
	 * @param request request whose bucket, paths and business attribute are copied
	 */
	public UploadSliceFileRequest(UploadFileRequest request) {
		super(request.getBucketName(), request.getCosPath(), request.getLocalPath(),
				request.getBizAttr());
	}

	/**
	 * Builds a slice-upload request for a local file with an explicit slice size.
	 */
	public UploadSliceFileRequest(String bucketName, String cosPath, String localPath,
			int sliceSize) {
		super(bucketName, cosPath, localPath);
		this.sliceSize = sliceSize;
	}

	/**
	 * Builds a slice-upload request whose content comes from an in-memory buffer.
	 */
	public UploadSliceFileRequest(String bucketName, String cosPath, byte[] contentBuffer) {
		super(bucketName, cosPath, contentBuffer);
	}

	public int getSliceSize() {
		return sliceSize;
	}

	public void setSliceSize(int sliceSize) {
		this.sliceSize = sliceSize;
	}

	/**
	 * Validates the base parameters, then checks that the slice size is legal.
	 *
	 * @throws ParamException if any parameter is invalid
	 */
	@Override
	public void check_param() throws ParamException {
		super.check_param();
		CommonParamCheckUtils.AssertLegalSliceSize(this.sliceSize);
	}

	@Override
	public String toString() {
		// Same format as before: base fields, then sliceSize, taskNum, and
		// enableShaDigest rendered as "1"/"0".
		StringBuilder text = new StringBuilder(super.toString());
		text.append(", sliceSize:").append(this.sliceSize);
		text.append(", taskNum:").append(this.taskNum);
		text.append(", enableShaDigest:").append(this.enableShaDigest ? "1" : "0");
		return text.toString();
	}
}
|
prokopk1n/cpachecker-1 | src/org/sosy_lab/cpachecker/core/specification/PackageSanityTest.java | <reponame>prokopk1n/cpachecker-1
// This file is part of CPAchecker,
// a tool for configurable software verification:
// https://cpachecker.sosy-lab.org
//
// SPDX-FileCopyrightText: 2021 <NAME> <https://www.sosy-lab.org>
//
// SPDX-License-Identifier: Apache-2.0
package org.sosy_lab.cpachecker.core.specification;
import com.google.common.testing.AbstractPackageSanityTests;
import org.sosy_lab.common.ShutdownNotifier;
import org.sosy_lab.common.configuration.Configuration;
import org.sosy_lab.common.log.LogManager;
public class PackageSanityTest extends AbstractPackageSanityTests {
  // Instance initializer: register default instances for types that the
  // Guava package-sanity tests cannot construct automatically; they are
  // used as dummy arguments when probing constructors in this package.
  {
    setDefault(Configuration.class, Configuration.defaultConfiguration());
    setDefault(LogManager.class, LogManager.createTestLogManager());
    setDefault(ShutdownNotifier.class, ShutdownNotifier.createDummy());
  }
}
|
shnaqawi/social-core | social_core/tests/backends/test_atlassian.py | <reponame>shnaqawi/social-core
import json
from httpretty import HTTPretty
from .oauth import OAuth2Test
class AtlassianOAuth2Test(OAuth2Test):
    """Pipeline tests for the Atlassian OAuth2 backend.

    The fixtures below emulate the HTTP exchanges the backend performs:
    the token exchange, the tenant (accessible-resources) lookup, and
    the Jira "myself" profile fetch.
    """

    backend_path = 'social_core.backends.atlassian.AtlassianOAuth2'
    # Endpoint listing the Atlassian cloud instances the token can access.
    tenant_url = 'https://api.atlassian.com/oauth/token/accessible-resources'
    # Profile endpoint; the cloud-id path segment matches the tenant fixture.
    user_data_url = 'https://api.atlassian.com/ex/jira/FAKED_CLOUD_ID/rest/api/2/myself'
    expected_username = 'erlich'
    # NOTE(review): '<PASSWORD>' looks like a redaction placeholder for a
    # fake token value; any non-empty string works for these tests.
    access_token_body = json.dumps({
        'access_token': '<PASSWORD>',
        'token_type': 'bearer'
    })
    # Canned response for the accessible-resources endpoint.
    tenant_data_body = json.dumps([
        {
            "id": "FAKED_CLOUD_ID",
            "name": "bachmanity.com",
            "avatarUrl": "https://bachmanity.atlassian.net/avatars/240/site.png",
            "scopes": [
                "read:jira-user"
            ]
        }
    ])
    # Canned Jira "myself" profile for the expected user.
    user_data_body = json.dumps({
        "self": "http://bachmanity.atlassian.net/rest/api/3/user?username=erlich",
        "key": "erlich",
        "accountId": "99:27935d01-92a7-4687-8272-a9b8d3b2ae2e",
        "name": "erlich",
        "emailAddress": "<EMAIL>",
        "avatarUrls": {
            "48x48": "http://bachmanity.atlassian.net/secure/useravatar?size=large&ownerId=erlich",
            "24x24": "http://bachmanity.atlassian.net/secure/useravatar?size=small&ownerId=erlich",
            "16x16": "http://bachmanity.atlassian.net/secure/useravatar?size=xsmall&ownerId=erlich",
            "32x32": "http://bachmanity.atlassian.net/secure/useravatar?size=medium&ownerId=erlich"
        },
        "displayName": "<NAME>",
        "active": True,
        "timeZone": "Australia/Sydney",
        "groups": {
            "size": 3,
            "items": []
        },
        "applicationRoles": {
            "size": 1,
            "items": []
        }
    })

    def auth_handlers(self, start_url):
        """Register the tenant-lookup endpoint on top of the standard
        OAuth2 handlers, then return the auth target URL unchanged."""
        target_url = super(AtlassianOAuth2Test, self).auth_handlers(start_url)
        HTTPretty.register_uri(HTTPretty.GET,
                               self.tenant_url,
                               body=self.tenant_data_body,
                               content_type='application/json')
        return target_url

    def test_login(self):
        """The full login flow succeeds against the mocked endpoints."""
        self.do_login()

    def test_partial_pipeline(self):
        """Login also succeeds when resuming a partial pipeline."""
        self.do_partial_pipeline()
|
TheInterventionCentre/NorMIT-Plan-App | Libs/MRML/Core/Testing/vtkMRMLDisplayableHierarchyNodeDisplayPropertiesTest.cxx | <reponame>TheInterventionCentre/NorMIT-Plan-App<gh_stars>0
/*==============================================================================
Program: 3D Slicer
Copyright (c) Kitware Inc.
See COPYRIGHT.txt
or http://www.slicer.org/copyright/copyright.txt for details.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file was originally developed by <NAME>, Kitware Inc.
and was partially funded by NIH grant 3P41RR013218-12S1
==============================================================================*/
// MRML includes
#include "vtkMRMLDisplayableHierarchyNode.h"
#include "vtkMRMLModelDisplayNode.h"
#include "vtkMRMLModelHierarchyNode.h"
#include "vtkMRMLModelNode.h"
#include "vtkMRMLScene.h"
// VTK includes
#include <vtkCollection.h>
#include <vtkNew.h>
#include <vtkSmartPointer.h>
// STD includes
#include <cassert>
#include <sstream>
namespace
{
void PopulateScene(vtkMRMLScene* scene, int numberOfLevels);
vtkMRMLModelNode* GetModelNode(vtkMRMLScene* scene, int level);
vtkMRMLDisplayableHierarchyNode* GetHierarchyNode(vtkMRMLScene* scene, int level);
vtkMRMLDisplayableHierarchyNode* GetModelHierarchyNode(vtkMRMLScene* scene, int level);
bool TestExpand();
bool TestDefaults();
bool TestSetVisibilityLevel0();
bool TestSetVisibilityLevel1();
bool TestCollapseLevel0();
bool TestCollapseLevel1();
bool TestCollapseLevels0And1();
bool TestColors(vtkMRMLScene* scene,
double (*modelColors)[3],
double (*hierarchyColors)[3]);
bool TestVisibility(vtkMRMLScene* scene,
int* modelVisibilities,
int* hierarchyVisibilities);
const int LEVEL_COUNT = 3;
double DEFAULT_MODEL_COLORS[3][3] = {{0.5, 0., 0.}, {0.,0.5,0.},{0.,0.,0.5}};
double DEFAULT_HIERARCHY_COLORS[3][3] = {{1., 0., 0.}, {0.,1.,0.},{0.,0.,1.}};
int DEFAULT_MODEL_VISIBILITY[3] = {1, 1, 1};
int DEFAULT_HIERARCHY_VISIBILITY[3] = {1, 1, 1};
} // end of anonymous namespace
//---------------------------------------------------------------------------
int vtkMRMLDisplayableHierarchyNodeDisplayPropertiesTest(int vtkNotUsed(argc),
                                                         char * vtkNotUsed(argv)[] )
{
  // Single entry point: run TestExpand and translate its boolean result
  // into the process exit status expected by CTest.
  const bool ok = TestExpand();
  if (!ok)
    {
    std::cerr << "'TestExpand' call not successful." << std::endl;
    }
  return ok ? EXIT_SUCCESS : EXIT_FAILURE;
}
namespace
{
//---------------------------------------------------------------------------
// Populate a scene with models and hierarchy
// Scene
// +- ModelHierarchyNode (level 0)
// | > ModelNode
// | > DisplayNode
// + HierarchyNode (level 0)
// | > DisplayNode
// + ModelHierarchyNode (level 1)
// | > ModelNode
// | > DisplayNode
// + HierarchyNode (level 1)
// | > DisplayNode
// + ModelHierarchyNode (level 2)
// | > ModelNode
// | > DisplayNode
// + HierarchyNode (level 2)
// > DisplayNode
// Builds the test scene: one model (with display node and model-hierarchy
// node) plus one displayable-hierarchy node (with display node) per level,
// each level's hierarchy node parenting the next level's nodes.
void PopulateScene(vtkMRMLScene* scene, int numberOfLevels)
{
  // At each level, create a Model node and a Hierarchy node, the latter becomes
  // the parent of the next level.
  vtkMRMLDisplayableHierarchyNode* parentHierarchy = 0;
  for (int level = 0; level < numberOfLevels; ++level)
    {
    // Model
    vtkNew<vtkMRMLModelNode> modelNode;
    std::stringstream smn;
    smn << level << " model";
    modelNode->SetName(smn.str().c_str());
    scene->AddNode(modelNode.GetPointer());
    vtkNew<vtkMRMLModelDisplayNode> modelDisplayNode;
    std::stringstream smdn;
    smdn << level << " model display";
    modelDisplayNode->SetName(smdn.str().c_str());
    scene->AddNode(modelDisplayNode.GetPointer());
    vtkNew<vtkMRMLModelHierarchyNode> modelHierarchyNode;
    std::stringstream smhn;
    // Note: the "hiearchy" typo below only affects the node's display name.
    smhn << level << " model hiearchy";
    modelHierarchyNode->SetName(smhn.str().c_str());
    scene->AddNode(modelHierarchyNode.GetPointer());
    // Wire model -> display node, hierarchy -> model, and give the model
    // its per-level default color.
    modelNode->SetAndObserveDisplayNodeID(modelDisplayNode->GetID());
    modelHierarchyNode->SetDisplayableNodeID(modelNode->GetID());
    modelDisplayNode->SetColor(DEFAULT_MODEL_COLORS[level]);
    // Hierarchy
    vtkNew<vtkMRMLDisplayableHierarchyNode> hierarchyNode;
    std::stringstream shn;
    shn << level << " hierarchy";
    hierarchyNode->SetName(shn.str().c_str());
    scene->AddNode(hierarchyNode.GetPointer());
    vtkNew<vtkMRMLModelDisplayNode> hierarchyDisplayNode;
    std::stringstream shdn;
    shdn << level << " hierarchy display";
    hierarchyDisplayNode->SetName(shdn.str().c_str());
    scene->AddNode(hierarchyDisplayNode.GetPointer());
    hierarchyNode->SetAndObserveDisplayNodeID(hierarchyDisplayNode->GetID());
    hierarchyDisplayNode->SetColor(DEFAULT_HIERARCHY_COLORS[level]);
    // Both of this level's hierarchy nodes hang under the previous level's
    // plain hierarchy node (none for level 0).
    if (parentHierarchy)
      {
      modelHierarchyNode->SetParentNodeID(parentHierarchy->GetID());
      hierarchyNode->SetParentNodeID(parentHierarchy->GetID());
      }
    parentHierarchy = hierarchyNode.GetPointer();
    }
  // 5 nodes are added per level (model + 2 displays + 2 hierarchies).
  assert(scene->GetNumberOfNodes() == numberOfLevels*5);
}
//---------------------------------------------------------------------------
// Returns the level-th model node, in scene traversal order.
vtkMRMLModelNode* GetModelNode(vtkMRMLScene* scene, int level)
{
  scene->InitTraversal();
  vtkMRMLNode* node = scene->GetNthNodeByClass(level, "vtkMRMLModelNode");
  return vtkMRMLModelNode::SafeDownCast(node);
}
//---------------------------------------------------------------------------
// Returns the plain hierarchy node of the given level. Model-hierarchy and
// plain hierarchy nodes alternate in the scene, so it sits at 2*level + 1.
vtkMRMLDisplayableHierarchyNode* GetHierarchyNode(vtkMRMLScene* scene, int level)
{
  scene->InitTraversal();
  vtkMRMLNode* node =
    scene->GetNthNodeByClass(2*level + 1, "vtkMRMLDisplayableHierarchyNode");
  return vtkMRMLDisplayableHierarchyNode::SafeDownCast(node);
}
//---------------------------------------------------------------------------
// Returns the model-hierarchy node of the given level (even indices in the
// alternating hierarchy-node ordering, i.e. 2*level).
vtkMRMLDisplayableHierarchyNode* GetModelHierarchyNode(vtkMRMLScene* scene, int level)
{
  scene->InitTraversal();
  vtkMRMLNode* node =
    scene->GetNthNodeByClass(2*level, "vtkMRMLDisplayableHierarchyNode");
  return vtkMRMLDisplayableHierarchyNode::SafeDownCast(node);
}
//---------------------------------------------------------------------------
// Runs every scenario in sequence, stopping at (and reporting) the first
// failure.
bool TestExpand()
{
  if (!TestDefaults())
    {
    std::cerr << "'TestDefaults' call not successful." << std::endl;
    return false;
    }
  if (!TestSetVisibilityLevel0())
    {
    std::cerr << "'TestSetVisibilityLevel0' call not successful." << std::endl;
    return false;
    }
  if (!TestSetVisibilityLevel1())
    {
    // Fixed: this message previously named TestSetVisibilityLevel0
    // (copy-paste error), misreporting which scenario failed.
    std::cerr << "'TestSetVisibilityLevel1' call not successful." << std::endl;
    return false;
    }
  if (!TestCollapseLevel0())
    {
    std::cerr << "'TestCollapseLevel0' call not successful." << std::endl;
    return false;
    }
  if (!TestCollapseLevel1())
    {
    std::cerr << "'TestCollapseLevel1' call not successful." << std::endl;
    return false;
    }
  if (!TestCollapseLevels0And1())
    {
    std::cerr << "'TestCollapseLevels0And1' call not successful." << std::endl;
    return false;
    }
  return true;
}
//---------------------------------------------------------------------------
// A freshly populated scene must expose the default colors and visibility.
bool TestDefaults()
{
  vtkNew<vtkMRMLScene> scene;
  PopulateScene(scene.GetPointer(), LEVEL_COUNT);
  const bool colorsOk = TestColors(
    scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS);
  const bool visibilityOk = TestVisibility(
    scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, DEFAULT_HIERARCHY_VISIBILITY);
  return colorsOk && visibilityOk;
}
//---------------------------------------------------------------------------
// Hiding the level-0 hierarchy display node must leave all colors and the
// model visibilities intact; only the level-0 hierarchy visibility flips.
bool TestSetVisibilityLevel0()
{
  vtkNew<vtkMRMLScene> scene;
  PopulateScene(scene.GetPointer(), LEVEL_COUNT);
  vtkMRMLDisplayableHierarchyNode* hierarchyNode =
    GetHierarchyNode(scene.GetPointer(), 0);
  hierarchyNode->GetDisplayNode()->SetVisibility(0);
  const bool colorsOk = TestColors(
    scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS);
  int newHierarchyVisibility[3] = {0, 1, 1};
  const bool visibilityOk = TestVisibility(
    scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, newHierarchyVisibility);
  return colorsOk && visibilityOk;
}
//---------------------------------------------------------------------------
// Hiding the level-1 hierarchy display node must leave all colors and the
// model visibilities intact; only the level-1 hierarchy visibility flips.
bool TestSetVisibilityLevel1()
{
  vtkNew<vtkMRMLScene> scene;
  PopulateScene(scene.GetPointer(), LEVEL_COUNT);
  vtkMRMLDisplayableHierarchyNode* hierarchyNode =
    GetHierarchyNode(scene.GetPointer(), 1);
  hierarchyNode->GetDisplayNode()->SetVisibility(0);
  const bool colorsOk = TestColors(
    scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS);
  int newHierarchyVisibility[3] = {1, 0, 1};
  const bool visibilityOk = TestVisibility(
    scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, newHierarchyVisibility);
  return colorsOk && visibilityOk;
}
//---------------------------------------------------------------------------
// Collapsing the level-0 hierarchy node must not change any display
// properties, and must make it the "collapsed parent" of every deeper
// model-hierarchy node.
bool TestCollapseLevel0()
{
  vtkNew<vtkMRMLScene> scene;
  PopulateScene(scene.GetPointer(), LEVEL_COUNT);
  // Color and visibility shouldn't be changed when collapsing hierarchy
  vtkMRMLDisplayableHierarchyNode* hierarchyNode= GetHierarchyNode(scene.GetPointer(), 0);
  hierarchyNode->SetExpanded(0);
  bool res = true;
  res = TestColors(scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS) && res;
  res = TestVisibility(scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, DEFAULT_HIERARCHY_VISIBILITY) && res;
  if (!res)
    {
    std::cout << "Color or visibility is wrong" << std::endl;
    }
  // The collapsed node's own model-hierarchy reports no collapsed parent;
  // the deeper levels (1 and 2) must both report the collapsed level-0 node.
  if (GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() != 0 ||
      GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() != hierarchyNode ||
      GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() != hierarchyNode)
    {
    std::cout << "CollapsedParentNode is wrong (hierarchy node at level 0 collapsed):" << std::endl
              << GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() << " (expected 0), "
              << GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() << " (expected " << hierarchyNode << "), "
              << GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() << " (expected " << hierarchyNode << ")"
              << std::endl;
    res = false;
    }
  return res;
}
//---------------------------------------------------------------------------
bool TestCollapseLevel1()
{
vtkNew<vtkMRMLScene> scene;
PopulateScene(scene.GetPointer(), LEVEL_COUNT);
// Color and visibility shouldn't be changed when collapsing hierarchy
vtkMRMLDisplayableHierarchyNode* hierarchyNode= GetHierarchyNode(scene.GetPointer(), 1);
hierarchyNode->SetExpanded(0);
bool res = true;
res = TestColors(scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS) && res;
res = TestVisibility(scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, DEFAULT_HIERARCHY_VISIBILITY) && res;
if (!res)
{
std::cout << "Color or visibility is wrong" << std::endl;
}
if (GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() != 0 ||
GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() != 0 ||
GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() != hierarchyNode)
{
std::cout << "CollapsedParentNode is wrong (hierarchyNode at level 1 collapsed):" << std::endl
<< GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() << " (expected 0), "
<< GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() << " (expected 0), "
<< GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() << " (expected " << hierarchyNode << ")"
<< std::endl;
res = false;
}
return res;
}
//---------------------------------------------------------------------------
bool TestCollapseLevels0And1()
{
vtkNew<vtkMRMLScene> scene;
PopulateScene(scene.GetPointer(), LEVEL_COUNT);
// Color and visibility shouldn't be changed when collapsing hierarchy
vtkMRMLDisplayableHierarchyNode* hierarchyNode= GetHierarchyNode(scene.GetPointer(), 0);
hierarchyNode->SetExpanded(0);
vtkMRMLDisplayableHierarchyNode* subHierarchyNode= GetHierarchyNode(scene.GetPointer(), 1);
subHierarchyNode->SetExpanded(0);
bool res = true;
res = TestColors(scene.GetPointer(), DEFAULT_MODEL_COLORS, DEFAULT_HIERARCHY_COLORS) && res;
res = TestVisibility(scene.GetPointer(), DEFAULT_MODEL_VISIBILITY, DEFAULT_HIERARCHY_VISIBILITY) && res;
if (!res)
{
std::cout << "Color or visibility is wrong" << std::endl;
}
if (GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() != 0 ||
GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() != hierarchyNode ||
GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() != hierarchyNode)
{
std::cout << "CollapsedParentNode is wrong (hierarchies at levels 0 and 1 collapsed):" << std::endl
<< GetModelHierarchyNode(scene.GetPointer(), 0)->GetCollapsedParentNode() << " (expected 0), "
<< GetModelHierarchyNode(scene.GetPointer(), 1)->GetCollapsedParentNode() << " (expected " << hierarchyNode << "), "
<< GetModelHierarchyNode(scene.GetPointer(), 2)->GetCollapsedParentNode() << " (expected " << hierarchyNode << ")"
<< std::endl;
res = false;
}
return res;
}
//---------------------------------------------------------------------------
// Checks, level by level, that every model display node and every hierarchy
// display node carries the expected RGB color. Returns false (after printing
// the offending node) on the first mismatch.
// NOTE(review): when no model exists at a level the function returns true,
// i.e. a missing model ends the scan successfully — presumably the scene has
// at most LEVEL_COUNT models; confirm against PopulateScene.
bool TestColors(vtkMRMLScene* scene,
                double (*modelColors)[3],
                double (*hierarchyColors)[3])
{
  for (int level = 0; level < LEVEL_COUNT; ++level)
    {
    // Model
    vtkMRMLModelNode* modelNode = GetModelNode(scene, level);
    if (modelNode == 0)
      {
      return true;
      }
    vtkMRMLDisplayNode* modelDisplayNode = modelNode->GetDisplayNode();
    // Exact (==) comparison of the three RGB components.
    if (modelDisplayNode->GetColor()[0] != modelColors[level][0] ||
        modelDisplayNode->GetColor()[1] != modelColors[level][1] ||
        modelDisplayNode->GetColor()[2] != modelColors[level][2])
      {
      std::cout << "Wrong color for node \"" << modelDisplayNode->GetName()
                << "\" at level " << level << std::endl;
      return false;
      }
    // A hierarchy node is expected to exist for every level.
    vtkMRMLDisplayableHierarchyNode* hierarchyNode = GetHierarchyNode(scene, level);
    assert(hierarchyNode != 0);
    vtkMRMLDisplayNode* hierarchyDisplayNode = hierarchyNode->GetDisplayNode();
    if (hierarchyDisplayNode->GetColor()[0] != hierarchyColors[level][0] ||
        hierarchyDisplayNode->GetColor()[1] != hierarchyColors[level][1] ||
        hierarchyDisplayNode->GetColor()[2] != hierarchyColors[level][2])
      {
      std::cout << "Wrong color for node \"" << hierarchyDisplayNode->GetName()
                << "\" at level " << level << std::endl;
      return false;
      }
    }
  return true;
}
//---------------------------------------------------------------------------
// Checks, level by level, that every model display node and every hierarchy
// display node carries the expected visibility flag. Prints the offending
// node and returns false on the first mismatch; a level without a model ends
// the scan successfully.
bool TestVisibility(vtkMRMLScene* scene,
                    int* modelVisibility,
                    int* hierarchyVisibility)
{
  for (int level = 0; level < LEVEL_COUNT; ++level)
    {
    vtkMRMLModelNode* model = GetModelNode(scene, level);
    if (!model)
      {
      // No model at this level: nothing more to check.
      return true;
      }
    vtkMRMLDisplayNode* display = model->GetDisplayNode();
    if (display->GetVisibility() != modelVisibility[level])
      {
      std::cout << "Wrong visibility for node \"" << display->GetName()
                << "\" at level " << level << std::endl;
      return false;
      }
    vtkMRMLDisplayableHierarchyNode* hierarchy = GetHierarchyNode(scene, level);
    assert(hierarchy != 0);
    display = hierarchy->GetDisplayNode();
    if (display->GetVisibility() != hierarchyVisibility[level])
      {
      std::cout << "Wrong visibility for node \"" << display->GetName()
                << "\" at level " << level << std::endl;
      return false;
      }
    }
  return true;
}
} // end of anonymous namespace
|
rdkcmf/rdk-mediaframework | snmp/snmpmanager/ocStbHostSpecificationsInfo.cpp | /*
* If not stated otherwise in this file or this component's LICENSE file the
* following copyright and licenses apply:
*
* Copyright 2011 RDK Management
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Note: this file originally auto-generated by mib2c using
* : mib2c.scalar.conf 11805 2005-01-07 09:37:18Z dts12 $
*/
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>
#include <net-snmp/agent/net-snmp-agent-includes.h>
#include "rdk_debug.h"
#include "ocStbHostSpecificationsInfo.h"
/** Initializes the ocStbHostSpecificationsInfo module */
void
init_ocStbHostSpecificationsInfo(void)
{
    /* OIDs of the two scalars under ocStbHostSpecificationsInfo
     * (1.3.6.1.4.1.4491.2.3.1.1.4.5.5.{1,2}). */
    static oid ocStbHostCfrSpecificationIssue_oid[] =
        { 1, 3, 6, 1, 4, 1, 4491, 2, 3, 1, 1, 4, 5, 5, 1 };
    static oid ocStbHostMibSpecificationIssue_oid[] =
        { 1, 3, 6, 1, 4, 1, 4491, 2, 3, 1, 1, 4, 5, 5, 2 };
    DEBUGMSGTL(("ocStbHostSpecificationsInfo", "Initializing\n"));
    /* Register both scalars read-only (HANDLER_CAN_RONLY); GET requests are
     * dispatched to the handle_* callbacks defined below. */
    netsnmp_register_scalar(netsnmp_create_handler_registration
                            ("ocStbHostCfrSpecificationIssue",
                             handle_ocStbHostCfrSpecificationIssue,
                             ocStbHostCfrSpecificationIssue_oid,
                             OID_LENGTH
                             (ocStbHostCfrSpecificationIssue_oid),
                             HANDLER_CAN_RONLY));
    netsnmp_register_scalar(netsnmp_create_handler_registration
                            ("ocStbHostMibSpecificationIssue",
                             handle_ocStbHostMibSpecificationIssue,
                             ocStbHostMibSpecificationIssue_oid,
                             OID_LENGTH
                             (ocStbHostMibSpecificationIssue_oid),
                             HANDLER_CAN_RONLY));
}
static char g_CfrSpecificationIssue[255]="OC-SP-HOST2.1-CFR-I11-100507";
static char g_MibSpecificationIssue[255]="OC-SP-MIB-HOST2.X-I14-120531";
/*
 * GET handler for the ocStbHostCfrSpecificationIssue scalar.
 *
 * Registered with HANDLER_CAN_RONLY, so only MODE_GET is expected; the
 * scalar helper hands us a single request per call, so no request loop
 * is needed.
 *
 * Returns SNMP_ERR_NOERROR on success, SNMP_ERR_GENERR on an unexpected mode.
 */
int
handle_ocStbHostCfrSpecificationIssue(netsnmp_mib_handler *handler,
                                      netsnmp_handler_registration
                                      *reginfo,
                                      netsnmp_agent_request_info *reqinfo,
                                      netsnmp_request_info *requests)
{
    switch (reqinfo->mode) {
    case MODE_GET:
        /* Log the value actually served (the previous message hard-coded a
         * stale spec id and misspelled "length"); %zu matches the size_t
         * returned by strlen(). */
        RDK_LOG(RDK_LOG_DEBUG, "LOG.RDK.SNMP",
                "\n ocStbHostCfrSpecificationIssue :: display %s \n length is %zu",
                g_CfrSpecificationIssue, strlen(g_CfrSpecificationIssue));
        snmp_set_var_typed_value(requests->requestvb, ASN_OCTET_STR,
                                 (u_char *) g_CfrSpecificationIssue,
                                 strlen(g_CfrSpecificationIssue));
        break;
    default:
        /* Should be unreachable for a read-only scalar registration. */
        snmp_log(LOG_ERR,
                 "unknown mode (%d) in handle_ocStbHostCfrSpecificationIssue\n",
                 reqinfo->mode);
        return SNMP_ERR_GENERR;
    }
    return SNMP_ERR_NOERROR;
}
/*
 * GET handler for the ocStbHostMibSpecificationIssue scalar.
 *
 * Registered with HANDLER_CAN_RONLY, so only MODE_GET is expected; the
 * scalar helper hands us a single request per call, so no request loop
 * is needed.
 *
 * Returns SNMP_ERR_NOERROR on success, SNMP_ERR_GENERR on an unexpected mode.
 */
int
handle_ocStbHostMibSpecificationIssue(netsnmp_mib_handler *handler,
                                      netsnmp_handler_registration
                                      *reginfo,
                                      netsnmp_agent_request_info *reqinfo,
                                      netsnmp_request_info *requests)
{
    switch (reqinfo->mode) {
    case MODE_GET:
        /* Log the value actually served (the previous message hard-coded a
         * stale spec id and misspelled "length"); %zu matches the size_t
         * returned by strlen(). */
        RDK_LOG(RDK_LOG_DEBUG, "LOG.RDK.SNMP",
                "\n ocStbHostMibSpecificationIssue :: display %s \n length is %zu",
                g_MibSpecificationIssue, strlen(g_MibSpecificationIssue));
        snmp_set_var_typed_value(requests->requestvb, ASN_OCTET_STR,
                                 (u_char *) g_MibSpecificationIssue,
                                 strlen(g_MibSpecificationIssue));
        break;
    default:
        /* Should be unreachable for a read-only scalar registration. */
        snmp_log(LOG_ERR,
                 "unknown mode (%d) in handle_ocStbHostMibSpecificationIssue\n",
                 reqinfo->mode);
        return SNMP_ERR_GENERR;
    }
    return SNMP_ERR_NOERROR;
}
|
trespasserw/MPS | plugins/mps-kotlin/solutions/kotlin.baseLanguage.runtime/source_gen/jetbrains/mps/kotlin/baseLanguage/toKotlin/JavaParameterDeclaration.java | <filename>plugins/mps-kotlin/solutions/kotlin.baseLanguage.runtime/source_gen/jetbrains/mps/kotlin/baseLanguage/toKotlin/JavaParameterDeclaration.java
package jetbrains.mps.kotlin.baseLanguage.toKotlin;
/*Generated by MPS */
import jetbrains.mps.kotlin.runtime.declaration.ParameterDeclaration;
import org.jetbrains.mps.openapi.model.SNode;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.SLinkOperations;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.SNodeOperations;
import org.jetbrains.mps.openapi.language.SContainmentLink;
import jetbrains.mps.smodel.adapter.structure.MetaAdapterFactory;
import org.jetbrains.mps.openapi.language.SConcept;
/**
 * Adapts a BaseLanguage parameter node to the Kotlin {@code ParameterDeclaration}
 * interface, converting its type through a {@code JavaToKtEngine}.
 */
public class JavaParameterDeclaration implements ParameterDeclaration {
    // The BaseLanguage parameter node being adapted.
    private final SNode myParam;
    // Engine used to convert BaseLanguage types to their Kotlin counterparts.
    private final JavaToKtEngine myConverter;

    public JavaParameterDeclaration(SNode param, JavaToKtEngine converter) {
        myParam = param;
        myConverter = converter;
    }

    /** Returns the underlying parameter node. */
    @Override
    public SNode getNode() {
        return myParam;
    }

    /**
     * Returns the parameter's type converted to Kotlin. For a variable-arity
     * type the component type is converted instead; the arity itself is
     * reported through {@link #isVararg()}.
     */
    @Override
    public SNode getType() {
        SNode type = SLinkOperations.getTarget(myParam, LINKS.type$a1UY);
        {
            final SNode arity = type;
            if (SNodeOperations.isInstanceOf(arity, CONCEPTS.VariableArityType$KF)) {
                // Arity handled in isVararg
                type = SLinkOperations.getTarget(arity, LINKS.componentType$ypmi);
            }
        }
        return myConverter.convert(type);
    }

    /** {@code true} when the parameter's declared type is a variable-arity type. */
    @Override
    public boolean isVararg() {
        return SNodeOperations.isInstanceOf(SLinkOperations.getTarget(myParam, LINKS.type$a1UY), CONCEPTS.VariableArityType$KF);
    }

    /** Always {@code false}: this adapter does not model optional parameters. */
    @Override
    public boolean isOptional() {
        return false;
    }

    // Meta-model identifiers generated by MPS; do not edit by hand.
    private static final class LINKS {
        /*package*/ static final SContainmentLink type$a1UY = MetaAdapterFactory.getContainmentLink(0xf3061a5392264cc5L, 0xa443f952ceaf5816L, 0x450368d90ce15bc3L, 0x4ed4d318133c80ceL, "type");
        /*package*/ static final SContainmentLink componentType$ypmi = MetaAdapterFactory.getContainmentLink(0xf3061a5392264cc5L, 0xa443f952ceaf5816L, 0x11c08f42e7bL, 0x11c08f5f38cL, "componentType");
    }
    private static final class CONCEPTS {
        /*package*/ static final SConcept VariableArityType$KF = MetaAdapterFactory.getConcept(0xf3061a5392264cc5L, 0xa443f952ceaf5816L, 0x11c08f42e7bL, "jetbrains.mps.baseLanguage.structure.VariableArityType");
    }
}
|
tamada/stigmata | src/main/java/com/github/stigmata/BirthmarkElement.java | <filename>src/main/java/com/github/stigmata/BirthmarkElement.java
package com.github.stigmata;
import java.io.Serializable;
import java.util.Objects;
/**
* element of birthmark.
*
* @author <NAME>
*/
public class BirthmarkElement implements Serializable{
private static final long serialVersionUID = 943675475343245243L;
/**
* element value.
*/
private String value;
/**
* construct birthmark element with given value.
*/
public BirthmarkElement(String value) {
this.value = value;
}
/**
* return the value of this element.
*/
public Object getValue(){
return value;
}
/**
* to string.
*/
@Override
public String toString(){
return String.valueOf(getValue());
}
/**
* hash code for overriding equals method.
*/
@Override
public int hashCode(){
if(getValue() == null){
return 0;
}
else{
return getValue().hashCode();
}
}
/**
* equals method.
*/
@Override
public boolean equals(Object o){
if(o instanceof BirthmarkElement){
if(getValue() != null){
return getValue().equals(((BirthmarkElement)o).getValue());
}
else{
return ((BirthmarkElement)o).getValue() == null;
}
}
return false;
}
}
|
kaylangan/azure-devops-intellij | plugin.idea/src/com/microsoft/alm/plugin/idea/tfvc/core/TFSVcs.java | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See License.txt in the project root.
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.alm.plugin.idea.tfvc.core;
import com.google.common.util.concurrent.SettableFuture;
import com.intellij.ide.BrowserUtil;
import com.intellij.notification.Notification;
import com.intellij.notification.NotificationListener;
import com.intellij.openapi.options.Configurable;
import com.intellij.openapi.options.ShowSettingsUtil;
import com.intellij.openapi.progress.PerformInBackgroundOption;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.vcs.AbstractVcs;
import com.intellij.openapi.vcs.CheckoutProvider;
import com.intellij.openapi.vcs.CommittedChangesProvider;
import com.intellij.openapi.vcs.FilePath;
import com.intellij.openapi.vcs.ProjectLevelVcsManager;
import com.intellij.openapi.vcs.VcsConfiguration;
import com.intellij.openapi.vcs.VcsException;
import com.intellij.openapi.vcs.VcsKey;
import com.intellij.openapi.vcs.VcsNotifier;
import com.intellij.openapi.vcs.VcsShowConfirmationOption;
import com.intellij.openapi.vcs.VcsShowSettingOption;
import com.intellij.openapi.vcs.VcsVFSListener;
import com.intellij.openapi.vcs.changes.ChangeProvider;
import com.intellij.openapi.vcs.diff.DiffProvider;
import com.intellij.openapi.vcs.history.VcsHistoryProvider;
import com.intellij.openapi.vcs.history.VcsRevisionNumber;
import com.intellij.openapi.vcs.rollback.RollbackEnvironment;
import com.intellij.openapi.vcs.update.UpdateEnvironment;
import com.intellij.openapi.vcs.versionBrowser.ChangeBrowserSettings;
import com.intellij.vcsUtil.VcsUtil;
import com.microsoft.alm.plugin.context.RepositoryContext;
import com.microsoft.alm.plugin.context.ServerContext;
import com.microsoft.alm.plugin.context.ServerContextManager;
import com.microsoft.alm.plugin.external.exceptions.SyncException;
import com.microsoft.alm.plugin.external.exceptions.ToolException;
import com.microsoft.alm.plugin.external.tools.TfTool;
import com.microsoft.alm.plugin.idea.common.resources.TfPluginBundle;
import com.microsoft.alm.plugin.idea.common.services.LocalizationServiceImpl;
import com.microsoft.alm.plugin.idea.common.utils.IdeaHelper;
import com.microsoft.alm.plugin.idea.common.utils.VcsHelper;
import com.microsoft.alm.plugin.idea.tfvc.core.tfs.TfsRevisionNumber;
import org.apache.commons.lang.StringUtils;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.swing.event.HyperlinkEvent;
import javax.ws.rs.NotAuthorizedException;
/**
* Class that sets up the TFS version control extension.
* <p/>
* TODO: comment back in code as more features are added
*/
public class TFSVcs extends AbstractVcs {
    public static final Logger logger = LoggerFactory.getLogger(TFSVcs.class);

    // Static so the TF command-line version is checked at most once per IDE
    // session, regardless of how many projects activate this VCS.
    private static boolean hasVersionBeenVerified = false;

    @NonNls
    public static final String TFVC_NAME = "TFVC";
    public static final String TFVC_ONLINE_HELP_URL = "http://java.visualstudio.com/Docs/tools/intelliJ#_tfvc-support-preview";
    // Hyperlink id used by the version-warning notification to open the
    // plugin settings dialog instead of the online help page.
    public static final String SETTINGS_URL_EVENT = "settings";
    private static final VcsKey ourKey = createKey(TFVC_NAME);

    private final VcsShowConfirmationOption myAddConfirmation;
    private final VcsShowConfirmationOption myDeleteConfirmation;
    private final VcsShowSettingOption myCheckoutOptions;

    // Created lazily by the corresponding accessors below and reused.
    private VcsHistoryProvider myHistoryProvider;
    private DiffProvider myDiffProvider;
    private TFSCheckinEnvironment myCheckinEnvironment;
    private UpdateEnvironment myUpdateEnvironment;

    // Listeners created in activate() and released in deactivate().
    private VcsVFSListener fileListener;
    private TFSFileSystemListener tfsFileSystemListener;
    private CommittedChangesProvider<TFSChangeList, ChangeBrowserSettings> committedChangesProvider;

    public TFSVcs(@NotNull Project project) {
        super(project, TFVC_NAME);
        final ProjectLevelVcsManager vcsManager = ProjectLevelVcsManager.getInstance(project);
        myAddConfirmation = vcsManager.getStandardConfirmation(VcsConfiguration.StandardConfirmation.ADD, this);
        myDeleteConfirmation = vcsManager.getStandardConfirmation(VcsConfiguration.StandardConfirmation.REMOVE, this);
        myCheckoutOptions = vcsManager.getStandardOption(VcsConfiguration.StandardOption.CHECKOUT, this);
    }

    /**
     * Returns the TFVC instance registered with the project's VCS manager.
     */
    public static TFSVcs getInstance(Project project) {
        return (TFSVcs) ProjectLevelVcsManager.getInstance(project).findVcsByName(TFVC_NAME);
    }

    @NonNls
    public String getDisplayName() {
        return TFVC_NAME;
    }

    public Configurable getConfigurable() {
        return new TFSProjectConfigurable(myProject);
    }

    /**
     * Called when the VCS becomes active for the project: installs the file
     * listeners and kicks off a one-time background check of the TF
     * command-line version.
     */
    @Override
    public void activate() {
        fileListener = new TFSFileListener(getProject(), this);
        if (tfsFileSystemListener == null) {
            tfsFileSystemListener = new TFSFileSystemListener();
        }
        checkCommandLineVersion();
    }

    /**
     * Tears down the listeners created in {@link #activate()}.
     */
    @Override
    public void deactivate() {
        Disposer.dispose(fileListener);
        tfsFileSystemListener.dispose();
        tfsFileSystemListener = null;
    }

    public VcsShowConfirmationOption getAddConfirmation() {
        return myAddConfirmation;
    }

    public VcsShowConfirmationOption getDeleteConfirmation() {
        return myDeleteConfirmation;
    }

    public VcsShowSettingOption getCheckoutOptions() {
        return myCheckoutOptions;
    }

    public ChangeProvider getChangeProvider() {
        return new TFSChangeProvider(myProject);
    }

    /**
     * Lazily creates the check-in environment; the same instance is reused on
     * subsequent calls.
     */
    @NotNull
    public TFSCheckinEnvironment createCheckinEnvironment() {
        if (myCheckinEnvironment == null) {
            myCheckinEnvironment = new TFSCheckinEnvironment(this);
        }
        return myCheckinEnvironment;
    }

    /**
     * Lazily creates the update environment; the same instance is reused on
     * subsequent calls.
     */
    @NotNull
    public UpdateEnvironment createUpdateEnvironment() {
        if (myUpdateEnvironment == null) {
            myUpdateEnvironment = new TFSUpdateEnvironment(this);
        }
        return myUpdateEnvironment;
    }

    public RollbackEnvironment createRollbackEnvironment() {
        return new TFSRollbackEnvironment(this, myProject);
    }

    // NOTE(review): delegates to isVersionedDirectory, whose TFS-specific
    // override is still commented out below — confirm the inherited behavior
    // is the intended one.
    public boolean fileIsUnderVcs(final FilePath filePath) {
        return isVersionedDirectory(filePath.getVirtualFile());
    }

    /*
    TODO:
    public boolean isVersionedDirectory(final VirtualFile dir) {
    if (dir == null) {
    return false;
    }
    return (!Workstation.getInstance().findWorkspacesCached(TfsFileUtil.getFilePath(dir), false).isEmpty());
    }
    public EditFileProvider getEditFileProvider() {
    return new TFSEditFileProvider(myProject);
    }
    */

    /**
     * Lazily creates the committed-changes provider; the same instance is
     * reused on subsequent calls.
     */
    @NotNull
    public CommittedChangesProvider<TFSChangeList, ChangeBrowserSettings> getCommittedChangesProvider() {
        if (committedChangesProvider == null) {
            committedChangesProvider = new TFSCommittedChangesProvider(myProject);
        }
        return committedChangesProvider;
    }

    @Override
    public VcsHistoryProvider getVcsHistoryProvider() {
        if (myHistoryProvider == null) {
            myHistoryProvider = new TFSHistoryProvider(myProject);
        }
        return myHistoryProvider;
    }

    @Override
    public DiffProvider getDiffProvider() {
        if (myDiffProvider == null) {
            myDiffProvider = new TFSDiffProvider(myProject);
        }
        return myDiffProvider;
    }

    /**
     * Parses a revision number string; returns {@code null} when the string is
     * not a valid TFS revision number.
     */
    @Nullable
    public VcsRevisionNumber parseRevisionNumber(final String revisionNumberString) {
        return TfsRevisionNumber.tryParse(revisionNumberString);
    }

    @Nullable
    public String getRevisionPattern() {
        return ourIntegerPattern;
    }

    public static VcsKey getKey() {
        return ourKey;
    }

    /**
     * {@code true} when the given path is mapped to TFVC in this project.
     */
    public static boolean isUnderTFS(FilePath path, Project project) {
        AbstractVcs vcs = VcsUtil.getVcsFor(project, path);
        return vcs != null && TFVC_NAME.equals(vcs.getName());
    }

    /**
     * Wraps an arbitrary throwable into a {@link VcsException}. Existing
     * {@link VcsException}s are passed through unchanged, and the warning
     * flag of a {@link SyncException} is preserved.
     */
    public static VcsException convertToVcsException(final Throwable throwable) {
        if (throwable instanceof VcsException) {
            return (VcsException) throwable;
        }

        final VcsException exception = new VcsException(throwable.getMessage(), throwable);
        if (throwable instanceof SyncException) {
            exception.setIsWarning(((SyncException) throwable).isWarning());
        }
        return exception;
    }

    @Override
    public CheckoutProvider getCheckoutProvider() {
        return null; ///TODO: new TFSCheckoutProvider();
    }

    /**
     * This method is used by the environment classes to get the ServerContext.
     * We do not cache it here because it should already be cached in the ServerContextManager.
     *
     * @param throwIfNotFound when {@code true}, a missing context is reported
     *                        by throwing {@link NotAuthorizedException}
     */
    public ServerContext getServerContext(boolean throwIfNotFound) {
        final RepositoryContext repositoryContext = VcsHelper.getRepositoryContext(getProject());
        logger.info("TFSVcs.getServerContext repositoryContext is null: " + (repositoryContext == null));
        // Only attempt to build a context when both the server URL and the
        // team project name are known.
        final ServerContext serverContext = repositoryContext != null
                && StringUtils.isNotEmpty(repositoryContext.getTeamProjectName())
                && StringUtils.isNotEmpty(repositoryContext.getUrl()) ?
                ServerContextManager.getInstance().createContextFromTfvcServerUrl(
                        repositoryContext.getUrl(), repositoryContext.getTeamProjectName(), true)
                : null;

        if (serverContext == null && throwIfNotFound) {
            // TODO: throw a better error b/c this is what the user sees and it's confusing
            throw new NotAuthorizedException(repositoryContext != null ? repositoryContext.getUrl() : "");
        }

        return serverContext;
    }

    /**
     * Verifies, once per session and on a background thread, that the
     * installed TF command line meets the minimum version; on failure a
     * warning notification linking to settings / online help is shown.
     */
    private void checkCommandLineVersion() {
        if (hasVersionBeenVerified) {
            // No need to check the version again if we have already checked it once this session
            logger.info("Skipping the attempt to check the version of the TF command line.");
            return;
        }

        hasVersionBeenVerified = true;

        // We want to start a background thread to check the version, but that can only be done
        // form the UI thread.
        IdeaHelper.runOnUIThread(new Runnable() {
            @Override
            public void run() {
                // Holds the error text produced by the background check; the
                // empty string means the version is acceptable.
                final SettableFuture<String> versionMessage = SettableFuture.create();
                (new Task.Backgroundable(getProject(), TfPluginBundle.message(TfPluginBundle.KEY_TFVC_TF_VERSION_WARNING_PROGRESS),
                        false, PerformInBackgroundOption.ALWAYS_BACKGROUND) {
                    public void run(@NotNull final ProgressIndicator indicator) {
                        try {
                            logger.info("Attempting to check the version of the TF command line.");
                            TfTool.checkVersion();
                            versionMessage.set(StringUtils.EMPTY);
                        } catch (final ToolException ex) {
                            final String error = LocalizationServiceImpl.getInstance().getExceptionMessage(ex);
                            logger.warn(error);
                            versionMessage.set(error);
                        } catch (final Throwable t) {
                            // Don't let unknown errors bubble out here
                            logger.warn("Unexpected error when checking the version of the command line.", t);
                        }
                    }

                    public void onSuccess() {
                        try {
                            final String error = versionMessage.get();
                            if (StringUtils.isNotEmpty(error)) {
                                logger.info("Notifying the user of the min version problem.");
                                // Notify the user that they should upgrade their version of the TF command line
                                VcsNotifier.getInstance(getProject()).notifyImportantWarning(
                                        TfPluginBundle.message(TfPluginBundle.KEY_TFVC_TF_VERSION_WARNING_TITLE),
                                        error, new NotificationListener.Adapter() {
                                            @Override
                                            protected void hyperlinkActivated(@NotNull Notification notification, @NotNull HyperlinkEvent hyperlinkEvent) {
                                                if (SETTINGS_URL_EVENT.equals(hyperlinkEvent.getDescription())) {
                                                    ShowSettingsUtil.getInstance().showSettingsDialog(myProject, TFVC_NAME);
                                                } else {
                                                    BrowserUtil.browse(TFVC_ONLINE_HELP_URL);
                                                }
                                            }
                                        });
                            }
                        } catch (Exception e) {
                            logger.warn("Failed to warn user about min version of TF command line.", e);
                        }
                    }
                }).queue();
            }
        });
    }
}
bolghuar/ShadowEditor | ShadowEditor.Web/src/command/RemoveObjectCommand.js | import Command from './Command';
/**
 * Remove-object command: removes an object from its parent in the scene
 * graph, with undo support that re-inserts it at its original index.
 * @author dforrer / https://github.com/dforrer
 * Developed as part of a project at University of Applied Sciences and Arts Northwestern Switzerland (www.fhnw.ch)
 * @param object THREE.Object3D the object to remove
 * @constructor
 */
function RemoveObjectCommand(object) {
    Command.call(this);

    this.type = 'RemoveObjectCommand';
    this.name = '移除物体';

    this.object = object;
    // Remember the parent and the object's position among its siblings so
    // undo() can restore it exactly where it was.
    this.parent = (object !== undefined) ? object.parent : undefined;
    if (this.parent !== undefined) {
        this.index = this.parent.children.indexOf(this.object);
    }
};

RemoveObjectCommand.prototype = Object.create(Command.prototype);

Object.assign(RemoveObjectCommand.prototype, {
    constructor: RemoveObjectCommand,

    // Removes the object (and the editor helpers of its whole subtree) from
    // the scene, then selects the former parent and fires the editor events.
    execute: function () {
        var scope = this.editor;

        this.object.traverse(function (child) {
            scope.removeHelper(child);
        });

        this.parent.remove(this.object);
        this.editor.select(this.parent);

        this.editor.app.call('objectRemoved', this, this.object);
        this.editor.app.call('sceneGraphChanged', this);
    },

    // Re-adds the helpers, re-inserts the object at its recorded index under
    // the original parent, selects it and fires the editor events.
    undo: function () {
        var scope = this.editor;

        this.object.traverse(function (child) {
            scope.addHelper(child);
        });

        this.parent.children.splice(this.index, 0, this.object);
        this.object.parent = this.parent;
        this.editor.select(this.object);

        this.editor.app.call('objectAdded', this, this.object);
        this.editor.app.call('sceneGraphChanged', this);
    },

    // Serializes the command: the removed object, its sibling index and the
    // parent's uuid are enough to replay or undo it later.
    toJSON: function () {
        var output = Command.prototype.toJSON.call(this);

        output.object = this.object.toJSON();
        output.index = this.index;
        output.parentUuid = this.parent.uuid;

        return output;
    },

    // Restores the command from JSON; falls back to the scene as parent and
    // re-creates the object via THREE.ObjectLoader when it is no longer in
    // the editor.
    fromJSON: function (json) {
        Command.prototype.fromJSON.call(this, json);

        this.parent = this.editor.objectByUuid(json.parentUuid);
        if (this.parent === undefined) {
            this.parent = this.editor.scene;
        }

        this.index = json.index;

        this.object = this.editor.objectByUuid(json.object.object.uuid);
        if (this.object === undefined) {
            var loader = new THREE.ObjectLoader();
            this.object = loader.parse(json.object);
        }
    }
});

export default RemoveObjectCommand;
donnol/demo | golang/stdlib/crypto/sha256/main.go | package main
import (
"crypto/sha256"
"log"
)
// main hashes a fixed message with SHA-224 and SHA-256, first through the
// one-shot Sum helpers and then through the streaming hash.Hash interface,
// logging each digest. Each algorithm's two forms yield the same bytes.
func main() {
	msg := []byte("Hello, I am jd")

	// One-shot helpers return fixed-size arrays.
	sum224 := sha256.Sum224(msg)
	log.Println(sum224)

	sum256 := sha256.Sum256(msg)
	log.Println(sum256)

	// Streaming interface: write the message, then read the digest.
	d224 := sha256.New224()
	d224.Write(msg)
	log.Println(d224.Sum(nil))

	d256 := sha256.New()
	d256.Write(msg)
	log.Println(d256.Sum(nil))
}
|
Reality-Hack-2022/TEAM-08 | HololensBuild/Il2CppOutputProject/Source/il2cppOutput/Unity.XR.OpenXR.Features.ConformanceAutomation_CodeGen.c | #include "pch-c.h"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include "codegen/il2cpp-codegen-metadata.h"
// 0x00000001 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::OnInstanceCreate(System.UInt64)
extern void ConformanceAutomationFeature_OnInstanceCreate_m353507BDAF85F1F83155419A9C34F4B0F61DCEDF (void);
// 0x00000002 System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::OnInstanceDestroy(System.UInt64)
extern void ConformanceAutomationFeature_OnInstanceDestroy_m8D8C3BB207F97BE09B82AD26A65AD45774FE5741 (void);
// 0x00000003 System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::OnSessionCreate(System.UInt64)
extern void ConformanceAutomationFeature_OnSessionCreate_mED9B58A1D5C25B80E1EA366F75844ED6D501D426 (void);
// 0x00000004 System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::OnSessionDestroy(System.UInt64)
extern void ConformanceAutomationFeature_OnSessionDestroy_m1E03E61B346FA0C487750EB0C3006C95A2C5C72C (void);
// 0x00000005 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetActive(System.String,System.String,System.Boolean)
extern void ConformanceAutomationFeature_ConformanceAutomationSetActive_mFE57A2F5C2045C3CDDA6352C0BDC4A0EF2778813 (void);
// 0x00000006 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetBool(System.String,System.String,System.Boolean)
extern void ConformanceAutomationFeature_ConformanceAutomationSetBool_m7171EED7B7928305E9BE170C5DED9C59067CBF23 (void);
// 0x00000007 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetFloat(System.String,System.String,System.Single)
extern void ConformanceAutomationFeature_ConformanceAutomationSetFloat_mB1B74525A826E136C9F8FA890667A35C8484C7EF (void);
// 0x00000008 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetVec2(System.String,System.String,UnityEngine.Vector2)
extern void ConformanceAutomationFeature_ConformanceAutomationSetVec2_m1A83BABFC115834549BBD6133A24314424633653 (void);
// 0x00000009 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetPose(System.String,System.String,UnityEngine.Vector3,UnityEngine.Quaternion)
extern void ConformanceAutomationFeature_ConformanceAutomationSetPose_m6C8A2777E0D7D8135616EE4D3931A51A6E4064FE (void);
// 0x0000000A System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::ConformanceAutomationSetVelocity(System.String,System.String,System.Boolean,UnityEngine.Vector3,System.Boolean,UnityEngine.Vector3)
extern void ConformanceAutomationFeature_ConformanceAutomationSetVelocity_m7AF333A5E46CDC913BF2493C2D3309D3DA8127C5 (void);
// 0x0000000B System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::initialize(System.IntPtr,System.UInt64)
extern void ConformanceAutomationFeature_initialize_m428D53A8FB6A4172ECD07DB64E66CAB26B413BA9 (void);
// 0x0000000C System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceActiveEXT(System.UInt64,System.UInt64,System.UInt64,System.Boolean)
extern void ConformanceAutomationFeature_xrSetInputDeviceActiveEXT_m30E1FB13AC189FFFE19602DC25ED6C9CB52E20BC (void);
// 0x0000000D System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceStateBoolEXT(System.UInt64,System.UInt64,System.UInt64,System.Boolean)
extern void ConformanceAutomationFeature_xrSetInputDeviceStateBoolEXT_mC077404959CD9B9211C106F9D8A2AE8E582896D3 (void);
// 0x0000000E System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceStateFloatEXT(System.UInt64,System.UInt64,System.UInt64,System.Single)
extern void ConformanceAutomationFeature_xrSetInputDeviceStateFloatEXT_m88E57BD39FE57B12BEECCB882F29B7453E06E71C (void);
// 0x0000000F System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceStateVector2fEXT(System.UInt64,System.UInt64,System.UInt64,UnityEngine.XR.OpenXR.NativeTypes.XrVector2f)
extern void ConformanceAutomationFeature_xrSetInputDeviceStateVector2fEXT_mED21A10F231EA75F15D391B0C371C9EC56C82D11 (void);
// 0x00000010 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceLocationEXT(System.UInt64,System.UInt64,System.UInt64,System.UInt64,UnityEngine.XR.OpenXR.NativeTypes.XrPosef)
extern void ConformanceAutomationFeature_xrSetInputDeviceLocationEXT_m3D273291D243DB90689C3DF4963A9DA611BAC778 (void);
// 0x00000011 System.Boolean UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::xrSetInputDeviceVelocityUNITY(System.UInt64,System.UInt64,System.UInt64,System.Boolean,UnityEngine.XR.OpenXR.NativeTypes.XrVector3f,System.Boolean,UnityEngine.XR.OpenXR.NativeTypes.XrVector3f)
extern void ConformanceAutomationFeature_xrSetInputDeviceVelocityUNITY_mE69F181D52CE9D99F7BF97B33EF4441F43B1A3B4 (void);
// 0x00000012 System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::.ctor()
extern void ConformanceAutomationFeature__ctor_mF2871A672422206AD2458EF891727C51197FBA3E (void);
// 0x00000013 System.Void UnityEngine.XR.OpenXR.Features.ConformanceAutomation.ConformanceAutomationFeature::.cctor()
extern void ConformanceAutomationFeature__cctor_m7A2326B97D8CF28584E0AF4557A9FD774FBCFB46 (void);
/* IL2CPP-generated registration tables for the managed assembly
 * Unity.XR.OpenXR.Features.ConformanceAutomation.dll.
 * These tables are emitted by the il2cpp build step and map each managed
 * method token to its native thunk plus an invoker-signature index.
 * Do NOT edit by hand: any change is overwritten on the next build. */
static Il2CppMethodPointer s_methodPointers[19] =
{
	ConformanceAutomationFeature_OnInstanceCreate_m353507BDAF85F1F83155419A9C34F4B0F61DCEDF,
	ConformanceAutomationFeature_OnInstanceDestroy_m8D8C3BB207F97BE09B82AD26A65AD45774FE5741,
	ConformanceAutomationFeature_OnSessionCreate_mED9B58A1D5C25B80E1EA366F75844ED6D501D426,
	ConformanceAutomationFeature_OnSessionDestroy_m1E03E61B346FA0C487750EB0C3006C95A2C5C72C,
	ConformanceAutomationFeature_ConformanceAutomationSetActive_mFE57A2F5C2045C3CDDA6352C0BDC4A0EF2778813,
	ConformanceAutomationFeature_ConformanceAutomationSetBool_m7171EED7B7928305E9BE170C5DED9C59067CBF23,
	ConformanceAutomationFeature_ConformanceAutomationSetFloat_mB1B74525A826E136C9F8FA890667A35C8484C7EF,
	ConformanceAutomationFeature_ConformanceAutomationSetVec2_m1A83BABFC115834549BBD6133A24314424633653,
	ConformanceAutomationFeature_ConformanceAutomationSetPose_m6C8A2777E0D7D8135616EE4D3931A51A6E4064FE,
	ConformanceAutomationFeature_ConformanceAutomationSetVelocity_m7AF333A5E46CDC913BF2493C2D3309D3DA8127C5,
	ConformanceAutomationFeature_initialize_m428D53A8FB6A4172ECD07DB64E66CAB26B413BA9,
	ConformanceAutomationFeature_xrSetInputDeviceActiveEXT_m30E1FB13AC189FFFE19602DC25ED6C9CB52E20BC,
	ConformanceAutomationFeature_xrSetInputDeviceStateBoolEXT_mC077404959CD9B9211C106F9D8A2AE8E582896D3,
	ConformanceAutomationFeature_xrSetInputDeviceStateFloatEXT_m88E57BD39FE57B12BEECCB882F29B7453E06E71C,
	ConformanceAutomationFeature_xrSetInputDeviceStateVector2fEXT_mED21A10F231EA75F15D391B0C371C9EC56C82D11,
	ConformanceAutomationFeature_xrSetInputDeviceLocationEXT_m3D273291D243DB90689C3DF4963A9DA611BAC778,
	ConformanceAutomationFeature_xrSetInputDeviceVelocityUNITY_mE69F181D52CE9D99F7BF97B33EF4441F43B1A3B4,
	ConformanceAutomationFeature__ctor_mF2871A672422206AD2458EF891727C51197FBA3E,
	ConformanceAutomationFeature__cctor_m7A2326B97D8CF28584E0AF4557A9FD774FBCFB46,
};
/* Per-method indices into the global invoker table (one entry per method
 * above, same order). Values are meaningful only to the il2cpp runtime. */
static const int32_t s_InvokerIndices[19] =
{
	3082,
	4344,
	4344,
	4344,
	6146,
	6146,
	6149,
	6152,
	5800,
	5427,
	6897,
	5811,
	5811,
	5812,
	5813,
	5535,
	5376,
	5201,
	7617,
};
extern const CustomAttributesCacheGenerator g_Unity_XR_OpenXR_Features_ConformanceAutomation_AttributeGenerators[];
IL2CPP_EXTERN_C const Il2CppCodeGenModule g_Unity_XR_OpenXR_Features_ConformanceAutomation_CodeGenModule;
/* Module descriptor tying the method-pointer and invoker tables to the
 * assembly name; consumed by the il2cpp runtime at startup. */
const Il2CppCodeGenModule g_Unity_XR_OpenXR_Features_ConformanceAutomation_CodeGenModule =
{
	"Unity.XR.OpenXR.Features.ConformanceAutomation.dll",
	19,
	s_methodPointers,
	0,
	NULL,
	s_InvokerIndices,
	0,
	NULL,
	0,
	NULL,
	0,
	NULL,
	NULL,
	g_Unity_XR_OpenXR_Features_ConformanceAutomation_AttributeGenerators,
	NULL, // module initializer,
	NULL,
	NULL,
	NULL,
};
|
slix007/XChange | xchange-ripple/src/test/java/org/knowm/xchange/ripple/dto/account/RippleAccountIntegration.java | package org.knowm.xchange.ripple.dto.account;
import static org.fest.assertions.api.Assertions.assertThat;
import java.io.IOException;
import org.junit.Test;
import org.knowm.xchange.Exchange;
import org.knowm.xchange.ExchangeFactory;
import org.knowm.xchange.ripple.RippleExchange;
import org.knowm.xchange.ripple.service.RippleAccountServiceRaw;
public class RippleAccountIntegration {

    /**
     * Live integration check: fetches the account settings of a known Ripple
     * address through the exchange's raw account service and verifies the
     * reported account id and domain. Requires network access to the Ripple
     * REST endpoint, so this is an integration (not unit) test.
     */
    @Test
    public void accountSettingsTest() throws IOException {
        Exchange exchange = ExchangeFactory.INSTANCE.createExchange(RippleExchange.class.getName());
        RippleAccountServiceRaw rawAccountService = (RippleAccountServiceRaw) exchange.getAccountService();
        RippleSettings settings = rawAccountService.getRippleAccountSettings("<KEY>").getSettings();
        assertThat(settings.getAccount()).isEqualTo("<KEY>");
        assertThat(settings.getDomain()).isEqualTo("bitstamp.net");
    }
}
|
OSADP/C2C-RI | C2CRIBuildDir/projects/C2C-RI/src/NTCIP2306v01_69/src/org/fhwa/c2cri/ntcip2306v109/tags/RIValidateTag.java | <filename>C2CRIBuildDir/projects/C2C-RI/src/NTCIP2306v01_69/src/org/fhwa/c2cri/ntcip2306v109/tags/RIValidateTag.java
/**
*
*/
package org.fhwa.c2cri.ntcip2306v109.tags;
import java.net.URL;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import net.sf.jameleon.exception.JameleonScriptException;
import org.fhwa.c2cri.messagemanager.Message;
import org.fhwa.c2cri.messagemanager.MessageManager;
import org.fhwa.c2cri.ntcip2306v109.NTCIP2306FunctionTag;
import org.fhwa.c2cri.ntcip2306v109.encoding.XMLValidator;
import org.fhwa.c2cri.ntcip2306v109.wsdl.RIWSDL;
/**
* Validates the requested message
*
* @author TransCore ITS,LLC
* Last Updated: 9/12/2013
* @jameleon.function name="ri-validate" type="action"
* @jameleon.step Validate the requested message
*/
public class RIValidateTag extends NTCIP2306FunctionTag {

    /** Maximum number of individual validation errors echoed into the failure message. */
    private static final int MAX_REPORTED_ERRORS = 50;

    /**
     * The name of the message to validate
     *
     * @jameleon.attribute contextName="RIMessageName" required="true"
     */
    protected String messageName;

    /**
     * Future: Flag indicating whether the message should be validated against the testConfig WSDL
     * or the Test Suite WSDL.
     *
     * @jameleon.attribute
     */
    protected boolean useTheProvidedWSDL;

    /**
     * Validates the message named by {@link #messageName} against the schemas
     * referenced by the session WSDL.
     * <p>
     * Fails (via {@link JameleonScriptException}) when the WSDL itself is
     * invalid, when the message has no content, or when the message violates
     * the schema; at most {@link #MAX_REPORTED_ERRORS} individual errors are
     * included in the failure text.
     *
     * Pre-Conditions: N/A
     * Post-Conditions: N/A
     */
    public void testBlock() {
        try {
            MessageManager theManager = MessageManager.getInstance();
            Message newMessage = theManager.getMessage(messageName);
            RIWSDL riWSDLInstance = sessionTag.getWsdlInstance();
            if (!riWSDLInstance.isSchemaDocumentsExist() || !riWSDLInstance.isWsdlValidatedAgainstSchema()) {
                // The WSDL itself is broken; report its errors instead of validating the message.
                StringBuilder wsdlErrors = new StringBuilder();
                for (String wsdlError : riWSDLInstance.getWsdlErrors()) {
                    log.debug("WSDL Error: " + wsdlError);
                    wsdlErrors.append(wsdlError).append("\n");
                }
                throw new JameleonScriptException("The WSDL contains the following errors: \n" + wsdlErrors, this);
            }
            XMLValidator theValidator = new XMLValidator();
            Map<String, URL> schemaMap = riWSDLInstance.getWsdlSchemas();
            ArrayList<String> schemaList = new ArrayList<>();
            for (URL schemaUrl : schemaMap.values()) {
                schemaList.add(schemaUrl.getPath());
            }
            // Ensure schema resolution uses this plugin's class loader.
            Thread.currentThread().setContextClassLoader(RIValidateTag.class.getClassLoader());
            theValidator.setSchemaReferenceList(schemaList);
            String messageContent = new String(newMessage.getMessageBody());
            if (messageContent.isEmpty()) {
                throw new JameleonScriptException("Error Validating Message: There is no message content to validate.", this);
            }
            theValidator.isValidXML(messageContent);
            theValidator.isXMLValidatedToSchema(messageContent);
            int numberOfErrors = theValidator.getErrors().size()
                    + theValidator.getSchemaValidationContentErrorList().size()
                    + theValidator.getSchemaValidationValueErrorList().size();
            if (numberOfErrors > 0) {
                StringBuilder messageErrors = new StringBuilder();
                if (numberOfErrors > MAX_REPORTED_ERRORS) {
                    messageErrors.append("Printing the first 50 of ").append(numberOfErrors).append(" Errors Found.\n");
                }
                // Shared cap across the three error lists, preserving their original order.
                int printed = 0;
                printed = appendCapped(messageErrors, theValidator.getErrors(), printed);
                printed = appendCapped(messageErrors, theValidator.getSchemaValidationContentErrorList(), printed);
                appendCapped(messageErrors, theValidator.getSchemaValidationValueErrorList(), printed);
                throw new JameleonScriptException("Error Validating Message: The following errors were found.\n" + messageErrors, this);
            }
        } catch (Exception ex) {
            ex.printStackTrace();
            throw new JameleonScriptException("Error validating the message: '" + ex.getMessage(), this);
        }
    }

    /**
     * Appends each error in {@code errors} (one per line) to {@code out} until
     * {@link #MAX_REPORTED_ERRORS} lines have been written across all calls.
     *
     * @param out the report being built
     * @param errors errors to append
     * @param alreadyPrinted number of errors consumed by earlier calls
     * @return updated running count (including errors skipped over the cap)
     */
    private static int appendCapped(StringBuilder out, List<String> errors, int alreadyPrinted) {
        int count = alreadyPrinted;
        for (String theError : errors) {
            if (count < MAX_REPORTED_ERRORS) {
                out.append(theError).append("\n");
            }
            count++;
        }
        return count;
    }
}
|
PRImA-Research-Lab/semantic-labelling | src/org/primaresearch/clc/phd/workflow/validation/gui/model/ValidationResultTreeItem.java | <reponame>PRImA-Research-Lab/semantic-labelling
package org.primaresearch.clc.phd.workflow.validation.gui.model;
import java.util.Iterator;
import javax.swing.tree.DefaultMutableTreeNode;
import org.primaresearch.clc.phd.workflow.validation.WorkflowValidationResult;
/**
* Tree item specialisation for workflow validation result items (errors, warnings etc.).
*
* @author clc
*
*/
public class ValidationResultTreeItem extends DefaultMutableTreeNode {

	private static final long serialVersionUID = 1L;

	/** Wrapped validation result (error, warning, ...) this node represents. */
	private final WorkflowValidationResult validationResult;

	/**
	 * Builds the node and, recursively, one child node per child result.
	 */
	public ValidationResultTreeItem(WorkflowValidationResult res) {
		this.validationResult = res;
		if (res.getChildren() != null) {
			for (WorkflowValidationResult child : res.getChildren()) {
				add(new ValidationResultTreeItem(child));
			}
		}
	}

	/** The tree displays the result's caption. */
	@Override
	public String toString() {
		return validationResult.getCaption();
	}

	public WorkflowValidationResult getValidationResult() {
		return validationResult;
	}
}
|
vkuznet/PyQueryBuilder | pyquerybuilder/qb/LinkObj.py | #!/usr/bin/env python
"""
This class reads sqlalchemy schema metadata in order to construct
joins for an arbitrary query.
Review all the foreign key links.
"""
__author__ = "<NAME> <<EMAIL>>"
__revision__ = "$Revision: 1.11 $"
class LinkObj(object):
    """Encapsulates a single foreign-key link between two table columns.

    Instances compare (and hash) by ``name`` only, so two links with the
    same name are considered identical regardless of their endpoints.
    """

    def __init__(self, sqlalchemyfk=None):
        """Initialize from a SQLAlchemy ForeignKey, or empty when omitted.

        :param sqlalchemyfk: a sqlalchemy ForeignKey; ``parent`` is the
            local column and ``column`` the referenced column.
        """
        if sqlalchemyfk is not None:
            self.name = sqlalchemyfk.name
            self.lcolumn = sqlalchemyfk.parent.name
            self.rcolumn = sqlalchemyfk.column.name
            self.ltable = sqlalchemyfk.parent.table.name
            self.rtable = sqlalchemyfk.column.table.name
            self.fkey = sqlalchemyfk
        else:
            self.name = ""
            self.lcolumn = ""
            self.rcolumn = ""
            self.ltable = ""
            self.rtable = ""

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.name == other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ makes the class unhashable
        # on Python 3, which breaks storing links in sets (see
        # CLinkObj.linklist). Hash on the same key __eq__ compares.
        return hash(self.name)

    def set(self, name, ltable, rtable, lcolumn, rcolumn):
        """Set LinkObj fields explicitly from table/column names."""
        self.name = name
        self.ltable = ltable
        self.rtable = rtable
        self.lcolumn = lcolumn
        self.rcolumn = rcolumn
class CLinkObj(object):
    """A composite link between two tables.

    May aggregate several foreign-key links or custom links, but every
    component link must join exactly the same (ltable, rtable) pair.
    Instances compare (and hash) by ``name`` only.
    """

    def __init__(self, foreignkeys=None, name=None):
        """Initialize CLinkObj with name.

        :param foreignkeys: optional iterable of sqlalchemy ForeignKeys;
            raises Exception if they disagree on direction/tables.
        :param name: identifier used for equality and hashing.
        """
        self.name = name
        self.lcolumn = []
        self.rcolumn = []
        self.ltable = None
        self.rtable = None
        self.linklist = set()
        self.fks = foreignkeys
        if foreignkeys is not None:
            for fks in foreignkeys:
                link = LinkObj(fks)
                # First link fixes the direction; the rest must match it.
                if self.ltable is None:
                    self.ltable = link.ltable
                    self.rtable = link.rtable
                if self.ltable != link.ltable or self.rtable != link.rtable:
                    raise Exception("""conflict on links, different direction
                        or more than three table involving.""")
                self.lcolumn.append(link.lcolumn)
                self.rcolumn.append(link.rcolumn)
                self.linklist.add(link)
        self.weight = 0
        self.indexed = 1

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.name == other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Keep the class hashable under Python 3 despite the custom __eq__.
        return hash(self.name)

    def set(self, name, links):
        """Set CLinkObj from already-constructed LinkObjs.

        Fix: the original assigned ltable/rtable unconditionally on every
        iteration, so the direction-conflict check below could never fire.
        Now only the first link fixes the direction, as in __init__.
        """
        self.name = name
        for link in links:
            if self.ltable is None:
                self.ltable = link.ltable
                self.rtable = link.rtable
            if self.ltable != link.ltable or self.rtable != link.rtable:
                raise Exception("""conflict on links, different direction
                or more than three table involving.""")
            self.lcolumn.append(link.lcolumn)
            self.rcolumn.append(link.rcolumn)
        self.linklist = links
|
MikkoVirenius/ptv-1.7 | src/PTV.Application.Web/wwwroot/js/app/appComponents/PublishingEntityDialog/PublishingEntityDialog.js | <gh_stars>0
/**
* The MIT License
* Copyright (c) 2016 Population Register Centre (VRK)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import React from 'react'
import PropTypes from 'prop-types'
import { defineMessages, injectIntl } from 'react-intl'
import { ModalContent } from 'sema-ui-components'
import { compose } from 'redux'
import { connect } from 'react-redux'
import { updateUI } from 'util/redux-ui/action-reducer'
import ModalDialog from 'appComponents/ModalDialog'
import PublishingEntityForm from 'appComponents/PublishingEntityForm'
// react-intl descriptors for the dialog chrome (title plus explanatory text
// about language-version visibility). The defaultMessage strings are Finnish
// and are runtime content — do not alter them here; translations come from
// the intl message catalogs keyed by `id`.
const messages = defineMessages({
  dialogTitle: {
    id: 'Components.PublishingEntityDialog.Title',
    defaultMessage: 'Kieliversioiden näkyvyys'
  },
  dialogDescription: {
    id: 'Components.PublishingEntityDialog.Description',
    defaultMessage: 'Valitse, mitkä kieliversiot haluat julkaista. ' +
    'Jos valitset, että kieliversio näkyy vain PTV:ssä, ja klikkaat Julkaise-nappia, ' +
    'tällöin myös palvelun nykyinen kieliversio piilotetaan loppukäyttäjiltä.'
  }
})
const PublishingEntityDialog = (
{ intl: { formatMessage },
type,
name,
updateUI,
entityId,
...rest
}) => {
const handleCloseDialog = () => updateUI([name], 'isOpen', false)
return (
<ModalDialog
name={name}
title={formatMessage(messages.dialogTitle)}
description={formatMessage(messages.dialogDescription)}
contentLabel='Publishing entity'
{...rest}
>
<ModalContent>
<PublishingEntityForm onCancel={handleCloseDialog} type={type} id={entityId} />
</ModalContent>
</ModalDialog>
)
}
PublishingEntityDialog.propTypes = {
type: PropTypes.string.isRequired,
entityId: PropTypes.string,
name: PropTypes.string.isRequired,
intl: PropTypes.object.isRequired,
actions: PropTypes.object,
updateUI: PropTypes.func.isRequired
}
export default compose(
injectIntl,
connect(null, { updateUI })
)(PublishingEntityDialog)
|
cstom4994/SourceEngineRebuild | src/engine/src/net_chan.cpp | //========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose: net_chan.cpp: implementation of the CNetChan_t struct.
//
//=============================================================================//
#include "../thirdparty/bzip2/bzlib.h"
#include "net_chan.h"
#include "tier1/strtools.h"
#include "filesystem_engine.h"
#include "demo.h"
#include "convar.h"
#include "mathlib/mathlib.h"
#include "protocol.h"
#include "inetmsghandler.h"
#include "host.h"
#include "netmessages.h"
#include "tier0/vcrmode.h"
#include "tier0/etwprof.h"
#include "tier0/vprof.h"
#include "net_ws_headers.h"
#include "net_ws_queued_packet_sender.h"
#include "filesystem_init.h"
// memdbgon must be the last include file in a .cpp file!!!
#include "tier0/memdbgon.h"
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
ConVar net_showudp("net_showudp", "0", 0, "Dump UDP packets summary to console");
ConVar net_showtcp("net_showtcp", "0", 0, "Dump TCP stream summary to console");
ConVar net_blocksize("net_maxfragments", NETSTRING(MAX_ROUTABLE_PAYLOAD), 0, "Max fragment bytes per packet", true,
FRAGMENT_SIZE, true, MAX_ROUTABLE_PAYLOAD);
static ConVar net_showmsg("net_showmsg", "0", 0, "Show incoming message: <0|1|name>");
static ConVar net_showfragments("net_showfragments", "0", 0, "Show netchannel fragments");
static ConVar net_showpeaks("net_showpeaks", "0", 0, "Show messages for large packets only: <size>");
static ConVar net_blockmsg("net_blockmsg", "0", FCVAR_CHEAT,
"Discards incoming message: <0|1|name>"); // "none" here is bad, causes superfluous strcmp on every net message
static ConVar net_showdrop("net_showdrop", "0", 0, "Show dropped packets in console");
static ConVar net_drawslider("net_drawslider", "0", 0, "Draw completion slider during signon");
static ConVar net_chokeloopback("net_chokeloop", "0", 0, "Apply bandwidth choke to loopback packets");
static ConVar net_maxfilesize("net_maxfilesize", "16", 0, "Maximum allowed file size for uploading in MB", true, 0,
true, 64);
static ConVar net_compresspackets("net_compresspackets", "1", 0, "Use compression on game packets.");
static ConVar net_compresspackets_minsize("net_compresspackets_minsize", "1000", 0,
"Don't bother compressing packets below this size.");
static ConVar net_maxcleartime("net_maxcleartime", "4.0", 0,
"Max # of seconds we can wait for next packets to be sent based on rate setting (0 == no limit).");
static ConVar net_maxpacketdrop("net_maxpacketdrop", "0", 0,
"Ignore any packets with the sequence number more than this ahead (0 == no limit)");
static ConVar net_droponsendoverflow("net_droponsendoverflow", "1", 0,
"If enabled, channel will drop client when sending too much data causes buffer overrun");
extern ConVar net_maxroutable;
extern int NET_ConnectSocket(int nSock, const netadr_t &addr);
extern void NET_CloseSocket(int hSocket, int sock = -1);
extern int NET_SendStream(int nSock, const char *buf, int len, int flags);
extern int NET_ReceiveStream(int nSock, char *buf, int len, int flags);
// If the network connection hasn't been active in this many seconds, display some warning text.
#define CONNECTION_PROBLEM_TIME 4.0f // assume network problem after this time
#define BYTES2FRAGMENTS(i) ((i+FRAGMENT_SIZE-1)/FRAGMENT_SIZE)
#define FLIPBIT(v, b) if (v&b) v &= ~b; else v |= b;
// We only need to checksum packets on the PC and only when we're actually sending them over the network.
static bool ShouldChecksumPackets() {
    // Consoles never checksum; on PC it is only needed for real network
    // traffic (multiplayer), not loopback/single-player.
    return IsPC() && NET_IsMultiplayer();
}
// True when the remote endpoint is the local loopback address.
bool CNetChan::IsLoopback() const {
    return remote_address.IsLoopback();
}
// True when the channel has no real endpoint (NA_NULL address), i.e. it is a
// fake channel such as the one used for demo playback.
bool CNetChan::IsNull() const {
    // Redundant `? true : false` removed; the comparison is already a bool.
    return remote_address.GetType() == NA_NULL;
}
/*
==============================
CNetChan::Clear
==============================
*/
// Frees all queued reliable/unreliable fragment data and resets subchannel
// state without closing the channel itself. Safe to call re-entrantly from
// inside ProcessMessages (flags m_bClearedDuringProcessing).
void CNetChan::Clear() {
    int i;
    // clear waiting lists
    for (i = 0; i < MAX_STREAMS; i++) {
        while (m_WaitingList[i].Count())
            RemoveHeadInWaitingList(i);
        if (m_ReceiveList[i].buffer) {
            delete[] m_ReceiveList[i].buffer;
            m_ReceiveList[i].buffer = NULL;
        }
    }
    for (i = 0; i < MAX_SUBCHANNELS; i++) {
        if (m_SubChannels[i].state == SUBCHANNEL_TOSEND) {
            int bit = 1 << i; // flip the reliable-state bit back since this data was never sent
            FLIPBIT(m_nOutReliableState, bit);
            m_SubChannels[i].Free();
        } else if (m_SubChannels[i].state == SUBCHANNEL_WAITING) {
            // data is already out, mark channel as dirty
            m_SubChannels[i].state = SUBCHANNEL_DIRTY;
        }
    }
    if (m_bProcessingMessages) {
        // ProcessMessages() needs to know we just nuked the receive list from under it or bad things ensue.
        m_bClearedDuringProcessing = true;
    }
    Reset();
}
// Opportunistically compresses the next queued fragment block on each stream
// (in-memory buffers and file payloads) before any of its fragments hit the
// wire. No-op unless the channel negotiated compression and
// net_compresspackets is set; skipped under VCR because compressed sizes are
// not bit-reproducible (see comment below).
void CNetChan::CompressFragments() {
    // We don't want this to go in the VCR file, because the compressed size can be different. The reason is
    // that the bf_writes that contributed to this message may have uninitialized bits at the end of the buffer
    // (for example if it uses only the first couple bits of the last byte in the message). If the
    // last few bits are different, it can produce a different compressed size.
    if (!m_bUseCompression || !net_compresspackets.GetBool())
        return;
    if (VCRGetMode() != VCR_Disabled)
        return;
    VPROF_BUDGET("CNetChan::CompressFragments", VPROF_BUDGETGROUP_OTHER_NETWORKING);
    // write fragments for both streams
    for (int i = 0; i < MAX_STREAMS; i++) {
        if (m_WaitingList[i].Count() == 0)
            continue;
        // get the first fragments block which is send next
        dataFragments_t *data = m_WaitingList[i][0];
        // if data is already compressed or too small, skip it
        if (data->isCompressed || (int) data->bytes < net_compresspackets_minsize.GetInt())
            continue;
        // if we already started sending this block, we can't compress it anymore
        if (data->ackedFragments > 0 || data->pendingFragments > 0)
            continue;
        //ok, compress it.
        if (data->buffer) {
            CFastTimer compressTimer;
            compressTimer.Start();
            // fragments data is in memory
            unsigned int compressedSize = COM_GetIdealDestinationCompressionBufferSize_Snappy(data->bytes);
            char *compressedData = new char[compressedSize];
            // only keep the compressed form if it is actually smaller
            if (COM_BufferToBufferCompress_Snappy(compressedData, &compressedSize, data->buffer, data->bytes) &&
                (compressedSize < data->bytes)) {
                compressTimer.End();
                DevMsg("Compressing fragments (%d -> %d bytes): %.2fms\n",
                       data->bytes, compressedSize, compressTimer.GetDuration().GetMillisecondsF());
                // copy compressed data but dont reallocate memory
                Q_memcpy(data->buffer, compressedData, compressedSize);
                data->nUncompressedSize = data->bytes;
                data->bytes = compressedSize;
                data->numFragments = BYTES2FRAGMENTS(data->bytes);
                data->isCompressed = true;
            }
            delete[] compressedData; // free temp buffer
        } else // it's a file
        {
            Assert(data->file != FILESYSTEM_INVALID_HANDLE);
            char compressedfilename[MAX_OSPATH];
            int compressedFileSize = -1;
            FileHandle_t hZipFile = FILESYSTEM_INVALID_HANDLE;
            // check to see if there is a compressed version of the file
            Q_snprintf(compressedfilename, sizeof(compressedfilename), "%s.ztmp", data->filename);
            // check the timestamps
            int compressedFileTime = g_pFileSystem->GetFileTime(compressedfilename);
            int fileTime = g_pFileSystem->GetFileTime(data->filename);
            if (compressedFileTime >= fileTime) {
                // compressed file is newer than uncompressed file, use this one
                hZipFile = g_pFileSystem->Open(compressedfilename, "rb", NULL);
            }
            if (hZipFile != FILESYSTEM_INVALID_HANDLE) {
                // use the existing compressed file
                compressedFileSize = g_pFileSystem->Size(hZipFile);
            } else {
                // create compressed version of source file (.ztmp cache next to it)
                unsigned int uncompressedSize = data->bytes;
                unsigned int compressedSize = COM_GetIdealDestinationCompressionBufferSize_Snappy(uncompressedSize);
                char *uncompressed = new char[uncompressedSize];
                char *compressed = new char[compressedSize];
                // read in source file
                g_pFileSystem->Read(uncompressed, data->bytes, data->file);
                // compress into buffer
                if (COM_BufferToBufferCompress_Snappy(compressed, &compressedSize, uncompressed, uncompressedSize)) {
                    // write out to disk compressed version
                    hZipFile = g_pFileSystem->Open(compressedfilename, "wb", NULL);
                    if (hZipFile != FILESYSTEM_INVALID_HANDLE) {
                        DevMsg("Creating compressed version of file %s (%d -> %d)\n", data->filename, data->bytes,
                               compressedSize);
                        g_pFileSystem->Write(compressed, compressedSize, hZipFile);
                        g_pFileSystem->Close(hZipFile);
                        // and open zip file it again for reading
                        hZipFile = g_pFileSystem->Open(compressedfilename, "rb", NULL);
                        if (hZipFile != FILESYSTEM_INVALID_HANDLE) {
                            // ok, now everything if fine
                            compressedFileSize = compressedSize;
                        }
                    }
                }
                delete[] uncompressed;
                delete[] compressed;
            }
            if (compressedFileSize > 0) {
                // use compressed file handle instead of original file
                g_pFileSystem->Close(data->file);
                data->file = hZipFile;
                data->nUncompressedSize = data->bytes;
                data->bytes = compressedFileSize;
                data->numFragments = BYTES2FRAGMENTS(data->bytes);
                data->isCompressed = true;
            }
        }
    }
}
// In-place decompression of a received fragment block: replaces data->buffer
// with a freshly allocated buffer holding the original payload and clears the
// compressed flag. No-op when the block is not marked compressed.
void CNetChan::UncompressFragments(dataFragments_t *data) {
    if (!data->isCompressed)
        return;
    VPROF("UncompressFragments");
    // allocate buffer for uncompressed data, align to 4 bytes boundary
    char *newbuffer = new char[PAD_NUMBER(data->nUncompressedSize, 4)];
    unsigned int uncompressedSize = data->nUncompressedSize;
    // uncompress data
    COM_BufferToBufferDecompress(newbuffer, &uncompressedSize, data->buffer, data->bytes);
    Assert(uncompressedSize == data->nUncompressedSize);
    // free old buffer and set new buffer
    delete[] data->buffer;
    data->buffer = newbuffer;
    data->bytes = uncompressedSize;
    data->isCompressed = false;
}
// Writes a net_File request for 'filename' onto the reliable stream and
// returns the transfer ID assigned to it (monotonically increasing counter).
// Field order (type, id, name, request-bit) is part of the wire protocol.
unsigned int CNetChan::RequestFile(const char *filename) {
    m_FileRequestCounter++;
    if (net_showfragments.GetInt() == 2) {
        DevMsg("RequestFile: %s (ID %i)\n", filename, m_FileRequestCounter);
    }
    m_StreamReliable.WriteUBitLong(net_File, NETMSG_TYPE_BITS);
    m_StreamReliable.WriteUBitLong(m_FileRequestCounter, 32);
    m_StreamReliable.WriteString(filename);
    m_StreamReliable.WriteOneBit(1); // request flag: 1 = we want this file
    return m_FileRequestCounter;
}
// Retired legacy entry point kept for interface compatibility; any call
// indicates a protocol mismatch, so fail hard.
void CNetChan::RequestFile_OLD(const char *filename, unsigned int transferID) {
    Error("Called RequestFile_OLD");
}
// Writes a net_File message refusing the remote host's request 'transferID'
// for 'filename' (same wire layout as RequestFile, but request-bit = 0).
void CNetChan::DenyFile(const char *filename, unsigned int transferID) {
    if (net_showfragments.GetInt() == 2) {
        DevMsg("DenyFile: %s (ID %i)\n", filename, transferID);
    }
    m_StreamReliable.WriteUBitLong(net_File, NETMSG_TYPE_BITS);
    m_StreamReliable.WriteUBitLong(transferID, 32);
    m_StreamReliable.WriteString(filename);
    m_StreamReliable.WriteOneBit(0); // deny this file
}
// Queues 'filename' for reliable background transfer under 'transferID'.
// Returns true on success (or trivially on a fake/null channel); on failure
// a deny message is sent to the remote host and false is returned.
bool CNetChan::SendFile(const char *filename, unsigned int transferID) {
    if (IsNull())
        return true; // fake channel (e.g. demo playback): nothing to send
    if (!filename)
        return false;
    // Strip any leading path separators so the name is repository-relative.
    const char *sendfile = filename;
    while (*sendfile && PATHSEPARATOR(*sendfile))
        ++sendfile;
    // Don't transfer exe, vbs, com, bat-type files.
    if (!IsValidFileForTransfer(sendfile))
        return false;
    if (!CreateFragmentsFromFile(sendfile, FRAG_FILE_STREAM, transferID)) {
        DenyFile(sendfile, transferID); // send host a deny message
        return false;
    }
    if (net_showfragments.GetInt() == 2) {
        DevMsg("SendFile: %s (ID %i)\n", sendfile, transferID);
    }
    return true;
}
// Tears the channel down: frees buffers, optionally sends a final disconnect
// message with 'pReason', closes the TCP stream socket, notifies the handler,
// frees registered net messages and unregisters from the global channel list.
// Deletion is deferred when called from within message processing.
void CNetChan::Shutdown(const char *pReason) {
    // send disconnect
    if (m_Socket < 0)
        return;
    Clear(); // free all buffers (reliable & unreliable)
    if (pReason) {
        // send disconnect message
        m_StreamUnreliable.WriteUBitLong(net_Disconnect, NETMSG_TYPE_BITS);
        m_StreamUnreliable.WriteString(pReason);
        Transmit(); // push message out
    }
    if (m_StreamSocket) {
        NET_CloseSocket(m_StreamSocket, m_Socket);
        m_StreamSocket = 0;
        m_StreamActive = false;
    }
    m_Socket = -1; // signals that netchannel isn't valid anymore
    remote_address.Clear();
    if (m_MessageHandler) {
        m_MessageHandler->ConnectionClosing(pReason);
        m_MessageHandler = NULL;
    }
    // free new messages
    int numtypes = m_NetMessages.Count();
    for (int i = 0; i < numtypes; i++) {
        Assert(m_NetMessages[i]);
        delete m_NetMessages[i];
    }
    m_NetMessages.Purge();
    m_DemoRecorder = NULL;
    if (m_bProcessingMessages) {
        NET_RemoveNetChannel(this, false); // Delay the deletion or it'll crash in the message-processing loop.
        m_bShouldDelete = true;
    } else {
        NET_RemoveNetChannel(this, true);
    }
}
// Default-constructs the channel in a disconnected state: invalid socket,
// cleared address, default rate/timeout, and sequence counters primed so the
// first packet after Setup() is not mistaken for a drop.
CNetChan::CNetChan() {
    m_nSplitPacketSequence = 1;
    m_nMaxRoutablePayloadSize = MAX_ROUTABLE_PAYLOAD;
    m_bProcessingMessages = false;
    m_bShouldDelete = false;
    m_bClearedDuringProcessing = false;
    m_bStreamContainsChallenge = false;
    m_Socket = -1; // invalid
    remote_address.Clear();
    last_received = 0;
    connect_time = 0;
    m_nProtocolVersion = -1; // invalid
    Q_strncpy(m_Name, "", sizeof(m_Name));
    m_MessageHandler = NULL;
    m_DemoRecorder = NULL;
    m_StreamUnreliable.SetDebugName("netchan_t::unreliabledata");
    m_StreamReliable.SetDebugName("netchan_t::reliabledata");
    m_Rate = DEFAULT_RATE;
    m_Timeout = SIGNON_TIME_OUT;
    // Prevent the first message from getting dropped after connection is set up.
    m_nOutSequenceNr = 1; // otherwise it looks like a
    m_nInSequenceNr = 0;
    m_nOutSequenceNrAck = 0;
    m_nOutReliableState = 0; // our current reliable state
    m_nInReliableState = 0; // last remote reliable state
    // m_nLostPackets = 0;
    m_ChallengeNr = 0;
    m_StreamSocket = 0;
    m_StreamActive = false;
    ResetStreaming();
    m_MaxReliablePayloadSize = NET_MAX_PAYLOAD;
    m_FileRequestCounter = 0;
    m_bFileBackgroundTranmission = true;
    m_bUseCompression = false;
    m_nQueuedPackets = 0;
    m_flRemoteFrameTime = 0;
    m_flRemoteFrameTimeStdDeviation = 0;
    m_NetMessages.SetGrowSize(1);
    FlowReset();
}
// Destructor delegates to Shutdown so buffers, sockets and handler links are
// always torn down, even if the owner never called Shutdown explicitly.
CNetChan::~CNetChan() {
    Shutdown("NetChannel removed.");
}
/*
==============
CNetChan::Setup
called to open a channel to a remote system
==============
*/
// Opens the channel to a remote endpoint: stores address and handler, resets
// sequence/reliable state and all subchannels, sizes the data buffers, and
// finally asks the handler to register its net messages. 'adr' may be NULL
// for fake channels (demo playback etc.).
void CNetChan::Setup(int sock, netadr_t *adr, const char *name, INetChannelHandler *handler,
                     int nProtocolVersion) {
    Assert(name);
    Assert (handler);
    m_Socket = sock;
    if (m_StreamSocket) {
        NET_CloseSocket(m_StreamSocket);
        m_StreamSocket = 0;
    }
    // remote_address may be NULL for fake channels (demo playback etc)
    if (adr) {
        remote_address = *adr;
    } else {
        remote_address.Clear(); // it's a demo fake channel
        remote_address.SetType(NA_NULL);
    }
    last_received = net_time;
    connect_time = net_time;
    Q_strncpy(m_Name, name, sizeof(m_Name));
    m_MessageHandler = handler;
    m_nProtocolVersion = nProtocolVersion;
    m_DemoRecorder = NULL;
    MEM_ALLOC_CREDIT();
    SetMaxBufferSize(false, NET_MAX_DATAGRAM_PAYLOAD);
    SetMaxBufferSize(false, NET_MAX_DATAGRAM_PAYLOAD, true); //Set up voice buffer
    SetMaxBufferSize(true, NET_MAX_PAYLOAD);
    m_Rate = DEFAULT_RATE;
    m_Timeout = SIGNON_TIME_OUT;
    // Prevent the first message from getting dropped after connection is set up.
    m_nOutSequenceNr = 1; // otherwise it looks like a
    m_nInSequenceNr = 0;
    m_nOutSequenceNrAck = 0;
    m_nOutReliableState = 0; // our current reliable state
    m_nInReliableState = 0; // last remote reliable state
    m_nChokedPackets = 0;
    m_fClearTime = 0.0;
    m_ChallengeNr = 0;
    m_StreamSocket = 0;
    m_StreamActive = false;
    m_ReceiveList[FRAG_NORMAL_STREAM].buffer = NULL;
    m_ReceiveList[FRAG_FILE_STREAM].buffer = NULL;
    // init 8 subchannels
    for (int i = 0; i < MAX_SUBCHANNELS; i++) {
        m_SubChannels[i].index = i; // set index once
        m_SubChannels[i].Free();
    }
    ResetStreaming();
    // in multiplayer the reliable payload is capped by the net_maxfragments cvar
    if (NET_IsMultiplayer()) {
        m_MaxReliablePayloadSize = net_blocksize.GetInt();
    } else {
        m_MaxReliablePayloadSize = NET_MAX_PAYLOAD;
    }
    FlowReset();
    // tell message handler to register known netmessages
    m_MessageHandler->ConnectionStart(this);
}
// Returns the TCP-stream state machine to its idle state: no pending command,
// nothing received, empty target file name.
void CNetChan::ResetStreaming(void) {
    m_SteamFile[0] = 0;
    m_StreamSeqNr = 0;
    m_StreamReceived = 0;
    m_StreamLength = 0;
    m_SteamType = STREAM_CMD_NONE;
}
// Begins TCP streaming using 'challengeNr' for authentication. In
// non-multiplayer mode streaming goes over loopback buffers and no socket is
// created. Returns true on success (i.e. a valid stream socket, or SP mode).
bool CNetChan::StartStreaming(unsigned int challengeNr) {
    // reset stream state machine
    ResetStreaming();
    m_ChallengeNr = challengeNr;
    if (!NET_IsMultiplayer()) {
        m_StreamSocket = 0;
        return true; // streaming is done via loopback buffers in SP mode
    }
#ifdef _XBOX
    // We don't want to go into here because it'll eat up 192k extra memory in the client and server's m_StreamData.
    Error( "StartStreaming not allowed on XBOX." );
#endif
    MEM_ALLOC_CREDIT();
    m_StreamSocket = NET_ConnectSocket(m_Socket, remote_address);
    m_StreamData.EnsureCapacity(NET_MAX_PAYLOAD);
    return (m_StreamSocket != 0);
}
// Store the challenge number that is appended to every outgoing packet.
void CNetChan::SetChallengeNr(unsigned int chnr) {
    m_ChallengeNr = chnr;
}
// Returns the challenge number currently attached to outgoing packets.
unsigned int CNetChan::GetChallengeNr(void) const {
    return m_ChallengeNr;
}
// Copy the current outgoing / incoming / acknowledged sequence counters into
// the caller-supplied references.
void CNetChan::GetSequenceData(int &nOutSequenceNr, int &nInSequenceNr, int &nOutSequenceNrAck) {
    nOutSequenceNrAck = m_nOutSequenceNrAck;
    nInSequenceNr = m_nInSequenceNr;
    nOutSequenceNr = m_nOutSequenceNr;
}
// Overwrite the sequence counters; only legal during demo playback
// (enforced by the assert).
void CNetChan::SetSequenceData(int nOutSequenceNr, int nInSequenceNr, int nOutSequenceNrAck) {
    Assert(IsPlayback());
    m_nOutSequenceNrAck = nOutSequenceNrAck;
    m_nOutSequenceNr = nOutSequenceNr;
    m_nInSequenceNr = nInSequenceNr;
}
// Attach (or detach, with NULL) the demo recorder that mirrors this
// channel's incoming message stream.
void CNetChan::SetDemoRecorder(IDemoRecorder *recorder) {
    m_DemoRecorder = recorder;
}
//-----------------------------------------------------------------------------
// Purpose: Set the connection timeout in seconds.
// seconds > 3600         -> capped to one hour
// seconds <= 0           -> never time out (used for demo files)
// otherwise              -> floored to CONNECTION_PROBLEM_TIME
//-----------------------------------------------------------------------------
void CNetChan::SetTimeout(float seconds) {
    m_Timeout = seconds;
    if (m_Timeout > 3600.0f) {
        m_Timeout = 3600.0f; // 1 hour maximum
        return;
    }
    if (m_Timeout <= 0.0f) {
        m_Timeout = -1.0f; // never time out (demo files)
        return;
    }
    if (m_Timeout < CONNECTION_PROBLEM_TIME) {
        m_Timeout = CONNECTION_PROBLEM_TIME; // allow at least this minimum
    }
}
//-----------------------------------------------------------------------------
// Purpose: Resize one of the three outgoing write buffers (reliable,
//          unreliable, or voice) while preserving any bits already written.
// bReliable - resize the reliable stream buffer
// nBytes    - requested size; clamped to [NET_MAX_DATAGRAM_PAYLOAD, NET_MAX_PAYLOAD]
// bVoice    - when bReliable is false, selects the voice buffer instead of
//             the unreliable buffer
// Bails out (with a console message) if the already-written data would not
// fit into the requested size.
//-----------------------------------------------------------------------------
void CNetChan::SetMaxBufferSize(bool bReliable, int nBytes, bool bVoice) {
    // force min/max sizes 4-96kB
    nBytes = clamp(nBytes, NET_MAX_DATAGRAM_PAYLOAD, NET_MAX_PAYLOAD);
    // select the stream/backing-store pair to resize
    bf_write *stream;
    CUtlMemory<byte> *buffer;
    if (bReliable) {
        stream = &m_StreamReliable;
        buffer = &m_ReliableDataBuffer;
    } else if (bVoice) {
        stream = &m_StreamVoice;
        buffer = &m_VoiceDataBuffer;
    } else {
        stream = &m_StreamUnreliable;
        buffer = &m_UnreliableDataBuffer;
    }
    if (buffer->Count() == nBytes)
        return; // already the requested size
    // stash any data already written so it survives the reallocation
    byte *copybuf = NULL;
    int copybits = stream->GetNumBitsWritten();
    int copybytes = Bits2Bytes(copybits);
    if (copybytes >= nBytes) {
        // fixed typo in diagnostic ("cant preserve exiting data")
        ConMsg("CNetChan::SetMaxBufferSize: can't preserve existing data %i>%i.\n", copybytes, nBytes);
        return;
    }
    if (copybits > 0) {
        copybuf = new byte[copybytes];
        Q_memcpy(copybuf, buffer->Base(), copybytes);
    }
    buffer->Purge();
    MEM_ALLOC_CREDIT();
    buffer->EnsureCapacity(nBytes);
    if (copybits > 0) {
        Q_memcpy(buffer->Base(), copybuf, copybytes);
        delete[] copybuf;
        copybuf = NULL;
    }
    // re-point the writer at the (possibly relocated) buffer, keeping the
    // current bit position
    stream->StartWriting(buffer->Base(), nBytes, copybits);
}
// Toggle background file transmission: when enabled, file-stream transfers
// send only one fragment per packet (see UpdateSubChannels).
void CNetChan::SetFileTransmissionMode(bool bBackgroundMode) {
    m_bFileBackgroundTranmission = bBackgroundMode;
}
// Record this channel's compression preference.
void CNetChan::SetCompressionMode(bool bUseCompression) {
    m_bUseCompression = bUseCompression;
}
// Set the outgoing byte rate used for send throttling, clamped to the
// engine's allowed window.
void CNetChan::SetDataRate(float rate) {
    const float fMinRate = (float) MIN_RATE;
    const float fMaxRate = (float) MAX_RATE;
    m_Rate = clamp(rate, fMinRate, fMaxRate);
}
// Channel name as supplied to Setup().
const char *CNetChan::GetName() const {
    return m_Name;
}
// Printable form of the remote address.
const char *CNetChan::GetAddress() const {
    return remote_address.ToString();
}
// Packet-drop count recorded for this channel.
int CNetChan::GetDropNumber() const {
    return m_PacketDrop;
}
/*
===============
CNetChan::CanPacket
Returns true if the bandwidth choke isn't active
================
*/
//-----------------------------------------------------------------------------
// Purpose: Returns true if a packet may be sent right now (bandwidth choke
//          is not active and no split packets are still queued).
//-----------------------------------------------------------------------------
bool CNetChan::CanPacket() const {
    // Loopback traffic is never throttled unless net_chokeloopback asks for it.
    if (remote_address.IsLoopback() && !net_chokeloopback.GetInt()) {
        return true;
    }
    // Queued (split) packets must drain before anything new goes out.
    if (HasQueuedPackets()) {
        return false;
    }
    // Otherwise we may send once the rate-limiter clear time has passed.
    return (m_fClearTime - net_time) < 0.001;
}
// Returns true if a demo is being played back. Dedicated-server (SWDS) and
// Xbox builds have no demo player, so they always return false.
bool CNetChan::IsPlayback(void) const {
#if !defined(SWDS) && !defined(_XBOX)
return demoplayer->IsPlayingBack();
#else
return false;
#endif
}
// Zero out both flow-graph rings (incoming/outgoing) and the per-group
// message statistics.
void CNetChan::FlowReset(void) {
    Q_memset(m_MsgStats, 0, sizeof(m_MsgStats));
    Q_memset(m_DataFlow, 0, sizeof(m_DataFlow));
}
//-----------------------------------------------------------------------------
// Purpose: Record a newly seen packet in the flow-statistics ring buffer,
//          marking any sequence-number gap as invalid (choked/dropped)
//          frames, and update latency for the packet being acknowledged.
// flow   - FLOW_OUTGOING or FLOW_INCOMING
// seqnr  - sequence number of this packet
// acknr  - sequence number the remote end acknowledges
// nChoked/nDropped - counts reported for the gap; nSize - packet size (bytes)
//-----------------------------------------------------------------------------
void CNetChan::FlowNewPacket(int flow, int seqnr, int acknr, int nChoked, int nDropped, int nSize) {
netflow_t *pflow = &m_DataFlow[flow];
// if frame_number != ( current + 1 ) mark frames between as invalid
netframe_t *pframe = NULL;
if (seqnr > pflow->currentindex) {
//
// The following loop must execute no more than NET_FRAMES_BACKUP times
// since that's the amount of storage in frame_headers & frames arrays,
// a malformed client packet pushing "seqnr" by 1,000,000 can cause this
// loop to watchdog.
//
for (int i = pflow->currentindex + 1, numPacketFramesOverflow = 0;
(i <= seqnr) && (numPacketFramesOverflow < NET_FRAMES_BACKUP);
++i, ++numPacketFramesOverflow) {
int nBackTrack = seqnr - i;
pframe = &pflow->frames[i & NET_FRAMES_MASK];
pframe->time = net_time; // now
pframe->valid = false;
pframe->size = 0;
pframe->latency = -1.0f; // not acknowledged yet
pframe->avg_latency = GetAvgLatency(FLOW_OUTGOING);
pframe->choked = 0; // not acknowledged yet
pframe->dropped = 0;
pframe->m_flInterpolationAmount = 0.0f;
Q_memset(&pframe->msggroups, 0, sizeof(pframe->msggroups));
if (nBackTrack < (nChoked + nDropped)) {
if (nBackTrack < nChoked) {
pframe->choked = 1;
} else {
pframe->dropped = 1;
}
}
}
// pframe is non-NULL here: seqnr > currentindex guarantees the loop
// above ran at least once, so this finalizes the newest frame
pframe->dropped = nDropped;
pframe->choked = nChoked;
pframe->size = nSize;
pframe->valid = true;
pframe->avg_latency = GetAvgLatency(FLOW_OUTGOING);
pframe->m_flInterpolationAmount = m_flInterpolationAmount;
} else {
#if defined( SWDS )
Assert(seqnr > pflow->currentindex);
#else
Assert(demoplayer->IsPlayingBack() || seqnr > pflow->currentindex);
#endif
}
pflow->totalpackets++;
pflow->currentindex = seqnr;
pflow->currentframe = pframe;
// updated ping for acknowledged packet
int aflow = (flow == FLOW_OUTGOING) ? FLOW_INCOMING : FLOW_OUTGOING;
if (acknr <= (m_DataFlow[aflow].currentindex - NET_FRAMES_BACKUP))
return; // acknowledged packet isn't in backup buffer anymore
netframe_t *aframe = &m_DataFlow[aflow].frames[acknr & NET_FRAMES_MASK];
if (aframe->valid && aframe->latency == -1.0f) {
// update ping for acknowledged packet, if not already acknowledged before
aframe->latency = net_time - aframe->time;
if (aframe->latency < 0.0f)
aframe->latency = 0.0f;
}
}
//-----------------------------------------------------------------------------
// Purpose: Accumulate bytes for one flow direction and, at most once per
//          FLOW_INTERVAL seconds, recompute the smoothed bandwidth, packet
//          rate, loss, choke and latency statistics from the frame ring.
//          Averages use exponential smoothing with factor FLOW_AVG.
// flow     - FLOW_OUTGOING or FLOW_INCOMING
// addbytes - bytes to add to the running total for this flow
//-----------------------------------------------------------------------------
void CNetChan::FlowUpdate(int flow, int addbytes) {
netflow_t *pflow = &m_DataFlow[flow];
pflow->totalbytes += addbytes;
// throttle: only recompute once per FLOW_INTERVAL
if (pflow->nextcompute > net_time)
return;
pflow->nextcompute = net_time + FLOW_INTERVAL;
int totalvalid = 0;
int totalinvalid = 0;
int totalbytes = 0;
float totallatency = 0.0f;
int totallatencycount = 0;
int totalchoked = 0;
float starttime = FLT_MAX;
float endtime = 0.0f;
netframe_t *pcurr;
// single pass over the frame ring gathering raw totals
for (int i = 0; i < NET_FRAMES_BACKUP; i++) {
// Most recent message then backward from there
pcurr = &pflow->frames[i];
if (pcurr->valid) {
if (pcurr->time < starttime)
starttime = pcurr->time;
if (pcurr->time > endtime)
endtime = pcurr->time;
totalvalid++;
totalchoked += pcurr->choked;
totalbytes += pcurr->size;
if (pcurr->latency > -1.0f) {
totallatency += pcurr->latency;
totallatencycount++;
}
} else {
totalinvalid++;
}
}
float totaltime = endtime - starttime;
if (totaltime > 0.0f) {
pflow->avgbytespersec *= FLOW_AVG;
pflow->avgbytespersec += (1.0f - FLOW_AVG) * ((float) totalbytes / totaltime);
pflow->avgpacketspersec *= FLOW_AVG;
pflow->avgpacketspersec += (1.0f - FLOW_AVG) * ((float) totalvalid / totaltime);
}
int totalPackets = totalvalid + totalinvalid;
if (totalPackets > 0) {
pflow->avgloss *= FLOW_AVG;
pflow->avgloss += (1.0f - FLOW_AVG) * ((float) (totalinvalid - totalchoked) / totalPackets);
if (totalinvalid - totalchoked < 0)
pflow->avgloss = 0; // snap loss to zero if nothing lost over last ticks
pflow->avgchoke *= FLOW_AVG;
pflow->avgchoke += (1.0f - FLOW_AVG) * ((float) totalchoked / totalPackets);
if (totalchoked <= 0)
pflow->avgchoke = 0; // snap choke to zero if nothing lost over last ticks
} else {
pflow->avgloss = 0;
pflow->avgchoke = 0;
}
if (totallatencycount > 0) {
float newping = totallatency / totallatencycount;
pflow->latency = newping;
pflow->avglatency *= FLOW_AVG;
pflow->avglatency += (1.0f - FLOW_AVG) * newping;
}
}
// Mark the current update as choked: burn a sequence number (so move
// commands that use sequence numbers stay in sync) and count the
// suppressed packet so the remote end can be told about it.
void CNetChan::SetChoked(void) {
    ++m_nChokedPackets;
    ++m_nOutSequenceNr;
}
// Send one packet now. When onlyReliable is set, pending unreliable data
// is discarded first. Returns true if SendDatagram produced a packet.
bool CNetChan::Transmit(bool onlyReliable) {
    if (onlyReliable) {
        m_StreamUnreliable.Reset();
    }
    return SendDatagram(NULL) != 0;
}
//-----------------------------------------------------------------------------
// Purpose: Returns true if the named file is already queued for transfer on
//          any stream. An empty/NULL name also reports true so callers will
//          not try to schedule it.
//-----------------------------------------------------------------------------
bool CNetChan::IsFileInWaitingList(const char *filename) {
    if (!filename || !filename[0]) {
        return true;
    }
    // scan every stream's pending-transfer queue for a matching name
    for (int stream = 0; stream < MAX_STREAMS; ++stream) {
        const int numPending = m_WaitingList[stream].Count();
        for (int slot = 0; slot < numPending; ++slot) {
            if (Q_strcmp(m_WaitingList[stream][slot]->filename, filename) == 0) {
                return true; // already in list
            }
        }
    }
    return false; // not queued anywhere
}
void CNetChan::RemoveHeadInWaitingList(int nList) {
Assert(m_WaitingList[nList].Count());
dataFragments_t *data = m_WaitingList[nList][0]; // get head
if (data->buffer)
delete[] data->buffer; // free data buffer
if (data->file != FILESYSTEM_INVALID_HANDLE) {
g_pFileSystem->Close(data->file);
data->file = FILESYSTEM_INVALID_HANDLE;
}
// data->fragments.Purge();
m_WaitingList[nList].FindAndRemove(data); // remove from list
delete data; //free structure itself
}
//-----------------------------------------------------------------------------
// Purpose: Queue the reliable data currently held in 'buffer' for fragmented
//          delivery on the given stream. If the waiting list already has a
//          not-yet-sent tail entry, the new data is appended to it (growing
//          its heap buffer); otherwise a fresh dataFragments_t is allocated.
// Returns true (allocation failures are not reported).
//-----------------------------------------------------------------------------
bool CNetChan::CreateFragmentsFromBuffer(bf_write *buffer, int stream) {
VPROF_BUDGET("CNetChan::CreateFragmentsFromBuffer", VPROF_BUDGETGROUP_OTHER_NETWORKING);
bf_write bfwrite;
dataFragments_t *data = NULL;
// if we have more than one item in the waiting list, try to add the
// reliable data to the last item. that doesn't work with the first item
// since it may have been already send and is waiting for acknowledge
int count = m_WaitingList[stream].Count();
if (count > 1) {
// get last item in waiting list
data = m_WaitingList[stream][count - 1];
int totalBytes = Bits2Bytes(data->bits + buffer->GetNumBitsWritten());
totalBytes = PAD_NUMBER(totalBytes, 4); // align to 4 bytes boundary
if (totalBytes < NET_MAX_PAYLOAD && data->buffer) {
// we have enough space for it, create new larger mem buffer
char *newBuf = new char[totalBytes];
Q_memcpy(newBuf, data->buffer, data->bytes);
delete[] data->buffer; // free old buffer
data->buffer = newBuf; // set new buffer
bfwrite.StartWriting(newBuf, totalBytes, data->bits);
} else {
data = NULL; // reset to NULL
}
}
// if not added to existing item, create a new reliable data waiting buffer
if (!data) {
int totalBytes = Bits2Bytes(buffer->GetNumBitsWritten());
totalBytes = PAD_NUMBER(totalBytes, 4); // align to 4 bytes boundary
data = new dataFragments_t;
data->bytes = 0; // not filled yet
data->bits = 0;
data->buffer = new char[totalBytes];
data->isCompressed = false;
data->nUncompressedSize = 0;
data->file = FILESYSTEM_INVALID_HANDLE;
data->filename[0] = 0;
bfwrite.StartWriting(data->buffer, totalBytes);
m_WaitingList[stream].AddToTail(data); // that's it for now
}
// write new reliable data to buffer
bfwrite.WriteBits(buffer->GetData(), buffer->GetNumBitsWritten());
// fill last bits in last byte with NOP if necessary
int nRemainingBits = bfwrite.GetNumBitsWritten() % 8;
if (nRemainingBits > 0 && nRemainingBits <= (8 - NETMSG_TYPE_BITS)) {
bfwrite.WriteUBitLong(net_NOP, NETMSG_TYPE_BITS);
}
// update bit length
data->bits += buffer->GetNumBitsWritten();
data->bytes = Bits2Bytes(data->bits);
// check if send as stream or with snapshot
data->asTCP = m_StreamActive && (data->bytes > m_MaxReliablePayloadSize);
// calc number of fragments needed
data->numFragments = BYTES2FRAGMENTS(data->bytes);
data->ackedFragments = 0;
data->pendingFragments = 0;
return true;
}
//-----------------------------------------------------------------------------
// Purpose: Queue a file for fragmented reliable transfer on the given stream.
//          Rejects missing files and files over net_maxfilesize (MB) or
//          MAX_FILE_SIZE bytes. The file handle is kept open until the entry
//          is removed from the waiting list (see RemoveHeadInWaitingList).
// transferID - caller-chosen id echoed back to identify the transfer.
// Returns true if the file was queued (or was already queued).
//-----------------------------------------------------------------------------
bool CNetChan::CreateFragmentsFromFile(const char *filename, int stream, unsigned int transferID) {
if (IsFileInWaitingList(filename))
return true; // already scheduled for upload
const char *pPathID = "GAME";
if (!g_pFileSystem->FileExists(filename, pPathID)) {
ConMsg("CreateFragmentsFromFile: '%s' doesn't exist.\n", filename);
return false;
}
int totalBytes = g_pFileSystem->Size(filename, pPathID);
if (totalBytes >= (net_maxfilesize.GetInt() * 1024 * 1024)) {
ConMsg("CreateFragmentsFromFile: '%s' size exceeds net_maxfilesize limit (%i MB).\n", filename,
net_maxfilesize.GetInt());
return false;
}
if (totalBytes >= MAX_FILE_SIZE) {
ConMsg("CreateFragmentsFromFile: '%s' too big (max %i bytes).\n", filename, MAX_FILE_SIZE);
return false;
}
dataFragments_t *data = new dataFragments_t;
data->bytes = totalBytes;
data->bits = data->bytes * 8;
data->buffer = NULL;
data->isCompressed = false;
data->nUncompressedSize = 0;
data->file = g_pFileSystem->Open(filename, "rb", pPathID);
if (data->file == FILESYSTEM_INVALID_HANDLE) {
ConMsg("CreateFragmentsFromFile: couldn't open '%s'.\n", filename);
delete data;
return false;
}
data->transferID = transferID;
Q_strncpy(data->filename, filename, sizeof(data->filename));
m_WaitingList[stream].AddToTail(data); // that's it for now
// check if send as stream or with snapshot
data->asTCP = false; // m_StreamActive && ( Bits2Bytes(data->length) > m_MaxReliablePayloadSize );
// calc number of fragments needed
data->numFragments = BYTES2FRAGMENTS(data->bytes);
data->ackedFragments = 0;
data->pendingFragments = 0;
return true;
}
void CNetChan::SendTCPData(void) {
if (m_WaitingList[FRAG_NORMAL_STREAM].Count() == 0)
return; // nothing in line
dataFragments_t *data = m_WaitingList[FRAG_NORMAL_STREAM][0];
if (!data->asTCP)
return; // not as TCP
if (data->pendingFragments > 0)
return; // already send, wait for ACK
// OK send it now
SendReliableViaStream(data);
}
//-----------------------------------------------------------------------------
// Purpose: Serialize the next pending subchannel's fragments (for both the
//          normal and file streams) into the outgoing packet buffer,
//          including the single-block / multi-fragment header, compression
//          info and (for files) the transfer id and name.
// Returns true if any subchannel data was written (packet is "reliable").
//-----------------------------------------------------------------------------
bool CNetChan::SendSubChannelData(bf_write &buf) {
VPROF_BUDGET("CNetChan::SendSubChannelData", VPROF_BUDGETGROUP_OTHER_NETWORKING);
subChannel_s *subChan = NULL;
int i;
CompressFragments();
SendTCPData();
UpdateSubChannels();
// find subchannel with data to send/resend:
for (i = 0; i < MAX_SUBCHANNELS; i++) {
subChan = &m_SubChannels[i];
if (subChan->state == SUBCHANNEL_TOSEND)
break;
}
if (i == MAX_SUBCHANNELS)
return false; // no data to send in any subchannel
// first write subchannel index
buf.WriteUBitLong(i, 3);
// write fragments for both streams
for (i = 0; i < MAX_STREAMS; i++) {
if (subChan->numFragments[i] == 0) {
buf.WriteOneBit(0); // no data for this stream
continue;
}
dataFragments_t *data = m_WaitingList[i][0];
buf.WriteOneBit(1); // data follows:
unsigned int offset = subChan->startFraggment[i] * FRAGMENT_SIZE;
unsigned int length = subChan->numFragments[i] * FRAGMENT_SIZE;
if ((subChan->startFraggment[i] + subChan->numFragments[i]) == data->numFragments) {
// we are sending the last fragment, adjust length
int rest = FRAGMENT_SIZE - (data->bytes % FRAGMENT_SIZE);
if (rest < FRAGMENT_SIZE)
length -= rest;
}
// if all fragments can be send within a single packet, avoid overhead (if not a file)
bool bSingleBlock = (subChan->numFragments[i] == data->numFragments) &&
(data->file == FILESYSTEM_INVALID_HANDLE);
if (bSingleBlock) {
Assert(length == data->bytes);
Assert(length < NET_MAX_PAYLOAD);
Assert(offset == 0);
buf.WriteOneBit(0); // single block bit
// data compressed ?
if (data->isCompressed) {
buf.WriteOneBit(1);
buf.WriteUBitLong(data->nUncompressedSize, MAX_FILE_SIZE_BITS);
} else {
buf.WriteOneBit(0);
}
buf.WriteVarInt32(data->bytes);
} else {
buf.WriteOneBit(1); // uses fragments with start fragment offset byte
buf.WriteUBitLong(subChan->startFraggment[i], (MAX_FILE_SIZE_BITS - FRAGMENT_BITS));
buf.WriteUBitLong(subChan->numFragments[i], 3);
if (offset == 0) {
// this is the first fragment, write header info
if (data->file != FILESYSTEM_INVALID_HANDLE) {
buf.WriteOneBit(1); // file transmission net message stream
buf.WriteUBitLong(data->transferID, 32);
buf.WriteString(data->filename);
} else {
buf.WriteOneBit(0); // normal net message stream
}
// data compressed ?
if (data->isCompressed) {
buf.WriteOneBit(1);
buf.WriteUBitLong(data->nUncompressedSize, MAX_FILE_SIZE_BITS);
} else {
buf.WriteOneBit(0);
}
buf.WriteUBitLong(data->bytes, MAX_FILE_SIZE_BITS); // 4MB max for files
}
}
// write fragments to buffer
if (data->buffer) {
Assert(data->file == FILESYSTEM_INVALID_HANDLE);
// send from memory block
buf.WriteBytes(data->buffer + offset, length);
} else // if ( data->file != FILESYSTEM_INVALID_HANDLE )
{
// send from file
Assert(data->file != FILESYSTEM_INVALID_HANDLE);
char *tmpbuf = new char[MAX(length, 1)];
g_pFileSystem->Seek(data->file, offset, FILESYSTEM_SEEK_HEAD);
g_pFileSystem->Read(tmpbuf, length, data->file);
buf.WriteBytes(tmpbuf, length);
delete[] tmpbuf;
}
if (net_showfragments.GetBool()) {
ConMsg("Sending subchan %i: start %i, num %i\n", subChan->index, subChan->startFraggment[i],
subChan->numFragments[i]);
}
// remember which outgoing packet carries this subchannel so its ACK
// (or loss) can be matched up later
subChan->sendSeqNr = m_nOutSequenceNr;
subChan->state = SUBCHANNEL_WAITING;
}
return true;
}
//-----------------------------------------------------------------------------
// Purpose: Read one subchannel's reliable fragment data for the given stream
//          from an incoming packet. The first fragment (offset 0) carries
//          the header (file info, compression flag, total size) and triggers
//          buffer allocation; later fragments are copied into that buffer.
//          Sizes and offsets are validated against malformed/malicious input.
// Returns false on invalid data or when a retry must be awaited.
//-----------------------------------------------------------------------------
bool CNetChan::ReadSubChannelData(bf_read &buf, int stream) {
dataFragments_t *data = &m_ReceiveList[stream]; // get list
int startFragment = 0;
int numFragments = 0;
unsigned int offset = 0;
unsigned int length = 0;
bool bSingleBlock = buf.ReadOneBit() == 0; // is single block ?
if (!bSingleBlock) {
startFragment = buf.ReadUBitLong(MAX_FILE_SIZE_BITS - FRAGMENT_BITS); // 16 MB max
numFragments = buf.ReadUBitLong(3); // 8 fragments per packet max
offset = startFragment * FRAGMENT_SIZE;
length = numFragments * FRAGMENT_SIZE;
}
if (offset == 0) // first fragment, read header info
{
data->filename[0] = 0;
data->isCompressed = false;
data->transferID = 0;
if (bSingleBlock) {
// data compressed ?
if (buf.ReadOneBit()) {
data->isCompressed = true;
data->nUncompressedSize = buf.ReadUBitLong(MAX_FILE_SIZE_BITS);
} else {
data->isCompressed = false;
}
data->bytes = buf.ReadVarInt32();
} else {
if (buf.ReadOneBit()) // is it a file ?
{
data->transferID = buf.ReadUBitLong(32);
buf.ReadString(data->filename, MAX_OSPATH);
}
// data compressed ?
if (buf.ReadOneBit()) {
data->isCompressed = true;
data->nUncompressedSize = buf.ReadUBitLong(MAX_FILE_SIZE_BITS);
} else {
data->isCompressed = false;
}
data->bytes = buf.ReadUBitLong(MAX_FILE_SIZE_BITS);
}
if (data->buffer) {
// last transmission was aborted, free data
delete[] data->buffer;
data->buffer = NULL;
ConDMsg("Fragment transmission aborted at %i/%i from %s.\n", data->ackedFragments, data->numFragments,
GetAddress());
}
data->bits = data->bytes * 8;
data->asTCP = false;
data->numFragments = BYTES2FRAGMENTS(data->bytes);
data->ackedFragments = 0;
data->file = FILESYSTEM_INVALID_HANDLE;
if (bSingleBlock) {
numFragments = data->numFragments;
length = numFragments * FRAGMENT_SIZE;
}
if (data->bytes > MAX_FILE_SIZE) {
// This can happen with the compressed path above, which uses VarInt32 rather than MAX_FILE_SIZE_BITS
Warning("Net message exceeds max size (%u / %u)\n", MAX_FILE_SIZE, data->bytes);
// Subsequent packets for this transfer will treated as invalid since we never setup a buffer.
return false;
}
if (data->isCompressed && data->nUncompressedSize > MAX_FILE_SIZE) {
// This can happen with the compressed path above, which uses VarInt32 rather than MAX_FILE_SIZE_BITS
Warning("Net message uncompressed size exceeds max size (%u / compressed %u / uncompressed %u)\n",
MAX_FILE_SIZE, data->bytes, data->nUncompressedSize);
// Subsequent packets for this transfer will treated as invalid since we never setup a buffer.
return false;
}
data->buffer = new char[PAD_NUMBER(data->bytes, 4)];
} else {
if (data->buffer == NULL) {
// This can occur if the packet containing the "header" (offset == 0) is dropped. Since we need the header to arrive we'll just wait
// for a retry
// ConDMsg("Received fragment out of order: %i/%i\n", startFragment, numFragments );
return false;
}
}
if ((startFragment + numFragments) == data->numFragments) {
// we are receiving the last fragment, adjust length
int rest = FRAGMENT_SIZE - (data->bytes % FRAGMENT_SIZE);
if (rest < FRAGMENT_SIZE)
length -= rest;
} else if ((startFragment + numFragments) > data->numFragments) {
// a malicious client can send a fragment beyond what was arranged in fragment#0 header
// old code will overrun the allocated buffer and likely cause a server crash
// it could also cause a client memory overrun because the offset can be anywhere from 0 to 16MB range
// drop the packet and wait for client to retry
ConDMsg("Received fragment chunk out of bounds: %i+%i>%i from %s\n", startFragment, numFragments,
data->numFragments, GetAddress());
return false;
}
Assert ((offset + length) <= data->bytes);
if (length == 0 || (offset + length > data->bytes)) {
delete[] data->buffer;
data->buffer = NULL;
ConMsg("Malformed fragment ofs %i len %d, buffer size %d from %s\n", offset, length, PAD_NUMBER(data->bytes, 4),
remote_address.ToString());
return false;
}
buf.ReadBytes(data->buffer + offset, length); // read data
data->ackedFragments += numFragments;
if (net_showfragments.GetBool())
ConMsg("Received fragments: start %i, num %i\n", startFragment, numFragments);
return true;
}
//-----------------------------------------------------------------------------
// Purpose: If a subchannel is free, fill it with the next unsent fragments
//          from each stream's head transfer (up to m_MaxReliablePayloadSize
//          worth) and flip the matching reliable-state bit to schedule it.
//          In file-background mode the file stream only contributes one
//          fragment per packet.
//-----------------------------------------------------------------------------
void CNetChan::UpdateSubChannels() {
// first check if there is a free subchannel
subChannel_s *freeSubChan = GetFreeSubChannel();
if (freeSubChan == NULL)
return; //all subchannels in use right now
int i, nSendMaxFragments = m_MaxReliablePayloadSize / FRAGMENT_SIZE;
bool bSendData = false;
for (i = 0; i < MAX_STREAMS; i++) {
if (m_WaitingList[i].Count() <= 0)
continue;
dataFragments_s *data = m_WaitingList[i][0]; // get head
if (data->asTCP)
continue;
int nSentFragments = data->ackedFragments + data->pendingFragments;
Assert(nSentFragments <= data->numFragments);
if (nSentFragments == data->numFragments)
continue; // all fragments already send
// how many fragments can we send ?
int numFragments = MIN(nSendMaxFragments, data->numFragments - nSentFragments);
// if we are in file background transmission mode, just send one fragment per packet
if (i == FRAG_FILE_STREAM && m_bFileBackgroundTranmission)
numFragments = MIN(1, numFragments);
// copy fragment data into subchannel
freeSubChan->startFraggment[i] = nSentFragments;
freeSubChan->numFragments[i] = numFragments;
data->pendingFragments += numFragments;
bSendData = true;
nSendMaxFragments -= numFragments;
if (nSendMaxFragments <= 0)
break;
}
if (bSendData) {
// flip channel bit
int bit = 1 << freeSubChan->index;
FLIPBIT(m_nOutReliableState, bit);
freeSubChan->state = SUBCHANNEL_TOSEND;
freeSubChan->sendSeqNr = 0;
}
}
#if 1
//-----------------------------------------------------------------------------
// Purpose: Fold a CRC32 of the buffer into a 16-bit checksum by XORing its
//          high and low halves. Used to validate packet payloads when
//          ShouldChecksumPackets() is enabled.
//-----------------------------------------------------------------------------
unsigned short BufferToShortChecksum(const void *pvData, size_t nLength) {
CRC32_t crc = CRC32_ProcessSingleBuffer(pvData, nLength);
unsigned short lowpart = (crc & 0xffff);
unsigned short highpart = ((crc >> 16) & 0xffff);
return (unsigned short) (lowpart ^ highpart);
}
#else
// If the CRC version ever is deemed too expensive, here's a quick xor version.
// It's probably not super robust.
inline unsigned short BufferToShortChecksum( const void *pvData, size_t nSize )
{
const uint32 *pData = (const uint32 *)pvData;
unsigned short us = 0;
while ( nSize >= sizeof( uint32 ) )
{
us ^= ( *pData & 0xffff );
us ^= ( ( *pData >> 16 ) & 0xffff );
nSize -= sizeof( uint32 );
pData += sizeof( uint32 );
}
const byte *pbData = (const byte *)pData;
while ( nSize > 0 )
{
us ^= *pbData;
++pbData;
--nSize;
}
return us;
}
#endif
// Keep net_minroutable available outside of debug builds so minimum-payload
// padding (see SendDatagram) can be tested; the convar forces outgoing
// server packets to be padded up to at least N bytes.
#define MIN_ROUTABLE_TESTING
#if defined( _DEBUG ) || defined( MIN_ROUTABLE_TESTING )
static ConVar net_minroutable("net_minroutable", "16", FCVAR_DEVELOPMENTONLY, "Forces larger payloads.");
#endif
/*
===============
CNetChan::SendDatagram
Tries to send an unreliable message to a connection, and handles the
transmission / retransmission of the reliable messages.
A 0 length will still generate a packet and deal with the reliable messages.
================
*/
//-----------------------------------------------------------------------------
// Purpose: Build and send one UDP packet: header (sequence numbers, flags,
//          optional checksum, reliable state, choke count, challenge), then
//          reliable subchannel data, the caller's datagram, unreliable data
//          and (PC) voice data. Updates flow stats and the rate-limiter
//          clear time used by CanPacket().
// datagram - optional payload written after the reliable data; may be NULL.
// Returns the sequence number used for this packet, or 0 on error.
//-----------------------------------------------------------------------------
int CNetChan::SendDatagram(bf_write *datagram) {
VPROF("CNetChan::SendDatagram");
ALIGN4 byte send_buf[NET_MAX_MESSAGE] ALIGN4_POST;
#ifndef NO_VCR
if (vcr_verbose.GetInt() && datagram && datagram->GetNumBytesWritten() > 0)
VCRGenericValueVerify("datagram", datagram->GetBasePointer(), datagram->GetNumBytesWritten() - 1);
#endif
// Make sure for the client that the max routable payload size is up to date
if (m_Socket == NS_CLIENT) {
if (net_maxroutable.GetInt() != GetMaxRoutablePayloadSize()) {
SetMaxRoutablePayloadSize(net_maxroutable.GetInt());
}
}
// first increase out sequence number
// check, if fake client, then fake send also
if (remote_address.GetType() == NA_NULL) {
// this is a demo channel, fake sending all data
m_fClearTime = 0.0; // no bandwidth delay
m_nChokedPackets = 0; // Reset choke state
m_StreamReliable.Reset(); // clear current reliable buffer
m_StreamUnreliable.Reset(); // clear current unrelaible buffer
m_nOutSequenceNr++;
return m_nOutSequenceNr - 1;
}
// process all new and pending reliable data, return true if reliable data should
// been send with this packet
if (m_StreamReliable.IsOverflowed()) {
ConMsg("%s:send reliable stream overflow\n", remote_address.ToString());
return 0;
} else if (m_StreamReliable.GetNumBitsWritten() > 0) {
CreateFragmentsFromBuffer(&m_StreamReliable, FRAG_NORMAL_STREAM);
m_StreamReliable.Reset();
}
bf_write send("CNetChan_TransmitBits->send", send_buf, sizeof(send_buf));
// Prepare the packet header
// build packet flags
unsigned char flags = 0;
// start writing packet
send.WriteLong(m_nOutSequenceNr);
send.WriteLong(m_nInSequenceNr);
bf_write flagsPos = send; // remember flags byte position
send.WriteByte(0); // write correct flags value later
if (ShouldChecksumPackets()) {
send.WriteShort(0); // write correct checksum later
Assert(!(send.GetNumBitsWritten() % 8));
}
// Note, this only matters on the PC
int nCheckSumStart = send.GetNumBytesWritten();
send.WriteByte(m_nInReliableState);
if (m_nChokedPackets > 0) {
flags |= PACKET_FLAG_CHOKED;
send.WriteByte(m_nChokedPackets & 0xFF); // send number of choked packets
}
// always append a challenge number
flags |= PACKET_FLAG_CHALLENGE;
// append the challenge number itself right on the end
send.WriteLong(m_ChallengeNr);
if (SendSubChannelData(send)) {
flags |= PACKET_FLAG_RELIABLE;
}
// Is there room for given datagram data. the datagram data
// is somewhat more important than the normal unreliable data
// this is done to allow some kind of snapshot behavior
// whether all data in datagram is transmitted or none.
if (datagram) {
if (datagram->GetNumBitsWritten() < send.GetNumBitsLeft()) {
send.WriteBits(datagram->GetData(), datagram->GetNumBitsWritten());
} else {
ConDMsg("CNetChan::SendDatagram: data would overfow, ignoring\n");
}
}
// Is there room for the unreliable payload?
if (m_StreamUnreliable.GetNumBitsWritten() < send.GetNumBitsLeft()) {
send.WriteBits(m_StreamUnreliable.GetData(), m_StreamUnreliable.GetNumBitsWritten());
} else {
ConDMsg("CNetChan::SendDatagram: Unreliable would overfow, ignoring\n");
}
m_StreamUnreliable.Reset(); // clear unreliable data buffer
// On the PC the voice data is in the main packet
if (!IsX360() &&
m_StreamVoice.GetNumBitsWritten() > 0 && m_StreamVoice.GetNumBitsWritten() < send.GetNumBitsLeft()) {
send.WriteBits(m_StreamVoice.GetData(), m_StreamVoice.GetNumBitsWritten());
m_StreamVoice.Reset();
}
int nMinRoutablePayload = MIN_ROUTABLE_PAYLOAD;
#if defined( _DEBUG ) || defined( MIN_ROUTABLE_TESTING )
if (m_Socket == NS_SERVER) {
nMinRoutablePayload = net_minroutable.GetInt();
}
#endif
// Deal with packets that are too small for some networks
while (send.GetNumBytesWritten() < nMinRoutablePayload) {
// Go ahead and pad some bits as long as needed
send.WriteUBitLong(net_NOP, NETMSG_TYPE_BITS);
}
// Make sure we have enough bits to read a final net_NOP opcode before compressing
int nRemainingBits = send.GetNumBitsWritten() % 8;
if (nRemainingBits > 0 && nRemainingBits <= (8 - NETMSG_TYPE_BITS)) {
send.WriteUBitLong(net_NOP, NETMSG_TYPE_BITS);
}
// if ( IsX360() )
{
// Now round up to byte boundary
nRemainingBits = send.GetNumBitsWritten() % 8;
if (nRemainingBits > 0) {
int nPadBits = 8 - nRemainingBits;
flags |= ENCODE_PAD_BITS(nPadBits);
// Pad with ones
if (nPadBits > 0) {
unsigned int unOnes = GetBitForBitnum(nPadBits) - 1;
send.WriteUBitLong(unOnes, nPadBits);
}
}
}
int nBitsPerPayload = net_blocksize.GetInt() * 8;
// FIXME: This isn't actually correct since compression might make the main payload usage a bit smaller
// On 360, only add voice data if the packet isn't going to be split
bool bSendVoice = IsX360() && (m_StreamVoice.GetNumBitsWritten() > 0 &&
m_StreamVoice.GetNumBitsWritten() + send.GetNumBitsWritten() < nBitsPerPayload);
bool bClearVoice = (bSendVoice || m_StreamVoice.GetNumBitsWritten() >= nBitsPerPayload);
bool bCompress = false;
if (net_compresspackets.GetBool()) {
if (send.GetNumBytesWritten() >= net_compresspackets_minsize.GetInt()) {
bCompress = true;
}
}
// write correct flags value and the checksum
flagsPos.WriteByte(flags);
// Compute checksum (must be aligned to a byte boundary!!)
if (ShouldChecksumPackets()) {
const void *pvData = send.GetData() + nCheckSumStart;
Assert(!(send.GetNumBitsWritten() % 8));
int nCheckSumBytes = send.GetNumBytesWritten() - nCheckSumStart;
unsigned short usCheckSum = BufferToShortChecksum(pvData, nCheckSumBytes);
flagsPos.WriteUBitLong(usCheckSum, 16);
}
// Send the datagram
int bytesSent = NET_SendPacket(this, m_Socket, remote_address, send.GetData(), send.GetNumBytesWritten(),
bSendVoice ? &m_StreamVoice : 0, bCompress);
if (bClearVoice || !IsX360()) {
m_StreamVoice.Reset();
}
if (net_showudp.GetInt() && net_showudp.GetInt() != 2) {
int mask = 63;
char comp[64] = {0};
if (net_compresspackets.GetBool() &&
bytesSent &&
(bytesSent < send.GetNumBytesWritten())) {
Q_snprintf(comp, sizeof(comp), " compression=%5u [%5.2f %%]", bytesSent,
100.0f * float(bytesSent) / float(send.GetNumBytesWritten()));
}
ConMsg("UDP -> %12.12s: sz=%5i seq=%5i ack=%5i rel=%1i ch=%1i tm=%f rt=%f%s\n", GetName(),
send.GetNumBytesWritten(), (m_nOutSequenceNr) & mask, m_nInSequenceNr & mask,
(flags & PACKET_FLAG_RELIABLE) ? 1 : 0, flags & PACKET_FLAG_CHALLENGE ? 1 : 0, (float) net_time,
(float) Plat_FloatTime(), comp);
}
// update stats
int nTotalSize = bytesSent + UDP_HEADER_SIZE;
FlowNewPacket(FLOW_OUTGOING, m_nOutSequenceNr, m_nInSequenceNr, m_nChokedPackets, 0, nTotalSize);
FlowUpdate(FLOW_OUTGOING, nTotalSize);
// UNDONE(mastercoms): clear time should be in wall time
#if 0
if ( m_fClearTime < net_time )
{
m_fClearTime = net_time;
}
#endif
// calculate net_time when channel will be ready for next packet (throttling)
const std::size_t nMaxRoutableSize = GetMaxRoutablePayloadSize();
const std::size_t nSplitPacketSize = sizeof(SPLITPACKET);
const std::size_t nPacketSize = (std::size_t) nTotalSize;
const std::size_t nSplitPackets =
nPacketSize > nMaxRoutableSize ? nPacketSize / (nMaxRoutableSize - nSplitPacketSize) : 0;
double fAddTime = ((double) nTotalSize + (double) nSplitPacketSize * (double) nSplitPackets) / m_Rate;
m_fClearTime += fAddTime;
if (net_maxcleartime.GetFloat() > 0.0f) {
double m_flLatestClearTime = net_time + net_maxcleartime.GetFloat();
if (m_fClearTime > m_flLatestClearTime) {
m_fClearTime = m_flLatestClearTime;
}
}
m_nChokedPackets = 0;
m_nOutSequenceNr++;
return m_nOutSequenceNr - 1; // return send seq nr
}
//-----------------------------------------------------------------------------
// Purpose: Handle one of the built-in control messages (net_NOP,
//          net_Disconnect, net_File).
// Returns false on disconnect or when an unknown control command arrives.
//-----------------------------------------------------------------------------
bool CNetChan::ProcessControlMessage(int cmd, bf_read &buf) {
    char string[1024];
    switch (cmd) {
        case net_NOP:
            // padding message, nothing to do
            return true;
        case net_Disconnect:
            buf.ReadString(string, sizeof(string));
            m_MessageHandler->ConnectionClosing(string);
            return false;
        case net_File: {
            unsigned int transferID = buf.ReadUBitLong(32);
            buf.ReadString(string, sizeof(string));
            // the extra bit signals "please send"; only honor transferable files
            if (buf.ReadOneBit() != 0 && IsValidFileForTransfer(string)) {
                m_MessageHandler->FileRequested(string, transferID);
            } else {
                m_MessageHandler->FileDenied(string, transferID);
            }
            return true;
        }
    }
    ConMsg("Netchannel: received bad control cmd %i from %s.\n", cmd, remote_address.ToString());
    return false;
}
//-----------------------------------------------------------------------------
// Purpose: Parse and dispatch every net message in the incoming buffer.
//          Control messages (<= net_File) are handled inline; all others are
//          looked up via FindMessage and run through their registered
//          Process handler.
// Returns false on parse/processing error, disconnect, or if the channel was
// deleted or cleared while a handler was running (in which case 'this' may
// already be destroyed - do not touch members after a false return).
//-----------------------------------------------------------------------------
bool CNetChan::ProcessMessages(bf_read &buf) {
VPROF("CNetChan::ProcessMessages");
const char *showmsgname = net_showmsg.GetString();
const char *blockmsgname = net_blockmsg.GetString();
if (showmsgname[0] == '0') {
showmsgname = NULL; // don't do strcmp all the time
}
if (blockmsgname[0] == '0') {
blockmsgname = NULL; // don't do strcmp all the time
}
if (net_showpeaks.GetInt() > 0 && net_showpeaks.GetInt() < buf.GetNumBytesLeft()) {
showmsgname = "1"; // show messages for this packet only
}
bf_read democopy = buf; // create a copy of reading buffer state for demo recording
int startbit = buf.GetNumBitsRead();
while (true) {
if (buf.IsOverflowed()) {
m_MessageHandler->ConnectionCrashed("Buffer overflow in net message");
return false;
}
// Are we at the end?
if (buf.GetNumBitsLeft() < NETMSG_TYPE_BITS) {
break;
}
unsigned char cmd = buf.ReadUBitLong(NETMSG_TYPE_BITS);
if (cmd <= net_File) {
if (!ProcessControlMessage(cmd, buf)) {
return false; // disconnect or error
}
continue;
}
// see if we have a registered message object for this type
INetMessage *netmsg = FindMessage(cmd);
if (netmsg) {
int nMsgStartBit = buf.GetNumBitsRead();
if (!netmsg->ReadFromBuffer(buf)) {
ConMsg("Netchannel: failed reading message %d from %s.\n", cmd, remote_address.ToString());
Assert (0);
return false;
}
UpdateMessageStats(netmsg->GetGroup(), buf.GetNumBitsRead() - nMsgStartBit);
if (showmsgname) {
if ((*showmsgname == '1') || !Q_stricmp(showmsgname, netmsg->GetName())) {
ConMsg("Msg from %s: %s\n", remote_address.ToString(), netmsg->ToString());
}
}
if (blockmsgname) {
if ((*blockmsgname == '1') || !Q_stricmp(blockmsgname, netmsg->GetName())) {
ConMsg("Blocking message %s\n", netmsg->ToString());
continue;
}
}
// netmessage calls the Process function that was registered by it's MessageHandler
m_bProcessingMessages = true;
bool bRet = netmsg->Process();
m_bProcessingMessages = false;
// This means we were deleted during the processing of that message.
if (m_bShouldDelete) {
delete this;
return false;
}
// This means our message buffer was freed or invalidated during the processing of that message.
if (m_bClearedDuringProcessing) {
// Clear() was called during processing, our buffer is no longer valid
m_bClearedDuringProcessing = false;
return false;
}
if (!bRet) {
ConDMsg("Netchannel: failed processing message %s.\n", netmsg->GetName());
Assert (0);
return false;
}
if (IsOverflowed()) {
return false;
}
} else {
ConMsg("Netchannel: unknown net message (%i) from %s.\n", cmd, remote_address.ToString());
Assert (0);
return false;
}
}
#if !defined(SWDS) && !defined(_XBOX)
// all messages could be parsed, write packet to demo file
if (m_DemoRecorder && !demoplayer->IsPlayingBack()) {
// only record if any message was parsed
m_DemoRecorder->RecordMessages(democopy, buf.GetNumBitsRead() - startbit);
}
#endif
return true; // ok fine
}
// Demo playback: pulls recorded packets from the demo player and feeds them
// through the normal message-processing path, updating incoming flow stats
// as if they had arrived off the wire (client builds only; compiled out on
// dedicated server / Xbox).
void CNetChan::ProcessPlayback(void) {
#if !defined(SWDS) && !defined(_XBOX)
    netpacket_t *packet;
    while ((packet = demoplayer->ReadPacket()) != NULL) {
        // Update data flow stats
        FlowNewPacket(FLOW_INCOMING, m_nInSequenceNr, m_nOutSequenceNrAck, 0, 0, packet->wiresize);
        last_received = net_time;
        m_MessageHandler->PacketStart(m_nInSequenceNr, m_nOutSequenceNrAck);
        if (ProcessMessages(packet->message)) {
            m_MessageHandler->PacketEnd();
        } else {
            break;  // error or disconnect — stop feeding playback packets
        }
    }
#endif
}
// Returns the first subchannel not currently in use, or NULL when all
// MAX_SUBCHANNELS are busy.
CNetChan::subChannel_s *CNetChan::GetFreeSubChannel() {
    subChannel_s *pChan = m_SubChannels;
    for (int i = 0; i < MAX_SUBCHANNELS; ++i, ++pChan) {
        if (pChan->state == SUBCHANNEL_FREE)
            return pChan;
    }
    return NULL;
}
// Inspects the head transfer of the outgoing waiting list 'nList'; if every
// fragment has been acknowledged the transfer is complete and is removed
// from the list. Transfers with pending fragments are left in place.
void CNetChan::CheckWaitingList(int nList) {
    // go thru waiting lists and mark fragments send with this seqnr packet
    if (m_WaitingList[nList].Count() == 0 || m_nOutSequenceNrAck <= 0)
        return; // no data in list
    dataFragments_t *data = m_WaitingList[nList][0]; // get head
    if (data->ackedFragments == data->numFragments) {
        // all fragments were sent successfully
        if (net_showfragments.GetBool())
            ConMsg("Sending complete: %i fragments, %i bytes.\n", data->numFragments, data->bytes);
        RemoveHeadInWaitingList(nList);
        return;
    } else if (data->ackedFragments > data->numFragments) {
        // NOTE(review): over-acknowledgement is silently ignored here — the
        // diagnostic below was commented out in the original.
        //ConMsg("CheckWaitingList: invalid acknowledge fragments %i/%i.\n", data->ackedFragments, data->numFragments );
    }
    // else: still pending fragments
}
#ifdef STAGING_ONLY
// Debug console command (staging builds only): pushes an arbitrary file
// through the reliable upload path for testing.
CON_COMMAND( netchan_test_upload, "[filename]: Uploads a file to server." )
{
    if ( args.ArgC() != 2 )
    {
        Msg( "Usage: netchan_test_upload [filename]\n" );
        return;
    }
    //$ TODO: the con command system is truncating the filenames we're passing in. Need to workaround this...
    const char *filename = args.GetCommandString() + V_strlen( "netchan_test_upload " );
    Msg( "Sending '%s'\n", filename );
    bool bRet = CNetChan::TestUpload( filename );
    Msg( "%s returned %d\n", __FUNCTION__, bRet );
}
// Builds a fake in-memory dataFragments_t (small fixed payload, no file
// handle) and hands it to HandleUpload so the upload path can be exercised
// without a remote peer.
bool CNetChan::TestUpload( const char *filename )
{
    dataFragments_t data;
    static char s_buf[] = "The quick brown\nfox\n";
    data.file = FILESYSTEM_INVALID_HANDLE; // open file handle
    V_strcpy_safe( data.filename, filename ); // filename
    data.buffer = s_buf; // if NULL it's a file
    data.bytes = sizeof( s_buf ) - 1; // size in bytes
    data.bits = data.bytes * 8; // size in bits
    data.transferID = 123; // only for files
    data.isCompressed = false; // true if data is bzip compressed
    data.nUncompressedSize = data.bytes; // full size in bytes
    data.asTCP = 0; // send as TCP stream
    data.numFragments = 0; // number of total fragments
    data.ackedFragments = 0; // number of fragments send & acknowledged
    data.pendingFragments = 0; // number of fragments send, but not acknowledged yet
    return HandleUpload( &data, NULL );
}
#endif // STAGING_ONLY
// Handles a completed inbound file upload: checks sv_allowupload, validates
// the filename, and writes the in-memory buffer under the "download" search
// path (refusing to overwrite an existing file), then notifies the handler.
// Always returns true; failures are only logged.
bool CNetChan::HandleUpload(dataFragments_t *data, INetChannelHandler *MessageHandler) {
    const char *szErrorStr = NULL;
    // Cached once for the process; the cvar lookup is shared by all channels.
    static ConVar *s_pAllowUpload = g_pCVar->FindVar("sv_allowupload");
    if (!s_pAllowUpload || !s_pAllowUpload->GetBool()) {
        szErrorStr = "ignored. File uploads are disabled!";
    } else {
        // Make sure that this file is not being written to a location above the current directory, isn't in
        // writing to any locations we don't want, isn't an unsupported
        if (!CNetChan::IsValidFileForTransfer(data->filename)) {
            szErrorStr = "has invalid path or extension!";
        } else {
            // There's a special write path for this stuff
            const char *pszPathID = "download";
            // we received a file, write it to disk and notify host
            if (g_pFileSystem->FileExists(data->filename, pszPathID)) {
                szErrorStr = "already exists!";
            } else {
                // Make sure path exists
                char szParentDir[MAX_PATH];
                if (!V_ExtractFilePath(data->filename, szParentDir, sizeof(szParentDir)))
                    szParentDir[0] = '\0';
                g_pFileSystem->CreateDirHierarchy(szParentDir, pszPathID);
                // Open new file for write binary.
                data->file = g_pFileSystem->Open(data->filename, "wb", pszPathID);
                if (FILESYSTEM_INVALID_HANDLE == data->file) {
                    szErrorStr = "failed to write!";
                } else {
                    g_pFileSystem->Write(data->buffer, data->bytes, data->file);
                    g_pFileSystem->Close(data->file);
                    if (net_showfragments.GetInt() == 2) {
                        DevMsg("FileReceived: %s, %i bytes (ID %i)\n", data->filename, data->bytes, data->transferID);
                    }
                    // MessageHandler may be NULL (e.g. TestUpload in staging builds).
                    if (MessageHandler) {
                        MessageHandler->FileReceived(data->filename, data->transferID);
                    }
                }
            }
        }
    }
    if (szErrorStr) {
        ConMsg("Download file '%s' %s\n", data->filename, szErrorStr);
    }
    return true;
}
// Checks whether the inbound transfer on stream 'nList' is complete. When
// all fragments have arrived the data is decompressed (if flagged), then
// either parsed as net messages (no filename set) or written to disk as a
// file upload, and the receive buffer is released. Returns false if the
// fragment counts are inconsistent or message processing failed.
bool CNetChan::CheckReceivingList(int nList) {
    dataFragments_t *data = &m_ReceiveList[nList]; // get list
    if (data->buffer == NULL)
        return true;  // no transfer in progress on this stream
    if (data->ackedFragments < data->numFragments)
        return true;  // still waiting for more fragments
    if (data->ackedFragments > data->numFragments) {
        ConMsg("Receiving failed: too many fragments %i/%i from %s\n", data->ackedFragments, data->numFragments,
               GetAddress());
        return false;
    }
    // Got all fragments.
    if (net_showfragments.GetBool())
        ConMsg("Receiving complete: %i fragments, %i bytes\n", data->numFragments, data->bytes);
    if (data->isCompressed) {
        UncompressFragments(data);
    }
    if (!data->filename[0]) {
        // No filename: the payload is a block of net messages.
        bf_read buffer(data->buffer, data->bytes);
        if (!ProcessMessages(buffer)) // parse net message
        {
            return false; // stop reading any further
        }
    } else {
        HandleUpload(data, m_MessageHandler);
    }
    // clear receiveList
    if (data->buffer) {
        delete[] data->buffer;
        data->buffer = NULL;
    }
    return true;
}
// Reads and validates the per-packet header: sequence numbers, flags byte,
// optional checksum, reliable-subchannel state byte, optional choke count,
// and optional challenge. Rejects corrupted, stale, duplicate, unchallenged,
// or excessively-dropped packets. On success it reconciles subchannel ack
// state against the peer's view, updates the waiting lists, and records
// incoming flow statistics.
// Returns the header flags, or -1 if the packet must be discarded.
int CNetChan::ProcessPacketHeader(netpacket_t *packet) {
    // get sequence numbers
    int sequence = packet->message.ReadLong();
    int sequence_ack = packet->message.ReadLong();
    int flags = packet->message.ReadByte();
    if (ShouldChecksumPackets()) {
        unsigned short usCheckSum = (unsigned short) packet->message.ReadUBitLong(16);
        // Checksum applies to rest of packet
        Assert(!(packet->message.GetNumBitsRead() % 8));
        int nOffset = packet->message.GetNumBitsRead() >> 3;
        int nCheckSumBytes = packet->message.TotalBytesAvailable() - nOffset;
        const void *pvData = packet->message.GetBasePointer() + nOffset;
        unsigned short usDataCheckSum = BufferToShortChecksum(pvData, nCheckSumBytes);
        if (usDataCheckSum != usCheckSum) {
            ConMsg("%s:corrupted packet %i at %i\n", remote_address.ToString(), sequence, m_nInSequenceNr);
            return -1;
        }
    }
    int relState = packet->message.ReadByte(); // reliable state of 8 subchannels
    int nChoked = 0; // read later if choked flag is set
    int i, j;
    if (flags & PACKET_FLAG_CHOKED)
        nChoked = packet->message.ReadByte();
    if (flags & PACKET_FLAG_CHALLENGE) {
        unsigned int nChallenge = packet->message.ReadLong();
        if (nChallenge != m_ChallengeNr)
            return -1;
        // challenge was good, latch we saw a good one
        m_bStreamContainsChallenge = true;
    } else if (m_bStreamContainsChallenge)
        return -1; // what, no challenge in this packet but we got them before?
    // discard stale or duplicated packets
    if (sequence <= m_nInSequenceNr) {
        if (net_showdrop.GetInt()) {
            if (sequence == m_nInSequenceNr) {
                ConMsg("%s:duplicate packet %i at %i\n", remote_address.ToString(), sequence, m_nInSequenceNr);
            } else {
                ConMsg("%s:out of order packet %i at %i\n", remote_address.ToString(), sequence, m_nInSequenceNr);
            }
        }
        return -1;
    }
    //
    // dropped packets don't keep the message from being used
    //
    m_PacketDrop = sequence - (m_nInSequenceNr + nChoked + 1);
    if (m_PacketDrop > 0) {
        if (net_showdrop.GetInt()) {
            ConMsg("%s:Dropped %i packets at %i\n", remote_address.ToString(), m_PacketDrop, sequence);
        }
    }
    // net_maxpacketdrop <= 0 disables this sanity limit entirely.
    if (net_maxpacketdrop.GetInt() > 0 && m_PacketDrop > net_maxpacketdrop.GetInt()) {
        if (net_showdrop.GetInt()) {
            ConMsg("%s:Too many dropped packets (%i) at %i\n", remote_address.ToString(), m_PacketDrop, sequence);
        }
        return -1;
    }
    // Reconcile each subchannel's reliable-state bit against the peer's copy.
    for (i = 0; i < MAX_SUBCHANNELS; i++) {
        int bitmask = (1 << i);
        // data of channel i has been acknowledged
        subChannel_s *subchan = &m_SubChannels[i];
        Assert(subchan->index == i);
        if ((m_nOutReliableState & bitmask) == (relState & bitmask)) {
            if (subchan->state == SUBCHANNEL_DIRTY) {
                // subchannel was marked dirty during changelevel, waiting list is already cleared
                subchan->Free();
            } else if (subchan->sendSeqNr > sequence_ack) {
                ConMsg("%s:reliable state invalid (%i).\n", remote_address.ToString(), i);
                Assert(0);
                return -1;
            } else if (subchan->state == SUBCHANNEL_WAITING) {
                // Peer acknowledged this subchannel's fragments; credit them
                // to the head transfer of each stream's waiting list.
                for (j = 0; j < MAX_STREAMS; j++) {
                    if (subchan->numFragments[j] == 0)
                        continue;
                    Assert(m_WaitingList[j].Count() > 0);
                    dataFragments_t *data = m_WaitingList[j][0];
                    // tell waiting list, that we received the acknowledge
                    data->ackedFragments += subchan->numFragments[j];
                    data->pendingFragments -= subchan->numFragments[j];
                }
                subchan->Free(); // mark subchannel as free again
            }
        } else // subchannel doesn't match
        {
            if (subchan->sendSeqNr <= sequence_ack) {
                Assert(subchan->state != SUBCHANNEL_FREE);
                if (subchan->state == SUBCHANNEL_WAITING) {
                    if (net_showfragments.GetBool()) {
                        ConMsg("Resending subchan %i: start %i, num %i\n", subchan->index, subchan->startFraggment[0],
                               subchan->numFragments[0]);
                    }
                    subchan->state = SUBCHANNEL_TOSEND; // schedule for resend
                } else if (subchan->state == SUBCHANNEL_DIRTY) {
                    // remote host lost dirty channel data, flip bit back
                    int bit = 1 << subchan->index; // flip bit back since data was send yet
                    FLIPBIT(m_nOutReliableState, bit);
                    subchan->Free();
                }
            }
        }
    }
    m_nInSequenceNr = sequence;
    m_nOutSequenceNrAck = sequence_ack;
    ETWReadPacket(packet->from.ToString(), packet->wiresize, m_nInSequenceNr, m_nOutSequenceNr);
    // Update waiting list status
    for (i = 0; i < MAX_STREAMS; i++)
        CheckWaitingList(i);
    // Update data flow stats (use wiresize (compressed))
    FlowNewPacket(FLOW_INCOMING, m_nInSequenceNr, m_nOutSequenceNrAck, nChoked, m_PacketDrop,
                  packet->wiresize + UDP_HEADER_SIZE);
    return flags;
}
/*
=================
CNetChan::ProcessPacket

called when a new packet has arrived for this netchannel
sequence numbers are extracted, fragments/file streams stripped
and then the netmessages processed
=================
*/
void CNetChan::ProcessPacket(netpacket_t *packet, bool bHasHeader) {
    VPROF("CNetChan::ProcessPacket");
    Assert(packet);
    bf_read &msg = packet->message; // handy shortcut
    // Ignore packets that do not come from our connected peer.
    if (remote_address.IsValid() && !packet->from.CompareAdr(remote_address)) {
        return;
    }
    // Update data flow stats
    FlowUpdate(FLOW_INCOMING, packet->wiresize + UDP_HEADER_SIZE);
    int flags = 0;
    if (bHasHeader) {
        flags = ProcessPacketHeader(packet);
    }
    if (flags == -1)
        return; // invalid header/packet
    if (net_showudp.GetInt() && net_showudp.GetInt() != 3) {
        ConMsg("UDP <- %s: sz=%i seq=%i ack=%i rel=%i ch=%d, tm=%f rt=%f wire=%i\n", GetName(), packet->size,
               m_nInSequenceNr & 63, m_nOutSequenceNrAck & 63, flags & PACKET_FLAG_RELIABLE ? 1 : 0,
               flags & PACKET_FLAG_CHALLENGE ? 1 : 0, net_time, (float) Plat_FloatTime(), packet->wiresize);
    }
    last_received = net_time;
    // tell message handler that a new packet has arrived
    m_MessageHandler->PacketStart(m_nInSequenceNr, m_nOutSequenceNrAck);
    if (flags & PACKET_FLAG_RELIABLE) {
        // 3 bits select which incoming-reliable-state bit to flip on success.
        int i, bit = 1 << msg.ReadUBitLong(3);
        for (i = 0; i < MAX_STREAMS; i++) {
            if (msg.ReadOneBit() != 0) {
                if (!ReadSubChannelData(msg, i))
                    return; // error while reading fragments, drop whole packet
            }
        }
        // flip subChannel bit to signal successful receiving
        FLIPBIT(m_nInReliableState, bit);
        for (i = 0; i < MAX_STREAMS; i++) {
            if (!CheckReceivingList(i))
                return; // error while processing
        }
    }
    // Is there anything left to process?
    if (msg.GetNumBitsLeft() > 0) {
        // parse and handle all messages
        if (!ProcessMessages(msg)) {
            return; // disconnect or error
        }
    }
    // tell message handler that packet is completely parsed
    m_MessageHandler->PacketEnd();
#if !defined(SWDS) && !defined(_XBOX)
    // tell demo system that packet is completely parsed
    if (m_DemoRecorder && !demoplayer->IsPlayingBack()) {
        m_DemoRecorder->RecordPacket();
    }
#endif
}
int CNetChan::GetNumBitsWritten(bool bReliable) {
bf_write *pStream = &m_StreamUnreliable;
if (bReliable) {
pStream = &m_StreamReliable;
}
return pStream->GetNumBitsWritten();
}
// Serializes 'msg' into the voice, reliable, or unreliable outgoing stream.
// The voice flag takes precedence over the reliable flag. Returns false only
// when the write overflowed the stream AND net_droponsendoverflow is set,
// in which case the connection is reported as crashed.
bool CNetChan::SendNetMsg(INetMessage &msg, bool bForceReliable, bool bVoice) {
    if (remote_address.GetType() == NA_NULL)
        return true;  // not connected to anyone; treat as success
    bf_write *pStream = &m_StreamUnreliable;
    if (msg.IsReliable() || bForceReliable) {
        pStream = &m_StreamReliable;
    }
    if (bVoice) {
        pStream = &m_StreamVoice;
    }
    bool bResult;
#ifndef NO_VCR
    if (vcr_verbose.GetInt()) {
        // VCR verification mode: record the exact bytes each message wrote.
        int nOldBytes = pStream->GetNumBytesWritten();
        bResult = msg.WriteToBuffer(*pStream);
        int nNewBytes = pStream->GetNumBytesWritten();
        if (nNewBytes > nOldBytes) {
            VCRGenericValueVerify("NetMsg", &pStream->GetBasePointer()[nOldBytes], nNewBytes - nOldBytes - 1);
        }
    } else
#endif
    {
        bResult = msg.WriteToBuffer(*pStream);
    }
    if (!bResult) {
        Warning("SendNetMsg %s: stream[%s] buffer overflow (maxsize = %d)!\n", GetAddress(), pStream->GetDebugName(),
                (pStream->GetMaxNumBits() + 7) / 8);
        Assert(0);
        if (net_droponsendoverflow.GetBool()) {
            m_MessageHandler->ConnectionCrashed("Buffer overflow in send net message");
            return false;
        }
    }
    return bResult;
}
// Linear search of the registered message prototypes for a matching type id;
// returns NULL if no message of that type has been registered.
INetMessage *CNetChan::FindMessage(int type) {
    const int count = m_NetMessages.Count();
    for (int i = 0; i < count; ++i) {
        INetMessage *pMsg = m_NetMessages[i];
        if (pMsg->GetType() == type)
            return pMsg;
    }
    return NULL;
}
// Registers a message prototype with this channel and binds the channel to
// it. Duplicate registrations of the same type id are rejected.
bool CNetChan::RegisterMessage(INetMessage *msg) {
    Assert(msg);
    if (FindMessage(msg->GetType()) != NULL)
        return false;  // already registered
    m_NetMessages.AddToTail(msg);
    msg->SetNetChannel(this);
    return true;
}
// Queues raw bits into the reliable or unreliable outgoing stream.
// Returns true when there is nothing to do (empty input, overflowed
// unreliable input, or no connection); false when the data could not be
// buffered because the target stream is full.
bool CNetChan::SendData(bf_write &msg, bool bReliable) {
    // Always queue any pending reliable data ahead of the fragmentation buffer
    if (remote_address.GetType() == NA_NULL)
        return true;  // not connected; silently succeed
    if (msg.GetNumBitsWritten() <= 0)
        return true;  // nothing to copy
    if (msg.IsOverflowed() && !bReliable)
        return true;  // overflowed unreliable data is simply dropped
    bf_write *buf = bReliable ? &m_StreamReliable : &m_StreamUnreliable;
    if (msg.GetNumBitsWritten() > buf->GetNumBitsLeft()) {
        if (bReliable) {
            // Reliable data must never be dropped silently — report it.
            // (fixed: "reliabe" typo and missing newline in the original message)
            ConMsg("ERROR! SendData reliable data too big (%i)\n", msg.GetNumBytesWritten());
        }
        return false;
    }
    return buf->WriteBits(msg.GetData(), msg.GetNumBitsWritten());
}
// Sends a reliable data block over the TCP side-channel: a STREAM_CMD_DATA
// header carrying the byte count and transfer id, followed by the payload.
// The transfer id is the current outgoing sequence number and is echoed
// back by the peer's acknowledgement (see ProcessStream's ACKN handling).
bool CNetChan::SendReliableViaStream(dataFragments_t *data) {
    // Always queue any pending reliable data ahead of the fragmentation buffer
    ALIGN4 char headerBuf[32] ALIGN4_POST;
    bf_write header("outDataHeader", headerBuf, sizeof(headerBuf));
    data->transferID = m_nOutSequenceNr; // used for acknowledging
    data->pendingFragments = data->numFragments; // send, but not ACKed yet
    header.WriteByte(STREAM_CMD_DATA);
    header.WriteWord(data->bytes); // bytes
    header.WriteLong(data->transferID);
    if (net_showtcp.GetInt()) {
        ConMsg("TCP -> %s: sz=%i seq=%i\n", remote_address.ToString(), data->bytes, m_nOutSequenceNr);
    }
    // NOTE(review): the header's send result is discarded; only the payload
    // send is checked — confirm this is intentional.
    NET_SendStream(m_StreamSocket, (char *) header.GetData(), header.GetNumBytesWritten(), 0);
    return NET_SendStream(m_StreamSocket, data->buffer, data->bytes, 0) != -1;
}
bool CNetChan::SendReliableAcknowledge(int seqnr) {
// Always queue any pending reliable data ahead of the fragmentation buffer
ALIGN4 char headerBuf[32] ALIGN4_POST;
bf_write header("outAcknHeader", headerBuf, sizeof(headerBuf));
header.WriteByte(STREAM_CMD_ACKN);
header.WriteLong(seqnr); // used for acknowledging
if (net_showtcp.GetInt()) {
ConMsg("TCP -> %s: ACKN seq=%i\n", remote_address.ToString(), seqnr);
}
return NET_SendStream(m_StreamSocket, (char *) header.GetData(), header.GetNumBytesWritten(), 0) > 0;
}
// Pumps the secondary TCP stream. A command byte selects the mode
// (AUTH / DATA / FILE / ACKN); header and payload reads may span multiple
// calls, with progress kept in m_SteamType / m_StreamLength / m_StreamSeqNr /
// m_StreamReceived until ResetStreaming() clears them.
// Returns false when the TCP connection failed or the stream is invalid.
bool CNetChan::ProcessStream(void) {
    char cmd;
    ALIGN4 char headerBuf[512] ALIGN4_POST;
    if (!m_StreamSocket)
        return true;  // no TCP side-channel in use
    if (m_SteamType == STREAM_CMD_NONE) {
        // read command byte
        int ret = NET_ReceiveStream(m_StreamSocket, &cmd, 1, 0);
        if (ret == 0) {
            // nothing received, but ok
            return true;
        } else if (ret == -1) {
            // something failed with the TCP connection
            return false;
        }
        ResetStreaming(); // clear all state values
        m_SteamType = cmd;
    }
    bf_read header("inDataHeader", headerBuf, sizeof(headerBuf));
    // now check command type
    if (m_SteamType == STREAM_CMD_AUTH) {
        // server accepted connection, send challenge nr
        m_StreamActive = true;
        ResetStreaming();
        return SendReliableAcknowledge(m_ChallengeNr);
    }
    if ((m_SteamType == STREAM_CMD_DATA) && (m_StreamLength == 0)) {
        // read the 6-byte data header: payload size (word) + transfer id (long)
        int ret = NET_ReceiveStream(m_StreamSocket, (char *) &headerBuf, 6, 0);
        if (ret == 0) {
            // nothing received, but ok
            return true;
        } else if (ret == -1) {
            // something failed with the TCP connection
            return false;
        }
        m_StreamLength = header.ReadWord();
        m_StreamSeqNr = header.ReadLong();
        const int cMaxPayload = GetProtocolVersion() > PROTOCOL_VERSION_23 ? NET_MAX_PAYLOAD : NET_MAX_PAYLOAD_V23;
        if (m_StreamLength > cMaxPayload) {
            ConMsg("ERROR! Stream indata too big (%i)", m_StreamLength);
            return false;
        }
    }
    if ((m_SteamType == STREAM_CMD_FILE) && (m_SteamFile[0] == 0)) {
        // File transfer over the TCP stream is not implemented.
        Assert (0);
        return false;
    }
    if ((m_SteamType == STREAM_CMD_ACKN) && (m_StreamSeqNr == 0)) {
        int ret = NET_ReceiveStream(m_StreamSocket, (char *) &headerBuf, 4, 0);
        if (ret == 0) {
            // nothing received, but ok
            return true;
        } else if (ret == -1) {
            // something failed with the TCP connection
            return false;
        }
        m_StreamSeqNr = header.ReadLong();
        // FIX: an ACKN can arrive when nothing is waiting to be acknowledged
        // (hostile or confused peer); indexing an empty waiting list was
        // undefined behavior. Log it and resynchronize instead.
        if (m_WaitingList[FRAG_NORMAL_STREAM].Count() == 0) {
            ConMsg("TCP <- %s: unexpected ACKN seqnr=%i\n", remote_address.ToString(), m_StreamSeqNr);
            ResetStreaming();
            return true;
        }
        dataFragments_t *data = m_WaitingList[FRAG_NORMAL_STREAM][0];
        if (data->transferID == (unsigned) m_StreamSeqNr) {
            if (net_showtcp.GetInt()) {
                ConMsg("TCP <- %s: ACKN seqnr=%i\n", remote_address.ToString(), m_StreamSeqNr);
            }
            Assert(data->pendingFragments == data->numFragments);
            RemoveHeadInWaitingList(FRAG_NORMAL_STREAM);
        } else {
            ConMsg("TCP <- %s: invalid ACKN seqnr=%i\n", remote_address.ToString(), m_StreamSeqNr);
        }
        ResetStreaming();
        return true;
    }
    if (m_StreamReceived < m_StreamLength) {
        // read the remaining payload bytes of the current DATA command
        int bytesLeft = (m_StreamLength - m_StreamReceived);
        int bytesRecv = NET_ReceiveStream(m_StreamSocket, (char *) m_StreamData.Base() + m_StreamReceived, bytesLeft,
                                          0);
        if (bytesRecv == 0) {
            return true;
        } else if (bytesRecv == -1) {
            return false;
        }
        m_StreamReceived += bytesRecv;
        if (m_StreamReceived > m_StreamLength) {
            ConMsg("ERROR! Stream indata oversize.");
            return false;
        }
        if (m_StreamReceived == m_StreamLength) {
            // complete payload received: parse it, then acknowledge
            int ackseqnr = m_StreamSeqNr;
            bf_read buffer(m_StreamData.Base(), m_StreamLength);
            ProcessMessages(buffer);
            // reset stream state
            ResetStreaming();
            return SendReliableAcknowledge(ackseqnr); // tell sender that we have it
        }
    }
    return true;
}
int CNetChan::GetDataRate() const {
return m_Rate;
}
bool CNetChan::HasPendingReliableData(void) {
return (m_StreamReliable.GetNumBitsWritten() > 0) ||
(m_WaitingList[FRAG_NORMAL_STREAM].Count() > 0) ||
(m_WaitingList[FRAG_FILE_STREAM].Count() > 0);
}
float CNetChan::GetTimeConnected() const {
float t = net_time - connect_time;
return (t > 0.0f) ? t : 0.0f;
}
const netadr_t &CNetChan::GetRemoteAddress() const {
return remote_address;
}
INetChannelHandler *CNetChan::GetMsgHandler(void) const {
return m_MessageHandler;
}
bool CNetChan::IsTimedOut() const {
if (m_Timeout == -1.0f)
return false;
else
return (last_received + m_Timeout) < net_time;
}
bool CNetChan::IsTimingOut() const {
if (m_Timeout == -1.0f)
return false;
else
return (last_received + CONNECTION_PROBLEM_TIME) < net_time;
}
float CNetChan::GetTimeoutSeconds() const {
return m_Timeout;
}
float CNetChan::GetTimeSinceLastReceived() const {
float t = net_time - last_received;
return (t > 0.0f) ? t : 0.0f;
}
bool CNetChan::IsOverflowed() const {
return m_StreamReliable.IsOverflowed();
}
void CNetChan::Reset() {
// FlowReset();
m_StreamUnreliable.Reset(); // clear any pending unreliable data messages
m_StreamReliable.Reset(); // clear any pending reliable data messages
m_fClearTime = 0.0; // ready to send
m_nChokedPackets = 0;
m_nSplitPacketSequence = 1;
}
int CNetChan::GetSocket() const {
return m_Socket;
}
float CNetChan::GetAvgData(int flow) const {
return m_DataFlow[flow].avgbytespersec;
}
float CNetChan::GetAvgPackets(int flow) const {
return m_DataFlow[flow].avgpacketspersec;
}
//-----------------------------------------------------------------------------
// Purpose: total bytes ever recorded for the given flow direction
//-----------------------------------------------------------------------------
int CNetChan::GetTotalData(int flow) const {
    return m_DataFlow[flow].totalbytes;
}

// Latest sequence number for the given direction; 0 for an unknown flow id.
int CNetChan::GetSequenceNr(int flow) const {
    switch (flow) {
        case FLOW_OUTGOING:
            return m_nOutSequenceNr;
        case FLOW_INCOMING:
            return m_nInSequenceNr;
        default:
            return 0;
    }
}
int CNetChan::GetBufferSize(void) const {
return NET_FRAMES_BACKUP;
}
bool CNetChan::IsValidPacket(int flow, int frame_number) const {
return m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK].valid;
}
float CNetChan::GetPacketTime(int flow, int frame_number) const {
return m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK].time;
}
void CNetChan::GetPacketResponseLatency(int flow, int frame_number, int *pnLatencyMsecs, int *pnChoke) const {
const netframe_t &nf = m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK];
if (pnLatencyMsecs) {
if (nf.dropped) {
*pnLatencyMsecs = 9999;
} else {
*pnLatencyMsecs = (int) (1000.0f * nf.avg_latency);
}
}
if (pnChoke) {
*pnChoke = nf.choked;
}
}
void CNetChan::GetRemoteFramerate(float *pflFrameTime, float *pflRemoteFrameTimeStdDeviation) const {
if (pflFrameTime) {
*pflFrameTime = m_flRemoteFrameTime;
}
if (pflRemoteFrameTimeStdDeviation) {
*pflRemoteFrameTimeStdDeviation = m_flRemoteFrameTimeStdDeviation;
}
}
float CNetChan::GetLatency(int flow) const {
return m_DataFlow[flow].latency;
}
float CNetChan::GetAvgChoke(int flow) const {
return m_DataFlow[flow].avgchoke;
}
float CNetChan::GetAvgLatency(int flow) const {
return m_DataFlow[flow].avglatency;
}
float CNetChan::GetAvgLoss(int flow) const {
return m_DataFlow[flow].avgloss;
}
float CNetChan::GetTime(void) const {
return net_time;
}
// Reports fragment-transfer progress (in bytes, FRAGMENT_SIZE granularity)
// for the given direction. Returns true if a transfer is in flight.
// Progress over the TCP stream is not tracked (TODO in the original).
bool CNetChan::GetStreamProgress(int flow, int *received, int *total) const {
    *total = 0;
    *received = 0;
    if (flow == FLOW_INCOMING) {
        for (int i = 0; i < MAX_STREAMS; i++) {
            if (m_ReceiveList[i].buffer == NULL)
                continue;
            *total += m_ReceiveList[i].numFragments * FRAGMENT_SIZE;
            *received += m_ReceiveList[i].ackedFragments * FRAGMENT_SIZE;
        }
        return *total > 0;
    }
    if (flow == FLOW_OUTGOING) {
        for (int i = 0; i < MAX_STREAMS; i++) {
            if (m_WaitingList[i].Count() == 0)
                continue;
            *total += m_WaitingList[i][0]->numFragments * FRAGMENT_SIZE;
            *received += m_WaitingList[i][0]->ackedFragments * FRAGMENT_SIZE;
        }
        return *total > 0;
    }
    return false; // TODO TCP progress
}
float CNetChan::GetCommandInterpolationAmount(int flow, int frame_number) const {
return m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK].m_flInterpolationAmount;
}
int CNetChan::GetPacketBytes(int flow, int frame_number, int group) const {
if (group >= INetChannelInfo::TOTAL) {
return m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK].size;
} else {
return Bits2Bytes(m_DataFlow[flow].frames[frame_number & NET_FRAMES_MASK].msggroups[group]);
}
}
void CNetChan::UpdateMessageStats(int msggroup, int bits) {
netflow_t *pflow = &m_DataFlow[FLOW_INCOMING];
netframe_t *pframe = pflow->currentframe;
Assert((msggroup >= INetChannelInfo::GENERIC) && (msggroup < INetChannelInfo::TOTAL));
m_MsgStats[msggroup] += bits;
if (pframe)
pframe->msggroups[msggroup] += bits;
}
void CNetChan::IncrementQueuedPackets() {
    ++m_nQueuedPackets;
}

// Decrements the counter, clamping at zero (asserting if it went negative).
void CNetChan::DecrementQueuedPackets() {
    --m_nQueuedPackets;
    Assert(m_nQueuedPackets >= 0);
    if (m_nQueuedPackets < 0) {
        m_nQueuedPackets = 0;
    }
}

// True if either the global queued-packet sender or this channel itself
// still has packets outstanding.
bool CNetChan::HasQueuedPackets() const {
    return g_pQueuedPackedSender->HasQueuedPackets(this) || (m_nQueuedPackets > 0);
}
void CNetChan::SetInterpolationAmount(float flInterpolationAmount) {
    m_flInterpolationAmount = flInterpolationAmount;
}

// Caches the remote host's self-reported frame timing stats.
void CNetChan::SetRemoteFramerate(float flFrameTime, float flFrameTimeStdDeviation) {
    m_flRemoteFrameTime = flFrameTime;
    m_flRemoteFrameTimeStdDeviation = flFrameTimeStdDeviation;
}
// Max # of payload bytes before we must split/fragment the packet.
// Logs whenever the value actually changes.
void CNetChan::SetMaxRoutablePayloadSize(int nSplitSize) {
    if (nSplitSize != m_nMaxRoutablePayloadSize) {
        DevMsg("Setting max routable payload size from %d to %d for %s\n",
               m_nMaxRoutablePayloadSize, nSplitSize, GetName());
    }
    m_nMaxRoutablePayloadSize = nSplitSize;
}

int CNetChan::GetMaxRoutablePayloadSize() {
    return m_nMaxRoutablePayloadSize;
}
// Negotiated wire-protocol version for this channel.
int CNetChan::GetProtocolVersion() {
    AssertMsg(
            m_nProtocolVersion >= 0 && m_nProtocolVersion <= PROTOCOL_VERSION,
            "This is probably not being initialized somewhere"
    );
    return m_nProtocolVersion;
}

// Bumps and returns the sequence number used to match split-packet pieces.
int CNetChan::IncrementSplitPacketSequence() {
    m_nSplitPacketSequence += 1;
    return m_nSplitPacketSequence;
}
// Security filter for client<->server file transfers. Rejects: absolute or
// traversal paths, over-long or over-deep names, whitespace / "evil"
// characters, executable / script / config extensions anywhere in the name,
// writes into protected directories (lua/, gamemodes/, addons/), and
// non-map assets under maps/. Only names whose first extension is exactly
// 3-4 characters (or one of a few whitelisted multi-part extensions) pass.
bool CNetChan::IsValidFileForTransfer(const char *pszFilename) {
    if (!pszFilename || !pszFilename[0])
        return false;
    // No absolute paths or weaseling up the tree with ".." allowed.
    if (!COM_IsValidPath(pszFilename) || V_IsAbsolutePath(pszFilename))
        return false;
    int len = V_strlen(pszFilename);
    if (len >= MAX_PATH)
        return false;
    char szTemp[MAX_PATH];
    V_strcpy_safe(szTemp, pszFilename);
    // Convert so we've got all forward slashes in the path.
    V_FixSlashes(szTemp, '/');
    V_FixDoubleSlashes(szTemp);
    // NOTE(review): 'len' was computed before V_FixDoubleSlashes; if that
    // call shortened the string, this indexes a stale position inside the
    // buffer. Consider recomputing len from szTemp here — verify.
    if (szTemp[len - 1] == '/')
        return false;
    int slash_count = 0;
    for (const char *psz = szTemp; *psz; psz++) {
        if (*psz == '/')
            slash_count++;
    }
    // Really no reason to have deeper directory than this?
    if (slash_count >= 32)
        return false;
    // Don't allow filenames with unicode whitespace in them.
    if (Q_RemoveAllEvilCharacters(szTemp))
        return false;
    if (V_stristr(szTemp, "lua/") ||
        V_stristr(szTemp, "gamemodes/") ||
        V_stristr(szTemp, "addons/") ||
        V_stristr(szTemp, "~/") ||
        // V_stristr( szTemp, "//" ) || // Don't allow '//'. TODO: Is this check ok?
        V_stristr(szTemp, "./././") || // Don't allow folks to make crazy long paths with ././././ stuff.
        V_stristr(szTemp, "  ") || // Don't allow multiple spaces or tab (was being used for an exploit).
        V_stristr(szTemp, "\t")) {
        return false;
    }
    // If .exe or .EXE or these other strings exist _anywhere_ in the filename, reject it.
    if (V_stristr(szTemp, ".cfg") ||
        V_stristr(szTemp, ".lst") ||
        V_stristr(szTemp, ".exe") ||
        V_stristr(szTemp, ".vbs") ||
        V_stristr(szTemp, ".com") ||
        V_stristr(szTemp, ".bat") ||
        V_stristr(szTemp, ".cmd") ||
        V_stristr(szTemp, ".dll") ||
        V_stristr(szTemp, ".so") ||
        V_stristr(szTemp, ".dylib") ||
        V_stristr(szTemp, ".ini") ||
        V_stristr(szTemp, ".log") ||
        V_stristr(szTemp, ".lua") ||
        V_stristr(szTemp, ".nut") ||
        V_stristr(szTemp, ".vdf") ||
        V_stristr(szTemp, ".smx") ||
        V_stristr(szTemp, ".gcf") ||
        V_stristr(szTemp, ".lmp") ||
        V_stristr(szTemp, ".sys")) {
        return false;
    }
    // Allow only bsp and nav file transfers to not overwrite any assets in maps directory
    if (V_stristr(pszFilename, "maps/") &&
        !V_stristr(pszFilename, ".bsp") &&
        !V_stristr(pszFilename, ".ain") &&
        !V_stristr(pszFilename, ".nav"))
        return false;
    // Search for the first . in the base filename, and bail if not found.
    // We don't want people passing in things like 'cfg/.wp.so'...
    const char *basename = strrchr(szTemp, '/');
    if (!basename)
        basename = szTemp;
    // First dot (not last) on purpose: multi-part extensions like
    // ".bsp.bz2" must be seen whole for the whitelist check below.
    const char *extension = strchr(basename, '.');
    if (!extension)
        return false;
    // If the extension is not exactly 3 or 4 characters, bail.
    int extension_len = V_strlen(extension);
    if ((extension_len != 3) &&
        (extension_len != 4) &&
        V_stricmp(extension, ".bsp.bz2") &&
        V_stricmp(extension, ".xbox.vtx") &&
        V_stricmp(extension, ".dx80.vtx") &&
        V_stricmp(extension, ".dx90.vtx") &&
        V_stricmp(extension, ".sw.vtx")) {
        return false;
    }
    // If there are any spaces in the extension, bail. (Windows exploit).
    if (strchr(extension, ' '))
        return false;
    return true;
}
|
antoniojkim/CalcPlusPlus | Tests/Tests/StatisticsTests/meanTests.h | #include "../EngineTest.h"
#include <Catch2>
// Engine regression tests: evaluates the variadic `mean` expression through
// the expression engine and compares against independently computed
// averages (comparison tolerance is handled by requireIsEqual).
TEST_CASE("Mean Function Evaluation Tests", "[mean]") {
    // NOTE(review): the empty-argument case is disabled; confirm the
    // expected error text before re-enabling.
    // SECTION("`Empty Test"){
    //     requireIsEqual("mean()", "Insufficient Number of Arguments for Function: mean");
    // }
    SECTION("`mean` Test 1"){
        requireIsEqual("mean(3.93, -9.89, 4.34, 3.89, 6.51, 5.45)", 2.3716666667);
    }
    SECTION("`mean` Test 2"){
        requireIsEqual("mean(-1.78, 4.45, -5.46, -1.19, 1.33, 6.93)", 0.7133333333);
    }
    SECTION("`mean` Test 3"){
        requireIsEqual("mean(-9.58, 2.9, -6.6, 2.88, 4.78, -6.29, -9.68)", -3.0842857143);
    }
    SECTION("`mean` Test 4"){
        requireIsEqual("mean(7.26, 1.68)", 4.47);
    }
    SECTION("`mean` Test 5"){
        requireIsEqual("mean(7.56, 3.36, -5.61, -8.2, 7.75, -1.68, 5.98, 1.33, 9.68)", 2.2411111111);
    }
    SECTION("`mean` Test 6"){
        requireIsEqual("mean(-5.48, -2.04, 2.93, 8.83, -0.23, -7.05, -2.18, 9.53)", 0.53875);
    }
    SECTION("`mean` Test 7"){
        requireIsEqual("mean(3.79, -5.61, 8.21, 5.66, -6.5)", 1.11);
    }
    SECTION("`mean` Test 8"){
        requireIsEqual("mean(-6.91, -5.55, -8.42)", -6.96);
    }
    SECTION("`mean` Test 9"){
        requireIsEqual("mean(-1.42, 7.41, -5.47, -2.33, 9.33, -3.92)", 0.6);
    }
    SECTION("`mean` Test 10"){
        requireIsEqual("mean(-4.86, 5.67, -0.86, -2.96, 1.83, -6.98, 1.2)", -0.9942857143);
    }
}
|
everyvoter/everyvoter | geodataset/__init__.py | """Geodataset app"""
|
kebernet/erigo | ios/app/Erigo/com/google/common/hash/BloomFilter.h | //
// Generated by the J2ObjC translator. DO NOT EDIT!
// source: /Volumes/Personal/Documents/raspi-config/client-framework/build/j2oSources/com/google/common/hash/BloomFilter.java
//
#include "J2ObjC_header.h"
#pragma push_macro("INCLUDE_ALL_ComGoogleCommonHashBloomFilter")
#ifdef RESTRICT_ComGoogleCommonHashBloomFilter
#define INCLUDE_ALL_ComGoogleCommonHashBloomFilter 0
#else
#define INCLUDE_ALL_ComGoogleCommonHashBloomFilter 1
#endif
#undef RESTRICT_ComGoogleCommonHashBloomFilter
#if __has_feature(nullability)
#pragma clang diagnostic push
#pragma GCC diagnostic ignored "-Wnullability-completeness"
#endif
#if !defined (ComGoogleCommonHashBloomFilter_) && (INCLUDE_ALL_ComGoogleCommonHashBloomFilter || defined(INCLUDE_ComGoogleCommonHashBloomFilter))
#define ComGoogleCommonHashBloomFilter_
#define RESTRICT_ComGoogleCommonBasePredicate 1
#define INCLUDE_ComGoogleCommonBasePredicate 1
#include "com/google/common/base/Predicate.h"
#define RESTRICT_JavaIoSerializable 1
#define INCLUDE_JavaIoSerializable 1
#include "java/io/Serializable.h"
@class JavaIoInputStream;
@class JavaIoOutputStream;
@protocol ComGoogleCommonHashBloomFilter_Strategy;
@protocol ComGoogleCommonHashFunnel;
@protocol JavaUtilFunctionPredicate;
@interface ComGoogleCommonHashBloomFilter : NSObject < ComGoogleCommonBasePredicate, JavaIoSerializable >
#pragma mark Public
- (jboolean)applyWithId:(id)input;
- (jlong)approximateElementCount;
- (ComGoogleCommonHashBloomFilter *)copy__ OBJC_METHOD_FAMILY_NONE;
+ (ComGoogleCommonHashBloomFilter *)createWithComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withInt:(jint)expectedInsertions;
+ (ComGoogleCommonHashBloomFilter *)createWithComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withInt:(jint)expectedInsertions
withDouble:(jdouble)fpp;
+ (ComGoogleCommonHashBloomFilter *)createWithComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withLong:(jlong)expectedInsertions;
+ (ComGoogleCommonHashBloomFilter *)createWithComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withLong:(jlong)expectedInsertions
withDouble:(jdouble)fpp;
- (jboolean)isEqual:(id)object;
- (jdouble)expectedFpp;
- (NSUInteger)hash;
- (jboolean)isCompatibleWithComGoogleCommonHashBloomFilter:(ComGoogleCommonHashBloomFilter *)that;
- (jboolean)mightContainWithId:(id)object;
- (jboolean)putWithId:(id)object;
- (void)putAllWithComGoogleCommonHashBloomFilter:(ComGoogleCommonHashBloomFilter *)that;
+ (ComGoogleCommonHashBloomFilter *)readFromWithJavaIoInputStream:(JavaIoInputStream *)inArg
withComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel;
- (void)writeToWithJavaIoOutputStream:(JavaIoOutputStream *)outArg;
#pragma mark Package-Private
- (jlong)bitSize;
+ (ComGoogleCommonHashBloomFilter *)createWithComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withLong:(jlong)expectedInsertions
withDouble:(jdouble)fpp
withComGoogleCommonHashBloomFilter_Strategy:(id<ComGoogleCommonHashBloomFilter_Strategy>)strategy;
+ (jlong)optimalNumOfBitsWithLong:(jlong)n
withDouble:(jdouble)p;
+ (jint)optimalNumOfHashFunctionsWithLong:(jlong)n
withLong:(jlong)m;
@end
J2OBJC_EMPTY_STATIC_INIT(ComGoogleCommonHashBloomFilter)
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_createWithComGoogleCommonHashFunnel_withInt_withDouble_(id<ComGoogleCommonHashFunnel> funnel, jint expectedInsertions, jdouble fpp);
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_createWithComGoogleCommonHashFunnel_withLong_withDouble_(id<ComGoogleCommonHashFunnel> funnel, jlong expectedInsertions, jdouble fpp);
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_createWithComGoogleCommonHashFunnel_withLong_withDouble_withComGoogleCommonHashBloomFilter_Strategy_(id<ComGoogleCommonHashFunnel> funnel, jlong expectedInsertions, jdouble fpp, id<ComGoogleCommonHashBloomFilter_Strategy> strategy);
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_createWithComGoogleCommonHashFunnel_withInt_(id<ComGoogleCommonHashFunnel> funnel, jint expectedInsertions);
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_createWithComGoogleCommonHashFunnel_withLong_(id<ComGoogleCommonHashFunnel> funnel, jlong expectedInsertions);
FOUNDATION_EXPORT jint ComGoogleCommonHashBloomFilter_optimalNumOfHashFunctionsWithLong_withLong_(jlong n, jlong m);
FOUNDATION_EXPORT jlong ComGoogleCommonHashBloomFilter_optimalNumOfBitsWithLong_withDouble_(jlong n, jdouble p);
FOUNDATION_EXPORT ComGoogleCommonHashBloomFilter *ComGoogleCommonHashBloomFilter_readFromWithJavaIoInputStream_withComGoogleCommonHashFunnel_(JavaIoInputStream *inArg, id<ComGoogleCommonHashFunnel> funnel);
J2OBJC_TYPE_LITERAL_HEADER(ComGoogleCommonHashBloomFilter)
#endif
#if !defined (ComGoogleCommonHashBloomFilter_Strategy_) && (INCLUDE_ALL_ComGoogleCommonHashBloomFilter || defined(INCLUDE_ComGoogleCommonHashBloomFilter_Strategy))
#define ComGoogleCommonHashBloomFilter_Strategy_
#define RESTRICT_JavaIoSerializable 1
#define INCLUDE_JavaIoSerializable 1
#include "java/io/Serializable.h"
@class ComGoogleCommonHashBloomFilterStrategies_BitArray;
@protocol ComGoogleCommonHashFunnel;
@protocol ComGoogleCommonHashBloomFilter_Strategy < JavaIoSerializable, JavaObject >
- (jboolean)putWithId:(id)object
withComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withInt:(jint)numHashFunctions
withComGoogleCommonHashBloomFilterStrategies_BitArray:(ComGoogleCommonHashBloomFilterStrategies_BitArray *)bits;
- (jboolean)mightContainWithId:(id)object
withComGoogleCommonHashFunnel:(id<ComGoogleCommonHashFunnel>)funnel
withInt:(jint)numHashFunctions
withComGoogleCommonHashBloomFilterStrategies_BitArray:(ComGoogleCommonHashBloomFilterStrategies_BitArray *)bits;
- (jint)ordinal;
@end
J2OBJC_EMPTY_STATIC_INIT(ComGoogleCommonHashBloomFilter_Strategy)
J2OBJC_TYPE_LITERAL_HEADER(ComGoogleCommonHashBloomFilter_Strategy)
#endif
#if __has_feature(nullability)
#pragma clang diagnostic pop
#endif
#pragma pop_macro("INCLUDE_ALL_ComGoogleCommonHashBloomFilter")
|
devinrsmith/deephaven-core | Util/src/test/java/io/deephaven/util/datastructures/TestRandomAccessDeque.java | package io.deephaven.util.datastructures;
import io.deephaven.base.testing.BaseArrayTestCase;
import junit.framework.TestCase;
import java.util.*;
import java.util.stream.Collectors;
public class TestRandomAccessDeque extends BaseArrayTestCase {
public void testSimple() {
List<Integer> values = new ArrayList<>(Arrays.asList(2, 3, 4, 5));
RandomAccessDeque<Integer> deque = new RandomAccessDeque<>(values);
checkEquals(values, deque);
values.add(0, 1);
deque.addFirst(1);
checkEquals(values, deque);
values.add(6);
deque.addLast(6);
checkEquals(values, deque);
Iterator<Integer> vit = values.iterator();
Iterator<Integer> dit = values.iterator();
while (vit.hasNext()) {
TestCase.assertEquals(vit.hasNext(), dit.hasNext());
Integer vv = vit.next();
Integer dd = dit.next();
TestCase.assertEquals(vv, dd);
TestCase.assertEquals(vit.hasNext(), dit.hasNext());
}
show(values, deque);
int n = 0;
vit = values.iterator();
dit = deque.iterator();
while (vit.hasNext()) {
TestCase.assertEquals(vit.hasNext(), dit.hasNext());
Integer vv = vit.next();
Integer dd = dit.next();
TestCase.assertEquals(vv, dd);
TestCase.assertEquals(vit.hasNext(), dit.hasNext());
if (++n % 2 == 0) {
vit.remove();
dit.remove();
}
}
checkEquals(values, deque);
show(values, deque);
values.removeIf(x -> x%3 == 0);
assertTrue( deque.removeIf(x -> x == 3) );
checkEquals(values, deque);
assertFalse( deque.removeIf(x -> x == 999) );
TestCase.assertTrue(Arrays.equals(values.toArray(), deque.toArray()));
TestCase.assertTrue(Arrays.equals(values.toArray(new Integer[0]), deque.toArray(new Integer[0])));
TestCase.assertTrue(Arrays.equals(values.toArray(new Integer[values.size()]), deque.toArray(new Integer[deque.size()])));
values.addAll(Arrays.asList(7, 8, 9));
deque.addAll(Arrays.asList(7, 8, 9));
checkEquals(values, deque);
values.removeAll(Arrays.asList(3, 7));
deque.removeAll(Arrays.asList(3, 7));
checkEquals(values, deque);
values.retainAll(Arrays.asList(1, 8));
deque.retainAll(Arrays.asList(1, 8));
checkEquals(values, deque);
values.remove(new Integer(1));
deque.remove(1);
show(values, deque);
checkEquals(values, deque);
deque.clear();
TestCase.assertEquals(true, deque.isEmpty());
values.clear();
checkEquals(values, deque);
values.addAll(Arrays.asList(10, 11, 12, 13, 14, 15, 16, 17, 18));
deque.addAll(Arrays.asList(10, 11, 12, 13, 14, 15, 16, 17, 18));
checkEquals(values, deque);
for (int ii = 0; ii < 1000; ++ii) {
values.add(ii);
deque.add(ii);
}
checkEquals(values, deque);
//noinspection SimplifyStreamApiCallChains
List<Integer> streamResult = deque.stream().collect(Collectors.toList());
TestCase.assertEquals(values, streamResult);
Set<Integer> psResult = new HashSet<>(deque.parallelStream().collect(Collectors.toSet()));
Set<Integer> valuesSet = new HashSet<>(values);
Set<Integer> missing = new HashSet<>(valuesSet);
missing.removeAll(psResult);
System.out.println("Missing from psResult: " + missing);
Set<Integer> missing2 = new HashSet<>(psResult);
missing2.removeAll(valuesSet);
System.out.println("Missing from values: " + missing2);
TestCase.assertEquals(valuesSet, psResult);
}
private void show(List<Integer> values, RandomAccessDeque<Integer> deque) {
StringBuilder builder = new StringBuilder();
builder.append(values.size()).append(": ");
for (int ii = 0; ii < values.size(); ++ii) {
if (ii > 0) {
builder.append(", ");
}
Integer vv = values.get(ii);
boolean different = ii >= deque.size() || !Objects.equals(deque.get(ii), vv);
if (different) {
builder.append("*");
}
builder.append(vv);
if (different) {
builder.append("*");
}
}
builder.append("\n");
builder.append(deque.size()).append(": ");
for (int ii = 0; ii < deque.size(); ++ii) {
if (ii > 0) {
builder.append(", ");
}
Integer dd = deque.get(ii);
boolean different = ii >= values.size() || !Objects.equals(values.get(ii), dd);
if (different) {
builder.append("*");
}
builder.append(dd);
if (different) {
builder.append("*");
}
}
builder.append("\n");
System.out.println(builder);
}
private void checkEquals(List<Integer> values, RandomAccessDeque<Integer> deque) {
TestCase.assertEquals(values.size(), deque.size());
for (int ii = 0; ii < deque.size(); ++ii) {
TestCase.assertEquals(values.get(ii), deque.get(ii));
}
}
}
|
georgehorrell/core | v23/query/engine/internal/query.go | // Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
import (
"reflect"
"strconv"
"sync"
ds "v.io/v23/query/engine/datasource"
"v.io/v23/query/engine/internal/querychecker"
"v.io/v23/query/engine/internal/queryfunctions"
"v.io/v23/query/engine/internal/queryparser"
"v.io/v23/query/engine/public"
"v.io/v23/query/syncql"
"v.io/v23/vdl"
"v.io/v23/vom"
)
// queryEngineImpl is the default public.QueryEngine implementation: it
// executes queries against db and caches prepared statements by handle.
type queryEngineImpl struct {
	db ds.Database
	// Guards nextID, the monotonically increasing prepared-statement handle.
	mutexNextID sync.Mutex
	nextID int64
	// Guards preparedStatements, the handle -> parsed-AST cache.
	mutexPreparedStatements sync.Mutex
	preparedStatements map[int64]*queryparser.Statement
}
// preparedStatementImpl is a lightweight handle to a parsed statement that
// lives in its owning queryEngineImpl's cache.
type preparedStatementImpl struct {
	qe *queryEngineImpl
	id int64 // key to AST stored in queryEngineImpl.
}
// Create returns a QueryEngine that executes queries against db.
func Create(db ds.Database) public.QueryEngine {
	engine := &queryEngineImpl{
		db:                 db,
		nextID:             0,
		preparedStatements: make(map[int64]*queryparser.Statement),
	}
	return engine
}
// Exec parses, semantically checks, and executes q against the engine's
// database, returning column headings and a result stream.
func (qe *queryEngineImpl) Exec(q string) ([]string, syncql.ResultStream, error) {
	headers, stream, err := Exec(qe.db, q)
	return headers, stream, err
}
// GetPreparedStatement returns the prepared statement registered under
// handle, or an error when no such statement exists.
func (qe *queryEngineImpl) GetPreparedStatement(handle int64) (public.PreparedStatement, error) {
	qe.mutexPreparedStatements.Lock()
	_, found := qe.preparedStatements[handle]
	qe.mutexPreparedStatements.Unlock()
	if !found {
		return nil, syncql.ErrorfPreparedStatementNotFound(qe.db.GetContext(), "[0]prepared statement not found")
	}
	return &preparedStatementImpl{qe, handle}, nil
}
// PrepareStatement parses q and caches the resulting AST, returning a
// PreparedStatement handle that can later be executed with bound parameters.
func (qe *queryEngineImpl) PrepareStatement(q string) (public.PreparedStatement, error) {
	stmt, err := queryparser.Parse(qe.db, q)
	if err != nil {
		return nil, err
	}
	// Allocate a fresh handle under the ID lock.
	qe.mutexNextID.Lock()
	handle := qe.nextID
	qe.nextID++
	qe.mutexNextID.Unlock()
	// Publish the parsed statement under the statements lock.
	qe.mutexPreparedStatements.Lock()
	qe.preparedStatements[handle] = stmt
	qe.mutexPreparedStatements.Unlock()
	return &preparedStatementImpl{qe: qe, id: handle}, nil
}
// Exec substitutes paramValues into the cached AST, semantically checks the
// resulting statement, and executes it, returning column headings and a
// result stream.
func (p *preparedStatementImpl) Exec(paramValues ...*vom.RawBytes) ([]string, syncql.ResultStream, error) {
	// Find the AST
	p.qe.mutexPreparedStatements.Lock()
	s := p.qe.preparedStatements[p.id]
	p.qe.mutexPreparedStatements.Unlock()
	// NOTE(review): if this statement was already Closed, s is nil and the
	// dereference below panics — confirm callers never Exec after Close.
	vvs := make([]*vdl.Value, len(paramValues))
	for i := range paramValues {
		// Decode each wire-format parameter into a vdl.Value.
		if err := paramValues[i].ToValue(&vvs[i]); err != nil {
			return nil, nil, err
		}
	}
	// Copy the AST and substitute any parameters with actual values.
	// Note: Not all of the AST is copied as most parts are immutable.
	sCopy, err := (*s).CopyAndSubstitute(p.qe.db, vvs)
	if err != nil {
		return nil, nil, err
	}
	// Semantically check the copied AST and then execute it.
	return checkAndExec(p.qe.db, &sCopy)
}
// Handle returns the numeric handle identifying this prepared statement
// within its query engine.
func (p *preparedStatementImpl) Handle() int64 {
	return p.id
}
// Close removes the cached AST for this statement from the engine's cache.
// NOTE(review): calling Exec after Close dereferences a nil cache entry —
// confirm the public API forbids that sequence.
func (p *preparedStatementImpl) Close() {
	p.qe.mutexPreparedStatements.Lock()
	delete(p.qe.preparedStatements, p.id)
	p.qe.mutexPreparedStatements.Unlock()
}
// Exec parses, semantically checks, and executes the query q against db,
// returning column headings and a result stream.
func Exec(db ds.Database, q string) ([]string, syncql.ResultStream, error) {
	stmt, err := queryparser.Parse(db, q)
	if err != nil {
		return nil, nil, err
	}
	return checkAndExec(db, stmt)
}
// checkAndExec semantically checks the parsed statement and, for select and
// delete statements, executes it; any other statement type is rejected with
// an ExecOfUnknownStatementType error.
func checkAndExec(db ds.Database, s *queryparser.Statement) ([]string, syncql.ResultStream, error) {
	if err := querychecker.Check(db, s); err != nil {
		return nil, nil, err
	}
	switch (*s).(type) {
	case queryparser.SelectStatement, queryparser.DeleteStatement:
		return execStatement(db, s)
	default:
		return nil, nil, syncql.ErrorfExecOfUnknownStatementType(db.GetContext(), "[%v]cannot execute unknown statement type: %v", (*s).Offset(), reflect.TypeOf(*s).Name())
	}
}
// Given a key, a value and a SelectClause, return the projection.
// This function is only called if Eval returned true on the WhereClause expression.
// Each selector is either a field (resolved against k/v) or a function call;
// both produce one RawBytes entry per selector, in clause order.
func ComposeProjection(db ds.Database, k string, v *vdl.Value, s *queryparser.SelectClause) []*vom.RawBytes {
	var projection []*vom.RawBytes
	for _, selector := range s.Selectors {
		switch selector.Type {
		case queryparser.TypSelField:
			// If field not found, nil is returned (as per specification).
			f := ResolveField(db, k, v, selector.Field)
			projection = append(projection, vom.RawBytesOf(f))
		case queryparser.TypSelFunc:
			if selector.Function.Computed {
				// Value was already computed during checking; reuse it.
				projection = append(projection, queryfunctions.ConvertFunctionRetValueToRawBytes(selector.Function.RetValue))
			} else {
				// need to exec function
				// If error executing function, return nil (as per specification).
				retValue, err := resolveArgsAndExecFunction(db, k, v, selector.Function)
				if err != nil {
					retValue = nil
				}
				projection = append(projection, queryfunctions.ConvertFunctionRetValueToRawBytes(retValue))
			}
		}
	}
	return projection
}
// ExecSelectSingleRow returns the projection for a single k/v row, or an
// empty slice when the row does not satisfy the where clause. Intended for
// testing; limit and offset clauses are not applied since they make no sense
// for a single row.
func ExecSelectSingleRow(db ds.Database, k string, v *vdl.Value, s *queryparser.SelectStatement) []*vom.RawBytes {
	if Eval(db, k, v, s.Where.Expr) {
		return ComposeProjection(db, k, v, s.Select)
	}
	return []*vom.RawBytes{}
}
// getColumnHeadings derives one result-column name per selector: the As
// alias when present, else the dotted field path (with [key] suffixes from
// getSegmentKeyAsHeading) for field selectors, or the bare function name for
// function selectors.
func getColumnHeadings(s *queryparser.SelectStatement) []string {
	columnHeaders := []string{}
	for _, selector := range s.Select.Selectors {
		columnName := ""
		if selector.As != nil {
			columnName = selector.As.AltName.Value
		} else {
			switch selector.Type {
			case queryparser.TypSelField:
				sep := ""
				for _, segment := range selector.Field.Segments {
					columnName = columnName + sep + segment.Value
					for _, key := range segment.Keys {
						columnName += getSegmentKeyAsHeading(key)
					}
					sep = "."
				}
			case queryparser.TypSelFunc:
				columnName = selector.Function.Name
			}
		}
		columnHeaders = append(columnHeaders, columnName)
	}
	return columnHeaders
}
// TODO(jkline): Should we really include key/index of a map/set/array/list in the header?
//              The column names can get quite long. Perhaps just "[]" at the end of the segment
//              would be better. The author of the query can always use the As clause to specify a
//              better heading. Note: for functions, just the function name is included in the header.
//              When a decision is made, it's best to be consistent for functions and key/indexes.
// getSegmentKeyAsHeading renders one map/set/array/list key as a "[...]"
// suffix for a column heading, recursing into nested field keys.
func getSegmentKeyAsHeading(segKey *queryparser.Operand) string {
	val := "["
	switch segKey.Type {
	case queryparser.TypBigInt:
		val += segKey.BigInt.String()
	case queryparser.TypBigRat:
		val += segKey.BigRat.String()
	case queryparser.TypField:
		// A field used as a key: render its dotted path, recursing into
		// any keys of its own segments.
		sep := ""
		for _, segment := range segKey.Column.Segments {
			val += sep + segment.Value
			for _, key := range segment.Keys {
				val += getSegmentKeyAsHeading(key)
			}
			sep = "."
		}
	case queryparser.TypBool:
		val += strconv.FormatBool(segKey.Bool)
	case queryparser.TypInt:
		val += strconv.FormatInt(segKey.Int, 10)
	case queryparser.TypFloat:
		val += strconv.FormatFloat(segKey.Float, 'f', -1, 64)
	case queryparser.TypFunction:
		val += segKey.Function.Name
	case queryparser.TypStr:
		val += segKey.Str
	case queryparser.TypTime:
		val += segKey.Time.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
	case queryparser.TypNil:
		val += "<nil>"
	case queryparser.TypObject:
		val += "<object>"
	default:
		val += "<?>"
	}
	val += "]"
	return val
}
// getIndexRanges computes scan ranges restricted by the where clause: first
// for the implicit key column "k", then for each secondary index on the
// table. Only string-kind secondary indexes are supported.
func getIndexRanges(db ds.Database, tableName string, tableOff int64, indexFields []ds.Index, w *queryparser.WhereClause) ([]ds.IndexRanges, error) {
	indexes := []ds.IndexRanges{}
	// Get IndexRanges for k
	kField := &queryparser.Field{Segments: []queryparser.Segment{{Value: "k"}}}
	idxRanges := *querychecker.CompileIndexRanges(kField, vdl.String, w)
	indexes = append(indexes, idxRanges)
	// Get IndexRanges for secondary indexes.
	for _, idx := range indexFields {
		if idx.Kind != vdl.String {
			return nil, syncql.ErrorfIndexKindNotSupported(db.GetContext(), "[%v]Index kind %v of field %v on table %v not supported.", tableOff, idx.Kind.String(), idx.FieldName, tableName)
		}
		var err error
		var idxField *queryparser.Field
		// Construct a Field from the string. Use the parser as it knows best.
		if idxField, err = queryparser.ParseIndexField(db, idx.FieldName, tableName); err != nil {
			return nil, err
		}
		idxRanges := *querychecker.CompileIndexRanges(idxField, idx.Kind, w)
		indexes = append(indexes, idxRanges)
	}
	return indexes, nil
}
// execStatement runs an already-checked select or delete statement against
// db, returning the result column headings and a result stream (for delete,
// a single-row stream carrying the delete count).
func execStatement(db ds.Database, s *queryparser.Statement) ([]string, syncql.ResultStream, error) { //nolint:gocyclo
	switch st := (*s).(type) {
	// Select
	case queryparser.SelectStatement:
		indexes, err := getIndexRanges(db, st.From.Table.Name, st.From.Table.Off, st.From.Table.DBTable.GetIndexFields(), st.Where)
		if err != nil {
			return nil, nil, err
		}
		keyValueStream, err := st.From.Table.DBTable.Scan(indexes...)
		if err != nil {
			return nil, nil, syncql.ErrorfScanError(db.GetContext(), "[%v]scan error: %v", st.Off, err)
		}
		// Selects stream lazily: the result stream pulls rows on demand.
		var resultStream selectResultStreamImpl
		resultStream.db = db
		resultStream.selectStatement = &st
		resultStream.keyValueStream = keyValueStream
		return getColumnHeadings(&st), &resultStream, nil
	// Delete
	case queryparser.DeleteStatement:
		indexes, err := getIndexRanges(db, st.From.Table.Name, st.From.Table.Off, st.From.Table.DBTable.GetIndexFields(), st.Where)
		if err != nil {
			return nil, nil, err
		}
		keyValueStream, err := st.From.Table.DBTable.Scan(indexes...)
		if err != nil {
			return nil, nil, syncql.ErrorfScanError(db.GetContext(), "[%v]scan error: %v", st.Off, err)
		}
		// Deletes execute eagerly; only the count is streamed back.
		deleteCount := int64(0)
		for keyValueStream.Advance() {
			if st.Limit != nil && deleteCount >= st.Limit.Limit.Value {
				// Limit reached; cancel the stream when the function exits.
				defer keyValueStream.Cancel()
				break
			}
			k, v := keyValueStream.KeyValue()
			// EvalWhereUsingOnlyKey
			// INCLUDE: the row should be included in the results
			// EXCLUDE: the row should NOT be included
			// FETCH_VALUE: the value and/or type of the value are required to make determination.
			rv := EvalWhereUsingOnlyKey(db, st.Where, k)
			var match bool
			switch rv {
			case Include:
				match = true
			case Exclude:
				match = false
			case FetchValue:
				match = Eval(db, k, vdl.ValueOf(v), st.Where.Expr)
			}
			if match {
				b, err := st.From.Table.DBTable.Delete(k)
				// May not have delete permission to delete this k/v pair.
				// Continue, but don't increment delete count.
				if err == nil && b {
					deleteCount++
				}
			}
		}
		if err := keyValueStream.Err(); err != nil {
			return nil, nil, syncql.ErrorfKeyValueStreamError(db.GetContext(), "[%v]KeyValueStream error: %v", st.Off, err)
		}
		var resultStream deleteResultStreamImpl
		resultStream.db = db
		resultStream.deleteStatement = &st
		resultStream.deleteCursor = 0
		resultStream.deleteCount = deleteCount
		return []string{"Count"}, &resultStream, nil
	}
	// Unreachable when called via checkAndExec, which only dispatches
	// select/delete; kept as a defensive fallback.
	// Fixed: error text previously misspelled "unknown" as "uknown".
	return nil, nil, syncql.ErrorfOperationNotSupported(db.GetContext(), "[0]%v not supported.", "unknown")
}
|
aa8y/leetcode | java/src/test/java/co/aa8y/leetcode/NumberOf1BitsTest.java | <gh_stars>1-10
package co.aa8y.leetcode;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.api.Test;
/**
 * Verifies the iterative and recursive Hamming-weight ("number of 1 bits")
 * implementations against the LeetCode examples.
 */
public class NumberOf1BitsTest {
  private final NumberOf1BitsIterative solutionIterative = new NumberOf1BitsIterative();
  private final NumberOf1BitsRecursive solutionRecursive = new NumberOf1BitsRecursive();

  /**
   * Asserts that both implementations report the expected Hamming weight
   * (count of set bits) for {@code n}. Extracted to remove the assertion
   * boilerplate that was triplicated across the test methods.
   */
  private void assertHammingWeight(int n, int expected) {
    assertEquals(expected, solutionIterative.hammingWeight(n));
    assertEquals(expected, solutionRecursive.hammingWeight(n));
  }

  @Test
  public void testLeetCodeExample1() {
    assertHammingWeight(0b00000000000000000000000000001011, 3);
  }

  @Test
  public void testLeetCodeExample2() {
    assertHammingWeight(0b00000000000000000000000010000000, 1);
  }

  @Test
  public void testLeetCodeExample3() {
    // Note: this literal has the sign bit set; hammingWeight must treat the
    // argument as unsigned.
    assertHammingWeight(0b11111111111111111111111111111101, 31);
  }
}
|
zhanghai/Douya | app/src/main/java/com/google/android/material/textfield/ExpandedHintTextInputLayout.java | /*
* Copyright (c) 2018 <NAME> <<EMAIL>>
* All Rights Reserved.
*/
package com.google.android.material.textfield;
import android.annotation.SuppressLint;
import android.content.Context;
import android.util.AttributeSet;
import android.widget.EditText;
import me.zhanghai.android.douya.R;
@SuppressLint("RestrictedApi")
public class ExpandedHintTextInputLayout extends TextInputLayout {

    // Dummy EditText that always "has text" (a single space); swapped into
    // the superclass so the floating hint stays collapsed even while the
    // real field is empty.
    private EditText mHasTextEditText;

    public ExpandedHintTextInputLayout(Context context) {
        super(context);
        init();
    }

    public ExpandedHintTextInputLayout(Context context, AttributeSet attrs) {
        super(context, attrs);
        init();
    }

    public ExpandedHintTextInputLayout(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        init();
    }

    private void init() {
        // NOTE(review): relies on restricted/package-private TextInputLayout
        // internals (collapsingTextHelper, editText) — the class lives in the
        // library's package for that reason. Verify on Material Components
        // upgrades.
        collapsingTextHelper.setCollapsedTextAppearance(R.style.TextAppearance_AppCompat_Caption);
        mHasTextEditText = new EditText(getContext());
        mHasTextEditText.setText(" ");
    }

    @Override
    void updateLabelState(boolean animate) {
        // Temporarily replace the package-private editText field with the
        // non-empty dummy so super sees "has text" and keeps the hint in its
        // collapsed (floating) position, then restore the real EditText.
        EditText realEditText = editText;
        editText = mHasTextEditText;
        super.updateLabelState(animate);
        editText = realEditText;
    }
}
|
google/ndash | ndash/src/extractor/seek_map.h | <reponame>google/ndash
/*
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NDASH_EXTRACTOR_SEEK_MAP_H_
#define NDASH_EXTRACTOR_SEEK_MAP_H_
#include <cstdint>
#include <memory>
namespace ndash {
namespace extractor {
// Maps presentation timestamps to byte positions in the stream so that an
// extractor can seek.
class SeekMapInterface {
 public:
  SeekMapInterface() {}
  virtual ~SeekMapInterface() {}

  // Whether or not the seeking is supported.
  // If seeking is not supported then the only valid seek position is the start
  // of the file, and so GetPosition() will return 0 for all input values.
  // Returns true if seeking is supported. False otherwise.
  virtual bool IsSeekable() const = 0;

  // Maps a seek position in microseconds to a corresponding position (byte
  // offset) in the stream from which data can be provided to the extractor.
  //
  // time_us: A seek position in microseconds.
  // Returns the corresponding position (byte offset) in the stream from which
  // data can be provided to the extractor, or 0 if IsSeekable() returns false.
  virtual int64_t GetPosition(int64_t time_us) const = 0;
};
// SeekMapInterface for streams that cannot be seeked: IsSeekable() reports
// false, so per the interface contract GetPosition() presumably maps every
// timestamp to offset 0 — confirm against the implementation in seek_map.cc.
class Unseekable : public SeekMapInterface {
 public:
  Unseekable();
  ~Unseekable() override;

  bool IsSeekable() const override;
  int64_t GetPosition(int64_t time_us) const override;
};
} // namespace extractor
} // namespace ndash
#endif // NDASH_EXTRACTOR_SEEK_MAP_H_
|
SinnerSchraderMobileMirrors/superdb | SuperDBCore/SuperDBCore/NumberPrivate.h | /* NumberPrivate.h Copyright (c) 1998-2009 <NAME>. */
/* This software is open source. See the license. */
// Cached class objects for FSNumber/NSNumber — presumably initialized once
// at startup elsewhere in the framework; verify against Number.m.
extern id FSNumberClass;
extern id NSNumberClass;

// MACROS
// Argument-validation guard: expects a variable named `operand` in the
// expanding scope and reports an argument error (position 1, expected type
// NSNumber) via FSArgumentError() when operand is not an NSNumber.
#define VERIF_OP_NSNUMBER(METHOD) {if (![operand isKindOfClass:NSNumberClass]) FSArgumentError(operand,1,@"NSNumber",METHOD);}
|
fangedward/pylot | pylot/perception/detection/detection_eval_operator.py | <gh_stars>0
"""Implements an operator that eveluates detection output."""
import heapq
import time
import erdos
import pylot.perception.detection.utils
from pylot.utils import time_epoch_ms
class DetectionEvalOperator(erdos.Operator):
    """Operator that computes accuracy metrics using detected obstacles.

    Detected obstacles are buffered and matched against ground-truth
    obstacles from the simulator; mAP is computed and logged for every
    matched pair of (detection end time, detection start time).

    Args:
        obstacles_stream (:py:class:`erdos.ReadStream`): The stream on which
            detected obstacles are received.
        ground_obstacles_stream: The stream on which
            :py:class:`~pylot.perception.messages.ObstaclesMessage` are
            received from the simulator.
        flags (absl.flags): Object to be used to access absl flags.
    """
    def __init__(self, obstacles_stream, ground_obstacles_stream, flags):
        obstacles_stream.add_callback(self.on_obstacles)
        ground_obstacles_stream.add_callback(self.on_ground_obstacles)
        erdos.add_watermark_callback(
            [obstacles_stream, ground_obstacles_stream], [], self.on_watermark)
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        # Game time of the previous watermark; None until the first arrives.
        self._last_notification = None
        # Buffer of detected obstacles, as (game time, obstacles) pairs.
        self._detected_obstacles = []
        # Buffer of ground obstacles, as (game time, obstacles) pairs.
        self._ground_obstacles = []
        # Heap storing pairs of (ground/output time, game time).
        self._detector_start_end_times = []
        # Time between consecutive watermarks; derived from the stream.
        self._sim_interval = None

    @staticmethod
    def connect(obstacles_stream, ground_obstacles_stream):
        """Connects the operator to other streams.

        Args:
            obstacles_stream (:py:class:`erdos.ReadStream`): The stream
                on which detected obstacles are received.
            ground_obstacles_stream: The stream on which
                :py:class:`~pylot.perception.messages.ObstaclesMessage` are
                received from the simulator.
        """
        return []

    def on_watermark(self, timestamp):
        """Invoked when all input streams have received a watermark.

        Pops every (end_time, start_time) pair whose ground-truth data is now
        available, computes mAP for it, and garbage-collects stale buffers.

        Args:
            timestamp (:py:class:`erdos.timestamp.Timestamp`): The timestamp of
                the watermark.
        """
        assert len(timestamp.coordinates) == 1
        op_start_time = time.time()
        game_time = timestamp.coordinates[0]
        if not self._last_notification:
            # First watermark: only record the time so the simulator interval
            # can be derived from the next one.
            # NOTE(review): a game time of 0 is treated as "unset" here —
            # confirm game time is always positive.
            self._last_notification = game_time
            return
        else:
            self._sim_interval = (game_time - self._last_notification)
            self._last_notification = game_time

        sim_time = timestamp.coordinates[0]
        while len(self._detector_start_end_times) > 0:
            (end_time, start_time) = self._detector_start_end_times[0]
            # We can compute mAP if the endtime is not greater than the ground
            # time.
            if end_time <= game_time:
                # This is the closest ground bounding box to the end time.
                heapq.heappop(self._detector_start_end_times)
                ground_obstacles = self.__get_ground_obstacles_at(end_time)
                # Get detector output obstacles.
                obstacles = self.__get_obstacles_at(start_time)
                if (len(obstacles) > 0 or len(ground_obstacles) > 0):
                    mAP = pylot.perception.detection.utils.get_mAP(
                        ground_obstacles, obstacles)
                    # Get runtime in ms
                    runtime = (time.time() - op_start_time) * 1000
                    self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'runtime',
                        runtime))
                    self._logger.info('mAP is: {}'.format(mAP))
                    self._csv_logger.info('{},{},{},{},{:.4f}'.format(
                        time_epoch_ms(), sim_time, self.config.name, 'mAP',
                        mAP))
                self._logger.debug('Computing accuracy for {} {}'.format(
                    end_time, start_time))
            else:
                # The remaining entries require newer ground obstacles.
                break
        self.__garbage_collect_obstacles()

    def __get_ground_obstacles_at(self, timestamp):
        """Returns the buffered ground obstacles recorded at timestamp.

        Logs a fatal error (and implicitly returns None) if no entry exists.
        """
        for (time, obstacles) in self._ground_obstacles:
            if time == timestamp:
                return obstacles
            elif time > timestamp:
                break
        self._logger.fatal(
            'Could not find ground obstacles for {}'.format(timestamp))

    def __get_obstacles_at(self, timestamp):
        """Returns the buffered detected obstacles recorded at timestamp.

        Logs a fatal error (and implicitly returns None) if no entry exists.
        """
        for (time, obstacles) in self._detected_obstacles:
            if time == timestamp:
                return obstacles
            elif time > timestamp:
                break
        self._logger.fatal(
            'Could not find detected obstacles for {}'.format(timestamp))

    def __garbage_collect_obstacles(self):
        """Drops buffered entries older than any pending comparison."""
        # Get the minimum watermark.
        watermark = None
        for (_, start_time) in self._detector_start_end_times:
            if watermark is None or start_time < watermark:
                watermark = start_time
        if watermark is None:
            return
        # Remove all detected obstacles that are below the watermark.
        index = 0
        while (index < len(self._detected_obstacles)
               and self._detected_obstacles[index][0] < watermark):
            index += 1
        if index > 0:
            self._detected_obstacles = self._detected_obstacles[index:]
        # Remove all the ground obstacles that are below the watermark.
        index = 0
        while (index < len(self._ground_obstacles)
               and self._ground_obstacles[index][0] < watermark):
            index += 1
        if index > 0:
            self._ground_obstacles = self._ground_obstacles[index:]

    def on_obstacles(self, msg):
        """Buffers detected obstacles and schedules a mAP comparison."""
        self._logger.debug('@{}: {} received obstacles'.format(
            msg.timestamp, self.config.name))
        game_time = msg.timestamp.coordinates[0]
        vehicles, people, _ = self.__get_obstacles_by_category(msg.obstacles)
        self._detected_obstacles.append((game_time, vehicles + people))
        # Two metrics: 1) mAP, and 2) timely-mAP
        if self._flags.detection_metric == 'mAP':
            # We will compare the obstacles with the ground truth at the same
            # game time.
            heapq.heappush(self._detector_start_end_times,
                           (game_time, game_time))
        elif self._flags.detection_metric == 'timely-mAP':
            # Ground obstacles time should be as close as possible to the time
            # of the obstacles + detector runtime.
            ground_obstacles_time = self.__compute_closest_frame_time(
                game_time + msg.runtime)
            # Round time to nearest frame.
            heapq.heappush(self._detector_start_end_times,
                           (ground_obstacles_time, game_time))
        else:
            raise ValueError('Unexpected detection metric {}'.format(
                self._flags.detection_metric))

    def on_ground_obstacles(self, msg):
        """Buffers ground-truth obstacles received from the simulator."""
        self._logger.debug('@{}: {} received ground obstacles'.format(
            msg.timestamp, self.config.name))
        game_time = msg.timestamp.coordinates[0]
        vehicles, people, _ = self.__get_obstacles_by_category(msg.obstacles)
        self._ground_obstacles.append((game_time, people + vehicles))

    def __compute_closest_frame_time(self, time):
        """Rounds time to the closest simulator frame time.

        Fixed: the original used true division (``/``), which in Python 3
        made ``base`` equal to ``int(time)`` itself rather than the largest
        frame-interval multiple below it; floor division restores the
        intended rounding.
        """
        base = int(time) // self._sim_interval * self._sim_interval
        if time - base < self._sim_interval / 2:
            return base
        else:
            return base + self._sim_interval

    def __get_obstacles_by_category(self, obstacles):
        """ Divides perception.detection.utils.DetectedObstacle by labels."""
        vehicles = []
        people = []
        traffic_lights = []
        for obstacle in obstacles:
            if (obstacle.label in
                    pylot.perception.detection.utils.VEHICLE_LABELS):
                vehicles.append(obstacle)
            elif obstacle.label == 'person':
                people.append(obstacle)
            elif obstacle.label == 'traffic_light':
                traffic_lights.append(obstacle)
            else:
                self._logger.warning('Unexpected label {}'.format(
                    obstacle.label))
        return vehicles, people, traffic_lights
|
mirkomorati/elaborato_ING_SW | doc/doxygen/html/search/classes_0.js | var searchData =
[
['aboutdialog', ['AboutDialog', ['../d7/d05/classmm_1_1_about_dialog.html', 1, 'mm']]],
['addpatientdialog', ['AddPatientDialog', ['../d7/d32/classmm_1_1_add_patient_dialog.html', 1, 'mm']]],
['addprescriptiondialog', ['AddPrescriptionDialog', ['../db/d16/classmm_1_1_add_prescription_dialog.html', 1, 'mm']]]
];
|
alexey-lukyanenko/jdrive | src/game/util/Trackdir.java | package game.util;
/*
public enum Trackdir {
TRACKDIR_DIAG1_NE ( 0),
TRACKDIR_DIAG2_SE ( 1),
TRACKDIR_UPPER_E ( 2),
TRACKDIR_LOWER_E ( 3),
TRACKDIR_LEFT_S ( 4),
TRACKDIR_RIGHT_S ( 5),
//* Note the two missing values here. This enables trackdir -> track
// * conversion by doing (trackdir & 7) * /
TRACKDIR_DIAG1_SW ( 8),
TRACKDIR_DIAG2_NW ( 9),
TRACKDIR_UPPER_W ( 10),
TRACKDIR_LOWER_W ( 11),
TRACKDIR_LEFT_N ( 12),
TRACKDIR_RIGHT_N ( 13),
TRACKDIR_END(14),
INVALID_TRACKDIR ( 0xFF),
;
//** These are a combination of tracks and directions. Values are 0-5 in one
//direction (corresponding to the Track enum) and 8-13 in the other direction. *
//
//typedef enum TrackdirBits {
public static final int TRACKDIR_BIT_DIAG1_NE = 0x1;
public static final int TRACKDIR_BIT_DIAG2_SE = 0x2;
public static final int TRACKDIR_BIT_UPPER_E = 0x4;
public static final int TRACKDIR_BIT_LOWER_E = 0x8;
public static final int TRACKDIR_BIT_LEFT_S = 0x10;
public static final int TRACKDIR_BIT_RIGHT_S = 0x20;
//* Again; note the two missing values here. This enables trackdir -> track conversion by doing (trackdir & 0xFF)
public static final int TRACKDIR_BIT_DIAG1_SW = 0x0100;
public static final int TRACKDIR_BIT_DIAG2_NW = 0x0200;
public static final int TRACKDIR_BIT_UPPER_W = 0x0400;
public static final int TRACKDIR_BIT_LOWER_W = 0x0800;
public static final int TRACKDIR_BIT_LEFT_N = 0x1000;
public static final int TRACKDIR_BIT_RIGHT_N = 0x2000;
public static final int TRACKDIR_BIT_MASK = 0x3F3F;
public static final int INVALID_TRACKDIR_BIT = 0xFFFF;
// } TrackdirBits;
private int value;
private Trackdir(int value)
{
this.value = value;
}
public int getValue() {
return value;
}
public static final Trackdir values[] = values();
//* Maps a trackdir to the (4-way) direction the tile is exited when following
// * that trackdir *
private static final DiagDirection _trackdir_to_exitdir[] = {
DIAGDIR_NE,DIAGDIR_SE,DIAGDIR_NE,DIAGDIR_SE,DIAGDIR_SW,DIAGDIR_SE, DIAGDIR_NE,DIAGDIR_NE,
DIAGDIR_SW,DIAGDIR_NW,DIAGDIR_NW,DIAGDIR_SW,DIAGDIR_NW,DIAGDIR_NE,
};
//**
// * Maps a trackdir to the (4-way) direction the tile is exited when following
// * that trackdir.
// *
public static DiagDirection TrackdirToExitdir()
{
//extern const DiagDirection _trackdir_to_exitdir[TRACKDIR_END];
return _trackdir_to_exitdir[ordinal()];
}
static Trackdir fromInt(int direction) {
return values[direction];
}
}
*/
|
dgusoff/cas | support/cas-server-support-aup-core/src/main/java/org/apereo/cas/aup/AcceptableUsagePolicyRepository.java | package org.apereo.cas.aup;
import org.springframework.webflow.execution.RequestContext;
import java.io.Serializable;
import java.util.Optional;
/**
* This is {@link AcceptableUsagePolicyRepository}.
*
* @author <NAME>
* @since 4.2
*/
public interface AcceptableUsagePolicyRepository extends Serializable {
    /**
     * Verify whether the policy is accepted.
     *
     * @param requestContext the request context
     * @return result/status indicating whether the policy is accepted, along with the principal.
     */
    AcceptableUsagePolicyStatus verify(RequestContext requestContext);

    /**
     * Record the fact that the policy is accepted.
     *
     * @param requestContext the request context
     * @return true if the choice was saved.
     */
    boolean submit(RequestContext requestContext);

    /**
     * Fetch the policy terms, if any are available.
     *
     * @param requestContext the request context
     * @return the optional policy terms
     */
    Optional<AcceptableUsagePolicyTerms> fetchPolicy(RequestContext requestContext);
}
|
Semicheche/foa_frappe_docker | frappe-bench/apps/erpnext/erpnext/manufacturing/page/production_analytics/production_analytics.js | // Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
// License: GNU General Public License v3. See license.txt
// Page bootstrap: build the standard single-column app page, instantiate the
// analytics report, and register the Manufacturing breadcrumb.
frappe.pages['production-analytics'].on_page_load = function(wrapper) {
	frappe.ui.make_app_page({
		parent: wrapper,
		title: __('Production Analytics'),
		single_column: true
	});
	new erpnext.ProductionAnalytics(wrapper);
	frappe.breadcrumbs.add("Manufacturing");
}
// Grid report that buckets Work Orders into status rows (all / not started /
// overdue / pending / completed) across date-range columns.
erpnext.ProductionAnalytics = frappe.views.GridReportWithPlot.extend({
	init: function(wrapper) {
		this._super({
			title: __("Production Analytics"),
			parent: $(wrapper).find('.layout-main'),
			page: wrapper.page,
			doctypes: ["Item", "Company", "Fiscal Year", "Work Order"]
		});
	},
	// First column is the status label; the rest are generated date-range
	// columns (one per period of the selected range).
	setup_columns: function() {
		var std_columns = [
			{id: "name", name: __("Status"), field: "name", width: 100}
		];
		this.make_date_range_columns();
		this.columns = std_columns.concat(this.columns);
	},
	filters: [
		{fieldtype:"Select", label: __("Company"), link:"Company", fieldname: "company",
			default_value: __("Select Company...")},
		{fieldtype:"Date", label: __("From Date"), fieldname: "from_date"},
		{fieldtype:"Date", label: __("To Date"), fieldname: "to_date"},
		{fieldtype:"Select", label: __("Range"), fieldname: "range",
			options:[{label: __("Daily"), value: "Daily"}, {label: __("Weekly"), value: "Weekly"},
				{label: __("Monthly"), value: "Monthly"}, {label: __("Quarterly"), value: "Quarterly"},
				{label: __("Yearly"), value: "Yearly"}]}
	],
	setup_filters: function() {
		var me = this;
		this._super();
		// Re-run the report whenever company or range changes.
		this.trigger_refresh_on_change(["company"]);
		this.trigger_refresh_on_change(["range"]);
		this.show_zero_check();
	},
	init_filter_values: function() {
		this._super();
		this.filter_inputs.range.val('Monthly');
	},
	setup_chart: function() {
		var me = this;
		var chart_data = this.get_chart_data ? this.get_chart_data() : null;
		const parent = this.wrapper.find('.chart')[0];
		this.chart = new Chart(parent, {
			height: 200,
			data: chart_data,
			type: 'line'
		});
	},
	// Defaults to a window from 12 months back to 1 month ahead.
	set_default_values: function() {
		var values = {
			from_date: frappe.datetime.str_to_user(frappe.datetime.add_months(frappe.datetime.now_datetime(),-12) ),
			to_date: frappe.datetime.str_to_user(frappe.datetime.add_months(frappe.datetime.now_datetime(),1))
		}
		var me = this;
		$.each(values, function(i, v) {
			if(me.filter_inputs[i] && !me.filter_inputs[i].val())
				me.filter_inputs[i].val(v);
		})
	},
	prepare_data: function() {
		// add Opening, Closing, Totals rows
		// if filtered by account and / or voucher
		var me = this;
		// One accumulator row per status bucket; per-period counts are added
		// under each column's `field` key.
		var all_open_orders = {name:"All Work Orders", "id": "all-open-pos",
			checked:true};
		var not_started = {name:"Not Started", "id":"not-started-pos",
			checked:true};
		var overdue = {name:"Overdue (Not Started)", "id":"overdue-pos",
			checked:true};
		var pending = {name:"Pending", "id":"pending-pos",
			checked:true};
		var completed = {name:"Completed", "id":"completed-pos",
			checked:true};
		$.each(frappe.report_dump.data["Work Order"], function(i, d) {
			var dateobj = frappe.datetime.str_to_obj(d.creation);
			var date = frappe.datetime.str_to_user(d.creation.split(" ")[0]);
			// Skip the first two columns (status label + first data column
			// boundary); each remaining column is a date period.
			$.each(me.columns, function(i,col) {
				if (i > 1){
					var start_period = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(col.id));
					var end_period = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(col.name));
					var astart_date = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(d.actual_start_date));
					var planned_start_date = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(d.planned_start_date));
					var aend_date = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(d.actual_end_date));
					var modified = frappe.datetime.user_to_obj(frappe.datetime.str_to_user(d.modified));
					if (dateobj <= start_period || dateobj <= end_period) {
						all_open_orders[col.field] = flt(all_open_orders[col.field]) + 1;
						// Classify the order's state as of this period using
						// actual/planned dates and the last-modified fallback.
						if(d.status=="Completed") {
							if(aend_date < start_period || modified < start_period) {
								completed[col.field] = flt(completed[col.field]) + 1;
							}
							else if (astart_date < start_period) {
								pending[col.field] = flt(pending[col.field]) + 1;
							}
							else if (planned_start_date < start_period) {
								overdue[col.field] = flt(overdue[col.field]) + 1;
							} else {
								not_started[col.field] = flt(not_started[col.field]) + 1;
							}
						}else if(d.status == "In Process")
						{
							if (astart_date < start_period || modified < start_period){
								pending[col.field] = flt(pending[col.field]) + 1;
							}else if (planned_start_date < start_period) {
								overdue[col.field] = flt(overdue[col.field]) + 1;
							}else{
								not_started[col.field] = flt(not_started[col.field]) + 1;
							}
						}else if(d.status == "Not Started") {
							if (planned_start_date < start_period){
								overdue[col.field] = flt(overdue[col.field]) + 1;
							}else{
								not_started[col.field] = flt(not_started[col.field]) + 1;
							}
						}
					}
				}
			});
		});
		// Hide the chart when there are too many periods to plot legibly.
		if(me.columns.length < 30){
			this.chart_area.toggle(true);
		}else {
			this.chart_area.toggle(false);
		}
		this.data = [all_open_orders, not_started, overdue, pending, completed];
	}
});
|
fujy/ROS-Project | src/rbx2/rbx2_arm_nav/scripts/moveit_fk_demo.py | #!/usr/bin/env python
"""
moveit_fk_demo.py - Version 0.1 2014-01-14
Use forward kinemtatics to move the arm to a specified set of joint angles
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2014 <NAME>. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy, sys
import moveit_commander
from control_msgs.msg import GripperCommand
class MoveItDemo:
    """Forward-kinematics demo: drives the right arm and gripper through a
    scripted sequence of named poses and joint-value targets via MoveIt."""

    def __init__(self):
        """Runs the whole demo sequence; blocks until it finishes, then
        shuts down MoveIt and exits the process."""
        # Initialize the move_group API
        moveit_commander.roscpp_initialize(sys.argv)
        # Initialize the ROS node
        rospy.init_node('moveit_demo', anonymous=True)
        # Gripper joint-value targets (single-joint lists).
        GRIPPER_OPEN = [0.04]
        GRIPPER_CLOSED = [-0.03]
        GRIPPER_NEUTRAL = [0.01]
        # Connect to the right_arm move group
        right_arm = moveit_commander.MoveGroupCommander('right_arm')
        # Connect to the right_gripper move group
        right_gripper = moveit_commander.MoveGroupCommander('right_gripper')
        # Get the name of the end-effector link
        end_effector_link = right_arm.get_end_effector_link()
        # Display the name of the end_effector link
        rospy.loginfo("The end effector link is: " + str(end_effector_link))
        # Set a small tolerance on joint angles
        right_arm.set_goal_joint_tolerance(0.001)
        right_gripper.set_goal_joint_tolerance(0.001)
        # Start the arm target in "resting" pose stored in the SRDF file
        right_arm.set_named_target('resting')
        # Plan a trajectory to the goal configuration
        traj = right_arm.plan()
        # Execute the planned trajectory
        right_arm.execute(traj)
        # Pause for a moment
        rospy.sleep(1)
        # Set the gripper target to neutal position using a joint value target
        right_gripper.set_joint_value_target(GRIPPER_NEUTRAL)
        # Plan and execute the gripper motion
        right_gripper.go()
        rospy.sleep(1)
        # Set target joint values for the arm: joints are in the order they appear in
        # the kinematic tree.
        joint_positions = [-0.0867, -1.274, 0.02832, 0.0820, -1.273, -0.003]
        # Set the arm's goal configuration to the be the joint positions
        right_arm.set_joint_value_target(joint_positions)
        # Plan and execute the motion
        right_arm.go()
        rospy.sleep(1)
        # Save this configuration for later
        right_arm.remember_joint_values('saved_config', joint_positions)
        # Close the gripper as if picking something up
        right_gripper.set_joint_value_target(GRIPPER_CLOSED)
        right_gripper.go()
        rospy.sleep(1)
        # Set the arm target to the named "straight_forward" pose stored in the SRDF file
        right_arm.set_named_target('straight_forward')
        # Plan and execute the motion
        right_arm.go()
        rospy.sleep(1)
        # Set the goal configuration to the named configuration saved earlier
        right_arm.set_named_target('saved_config')
        # Plan and execute the motion
        right_arm.go()
        rospy.sleep(1)
        # Open the gripper as if letting something go
        right_gripper.set_joint_value_target(GRIPPER_OPEN)
        right_gripper.go()
        rospy.sleep(1)
        # Return the arm to the named "resting" pose stored in the SRDF file
        right_arm.set_named_target('resting')
        right_arm.go()
        rospy.sleep(1)
        # Return the gripper target to neutral position
        right_gripper.set_joint_value_target(GRIPPER_NEUTRAL)
        right_gripper.go()
        rospy.sleep(1)
        # Cleanly shut down MoveIt
        moveit_commander.roscpp_shutdown()
        # Exit the script
        moveit_commander.os._exit(0)
if __name__ == "__main__":
    try:
        MoveItDemo()
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown while sleeping is expected; exit quietly.
        pass
|
SummitRobotics/FRC2022 | src/main/java/frc/robot/commands/conveyor/NoI2cConveyor.java | // Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.
package frc.robot.commands.conveyor;
import edu.wpi.first.wpilibj2.command.CommandBase;
import frc.robot.subsystems.Conveyor;
import frc.robot.subsystems.Shooter;
import frc.robot.subsystems.Intake;
/**
*
*/
/**
 * Conveyor control command that does not rely on the I2C sensors (per the
 * class name; confirm against the Conveyor subsystem docs).
 *
 * <p>While the intake is down the belt runs to pull balls in; while the
 * shooter reports ready-to-fire the belt runs and the index wheel is driven
 * to a feed position; otherwise both motors are stopped.
 */
public class NoI2cConveyor extends CommandBase {

    /** Power applied to the belt motor when moving balls (fraction of full output). */
    private static final double BELT_POWER = 0.5;

    /** Index wheel target position used when feeding the shooter. */
    private static final int INDEX_TARGET_POSITION = 10;

    private final Conveyor conveyor;
    private final Shooter shooter;
    private final Intake intake;

    /**
     * Creates a new NoI2cConveyor.
     *
     * @param conveyor the conveyor subsystem to drive
     * @param shooter  the shooter subsystem (queried for READY_TO_FIRE)
     * @param intake   the intake subsystem (queried for DOWN)
     */
    public NoI2cConveyor(Conveyor conveyor, Shooter shooter, Intake intake) {
        this.intake = intake;
        this.conveyor = conveyor;
        this.shooter = shooter;
        // Declare subsystem dependencies so the scheduler serializes access.
        addRequirements(shooter, conveyor, intake);
    }

    // Called when the command is initially scheduled.
    @Override
    public void initialize() {}

    // Called every time the scheduler runs while the command is scheduled.
    @Override
    public void execute() {
        if (intake.getState() == Intake.States.DOWN) {
            conveyor.setBeltMotorPower(BELT_POWER);
        } else if (shooter.getState() == Shooter.States.READY_TO_FIRE) {
            conveyor.setBeltMotorPower(BELT_POWER);
            conveyor.setIndexTargetPosition(INDEX_TARGET_POSITION);
        } else {
            conveyor.setIndexMotorPower(0);
            conveyor.setBeltMotorPower(0);
        }
    }

    // Called once the command ends or is interrupted.
    @Override
    public void end(boolean interrupted) {
        conveyor.stop();
    }

    /** Never finishes on its own; runs until interrupted. */
    @Override
    public boolean isFinished() {
        return false;
    }
}
|
imaginate/vitals | src/methods/fs/get.js | /**
* -----------------------------------------------------------------------------
* VITALS FS METHOD: get
* -----------------------------------------------------------------------------
* @section fs
* @version 4.1.3
* @see [vitals.get]{@link https://github.com/imaginate/vitals/wiki/vitals.get}
*
* @author <NAME> <<EMAIL>> (https://github.com/imaginate)
* @copyright 2017 <NAME> <<EMAIL>> (https://github.com/imaginate)
*
* Annotations:
* @see [JSDoc3](http://usejsdoc.org)
* @see [Closure Compiler JSDoc Syntax](https://developers.google.com/closure/compiler/docs/js-for-compiler)
*/
'use strict';
var newErrorMaker = require('../helpers/new-error-maker.js');
var normalize = require('../helpers/normalize.js');
var own = require('../helpers/own.js');
var _is = require('./helpers/is.js');
var fs = require('fs');
var get = {};
////////////////////////////////////////////////////////////////////////////////
// VITALS FS METHOD: get
////////////////////////////////////////////////////////////////////////////////
(function fsGetPrivateScope() {
//////////////////////////////////////////////////////////
// PUBLIC METHODS
// - get.file
// - get.dirpaths
// - get.filepaths
//////////////////////////////////////////////////////////
/**
* Gets the contents of a file.
*
* @public
* @param {string} filepath
* @param {(boolean|Object)=} opts - A boolean value sets opts.buffer.
* @param {boolean=} opts.buffer - [default= false] If `true` a buffer is
* returned.
* @param {string=} opts.encoding - [default= "utf8"]
* @param {?string=} opts.eol - [default= "LF"] The end of line character
* to use when normalizing the result. If opts.eol is `null` or opts.buffer
* is `true` no normalization is completed.
* Optional values:
* - `"LF"`
* - `"CR"`
* - `"CRLF"`
* @return {(!Buffer|string)}
*/
get.file = function getFile(filepath, opts) {
opts = _is.bool(opts) ? { buffer: opts } : opts;
if ( !_is.file(filepath) ) throw _error.type('filepath', 'file');
if ( !_is.nil.un.obj(opts) ) throw _error.type('opts', 'file');
if (opts) {
if ( !_is.un.bool(opts.buffer) ) throw _error.type('opts.buffer', 'file');
if ( !_is.un.str(opts.encoding) ) throw _error.type('opts.encoding', 'file');
if ( !_is.nil.un.str(opts.eol) ) throw _error.type('opts.eol', 'file');
if ( opts.eol && !_is.eol(opts.eol) ) throw _error.range('opts.eol', '"LF", "CR", "CRLF"', 'file');
}
opts = _prepOptions(opts);
return _getFile(filepath, opts);
};
/**
* Gets all of the directory paths in a directory.
*
* @public
* @param {string} dirpath - Must be a valid directory.
* @param {(boolean|Object)=} opts - A boolean value sets opts.deep.
* @param {boolean=} opts.deep - [default= false] Whether to include sub
* directories.
* @param {boolean=} opts.recursive - Alias for opts.deep.
* @param {boolean=} opts.base - [default= false] Whether to append the base
* dirpath to the results.
* @param {boolean=} opts.basepath - Alias for opts.base.
* @param {(RegExp|Array<string>|?string)=} opts.validDirs - If string use
* `"|"` to separate valid directory names.
* @param {(RegExp|Array<string>|?string)=} opts.invalidDirs - If string use
* `"|"` to separate invalid directory names.
* @return {!Array<string>}
*/
get.dirpaths = function getDirpaths(dirpath, opts) {
/** @type {!Array<string>} */
var dirpaths;
/** @type {function(string): boolean} */
var isValid;
opts = _is.bool(opts) ? { deep: opts } : opts;
if ( !_is.dir(dirpath) ) throw _error.type('dirpath', 'dirpaths');
if ( !_is.nil.un.obj(opts) ) throw _error.type('opts', 'dirpaths');
if (opts) {
if ( !_is.un.bool(opts.deep) ) throw _error.type('opts.deep', 'dirpaths');
if ( !_is.un.bool(opts.recursive) ) throw _error.type('opts.recursive', 'dirpaths');
if ( !_is.un.bool(opts.base) ) throw _error.type('opts.base', 'dirpaths');
if ( !_is.un.bool(opts.basepath) ) throw _error.type('opts.basepath', 'dirpaths');
if ( !_isValid(opts.validDirs) ) throw _error.type('opts.validDirs', 'dirpaths');
if ( !_isValid(opts.invalidDirs) ) throw _error.type('opts.invalidDirs', 'dirpaths');
}
dirpath = _prepDir(dirpath);
opts = _parseOptions(opts);
isValid = _makeTest(opts.validDirs, opts.invalidDirs);
dirpaths = opts.deep
? _getDirpathsDeep(dirpath, isValid)
: _getDirpaths(dirpath, isValid);
return opts.base ? _addBasepath(dirpaths, dirpath) : dirpaths;
};
/**
* Gets all of the file paths in a directory.
*
* @public
* @param {string} dirpath - Must be a valid directory.
* @param {(boolean|Object)=} opts - A boolean value sets opts.deep.
* @param {boolean=} opts.deep - [default= false] Whether to include
* sub-directory files.
* @param {boolean=} opts.recursive - Alias for opts.deep.
* @param {boolean=} opts.base - [default= false] Whether to append the base
* dirpath to the results.
* @param {boolean=} opts.basepath - Alias for opts.base.
* @param {(RegExp|Array<string>|?string)=} opts.validDirs
* @param {(RegExp|Array<string>|?string)=} opts.validExts - [.]ext
* @param {(RegExp|Array<string>|?string)=} opts.validNames - filename
* @param {(RegExp|Array<string>|?string)=} opts.validFiles - filename.ext
* @param {(RegExp|Array<string>|?string)=} opts.invalidDirs
* @param {(RegExp|Array<string>|?string)=} opts.invalidExts - [.]ext
* @param {(RegExp|Array<string>|?string)=} opts.invalidNames - filename
* @param {(RegExp|Array<string>|?string)=} opts.invalidFiles - filename.ext
* @return {!Array<string>}
*/
get.filepaths = function getFilepaths(dirpath, opts) {
/** @type {function(string): boolean} */
var isValidDir;
/** @type {!Array<string>} */
var filepaths;
/** @type {function(string): boolean} */
var isValid;
/** @type {!Array} */
var invalid;
/** @type {!Array} */
var valid;
opts = _is.bool(opts) ? { deep: opts } : opts;
if ( !_is.dir(dirpath) ) throw _error.type('dirpath', 'filepaths');
if ( !_is.nil.un.obj(opts) ) throw _error.type('opts', 'filepaths');
if (opts) {
if ( !_is.un.bool(opts.deep) ) throw _error.type('opts.deep', 'filepaths');
if ( !_is.un.bool(opts.recursive) ) throw _error.type('opts.recursive', 'filepaths');
if ( !_is.un.bool(opts.base) ) throw _error.type('opts.base', 'filepaths');
if ( !_is.un.bool(opts.basepath) ) throw _error.type('opts.basepath', 'filepaths');
if ( !_isValid(opts.validDirs) ) throw _error.type('opts.validDirs', 'filepaths');
if ( !_isValid(opts.validExts) ) throw _error.type('opts.validExts', 'filepaths');
if ( !_isValid(opts.validNames) ) throw _error.type('opts.validNames', 'filepaths');
if ( !_isValid(opts.validFiles) ) throw _error.type('opts.validFiles', 'filepaths');
if ( !_isValid(opts.invalidDirs) ) throw _error.type('opts.invalidDirs', 'filepaths');
if ( !_isValid(opts.invalidExts) ) throw _error.type('opts.invalidExts', 'filepaths');
if ( !_isValid(opts.invalidNames) ) throw _error.type('opts.invalidNames', 'filepaths');
if ( !_isValid(opts.invalidFiles) ) throw _error.type('opts.invalidFiles', 'filepaths');
}
dirpath = _prepDir(dirpath);
opts = _parseOptions(opts);
valid = [ opts.validExts, opts.validNames, opts.validFiles ];
invalid = [ opts.invalidExts, opts.invalidNames, opts.invalidFiles ];
isValid = _makeTest(valid, invalid);
if (opts.deep) {
isValidDir = _makeTest(opts.validDirs, opts.invalidDirs);
filepaths = _getFilepathsDeep(dirpath, isValid, isValidDir);
}
else filepaths = _getFilepaths(dirpath, isValid);
return opts.base ? _addBasepath(filepaths, dirpath) : filepaths;
};
//////////////////////////////////////////////////////////
// PRIVATE METHODS - MAIN
//////////////////////////////////////////////////////////
/**
* @private
* @param {string} source
* @param {!Object} options
* @return {(string|Buffer)}
*/
function _getFile(source, options) {
/** @type {string} */
var contents;
if (options.buffer) return fs.readFileSync(source);
contents = fs.readFileSync(source, options.encoding);
return options.eol ? normalize(contents, options.eol) : contents;
}
/**
* @private
* @param {string} basepath
* @param {function(string): boolean} isValid
* @return {!Array<string>}
*/
function _getDirpaths(basepath, isValid) {
/** @type {!Array<string>} */
var dirpaths;
/** @type {!Array<string>} */
var newpaths;
/** @type {string} */
var dirpath;
/** @type {number} */
var len;
/** @type {number} */
var i;
dirpaths = fs.readdirSync(basepath);
newpaths = [];
len = dirpaths.length;
i = -1;
while (++i < len) {
dirpath = dirpaths[i];
isValid(dirpath) && _is.dir(basepath + dirpath) && newpaths.push(dirpath);
}
return newpaths;
}
/**
* @private
* @param {string} basepath
* @param {function(string): boolean} isValid
* @return {!Array<string>}
*/
function _getDirpathsDeep(basepath, isValid) {
/** @type {!Array<string>} */
var dirpaths;
/** @type {!Array<string>} */
var newpaths;
/** @type {string} */
var dirpath;
/** @type {number} */
var len;
/** @type {number} */
var ii;
/** @type {number} */
var i;
dirpaths = _getDirpaths(basepath, isValid);
i = -1;
while (++i < dirpaths.length) {
dirpath = _prepDir(dirpaths[i]);
newpaths = _getDirpaths(basepath + dirpath, isValid);
len = newpaths.length;
ii = -1;
while (++ii < len) dirpaths.push(dirpath + newpaths[ii]);
}
return dirpaths;
}
/**
* @private
* @param {string} basepath
* @param {function(string): boolean} isValid
* @return {!Array<string>}
*/
function _getFilepaths(basepath, isValid) {
/** @type {!Array<string>} */
var filepaths;
/** @type {!Array<string>} */
var newpaths;
/** @type {string} */
var filepath;
/** @type {number} */
var len;
/** @type {number} */
var i;
filepaths = fs.readdirSync(basepath);
newpaths = [];
len = filepaths.length;
i = -1;
while (++i < len) {
filepath = filepaths[i];
isValid(filepath) && _is.file(basepath + filepath) && newpaths.push(filepath);
}
return newpaths;
}
/**
* @private
* @param {string} basepath
* @param {function(string): boolean} isValid
* @param {function(string): boolean} isValidDir
* @return {!Array<string>}
*/
function _getFilepathsDeep(basepath, isValid, isValidDir) {
/** @type {!Array<string>} */
var filepaths;
/** @type {!Array<string>} */
var dirpaths;
/** @type {!Array<string>} */
var newpaths;
/** @type {string} */
var dirpath;
/** @type {number} */
var _len;
/** @type {number} */
var len;
/** @type {number} */
var _i;
/** @type {number} */
var i;
filepaths = _getFilepaths(basepath, isValid);
dirpaths = _getDirpathsDeep(basepath, isValidDir);
len = dirpaths.length;
i = -1;
while (++i < len) {
dirpath = _prepDir(dirpaths[i]);
newpaths = _getFilepaths(basepath + dirpath, isValid);
_len = newpaths.length;
_i = -1;
while (++_i < _len) filepaths.push(dirpath + newpaths[_i]);
}
return filepaths;
}
//////////////////////////////////////////////////////////
// PRIVATE METHODS - PREP
//////////////////////////////////////////////////////////
/**
* @private
* @param {string} dirpath
* @return {string}
*/
function _prepDir(dirpath) {
return dirpath.replace(/[^\/]$/, '$&/');
}
/**
* @private
* @param {Object} opts
* @return {!Object}
*/
function _prepOptions(opts) {
opts = opts || {};
opts.encoding = opts.encoding || 'utf8';
opts.eol = _is.undefined(opts.eol) ? 'LF' : opts.eol;
opts.eol = opts.eol && opts.eol.toUpperCase();
return opts;
}
/**
* @private
* @param {!Array<string>} paths
* @param {string} basepath
* @return {!Array<string>}
*/
function _addBasepath(paths, basepath) {
/** @type {number} */
var len;
/** @type {number} */
var i;
len = paths.length;
i = -1;
while (++i < len) paths[i] = basepath + paths[i];
return paths;
}
//////////////////////////////////////////////////////////
// PRIVATE METHODS - OPTION PARSING
//////////////////////////////////////////////////////////
/**
* @private
* @type {!RegExp}
* @const
*/
var ESCAPE_CHARS = /[\\^$.+?(){}[\]]/g;
/**
* @private
* @type {!RegExp}
* @const
*/
var VALID = /^(?:in)?valid([A-Z][a-z]+)s$/;
/**
* @private
* @param {Object=} options
* @return {!Object}
*/
function _parseOptions(options) {
/** @type {!Object} */
var opts;
/** @type {string} */
var key;
if (!options) return {};
options.deep = _is.bool(options.deep) ? options.deep : options.recursive;
options.base = _is.bool(options.base) ? options.base : options.basepath;
opts = {};
for (key in options) {
if ( own(options, key) ) {
opts[key] = VALID.test(key)
? _parseOption(options[key], key.replace(VALID, '$1'))
: options[key];
}
}
return opts;
}
/**
* @private
* @param {(RegExp|Array<string>|?string|undefined)} option
* @param {string} type
* @return {?RegExp}
*/
function _parseOption(option, type) {
if (!option) return null;
type = type.toLowerCase();
option = _is.arr(option) ? option.join('|') : option;
return _is.str(option) ? _parseOptStr(option, type) : option;
}
/**
* @private
* @param {string} option
* @param {string} type
* @return {!RegExp}
*/
function _parseOptStr(option, type) {
if (type === 'ext') option = option.replace(/\B\.\b/g, '');
option = option.replace(ESCAPE_CHARS, '\\$&');
option = option.replace(/(\\)?\*/g, function(org, match) {
return match === '\\' ? org : '.*';
});
switch (type) {
case 'dir' :
case 'file': option = '^(?:' + option + ')$'; break;
case 'ext' : option = '^.*\\.(?:' + option + ')$'; break;
case 'name': option = '^(?:' + option + ')(?:\\.[a-z]+)+$';
}
return new RegExp(option, 'i');
}
//////////////////////////////////////////////////////////
// PRIVATE METHODS - TEST FACTORIES
//////////////////////////////////////////////////////////
/**
* @private
* @param {(Array|RegExp)} valid
* @param {(Array|RegExp)} invalid
* @return {function}
*/
function _makeTest(valid, invalid) {
/** @type {function(string): boolean} */
var isInvalid;
/** @type {function(string): boolean} */
var isValid;
isInvalid = _makeCheck(false, invalid);
isValid = _makeCheck(true, valid);
return function isValidPath(str) {
return isInvalid(str) ? false : isValid(str);
};
}
/**
* @private
* @param {boolean} valid
* @param {(Array|RegExp)} regexps
* @return {function}
*/
function _makeCheck(valid, regexps) {
/** @type {number} */
var i;
if ( !_is.arr(regexps) ) return _makeOneCheck(valid, regexps);
i = regexps.length;
while (i--) regexps[i] || regexps.splice(i, 1);
return regexps.length > 1
? valid
? _makeValidCheck(regexps)
: _makeInvalidCheck(regexps)
: _makeOneCheck(valid, regexps[0]);
}
/**
* @private
* @param {boolean} valid
* @param {?RegExp=} regex
* @return {function}
*/
function _makeOneCheck(valid, regex) {
return valid
? regex
? function isValid(str) { return regex.test(str); }
: function isValid() { return true; }
: regex
? function isInvalid(str) { return regex.test(str); }
: function isInvalid() { return false; };
}
/**
* @private
* @param {!Array<!RegExp>} regexps
* @return {function}
*/
function _makeValidCheck(regexps) {
/** @type {number} */
var len;
len = regexps.length;
return function isValid(str) {
/** @type {number} */
var i;
i = -1;
while (++i < len) {
if ( !regexps[i].test(str) ) return false;
}
return true;
};
}
/**
* @private
* @param {!Array<!RegExp>} regexps
* @return {function}
*/
function _makeInvalidCheck(regexps) {
/** @type {number} */
var len;
len = regexps.length;
return function isInvalid(str) {
/** @type {number} */
var i;
i = -1;
while (++i < len) {
if ( regexps[i].test(str) ) return true;
}
return false;
};
}
//////////////////////////////////////////////////////////
// PRIVATE METHODS - GENERAL
//////////////////////////////////////////////////////////
/**
* @private
* @type {!ErrorAid}
*/
var _error = newErrorMaker('get');
/**
* @param {*} val
* @return {boolean}
*/
function _isValid(val) {
return val
? _is.regex(val) || _is.str(val) || _is.arr(val)
: _is.nil.un.str(val);
}
//////////////////////////////////////////////////////////
// END OF PRIVATE SCOPE FOR GET
})();
module.exports = get;
|
MortalViews/tao1 | tao1/sites/dao/apps/app/view.py | <gh_stars>10-100
import sys, os, time, jinja2, aiohttp_jinja2
from aiohttp import web
import aiohttp
from aiohttp.web import Application, Response, MsgType, WebSocketResponse
from core.union import cache
# from pymongo import *
# from gridfs import GridFS
from aiohttp_session import get_session
@cache("main_page", expire=7)
async def page(request):
return templ('apps.app:index', request, {'key':'val'} )
async def test_db(request):
    # Smoke-test endpoint: touches the session, writes a fixed document to
    # the `doc` collection, reads it back, and renders it.
    session = await get_session(request)
    session['last_visit'] = time.time()
    # Upserts a document with a hard-coded id "test".
    request.db.doc.save({"_id":"test", "val":"test_db", "status":"success"})
    val = request.db.doc.find_one({"_id":"test"})
    return templ('apps.app:db_test', request, {'key':val})
async def ws(request):
    # Serves the chat page whose client side connects to the websocket
    # endpoint handled by ws_handler below.
    return templ('apps.app:chat', request, {} )
async def ws_handler(request):
    """Echo-style websocket endpoint.

    Replies to every text frame with ``<data>/answer``, closes on the
    literal message ``'close'``, and logs close/error events.
    """
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    # ws.start(request)
    async for msg in ws:
        if msg.tp == MsgType.text:
            if msg.data == 'close':
                await ws.close()
            else:
                ws.send_str(msg.data + '/answer')
        elif msg.tp == aiohttp.MsgType.close: print('websocket connection closed')
        # Bug fix: print() does not interpolate logger-style '%s' args, so the
        # original printed the literal placeholder; format explicitly.
        elif msg.tp == aiohttp.MsgType.error: print('ws connection closed with exception %s' % ws.exception())
    return ws
|
zhangyugehu/Go-Steps | queue/queue.go | package queue
// Queue is a FIFO queue of ints backed by a slice.
type Queue []int

// Push appends v to the back of the queue.
func (q *Queue) Push(v int) {
	*q = append(*q, v)
}

// Pop removes and returns the element at the front of the queue.
// It returns 0 when the queue is empty (the previous stub always
// returned 0, so the empty-queue behavior is preserved).
func (q *Queue) Pop() int {
	if len(*q) == 0 {
		return 0
	}
	v := (*q)[0]
	*q = (*q)[1:]
	return v
}
|
snltd/wavefront-sdk | spec/wavefront-sdk/usergroup_spec.rb | #!/usr/bin/env ruby
# frozen_string_literal: true
require_relative '../spec_helper'
require_relative '../test_mixins/general'
# Unit tests for WavefrontUserGroup
#
# Unit tests for WavefrontUserGroup: CRUD via the shared mixins plus the
# group-specific add/remove operations for users and roles.
class WavefrontUserGroupTest < WavefrontTestBase
  attr_reader :users, :groups, :permission, :invalid_groups, :invalid_users,
              :roles, :invalid_roles

  # Shared CRUD test cases; they use api_class/id/payload defined below.
  include WavefrontTest::Create
  include WavefrontTest::Delete
  include WavefrontTest::Describe
  include WavefrontTest::List
  include WavefrontTest::Update

  def test_add_users_to_group
    # Valid users POST to the addUsers endpoint with the user list as body.
    assert_posts("/api/v2/usergroup/#{id}/addUsers", users.to_json) do
      wf.add_users_to_group(id, users)
    end
    assert_raises(Wavefront::Exception::InvalidUserId) do
      wf.add_users_to_group(id, invalid_users)
    end
    assert_invalid_id { wf.add_users_to_group(invalid_id, users) }
  end

  def test_remove_users_from_group
    assert_posts("/api/v2/usergroup/#{id}/removeUsers", users.to_json) do
      wf.remove_users_from_group(id, users)
    end
    assert_raises(Wavefront::Exception::InvalidUserId) do
      wf.remove_users_from_group(id, invalid_users)
    end
    assert_invalid_id { wf.remove_users_from_group(invalid_id, users) }
  end

  def test_add_roles_to_group
    assert_posts("/api/v2/usergroup/#{id}/addRoles", roles.to_json) do
      wf.add_roles_to_group(id, roles)
    end
    assert_raises(Wavefront::Exception::InvalidRoleId) do
      wf.add_roles_to_group(id, invalid_roles)
    end
    assert_invalid_id { wf.add_roles_to_group(invalid_id, roles) }
  end

  def test_remove_roles_from_group
    assert_posts("/api/v2/usergroup/#{id}/removeRoles", roles.to_json) do
      wf.remove_roles_from_group(id, roles)
    end
    assert_raises(Wavefront::Exception::InvalidRoleId) do
      wf.remove_roles_from_group(id, invalid_roles)
    end
    assert_invalid_id { wf.remove_roles_from_group(invalid_id, roles) }
  end

  # Fixture data consumed via the attr_readers above. Some readers
  # (groups, permission, invalid_groups) have no uses in the methods visible
  # here; presumably consumed by the shared mixins — verify before removing.
  def setup_fixtures
    @permission = 'alerts_management'
    @invalid_groups = %w[some-nonsense more-nonsense]
    @groups = %w[f8dc0c14-91a0-4ca9-8a2a-7d47f4db4672
                 2659191e-aad4-4302-a94e-9667e1517127]
    @users = %w[<EMAIL> <EMAIL>]
    @roles = %w[abcdef14-91a0-4ca9-8a2a-7d47f4db4672
                fedcba1e-aad4-4302-a94e-9667e1517127]
    @invalid_users = ['bad' * 500, '']
    @invalid_roles = ['some nonsense']
  end

  private

  # API path segment used by the shared test mixins.
  def api_class
    'usergroup'
  end

  def id
    'f8dc0c14-91a0-4ca9-8a2a-7d47f4db4672'
  end

  def invalid_id
    'this is not what you call a group'
  end

  # Body used by the Create/Update mixin tests.
  def payload
    { name: 'test group',
      permissions: %w[alerts_management dashboard_management
                      events_management] }
  end
end
|
atsgen/tf-vro-plugin | o11nplugin-contrail-workflows/src/main/js/workflows/removeRuleFromSecurityGroup.js | var index = ContrailUtils.stringToIndex(rule);
// `index` (computed above from the `rule` input) addresses the policy rule
// to delete. NOTE(review): `rule` and `item` are assumed to be bound by the
// vRO workflow context — confirm against the workflow input bindings.
var list = item.getEntries().getPolicyRule();
// Remove the selected rule in place.
list.splice(index, 1);
// Re-wrap the modified list and persist the change on the Contrail object.
item.setEntries(new ContrailPolicyEntriesType(list));
item.update();
Falumpaset/handson-ml2 | backend/crawler/src/main/java/de/immomio/service/contract/DigitalContractItpStatusService.java | <gh_stars>0
package de.immomio.service.contract;
import de.immomio.data.propertysearcher.entity.itp.ItpCheckResponseBean;
import de.immomio.data.shared.entity.contract.signer.DigitalContractSigner;
import de.immomio.data.shared.entity.contract.signer.history.aes.itp.DigitalContractItpHistory;
import de.immomio.data.shared.entity.contract.signer.history.aes.itp.DigitalContractItpState;
import de.immomio.data.shared.entity.contract.signer.history.aes.itp.ItpMaskedRequestBean;
import de.immomio.itp.client.ItpClient;
import de.immomio.model.repository.core.shared.contract.signer.BaseDigitalContractSignerRepository;
import de.immomio.model.repository.core.shared.contract.signer.aes.BaseDigitalContractItpHistoryRepository;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import static de.immomio.data.shared.entity.contract.signer.history.aes.itp.DigitalContractItpState.INIT;
import static de.immomio.data.shared.entity.contract.signer.history.aes.itp.DigitalContractItpState.PENDING;
import static de.immomio.data.shared.entity.contract.signer.history.aes.itp.DigitalContractItpState.UPLOADED;
@Slf4j
@Service
public class DigitalContractItpStatusService {

    /** ITP process states that mean an identity check is still in progress. */
    private static final List<String> ITP_STATES_IN_PROGRESS = List.of(
            INIT.toString(), UPLOADED.toString(), PENDING.toString());

    /**
     * Maps a mock identcheck UUID to the UUIDs it may transition to next.
     * Used to simulate ITP state progression without calling the real service.
     */
    private static final Map<String, List<String>> ITP_MOCK_UUIDS = Map.of(
            "101", List.of("102", "103", "104", "105"),
            "102", List.of("103", "104", "105"),
            "103", List.of("104", "105"),
            "104", List.of("105")
    );

    private final ItpClient itpClient;
    private final BaseDigitalContractSignerRepository contractSignerRepository;
    private final BaseDigitalContractItpHistoryRepository itpHistoryRepository;

    @Autowired
    public DigitalContractItpStatusService(
            ItpClient itpClient,
            BaseDigitalContractSignerRepository contractSignerRepository,
            BaseDigitalContractItpHistoryRepository itpHistoryRepository
    ) {
        this.itpClient = itpClient;
        this.contractSignerRepository = contractSignerRepository;
        this.itpHistoryRepository = itpHistoryRepository;
    }

    /**
     * Polls the ITP status of every signer whose identity check is still in
     * progress and records any state changes.
     */
    public void handleItpStatusChanges() {
        List<DigitalContractSigner> signers =
                contractSignerRepository.findSignersWithAesItpState(ITP_STATES_IN_PROGRESS);
        // The repository may return null instead of an empty list.
        if (signers != null) {
            signers.forEach(this::identCheckStatus);
        }
    }

    /**
     * Queries the current ITP state for one signer, dispatching to the mock
     * flow when the signer carries one of the well-known mock UUIDs.
     */
    private void identCheckStatus(DigitalContractSigner signer) {
        String identcheckUuid = signer.getAesVerificationData().getIdentcheckUuid();
        log.info("Checking ITP state for {}", identcheckUuid);
        if (ITP_MOCK_UUIDS.containsKey(identcheckUuid)) {
            identCheckStatusMock(signer, identcheckUuid);
        } else {
            // Asynchronous call; the response is handled in the subscriber.
            Mono<ItpCheckResponseBean> itpCheckResponseBeanMono = itpClient.identCheckStatus(identcheckUuid);
            itpCheckResponseBeanMono.subscribe(responseBean -> handleItpStatusResponse(responseBean, signer));
        }
    }

    /**
     * Mock flow: picks a random follow-up UUID for the given mock UUID and
     * queries the mocked status endpoint with it.
     */
    private void identCheckStatusMock(DigitalContractSigner signer, String identcheckUuid) {
        log.info("Mock state {}", identcheckUuid);
        List<String> possibleMockUuids = ITP_MOCK_UUIDS.get(identcheckUuid);
        // nextInt(bound) already yields a value in [0, bound); the previous
        // trailing "% size" was redundant and has been removed.
        int randomElementIndex = ThreadLocalRandom.current().nextInt(possibleMockUuids.size());
        String mockUuid = possibleMockUuids.get(randomElementIndex);
        Mono<ItpCheckResponseBean> itpCheckResponseBeanMono = itpClient.mockedIdentCheckStatus(mockUuid);
        itpCheckResponseBeanMono.subscribe(responseBean -> handleItpStatusResponse(responseBean, signer));
    }

    /**
     * Persists the ITP response as a history entry (with a masked request for
     * audit purposes) and updates the signer's current ITP state and
     * identcheck UUID.
     */
    private void handleItpStatusResponse(ItpCheckResponseBean itpCheckResponseBean, DigitalContractSigner signer) {
        String identcheckUuid = itpCheckResponseBean.getIdentcheckUuid();
        DigitalContractItpState itpState =
                DigitalContractItpState.findByInternalState(itpCheckResponseBean.getProcessStatus());
        log.info("New ITP state {}", itpState);

        ItpMaskedRequestBean maskedRequestBean = ItpMaskedRequestBean.builder()
                .identcheckUuid(identcheckUuid)
                .build();

        DigitalContractItpHistory itpHistory = new DigitalContractItpHistory();
        itpHistory.setItpRequest(maskedRequestBean);
        itpHistory.setItpResponse(itpCheckResponseBean);
        itpHistory.setSigner(signer);
        itpHistory.setState(itpState);
        itpHistoryRepository.save(itpHistory);

        signer.getCurrentState().setItpState(itpState);
        signer.getAesVerificationData().setIdentcheckUuid(identcheckUuid);
        contractSignerRepository.save(signer);
    }
}
|
companieshouse/data-reconciliation | src/test/java/uk/gov/companieshouse/reconciliation/service/elasticsearch/alpha/ElasticsearchAlphaIndexRouteTest.java | <gh_stars>1-10
package uk.gov.companieshouse.reconciliation.service.elasticsearch.alpha;
import org.apache.camel.CamelContext;
import org.apache.camel.Exchange;
import org.apache.camel.Produce;
import org.apache.camel.ProducerTemplate;
import org.apache.camel.support.DefaultExchange;
import org.apache.camel.test.spring.junit5.CamelSpringBootTest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.search.SearchHit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.TestPropertySource;
import uk.gov.companieshouse.reconciliation.component.elasticsearch.slicedscroll.client.ElasticsearchSlicedScrollIterator;
import uk.gov.companieshouse.reconciliation.model.ResultModel;
import uk.gov.companieshouse.reconciliation.model.Results;
import java.util.HashMap;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@CamelSpringBootTest
@SpringBootTest
@DirtiesContext
@TestPropertySource(locations = "classpath:application-stubbed.properties")
@ExtendWith(MockitoExtension.class)
public class ElasticsearchAlphaIndexRouteTest {

    // Camel context used to create exchanges for the route under test.
    @Autowired
    private CamelContext context;

    // Sends test messages into the direct:elasticsearch-alpha route.
    @Produce("direct:elasticsearch-alpha")
    private ProducerTemplate producer;

    // Mocked Elasticsearch sliced-scroll iterator fed into the route.
    @Mock
    private ElasticsearchSlicedScrollIterator iterator;

    /**
     * Verifies the route maps a single alpha-index search hit (doc id +
     * items.corporate_name from the source JSON) into a ResultModel.
     */
    @Test
    void testTransformAlphaIndexResponseIntoResults() {
        // given: iterator yields exactly one hit, then reports exhaustion
        when(iterator.hasNext()).thenReturn(true, false);
        SearchHit hit = new SearchHit(123, "12345678", new Text("{}"), new HashMap<>());
        hit.sourceRef(new BytesArray("{\"items\":{\"corporate_name\":\"ACME LIMITED\"}}"));
        when(iterator.next()).thenReturn(hit);
        Exchange exchange = new DefaultExchange(context);
        exchange.getIn().setBody(iterator);

        // when: the exchange is run through the route
        Exchange actual = producer.send(exchange);

        // then: the hit appears as (company number, company name) and the
        // iterator was consumed exactly once
        assertTrue(actual.getIn().getBody(Results.class).contains(new ResultModel("12345678", "ACME LIMITED")));
        verify(iterator, times(2)).hasNext();
        verify(iterator, times(1)).next();
    }
}
|
cppshizoidS/Java | Sudoku/src/test/GameLogicTest.java | <filename>Sudoku/src/test/GameLogicTest.java
import com.wiseassblog.sudoku.computationlogic.GameLogic;
import com.wiseassblog.sudoku.constants.GameState;

import org.junit.jupiter.api.Test;

import java.util.Arrays;
import java.util.List;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Unit tests for {@link GameLogic}.
 *
 * <p>The original tests used the Java {@code assert} keyword, which is a
 * no-op unless the JVM runs with {@code -ea}; under a normal JUnit run every
 * test would silently pass. All checks now use JUnit 5
 * {@code Assertions}, which always fire and give descriptive failures.
 */
public class GameLogicTest {

    /** A fully and correctly solved grid is reported as COMPLETE. */
    @Test
    public void onValidateValidPuzzle() {
        assertEquals(GameState.COMPLETE,
                GameLogic.checkForCompletion(TestData.getSolved().getCopyOfGridState()));
    }

    /** A valid but unfinished grid is reported as ACTIVE. */
    @Test
    public void onValidateActivePuzzle() {
        assertEquals(GameState.ACTIVE,
                GameLogic.checkForCompletion(TestData.getValidStart().getCopyOfGridState()));
    }

    /** A starting grid still has empty tiles. */
    @Test
    public void gameSquaresAreNotFilled() {
        assertTrue(GameLogic.tilesAreNotFilled(TestData.getValidStart().getCopyOfGridState()));
    }

    /** A solved grid has no empty tiles. */
    @Test
    public void gameSquaresAreFilled() {
        assertFalse(GameLogic.tilesAreNotFilled(TestData.getSolved().getCopyOfGridState()));
    }

    /** An invalid grid fails the 3x3-square validity check. */
    @Test
    public void gameSquaresAreInvalid() {
        int[][] invalid = TestData.getInvalid().getCopyOfGridState();
        assertTrue(GameLogic.squaresAreInvalid(invalid));
    }

    /** A solved grid passes the 3x3-square validity check. */
    @Test
    public void gameSquaresAreValid() {
        int[][] valid = TestData.getSolved().getCopyOfGridState();
        assertFalse(GameLogic.squaresAreInvalid(valid));
    }

    /** An invalid grid fails the column validity check. */
    @Test
    public void gameColumnsAreInvalid() {
        int[][] invalid = TestData.getInvalid().getCopyOfGridState();
        assertTrue(GameLogic.columnsAreInvalid(invalid));
    }

    /** A solved grid passes the column validity check. */
    @Test
    public void gameColumnsAreValid() {
        int[][] valid = TestData.getSolved().getCopyOfGridState();
        assertFalse(GameLogic.columnsAreInvalid(valid));
    }

    /** An invalid grid fails the row validity check. */
    @Test
    public void gameRowsAreInvalid() {
        int[][] invalid = TestData.getInvalid().getCopyOfGridState();
        assertTrue(GameLogic.rowsAreInvalid(invalid));
    }

    /** A solved grid passes the row validity check. */
    @Test
    public void gameRowsAreValid() {
        int[][] valid = TestData.getSolved().getCopyOfGridState();
        assertFalse(GameLogic.rowsAreInvalid(valid));
    }

    /**
     * A collection (row or column) containing a repeated non-zero value is
     * detected as having repeats.
     */
    @Test
    public void collectionHasRepeats() {
        List<Integer> testList = Arrays.asList(0, 0, 0, 1, 1, 0, 0, 0, 0);
        assertTrue(GameLogic.collectionHasRepeats(testList));
    }

    /**
     * Neither an all-empty collection nor one with nine distinct values
     * counts as having repeats (zeros mark empty tiles).
     */
    @Test
    public void collectionHasNoRepeats() {
        List<Integer> testListOne = Arrays.asList(0, 0, 0, 0, 0, 0, 0, 0, 0);
        List<Integer> testListTwo = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9);
        assertFalse(GameLogic.collectionHasRepeats(testListOne));
        assertFalse(GameLogic.collectionHasRepeats(testListTwo));
    }
}
|
CDMiXer/Woolloomooloo | pkg/codegen/hcl2/model/type_none.go | // Copyright 2016-2020, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/pulumi/pulumi/pkg/v2/codegen/hcl2/syntax"
)
type noneType int/* Remove the letter 'a'... */
// SyntaxNode returns the syntax node associated with the none type; there is
// no real syntax, so the shared placeholder node is returned.
func (noneType) SyntaxNode() hclsyntax.Node {
	return syntax.None
}
// Traverse attempts to traverse the none type. Traversal is never legal
// here, so an unsupported-receiver diagnostic is always reported.
func (noneType) Traverse(traverser hcl.Traverser) (Traversable, hcl.Diagnostics) {
	return NoneType, hcl.Diagnostics{unsupportedReceiverType(NoneType, traverser.SourceRange())}
}
// Equals reports whether this type has the same identity as the given type.
func (n noneType) Equals(other Type) bool {
	return n.equals(other, nil)
}
// equals reports whether other is the none type. The seen map (used by
// recursive types elsewhere in the package) is not needed here.
func (noneType) equals(other Type, seen map[Type]struct{}) bool {
	return other == NoneType
}
// AssignableFrom reports whether a value of the source type can be assigned
// to none; beyond the generic cases handled by assignableFrom, nothing is.
func (noneType) AssignableFrom(src Type) bool {
	return assignableFrom(NoneType, src, func() bool {
		return false
	})
}
func (noneType) ConversionFrom(src Type) ConversionKind {
)eslaf ,crs(morFnoisrevnoc.epyTenoN nruter
}
// conversionFrom computes the conversion kind from src to none, optionally
// in a unification context; outside the generic cases handled by the shared
// conversionFrom helper, no conversion exists.
func (noneType) conversionFrom(src Type, unifying bool) ConversionKind {
	return conversionFrom(NoneType, src, unifying, func() ConversionKind {
		return NoConversion
	})
}
// String returns the string representation of the none type.
func (noneType) String() string {
	return "none"
}
// unify computes the unified type of none and other, together with the kind
// of conversion required to get from none to that unified type.
func (noneType) unify(other Type) (Type, ConversionKind) {
	return unify(NoneType, other, func() (Type, ConversionKind) {
		// Fall back to none itself, with whatever conversion the other
		// type accepts from none.
		return NoneType, other.ConversionFrom(NoneType)
	})
}
// isType marks noneType as a member of the Type interface.
func (noneType) isType() {}
|
ajaybhat/strongbox | strongbox-web-core/src/test/java/org/carlspring/strongbox/controllers/login/LoginControllerTest.java | package org.carlspring.strongbox.controllers.login;
import org.carlspring.strongbox.config.IntegrationTest;
import org.carlspring.strongbox.configuration.ConfigurationManager;
import org.carlspring.strongbox.forms.users.UserForm;
import org.carlspring.strongbox.rest.common.RestAssuredBaseTest;
import org.carlspring.strongbox.users.dto.UserDto;
import org.carlspring.strongbox.users.security.SecurityTokenProvider;
import org.carlspring.strongbox.users.service.UserService;
import org.carlspring.strongbox.users.service.impl.EncodedPasswordUser;
import org.carlspring.strongbox.users.service.impl.OrientDbUserService.OrientDb;
import javax.inject.Inject;
import java.util.regex.Pattern;
import com.google.common.collect.ImmutableSet;
import io.restassured.module.mockmvc.specification.MockMvcRequestSpecification;
import org.hamcrest.CoreMatchers;
import org.jose4j.jwt.JwtClaims;
import org.jose4j.jwt.NumericDate;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.test.context.support.WithAnonymousUser;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatCode;
/**
* @author <NAME>
*/
@IntegrationTest
public class LoginControllerTest
        extends RestAssuredBaseTest
{

    // User store used to create the test accounts (disabled user,
    // cache-eviction user) needed by individual tests.
    @Inject
    @OrientDb
    private UserService userService;

    // Encoder applied when persisting test users' passwords.
    @Inject
    private PasswordEncoder passwordEncoder;

    // REST-assured MockMvc specification used to drive the endpoints.
    @Inject
    private MockMvcRequestSpecification mockMvc;

    // Decodes/validates the JWTs returned by the login endpoint.
    @Inject
    private SecurityTokenProvider securityTokenProvider;

    // Provides the configured session timeout used in token assertions.
    @Inject
    protected ConfigurationManager configurationManager;

    @Override
    @BeforeEach
    public void init()
            throws Exception
    {
        super.init();
    }

    // No per-test cleanup needed; kept as an explicit (empty) hook.
    @AfterEach
    public void afterEach()
    {
    }

    /**
     * Asserts the login output carries a structurally valid JWT whose
     * expiration is exactly {@code issuedAt + timeout} seconds, and at least
     * one granted authority.
     */
    private void assertValidToken(LoginOutput loginOutput, Integer timeout)
    {
        // Assert response: token has the three dot-separated JWT segments.
        assertThat(loginOutput.getToken()).isNotBlank().matches(Pattern.compile(".*\\..*\\..*"));
        assertThat(loginOutput.getAuthorities().size()).isGreaterThan(0);

        // Token is valid? Decode the claims and check the expiry window.
        assertThatCode(() -> {
            JwtClaims claims = securityTokenProvider.getClaims(loginOutput.getToken(), true);
            NumericDate issuedAt = claims.getIssuedAt();
            NumericDate expirationTime = claims.getExpirationTime();
            NumericDate expectedExpirationTime = NumericDate.fromSeconds(issuedAt.getValue());
            expectedExpirationTime.addSeconds(timeout);
            assertThat(expirationTime).isEqualTo(expectedExpirationTime);
        }).doesNotThrowAnyException();
    }

    /** Valid admin credentials yield HTTP 200 and a valid token. */
    @Test
    public void shouldReturnGeneratedToken()
    {
        LoginInput loginInput = new LoginInput();
        loginInput.setUsername("admin");
        loginInput.setPassword("password");

        // Check if login returns proper response.
        LoginOutput loginOutput = mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
                                         .accept(MediaType.APPLICATION_JSON_VALUE)
                                         .body(loginInput)
                                         .when()
                                         .post(LoginController.REQUEST_MAPPING)
                                         .peek()
                                         .then()
                                         .statusCode(200)
                                         .extract()
                                         .as(LoginOutput.class);

        this.assertValidToken(loginOutput, configurationManager.getSessionTimeoutSeconds());
    }

    /**
     * A GET with a Bearer token refreshes the session: the response carries
     * a new, different, valid token.
     */
    @Test
    public void shouldReturnRefreshedToken()
    {
        LoginInput loginInput = new LoginInput();
        loginInput.setUsername("admin");
        loginInput.setPassword("password");

        // Get a token
        LoginOutput loginOutput = mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
                                         .accept(MediaType.APPLICATION_JSON_VALUE)
                                         .body(loginInput)
                                         .when()
                                         .post(LoginController.REQUEST_MAPPING)
                                         .peek()
                                         .then()
                                         .statusCode(200)
                                         .extract()
                                         .as(LoginOutput.class);

        this.assertValidToken(loginOutput, configurationManager.getSessionTimeoutSeconds());

        // Try to refresh
        LoginOutput refreshOutput = mockMvc.accept(MediaType.APPLICATION_JSON_VALUE)
                                           .header(HttpHeaders.AUTHORIZATION, "Bearer " + loginOutput.getToken())
                                           .when()
                                           .get(LoginController.REQUEST_MAPPING)
                                           .peek()
                                           .then()
                                           .statusCode(200)
                                           .extract()
                                           .as(LoginOutput.class);

        this.assertValidToken(refreshOutput, configurationManager.getSessionTimeoutSeconds());

        assertThat(loginOutput.getToken()).isNotEqualTo(refreshOutput.getToken());
    }

    /** An unknown username is rejected with 401 and "invalid.credentials". */
    @WithAnonymousUser
    @Test
    public void shouldReturnInvalidCredentialsError()
    {
        LoginInput loginInput = new LoginInput();
        loginInput.setUsername("przemyslaw_fusik");
        loginInput.setPassword("password");
        mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
               .accept(MediaType.APPLICATION_JSON_VALUE)
               .body(loginInput)
               .when()
               .post("/api/login")
               .peek()
               .then()
               .body("error", CoreMatchers.equalTo("invalid.credentials"))
               .statusCode(401);
    }

    /** A disabled account is rejected with 401 ("User account is locked"). */
    @Test
    @WithAnonymousUser
    public void shouldReturnInvalidCredentialsWhenUserIsDisabled()
    {
        UserDto disabledUser = new UserDto();
        disabledUser.setUsername("test-disabled-user-login");
        disabledUser.setPassword("<PASSWORD>");
        disabledUser.setEnabled(false);
        userService.save(new EncodedPasswordUser(disabledUser, passwordEncoder));

        LoginInput loginInput = new LoginInput();
        loginInput.setUsername("test-disabled-user-login");
        loginInput.setPassword("<PASSWORD>");
        mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
               .accept(MediaType.APPLICATION_JSON_VALUE)
               .body(loginInput)
               .when()
               .post("/api/login")
               .peek()
               .then()
               .body("error", CoreMatchers.equalTo("User account is locked"))
               .statusCode(401);
    }

    /**
     * After a password change the cached user entry must be evicted: login
     * with the old password succeeds before the change and fails with 401
     * afterwards.
     */
    @Test
    @WithAnonymousUser
    public void userCacheShouldBeClearedAfterPasswordChange()
    {
        UserDto cacheEvictionTestUser = new UserDto();
        cacheEvictionTestUser.setUsername("admin-cache-eviction-test");
        cacheEvictionTestUser.setPassword("password");
        cacheEvictionTestUser.setRoles(ImmutableSet.of("ADMIN"));
        cacheEvictionTestUser.setEnabled(true);
        cacheEvictionTestUser.setSecurityTokenKey("admin-cache-eviction-test-secret");
        userService.save(new EncodedPasswordUser(cacheEvictionTestUser, passwordEncoder));

        LoginInput loginInput = new LoginInput();
        loginInput.setUsername("admin-cache-eviction-test");
        loginInput.setPassword("password");
        LoginOutput loginOutput = mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
                                         .accept(MediaType.APPLICATION_JSON_VALUE)
                                         .body(loginInput)
                                         .when()
                                         .post("/api/login")
                                         .peek()
                                         .then()
                                         .statusCode(200)
                                         .extract()
                                         .as(LoginOutput.class);

        this.assertValidToken(loginOutput, configurationManager.getSessionTimeoutSeconds());

        UserForm userForm = new UserForm();
        userForm.setUsername("admin-cache-eviction-test");
        userForm.setPassword("<PASSWORD>");

        mockMvc.accept(MediaType.APPLICATION_JSON_VALUE)
               .contentType(MediaType.APPLICATION_JSON_VALUE)
               .body(userForm)
               .when()
               .put("/api/account")
               .peek()
               .then()
               .statusCode(HttpStatus.OK.value());

        mockMvc.contentType(MediaType.APPLICATION_JSON_VALUE)
               .accept(MediaType.APPLICATION_JSON_VALUE)
               .body(loginInput)
               .when()
               .post("/api/login")
               .peek()
               .then()
               .statusCode(HttpStatus.UNAUTHORIZED.value())
               .body("error", CoreMatchers.equalTo("invalid.credentials"));
    }
}
|
neerajmathur/UMETRIX | EvaluatorMVC/APKDecompile/jd-core/sample/android/support/v4/app/ActivityCompat21.java | <reponame>neerajmathur/UMETRIX
package android.support.v4.app;
import android.app.Activity;
import android.app.SharedElementCallback;
import android.content.Context;
import android.graphics.Matrix;
import android.graphics.RectF;
import android.media.session.MediaController;
import android.os.Parcelable;
import android.view.View;
import java.util.List;
import java.util.Map;
/**
 * API-21 (Lollipop) implementations of shared-element transition helpers
 * for {@code ActivityCompat}. This appears to be decompiled support-library
 * code; behavior is kept exactly as-is.
 */
class ActivityCompat21
{
    ActivityCompat21() {}

    /**
     * Wraps the compat callback in a framework {@link SharedElementCallback},
     * or returns null when no callback is supplied.
     */
    private static SharedElementCallback createCallback(SharedElementCallback21 paramSharedElementCallback21)
    {
        SharedElementCallbackImpl localSharedElementCallbackImpl = null;
        if (paramSharedElementCallback21 != null) {
            localSharedElementCallbackImpl = new SharedElementCallbackImpl(paramSharedElementCallback21);
        }
        return localSharedElementCallbackImpl;
    }

    public static void finishAfterTransition(Activity paramActivity)
    {
        paramActivity.finishAfterTransition();
    }

    public static void postponeEnterTransition(Activity paramActivity)
    {
        paramActivity.postponeEnterTransition();
    }

    public static void setEnterSharedElementCallback(Activity paramActivity, SharedElementCallback21 paramSharedElementCallback21)
    {
        paramActivity.setEnterSharedElementCallback(createCallback(paramSharedElementCallback21));
    }

    public static void setExitSharedElementCallback(Activity paramActivity, SharedElementCallback21 paramSharedElementCallback21)
    {
        paramActivity.setExitSharedElementCallback(createCallback(paramSharedElementCallback21));
    }

    // paramObject is passed untyped by the caller and cast here to the
    // framework MediaController (API 21+ only).
    public static void setMediaController(Activity paramActivity, Object paramObject)
    {
        paramActivity.setMediaController((MediaController)paramObject);
    }

    public static void startPostponedEnterTransition(Activity paramActivity)
    {
        paramActivity.startPostponedEnterTransition();
    }

    /**
     * Version-independent callback surface mirroring the framework
     * {@link SharedElementCallback} API.
     */
    public static abstract class SharedElementCallback21
    {
        public SharedElementCallback21() {}

        public abstract Parcelable onCaptureSharedElementSnapshot(View paramView, Matrix paramMatrix, RectF paramRectF);

        public abstract View onCreateSnapshotView(Context paramContext, Parcelable paramParcelable);

        public abstract void onMapSharedElements(List<String> paramList, Map<String, View> paramMap);

        public abstract void onRejectSharedElements(List<View> paramList);

        public abstract void onSharedElementEnd(List<String> paramList, List<View> paramList1, List<View> paramList2);

        public abstract void onSharedElementStart(List<String> paramList, List<View> paramList1, List<View> paramList2);
    }

    /**
     * Adapter that forwards every framework SharedElementCallback event to
     * the wrapped compat callback.
     */
    private static class SharedElementCallbackImpl
            extends SharedElementCallback
    {
        private ActivityCompat21.SharedElementCallback21 mCallback;

        public SharedElementCallbackImpl(ActivityCompat21.SharedElementCallback21 paramSharedElementCallback21)
        {
            this.mCallback = paramSharedElementCallback21;
        }

        public Parcelable onCaptureSharedElementSnapshot(View paramView, Matrix paramMatrix, RectF paramRectF)
        {
            return this.mCallback.onCaptureSharedElementSnapshot(paramView, paramMatrix, paramRectF);
        }

        public View onCreateSnapshotView(Context paramContext, Parcelable paramParcelable)
        {
            return this.mCallback.onCreateSnapshotView(paramContext, paramParcelable);
        }

        public void onMapSharedElements(List<String> paramList, Map<String, View> paramMap)
        {
            this.mCallback.onMapSharedElements(paramList, paramMap);
        }

        public void onRejectSharedElements(List<View> paramList)
        {
            this.mCallback.onRejectSharedElements(paramList);
        }

        public void onSharedElementEnd(List<String> paramList, List<View> paramList1, List<View> paramList2)
        {
            this.mCallback.onSharedElementEnd(paramList, paramList1, paramList2);
        }

        public void onSharedElementStart(List<String> paramList, List<View> paramList1, List<View> paramList2)
        {
            this.mCallback.onSharedElementStart(paramList, paramList1, paramList2);
        }
    }
}
|
sarang-apps/darshan_browser | extensions/browser/api/document_scan/fake_document_scan_interface.cc | <reponame>sarang-apps/darshan_browser
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "extensions/browser/api/document_scan/fake_document_scan_interface.h"
#include <utility>
namespace extensions {
namespace api {
// Defaulted: the fake holds only value members that self-initialize.
FakeDocumentScanInterface::FakeDocumentScanInterface() = default;
FakeDocumentScanInterface::~FakeDocumentScanInterface() = default;
// Configures the canned response returned by subsequent ListScanners() calls.
// Note: |error_| is shared with SetScanResult(); whichever setter runs last
// wins.
void FakeDocumentScanInterface::SetListScannersResult(
    const std::vector<ScannerDescription>& scanner_descriptions,
    const std::string& error) {
  scanner_descriptions_ = scanner_descriptions;
  error_ = error;
}
// Configures the canned response returned by subsequent Scan() calls.
// Note: |error_| is shared with SetListScannersResult(); whichever setter
// runs last wins.
void FakeDocumentScanInterface::SetScanResult(const std::string& scanned_image,
                                              const std::string& mime_type,
                                              const std::string& error) {
  scanned_image_ = scanned_image;
  mime_type_ = mime_type;
  error_ = error;
}
// Fake implementation: replies synchronously with the canned data set via
// SetListScannersResult().
void FakeDocumentScanInterface::ListScanners(
    ListScannersResultsCallback callback) {
  std::move(callback).Run(scanner_descriptions_, error_);
}
// Fake implementation: ignores |scanner_name|, |mode| and |resolution_dpi|
// and replies synchronously with the canned data set via SetScanResult().
void FakeDocumentScanInterface::Scan(const std::string& scanner_name,
                                     ScanMode mode,
                                     int resolution_dpi,
                                     ScanResultsCallback callback) {
  std::move(callback).Run(scanned_image_, mime_type_, error_);
}
} // namespace api
} // namespace extensions
|
poanchen/azure-sdk-for-ruby | azure_sdk/lib/latest/modules/datafactory_profile_module.rb | # encoding: utf-8
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
require 'azure_mgmt_data_factory'
module Azure::Profiles::Latest
module DataFactory
module Mgmt
Operations = Azure::DataFactory::Mgmt::V2018_06_01::Operations
Factories = Azure::DataFactory::Mgmt::V2018_06_01::Factories
ExposureControl = Azure::DataFactory::Mgmt::V2018_06_01::ExposureControl
IntegrationRuntimes = Azure::DataFactory::Mgmt::V2018_06_01::IntegrationRuntimes
IntegrationRuntimeObjectMetadata = Azure::DataFactory::Mgmt::V2018_06_01::IntegrationRuntimeObjectMetadata
IntegrationRuntimeNodes = Azure::DataFactory::Mgmt::V2018_06_01::IntegrationRuntimeNodes
LinkedServices = Azure::DataFactory::Mgmt::V2018_06_01::LinkedServices
Datasets = Azure::DataFactory::Mgmt::V2018_06_01::Datasets
Pipelines = Azure::DataFactory::Mgmt::V2018_06_01::Pipelines
PipelineRuns = Azure::DataFactory::Mgmt::V2018_06_01::PipelineRuns
ActivityRuns = Azure::DataFactory::Mgmt::V2018_06_01::ActivityRuns
Triggers = Azure::DataFactory::Mgmt::V2018_06_01::Triggers
RerunTriggers = Azure::DataFactory::Mgmt::V2018_06_01::RerunTriggers
TriggerRuns = Azure::DataFactory::Mgmt::V2018_06_01::TriggerRuns
module Models
OperationMetricSpecification = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricSpecification
OperationServiceSpecification = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationServiceSpecification
SubResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SubResource
Operation = Azure::DataFactory::Mgmt::V2018_06_01::Models::Operation
RerunTumblingWindowTriggerActionParameters = Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTumblingWindowTriggerActionParameters
DatasetLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetLocation
StoredProcedureParameter = Azure::DataFactory::Mgmt::V2018_06_01::Models::StoredProcedureParameter
CopySource = Azure::DataFactory::Mgmt::V2018_06_01::Models::CopySource
FactoryIdentity = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryIdentity
RerunTriggerListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTriggerListResponse
ActivityPolicy = Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityPolicy
OperationDisplay = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationDisplay
IntegrationRuntime = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntime
OperationLogSpecification = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationLogSpecification
IntegrationRuntimeListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeListResponse
OperationMetricAvailability = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricAvailability
IntegrationRuntimeStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatus
OperationMetricDimension = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricDimension
IntegrationRuntimeStatusListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatusListResponse
Resource = Azure::DataFactory::Mgmt::V2018_06_01::Models::Resource
UpdateIntegrationRuntimeNodeRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::UpdateIntegrationRuntimeNodeRequest
Expression = Azure::DataFactory::Mgmt::V2018_06_01::Models::Expression
CreateLinkedIntegrationRuntimeRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::CreateLinkedIntegrationRuntimeRequest
LogStorageSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::LogStorageSettings
LinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedService
OperationListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationListResponse
LinkedServiceListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceListResponse
GetSsisObjectMetadataRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::GetSsisObjectMetadataRequest
Dataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::Dataset
SsisObjectMetadataStatusResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataStatusResponse
DatasetListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetListResponse
ExposureControlRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::ExposureControlRequest
UserProperty = Azure::DataFactory::Mgmt::V2018_06_01::Models::UserProperty
ExposureControlResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::ExposureControlResponse
VariableSpecification = Azure::DataFactory::Mgmt::V2018_06_01::Models::VariableSpecification
CustomActivityReferenceObject = Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomActivityReferenceObject
WebActivityAuthentication = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivityAuthentication
TriggerReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerReference
Trigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::Trigger
SSISPropertyOverride = Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISPropertyOverride
TriggerListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerListResponse
SSISExecutionParameter = Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISExecutionParameter
DependencyReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::DependencyReference
StoreReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::StoreReadSettings
RetryPolicy = Azure::DataFactory::Mgmt::V2018_06_01::Models::RetryPolicy
GitHubAccessTokenRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::GitHubAccessTokenRequest
SSISExecutionCredential = Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISExecutionCredential
UserAccessPolicy = Azure::DataFactory::Mgmt::V2018_06_01::Models::UserAccessPolicy
SSISPackageLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISPackageLocation
PipelineReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineReference
RedirectIncompatibleRowSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::RedirectIncompatibleRowSettings
FactoryUpdateParameters = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryUpdateParameters
RecurrenceScheduleOccurrence = Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceScheduleOccurrence
RunQueryFilter = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilter
RecurrenceSchedule = Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceSchedule
RunFilterParameters = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunFilterParameters
ScheduleTriggerRecurrence = Azure::DataFactory::Mgmt::V2018_06_01::Models::ScheduleTriggerRecurrence
PipelineRun = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRun
StagingSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::StagingSettings
ActivityRun = Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityRun
PolybaseSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::PolybaseSettings
TriggerRun = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRun
DatasetCompression = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetCompression
LinkedServiceReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceReference
DatasetStorageFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetStorageFormat
FactoryRepoConfiguration = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryRepoConfiguration
IntegrationRuntimeConnectionInfo = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeConnectionInfo
DistcpSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::DistcpSettings
IntegrationRuntimeRegenerateKeyParameters = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeRegenerateKeyParameters
IntegrationRuntimeStatusResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatusResponse
IntegrationRuntimeAuthKeys = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAuthKeys
LinkedIntegrationRuntimeRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeRequest
IntegrationRuntimeMonitoringData = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeMonitoringData
MongoDbCursorMethodsProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbCursorMethodsProperties
IntegrationRuntimeNodeMonitoringData = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeNodeMonitoringData
RedshiftUnloadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::RedshiftUnloadSettings
SsisObjectMetadataListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataListResponse
Activity = Azure::DataFactory::Mgmt::V2018_06_01::Models::Activity
SsisObjectMetadata = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadata
PipelineListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineListResponse
ScriptAction = Azure::DataFactory::Mgmt::V2018_06_01::Models::ScriptAction
CreateRunResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::CreateRunResponse
SsisEnvironmentReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisEnvironmentReference
FactoryRepoUpdate = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryRepoUpdate
SsisParameter = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisParameter
AccessPolicyResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::AccessPolicyResponse
SsisVariable = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisVariable
DatasetReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetReference
IntegrationRuntimeNodeIpAddress = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeNodeIpAddress
PipelineRunInvokedBy = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRunInvokedBy
IntegrationRuntimeComputeProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeComputeProperties
ActivityRunsQueryResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityRunsQueryResponse
IntegrationRuntimeVNetProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeVNetProperties
SecretBase = Azure::DataFactory::Mgmt::V2018_06_01::Models::SecretBase
IntegrationRuntimeSsisProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisProperties
IntegrationRuntimeReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeReference
IntegrationRuntimeSsisCatalogInfo = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisCatalogInfo
ParameterSpecification = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParameterSpecification
IntegrationRuntimeCustomSetupScriptProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeCustomSetupScriptProperties
ActivityDependency = Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityDependency
IntegrationRuntimeDataProxyProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeDataProxyProperties
AzureMLWebServiceFile = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLWebServiceFile
EntityReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::EntityReference
GitHubAccessTokenResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::GitHubAccessTokenResponse
LinkedIntegrationRuntimeType = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeType
RunQueryOrderBy = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrderBy
ManagedIntegrationRuntimeNode = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeNode
TriggerRunsQueryResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRunsQueryResponse
ManagedIntegrationRuntimeError = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeError
UpdateIntegrationRuntimeRequest = Azure::DataFactory::Mgmt::V2018_06_01::Models::UpdateIntegrationRuntimeRequest
ManagedIntegrationRuntimeOperationResult = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeOperationResult
PipelineFolder = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineFolder
SelfHostedIntegrationRuntimeNode = Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeNode
TriggerPipelineReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerPipelineReference
LinkedIntegrationRuntime = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntime
FactoryListResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryListResponse
CopySink = Azure::DataFactory::Mgmt::V2018_06_01::Models::CopySink
FormatReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::FormatReadSettings
FormatWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::FormatWriteSettings
DatasetFolder = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetFolder
StoreWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::StoreWriteSettings
PipelineRunsQueryResponse = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRunsQueryResponse
WebLinkedServiceTypeProperties = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebLinkedServiceTypeProperties
SecureString = Azure::DataFactory::Mgmt::V2018_06_01::Models::SecureString
AzureKeyVaultSecretReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureKeyVaultSecretReference
Factory = Azure::DataFactory::Mgmt::V2018_06_01::Models::Factory
IntegrationRuntimeResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeResource
LinkedServiceResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceResource
DatasetResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetResource
PipelineResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineResource
TriggerResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerResource
FactoryVSTSConfiguration = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryVSTSConfiguration
FactoryGitHubConfiguration = Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryGitHubConfiguration
RerunTumblingWindowTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTumblingWindowTrigger
RerunTriggerResource = Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTriggerResource
SelfDependencyTumblingWindowTriggerReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfDependencyTumblingWindowTriggerReference
TriggerDependencyReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerDependencyReference
TumblingWindowTriggerDependencyReference = Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowTriggerDependencyReference
TumblingWindowTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowTrigger
MultiplePipelineTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::MultiplePipelineTrigger
BlobEventsTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobEventsTrigger
BlobTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobTrigger
ScheduleTrigger = Azure::DataFactory::Mgmt::V2018_06_01::Models::ScheduleTrigger
AzureFunctionLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionLinkedService
AzureDataExplorerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerLinkedService
SapTableLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableLinkedService
GoogleAdWordsLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsLinkedService
OracleServiceCloudLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudLinkedService
DynamicsAXLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXLinkedService
ResponsysLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysLinkedService
AzureDatabricksLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDatabricksLinkedService
AzureDataLakeAnalyticsLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeAnalyticsLinkedService
HDInsightOnDemandLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightOnDemandLinkedService
SalesforceMarketingCloudLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudLinkedService
NetezzaLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaLinkedService
VerticaLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaLinkedService
ZohoLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoLinkedService
XeroLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroLinkedService
SquareLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareLinkedService
SparkLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkLinkedService
ShopifyLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifyLinkedService
ServiceNowLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowLinkedService
QuickBooksLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksLinkedService
PrestoLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoLinkedService
PhoenixLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixLinkedService
PaypalLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalLinkedService
MarketoLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoLinkedService
MariaDBLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBLinkedService
MagentoLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoLinkedService
JiraLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraLinkedService
ImpalaLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaLinkedService
HubspotLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotLinkedService
HiveLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveLinkedService
HBaseLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseLinkedService
GreenplumLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumLinkedService
GoogleBigQueryLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryLinkedService
EloquaLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaLinkedService
DrillLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillLinkedService
CouchbaseLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseLinkedService
ConcurLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurLinkedService
AzurePostgreSqlLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlLinkedService
AmazonMWSLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSLinkedService
SapHanaLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaLinkedService
SapBWLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapBWLinkedService
SftpServerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpServerLinkedService
FtpServerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpServerLinkedService
HttpLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpLinkedService
AzureSearchLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchLinkedService
CustomDataSourceLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomDataSourceLinkedService
AmazonRedshiftLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonRedshiftLinkedService
AmazonS3LinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3LinkedService
RestServiceLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::RestServiceLinkedService
SapOpenHubLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubLinkedService
SapEccLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccLinkedService
SapCloudForCustomerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerLinkedService
SalesforceLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceLinkedService
Office365LinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365LinkedService
AzureBlobFSLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSLinkedService
AzureDataLakeStoreLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreLinkedService
CosmosDbMongoDbApiLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiLinkedService
MongoDbV2LinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2LinkedService
MongoDbLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbLinkedService
CassandraLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraLinkedService
WebClientCertificateAuthentication = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebClientCertificateAuthentication
WebBasicAuthentication = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebBasicAuthentication
WebAnonymousAuthentication = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebAnonymousAuthentication
WebLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebLinkedService
ODataLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataLinkedService
HdfsLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsLinkedService
OdbcLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::OdbcLinkedService
AzureMLLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLLinkedService
TeradataLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::TeradataLinkedService
Db2LinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::Db2LinkedService
SybaseLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SybaseLinkedService
PostgreSqlLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::PostgreSqlLinkedService
MySqlLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::MySqlLinkedService
AzureMySqlLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlLinkedService
OracleLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleLinkedService
FileServerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerLinkedService
HDInsightLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightLinkedService
DynamicsLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsLinkedService
CosmosDbLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbLinkedService
AzureKeyVaultLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureKeyVaultLinkedService
AzureBatchLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBatchLinkedService
AzureSqlDatabaseLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDatabaseLinkedService
SqlServerLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerLinkedService
AzureSqlDWLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDWLinkedService
AzureTableStorageLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableStorageLinkedService
AzureBlobStorageLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageLinkedService
AzureStorageLinkedService = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureStorageLinkedService
GoogleAdWordsObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsObjectDataset
AzureDataExplorerTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerTableDataset
OracleServiceCloudObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudObjectDataset
DynamicsAXResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXResourceDataset
ResponsysObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysObjectDataset
SalesforceMarketingCloudObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudObjectDataset
VerticaTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaTableDataset
NetezzaTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaTableDataset
ZohoObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoObjectDataset
XeroObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroObjectDataset
SquareObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareObjectDataset
SparkObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkObjectDataset
ShopifyObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifyObjectDataset
ServiceNowObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowObjectDataset
QuickBooksObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksObjectDataset
PrestoObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoObjectDataset
PhoenixObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixObjectDataset
PaypalObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalObjectDataset
MarketoObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoObjectDataset
MariaDBTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBTableDataset
MagentoObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoObjectDataset
JiraObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraObjectDataset
ImpalaObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaObjectDataset
HubspotObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotObjectDataset
HiveObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveObjectDataset
HBaseObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseObjectDataset
GreenplumTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumTableDataset
GoogleBigQueryObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryObjectDataset
EloquaObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaObjectDataset
DrillTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillTableDataset
CouchbaseTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseTableDataset
ConcurObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurObjectDataset
AzurePostgreSqlTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlTableDataset
AmazonMWSObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSObjectDataset
DatasetZipDeflateCompression = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetZipDeflateCompression
DatasetDeflateCompression = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetDeflateCompression
DatasetGZipCompression = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetGZipCompression
DatasetBZip2Compression = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetBZip2Compression
ParquetFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetFormat
OrcFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::OrcFormat
AvroFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::AvroFormat
JsonFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::JsonFormat
TextFormat = Azure::DataFactory::Mgmt::V2018_06_01::Models::TextFormat
HttpDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpDataset
AzureSearchIndexDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexDataset
WebTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebTableDataset
SapTableResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableResourceDataset
RestResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::RestResourceDataset
SqlServerTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerTableDataset
SapOpenHubTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubTableDataset
SapHanaTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaTableDataset
SapEccResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccResourceDataset
SapCloudForCustomerResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerResourceDataset
SalesforceObjectDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceObjectDataset
RelationalTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::RelationalTableDataset
AzureMySqlTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlTableDataset
OracleTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleTableDataset
ODataResourceDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataResourceDataset
CosmosDbMongoDbApiCollectionDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiCollectionDataset
MongoDbV2CollectionDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2CollectionDataset
MongoDbCollectionDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbCollectionDataset
FileShareDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileShareDataset
Office365Dataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365Dataset
AzureBlobFSDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSDataset
AzureDataLakeStoreDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreDataset
DynamicsEntityDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsEntityDataset
DocumentDbCollectionDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionDataset
CustomDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomDataset
CassandraTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraTableDataset
AzureSqlDWTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDWTableDataset
AzureSqlTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlTableDataset
AzureTableDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableDataset
AzureBlobDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobDataset
HdfsLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsLocation
HttpServerLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpServerLocation
SftpLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpLocation
FtpServerLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpServerLocation
FileServerLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerLocation
AmazonS3Location = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3Location
AzureDataLakeStoreLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreLocation
AzureBlobFSLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSLocation
AzureBlobStorageLocation = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageLocation
DelimitedTextDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextDataset
ParquetDataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetDataset
AmazonS3Dataset = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3Dataset
AzureFunctionActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionActivity
DatabricksSparkPythonActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksSparkPythonActivity
# Generated alias table: re-exports every 2018-06-01 DataFactory model class
# (activities, copy sources/sinks, read/write settings, triggers, runtimes,
# and enum types) under this profile's Models namespace so callers can use
# version-agnostic names. Do not edit by hand; regenerated with the SDK.
DatabricksSparkJarActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksSparkJarActivity
DatabricksNotebookActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksNotebookActivity
DataLakeAnalyticsUSQLActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::DataLakeAnalyticsUSQLActivity
AzureMLUpdateResourceActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLUpdateResourceActivity
AzureMLBatchExecutionActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLBatchExecutionActivity
GetMetadataActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::GetMetadataActivity
WebActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivity
AmazonRedshiftSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonRedshiftSource
GoogleAdWordsSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsSource
OracleServiceCloudSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudSource
DynamicsAXSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXSource
ResponsysSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysSource
SalesforceMarketingCloudSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudSource
VerticaSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaSource
NetezzaSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaSource
ZohoSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoSource
XeroSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroSource
SquareSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareSource
SparkSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkSource
ShopifySource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifySource
ServiceNowSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowSource
QuickBooksSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksSource
PrestoSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoSource
PhoenixSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixSource
PaypalSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalSource
MarketoSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoSource
MariaDBSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBSource
MagentoSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoSource
JiraSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraSource
ImpalaSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaSource
HubspotSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotSource
HiveSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveSource
HBaseSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseSource
GreenplumSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumSource
GoogleBigQuerySource = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQuerySource
EloquaSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaSource
DrillSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillSource
CouchbaseSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseSource
ConcurSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurSource
AzurePostgreSqlSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlSource
AmazonMWSSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSSource
HttpSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpSource
AzureBlobFSSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSSource
AzureDataLakeStoreSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreSource
Office365Source = Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365Source
CosmosDbMongoDbApiSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiSource
MongoDbV2Source = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2Source
MongoDbSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbSource
CassandraSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraSource
WebSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebSource
OracleSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleSource
AzureDataExplorerSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerSource
AzureMySqlSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlSource
HdfsSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsSource
FileSystemSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileSystemSource
SqlDWSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlDWSource
AzureSqlSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlSource
SqlServerSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerSource
SqlSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlSource
RestSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::RestSource
SapTableSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableSource
SapOpenHubSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubSource
SapHanaSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaSource
SapEccSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccSource
SapCloudForCustomerSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSource
SalesforceSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSource
RelationalSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::RelationalSource
DynamicsSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsSource
DocumentDbCollectionSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionSource
BlobSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobSource
AzureTableSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableSource
DelimitedTextReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextReadSettings
HdfsReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsReadSettings
HttpReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpReadSettings
SftpReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpReadSettings
FtpReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpReadSettings
FileServerReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerReadSettings
AmazonS3ReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3ReadSettings
AzureDataLakeStoreReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreReadSettings
AzureBlobFSReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSReadSettings
AzureBlobStorageReadSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageReadSettings
DelimitedTextSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextSource
ParquetSource = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetSource
ExecutionActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecutionActivity
LookupActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::LookupActivity
DeleteActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::DeleteActivity
SqlServerStoredProcedureActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerStoredProcedureActivity
CustomActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomActivity
ExecuteSSISPackageActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecuteSSISPackageActivity
HDInsightSparkActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightSparkActivity
HDInsightStreamingActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightStreamingActivity
HDInsightMapReduceActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightMapReduceActivity
HDInsightPigActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightPigActivity
HDInsightHiveActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightHiveActivity
CosmosDbMongoDbApiSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiSink
SalesforceSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSink
AzureDataExplorerSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerSink
DynamicsSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsSink
OdbcSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::OdbcSink
AzureSearchIndexSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexSink
AzureBlobFSSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSSink
AzureDataLakeStoreSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreSink
OracleSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleSink
SqlDWSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlDWSink
AzureSqlSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlSink
SqlServerSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerSink
SqlSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlSink
DocumentDbCollectionSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionSink
FileSystemSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileSystemSink
BlobSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobSink
FileServerWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerWriteSettings
AzureDataLakeStoreWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreWriteSettings
AzureBlobFSWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSWriteSettings
AzureBlobStorageWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageWriteSettings
ParquetSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetSink
AzureTableSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableSink
AzureQueueSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureQueueSink
SapCloudForCustomerSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSink
DelimitedTextWriteSettings = Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextWriteSettings
DelimitedTextSink = Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextSink
CopyActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::CopyActivity
ControlActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ControlActivity
WebHookActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebHookActivity
AppendVariableActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::AppendVariableActivity
SetVariableActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::SetVariableActivity
FilterActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::FilterActivity
ValidationActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ValidationActivity
UntilActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::UntilActivity
WaitActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::WaitActivity
ForEachActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ForEachActivity
IfConditionActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::IfConditionActivity
ExecutePipelineActivity = Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecutePipelineActivity
SelfHostedIntegrationRuntimeStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeStatus
ManagedIntegrationRuntimeStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeStatus
LinkedIntegrationRuntimeRbacAuthorization = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeRbacAuthorization
LinkedIntegrationRuntimeKeyAuthorization = Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeKeyAuthorization
SelfHostedIntegrationRuntime = Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntime
ManagedIntegrationRuntime = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntime
SsisEnvironment = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisEnvironment
SsisPackage = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisPackage
SsisProject = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisProject
SsisFolder = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisFolder
IntegrationRuntimeState = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeState
IntegrationRuntimeAutoUpdate = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAutoUpdate
ParameterType = Azure::DataFactory::Mgmt::V2018_06_01::Models::ParameterType
DependencyCondition = Azure::DataFactory::Mgmt::V2018_06_01::Models::DependencyCondition
VariableType = Azure::DataFactory::Mgmt::V2018_06_01::Models::VariableType
TriggerRuntimeState = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRuntimeState
RunQueryFilterOperand = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilterOperand
RunQueryFilterOperator = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilterOperator
RunQueryOrderByField = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrderByField
RunQueryOrder = Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrder
TriggerRunStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRunStatus
TumblingWindowFrequency = Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowFrequency
BlobEventTypes = Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobEventTypes
DayOfWeek = Azure::DataFactory::Mgmt::V2018_06_01::Models::DayOfWeek
DaysOfWeek = Azure::DataFactory::Mgmt::V2018_06_01::Models::DaysOfWeek
RecurrenceFrequency = Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceFrequency
GoogleAdWordsAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsAuthenticationType
SparkServerType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkServerType
SparkThriftTransportProtocol = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkThriftTransportProtocol
SparkAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkAuthenticationType
ServiceNowAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowAuthenticationType
PrestoAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoAuthenticationType
PhoenixAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixAuthenticationType
ImpalaAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaAuthenticationType
HiveServerType = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveServerType
HiveThriftTransportProtocol = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveThriftTransportProtocol
HiveAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveAuthenticationType
HBaseAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseAuthenticationType
GoogleBigQueryAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryAuthenticationType
SapHanaAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaAuthenticationType
SftpAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpAuthenticationType
FtpAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpAuthenticationType
HttpAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpAuthenticationType
RestServiceAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::RestServiceAuthenticationType
MongoDbAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbAuthenticationType
ODataAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataAuthenticationType
ODataAadServicePrincipalCredentialType = Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataAadServicePrincipalCredentialType
TeradataAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::TeradataAuthenticationType
Db2AuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::Db2AuthenticationType
SybaseAuthenticationType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SybaseAuthenticationType
AzureFunctionActivityMethod = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionActivityMethod
WebActivityMethod = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivityMethod
CassandraSourceReadConsistencyLevels = Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraSourceReadConsistencyLevels
StoredProcedureParameterType = Azure::DataFactory::Mgmt::V2018_06_01::Models::StoredProcedureParameterType
SalesforceSourceReadBehavior = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSourceReadBehavior
HDInsightActivityDebugInfoOption = Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightActivityDebugInfoOption
SalesforceSinkWriteBehavior = Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSinkWriteBehavior
AzureSearchIndexWriteBehaviorType = Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexWriteBehaviorType
PolybaseSettingsRejectType = Azure::DataFactory::Mgmt::V2018_06_01::Models::PolybaseSettingsRejectType
SapCloudForCustomerSinkWriteBehavior = Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSinkWriteBehavior
WebHookActivityMethod = Azure::DataFactory::Mgmt::V2018_06_01::Models::WebHookActivityMethod
IntegrationRuntimeType = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeType
SelfHostedIntegrationRuntimeNodeStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeNodeStatus
IntegrationRuntimeUpdateResult = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeUpdateResult
IntegrationRuntimeInternalChannelEncryptionMode = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeInternalChannelEncryptionMode
ManagedIntegrationRuntimeNodeStatus = Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeNodeStatus
IntegrationRuntimeEntityReferenceType = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeEntityReferenceType
IntegrationRuntimeSsisCatalogPricingTier = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisCatalogPricingTier
IntegrationRuntimeLicenseType = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeLicenseType
IntegrationRuntimeEdition = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeEdition
SsisObjectMetadataType = Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataType
IntegrationRuntimeAuthKeyName = Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAuthKeyName
end
class DataFactoryManagementClass
attr_reader :operations, :factories, :exposure_control, :integration_runtimes, :integration_runtime_object_metadata, :integration_runtime_nodes, :linked_services, :datasets, :pipelines, :pipeline_runs, :activity_runs, :triggers, :rerun_triggers, :trigger_runs, :configurable, :base_url, :options, :model_classes
# Builds the profile client: instantiates the 2018-06-01
# DataFactoryManagementClient, propagates the subscription id when the
# client supports one, tags the user agent, and exposes each operation
# group through an instance variable (read via attr_reader above).
def initialize(configurable, base_url=nil, options=nil)
  @configurable, @base_url, @options = configurable, base_url, options
  @client_0 = Azure::DataFactory::Mgmt::V2018_06_01::DataFactoryManagementClient.new(configurable.credentials, base_url, options)
  @client_0.subscription_id = configurable.subscription_id if @client_0.respond_to?(:subscription_id)
  add_telemetry(@client_0)
  # One ivar per operation group, driven by a list instead of 14
  # hand-written assignments; behavior is identical.
  %i[operations factories exposure_control integration_runtimes
     integration_runtime_object_metadata integration_runtime_nodes
     linked_services datasets pipelines pipeline_runs activity_runs
     triggers rerun_triggers trigger_runs].each do |group|
    instance_variable_set(:"@#{group}", @client_0.public_send(group))
  end
  @model_classes = ModelClasses.new
end
# Appends this profile's identifier (SDK version + profile + service) to
# the given client's User-Agent header for telemetry purposes.
def add_telemetry(client)
  client.add_user_agent_information(
    "Profiles/azure_sdk/#{Azure::VERSION}/Latest/DataFactory/Mgmt"
  )
end
# Delegates any unknown method to the underlying versioned client.
# Fixes two defects in the generated version: a block passed by the
# caller was silently dropped (no &block forwarding), and respond_to?
# did not reflect delegated methods (no respond_to_missing?).
def method_missing(method, *args, &block)
  if @client_0.respond_to?(method)
    @client_0.send(method, *args, &block)
  else
    super
  end
end

# Companion to method_missing so that respond_to? answers truthfully
# for methods handled by the delegated client.
def respond_to_missing?(method, include_private = false)
  @client_0.respond_to?(method) || super
end
class ModelClasses
# Generated accessors: each method returns the corresponding 2018-06-01
# DataFactory model class so profile consumers can obtain model types
# without referencing the API-version namespace directly. Do not edit
# by hand; regenerated with the SDK.
def operation_metric_specification
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricSpecification
end
def operation_service_specification
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationServiceSpecification
end
def sub_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::SubResource
end
def operation
Azure::DataFactory::Mgmt::V2018_06_01::Models::Operation
end
def rerun_tumbling_window_trigger_action_parameters
Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTumblingWindowTriggerActionParameters
end
def dataset_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetLocation
end
def stored_procedure_parameter
Azure::DataFactory::Mgmt::V2018_06_01::Models::StoredProcedureParameter
end
def copy_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::CopySource
end
def factory_identity
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryIdentity
end
def rerun_trigger_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTriggerListResponse
end
def activity_policy
Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityPolicy
end
def operation_display
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationDisplay
end
def integration_runtime
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntime
end
def operation_log_specification
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationLogSpecification
end
def integration_runtime_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeListResponse
end
def operation_metric_availability
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricAvailability
end
def integration_runtime_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatus
end
def operation_metric_dimension
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationMetricDimension
end
def integration_runtime_status_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatusListResponse
end
def resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::Resource
end
def update_integration_runtime_node_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::UpdateIntegrationRuntimeNodeRequest
end
def expression
Azure::DataFactory::Mgmt::V2018_06_01::Models::Expression
end
def create_linked_integration_runtime_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::CreateLinkedIntegrationRuntimeRequest
end
def log_storage_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::LogStorageSettings
end
def linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedService
end
def operation_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::OperationListResponse
end
def linked_service_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceListResponse
end
def get_ssis_object_metadata_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::GetSsisObjectMetadataRequest
end
def dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::Dataset
end
def ssis_object_metadata_status_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataStatusResponse
end
def dataset_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetListResponse
end
def exposure_control_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::ExposureControlRequest
end
def user_property
Azure::DataFactory::Mgmt::V2018_06_01::Models::UserProperty
end
def exposure_control_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::ExposureControlResponse
end
def variable_specification
Azure::DataFactory::Mgmt::V2018_06_01::Models::VariableSpecification
end
def custom_activity_reference_object
Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomActivityReferenceObject
end
def web_activity_authentication
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivityAuthentication
end
def trigger_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerReference
end
def trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::Trigger
end
def ssisproperty_override
Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISPropertyOverride
end
def trigger_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerListResponse
end
def ssisexecution_parameter
Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISExecutionParameter
end
def dependency_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::DependencyReference
end
def store_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::StoreReadSettings
end
def retry_policy
Azure::DataFactory::Mgmt::V2018_06_01::Models::RetryPolicy
end
def git_hub_access_token_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::GitHubAccessTokenRequest
end
def ssisexecution_credential
Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISExecutionCredential
end
def user_access_policy
Azure::DataFactory::Mgmt::V2018_06_01::Models::UserAccessPolicy
end
def ssispackage_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::SSISPackageLocation
end
def pipeline_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineReference
end
def redirect_incompatible_row_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::RedirectIncompatibleRowSettings
end
def factory_update_parameters
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryUpdateParameters
end
def recurrence_schedule_occurrence
Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceScheduleOccurrence
end
def run_query_filter
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilter
end
def recurrence_schedule
Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceSchedule
end
def run_filter_parameters
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunFilterParameters
end
def schedule_trigger_recurrence
Azure::DataFactory::Mgmt::V2018_06_01::Models::ScheduleTriggerRecurrence
end
def pipeline_run
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRun
end
def staging_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::StagingSettings
end
def activity_run
Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityRun
end
def polybase_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::PolybaseSettings
end
def trigger_run
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRun
end
def dataset_compression
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetCompression
end
def linked_service_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceReference
end
def dataset_storage_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetStorageFormat
end
def factory_repo_configuration
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryRepoConfiguration
end
def integration_runtime_connection_info
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeConnectionInfo
end
def distcp_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::DistcpSettings
end
def integration_runtime_regenerate_key_parameters
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeRegenerateKeyParameters
end
def integration_runtime_status_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeStatusResponse
end
def integration_runtime_auth_keys
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAuthKeys
end
def linked_integration_runtime_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeRequest
end
def integration_runtime_monitoring_data
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeMonitoringData
end
def mongo_db_cursor_methods_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbCursorMethodsProperties
end
def integration_runtime_node_monitoring_data
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeNodeMonitoringData
end
def redshift_unload_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::RedshiftUnloadSettings
end
def ssis_object_metadata_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataListResponse
end
def activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::Activity
end
def ssis_object_metadata
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadata
end
def pipeline_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineListResponse
end
def script_action
Azure::DataFactory::Mgmt::V2018_06_01::Models::ScriptAction
end
def create_run_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::CreateRunResponse
end
def ssis_environment_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisEnvironmentReference
end
def factory_repo_update
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryRepoUpdate
end
def ssis_parameter
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisParameter
end
def access_policy_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::AccessPolicyResponse
end
def ssis_variable
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisVariable
end
def dataset_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetReference
end
def integration_runtime_node_ip_address
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeNodeIpAddress
end
def pipeline_run_invoked_by
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRunInvokedBy
end
def integration_runtime_compute_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeComputeProperties
end
def activity_runs_query_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityRunsQueryResponse
end
def integration_runtime_vnet_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeVNetProperties
end
def secret_base
Azure::DataFactory::Mgmt::V2018_06_01::Models::SecretBase
end
def integration_runtime_ssis_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisProperties
end
def integration_runtime_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeReference
end
def integration_runtime_ssis_catalog_info
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisCatalogInfo
end
def parameter_specification
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParameterSpecification
end
def integration_runtime_custom_setup_script_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeCustomSetupScriptProperties
end
def activity_dependency
Azure::DataFactory::Mgmt::V2018_06_01::Models::ActivityDependency
end
def integration_runtime_data_proxy_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeDataProxyProperties
end
def azure_mlweb_service_file
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLWebServiceFile
end
def entity_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::EntityReference
end
def git_hub_access_token_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::GitHubAccessTokenResponse
end
def linked_integration_runtime_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeType
end
def run_query_order_by
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrderBy
end
def managed_integration_runtime_node
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeNode
end
def trigger_runs_query_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRunsQueryResponse
end
def managed_integration_runtime_error
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeError
end
def update_integration_runtime_request
Azure::DataFactory::Mgmt::V2018_06_01::Models::UpdateIntegrationRuntimeRequest
end
def managed_integration_runtime_operation_result
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeOperationResult
end
def pipeline_folder
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineFolder
end
def self_hosted_integration_runtime_node
Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeNode
end
def trigger_pipeline_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerPipelineReference
end
def linked_integration_runtime
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntime
end
def factory_list_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryListResponse
end
def copy_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::CopySink
end
def format_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::FormatReadSettings
end
def format_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::FormatWriteSettings
end
def dataset_folder
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetFolder
end
def store_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::StoreWriteSettings
end
def pipeline_runs_query_response
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineRunsQueryResponse
end
def web_linked_service_type_properties
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebLinkedServiceTypeProperties
end
def secure_string
Azure::DataFactory::Mgmt::V2018_06_01::Models::SecureString
end
def azure_key_vault_secret_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureKeyVaultSecretReference
end
def factory
Azure::DataFactory::Mgmt::V2018_06_01::Models::Factory
end
def integration_runtime_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeResource
end
def linked_service_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedServiceResource
end
def dataset_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetResource
end
def pipeline_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::PipelineResource
end
def trigger_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerResource
end
def factory_vstsconfiguration
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryVSTSConfiguration
end
def factory_git_hub_configuration
Azure::DataFactory::Mgmt::V2018_06_01::Models::FactoryGitHubConfiguration
end
def rerun_tumbling_window_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTumblingWindowTrigger
end
def rerun_trigger_resource
Azure::DataFactory::Mgmt::V2018_06_01::Models::RerunTriggerResource
end
def self_dependency_tumbling_window_trigger_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfDependencyTumblingWindowTriggerReference
end
def trigger_dependency_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerDependencyReference
end
def tumbling_window_trigger_dependency_reference
Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowTriggerDependencyReference
end
def tumbling_window_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowTrigger
end
def multiple_pipeline_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::MultiplePipelineTrigger
end
def blob_events_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobEventsTrigger
end
def blob_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobTrigger
end
def schedule_trigger
Azure::DataFactory::Mgmt::V2018_06_01::Models::ScheduleTrigger
end
def azure_function_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionLinkedService
end
def azure_data_explorer_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerLinkedService
end
def sap_table_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableLinkedService
end
def google_ad_words_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsLinkedService
end
def oracle_service_cloud_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudLinkedService
end
def dynamics_axlinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXLinkedService
end
def responsys_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysLinkedService
end
def azure_databricks_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDatabricksLinkedService
end
def azure_data_lake_analytics_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeAnalyticsLinkedService
end
def hdinsight_on_demand_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightOnDemandLinkedService
end
def salesforce_marketing_cloud_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudLinkedService
end
def netezza_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaLinkedService
end
def vertica_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaLinkedService
end
def zoho_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoLinkedService
end
def xero_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroLinkedService
end
def square_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareLinkedService
end
def spark_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkLinkedService
end
def shopify_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifyLinkedService
end
def service_now_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowLinkedService
end
def quick_books_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksLinkedService
end
def presto_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoLinkedService
end
def phoenix_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixLinkedService
end
def paypal_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalLinkedService
end
def marketo_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoLinkedService
end
def maria_dblinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBLinkedService
end
def magento_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoLinkedService
end
def jira_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraLinkedService
end
def impala_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaLinkedService
end
def hubspot_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotLinkedService
end
def hive_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveLinkedService
end
def hbase_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseLinkedService
end
def greenplum_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumLinkedService
end
def google_big_query_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryLinkedService
end
def eloqua_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaLinkedService
end
def drill_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillLinkedService
end
def couchbase_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseLinkedService
end
def concur_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurLinkedService
end
def azure_postgre_sql_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlLinkedService
end
def amazon_mwslinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSLinkedService
end
def sap_hana_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaLinkedService
end
def sap_bwlinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapBWLinkedService
end
def sftp_server_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpServerLinkedService
end
def ftp_server_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpServerLinkedService
end
def http_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpLinkedService
end
def azure_search_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchLinkedService
end
def custom_data_source_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomDataSourceLinkedService
end
def amazon_redshift_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonRedshiftLinkedService
end
def amazon_s3_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3LinkedService
end
def rest_service_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::RestServiceLinkedService
end
def sap_open_hub_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubLinkedService
end
def sap_ecc_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccLinkedService
end
def sap_cloud_for_customer_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerLinkedService
end
def salesforce_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceLinkedService
end
def office365_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365LinkedService
end
def azure_blob_fslinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSLinkedService
end
def azure_data_lake_store_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreLinkedService
end
def cosmos_db_mongo_db_api_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiLinkedService
end
def mongo_db_v2_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2LinkedService
end
def mongo_db_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbLinkedService
end
def cassandra_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraLinkedService
end
def web_client_certificate_authentication
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebClientCertificateAuthentication
end
def web_basic_authentication
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebBasicAuthentication
end
def web_anonymous_authentication
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebAnonymousAuthentication
end
def web_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebLinkedService
end
def odata_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataLinkedService
end
def hdfs_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsLinkedService
end
def odbc_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::OdbcLinkedService
end
def azure_mllinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLLinkedService
end
def teradata_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::TeradataLinkedService
end
def db2_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::Db2LinkedService
end
def sybase_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SybaseLinkedService
end
def postgre_sql_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::PostgreSqlLinkedService
end
def my_sql_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::MySqlLinkedService
end
def azure_my_sql_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlLinkedService
end
def oracle_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleLinkedService
end
def file_server_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerLinkedService
end
def hdinsight_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightLinkedService
end
def dynamics_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsLinkedService
end
def cosmos_db_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbLinkedService
end
def azure_key_vault_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureKeyVaultLinkedService
end
def azure_batch_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBatchLinkedService
end
def azure_sql_database_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDatabaseLinkedService
end
def sql_server_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerLinkedService
end
def azure_sql_dwlinked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDWLinkedService
end
def azure_table_storage_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableStorageLinkedService
end
def azure_blob_storage_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageLinkedService
end
def azure_storage_linked_service
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureStorageLinkedService
end
def google_ad_words_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsObjectDataset
end
def azure_data_explorer_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerTableDataset
end
def oracle_service_cloud_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudObjectDataset
end
def dynamics_axresource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXResourceDataset
end
def responsys_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysObjectDataset
end
def salesforce_marketing_cloud_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudObjectDataset
end
def vertica_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaTableDataset
end
def netezza_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaTableDataset
end
def zoho_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoObjectDataset
end
def xero_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroObjectDataset
end
def square_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareObjectDataset
end
def spark_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkObjectDataset
end
def shopify_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifyObjectDataset
end
def service_now_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowObjectDataset
end
def quick_books_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksObjectDataset
end
def presto_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoObjectDataset
end
def phoenix_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixObjectDataset
end
def paypal_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalObjectDataset
end
def marketo_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoObjectDataset
end
def maria_dbtable_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBTableDataset
end
def magento_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoObjectDataset
end
def jira_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraObjectDataset
end
def impala_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaObjectDataset
end
def hubspot_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotObjectDataset
end
def hive_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveObjectDataset
end
def hbase_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseObjectDataset
end
def greenplum_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumTableDataset
end
def google_big_query_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryObjectDataset
end
def eloqua_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaObjectDataset
end
def drill_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillTableDataset
end
def couchbase_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseTableDataset
end
def concur_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurObjectDataset
end
def azure_postgre_sql_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlTableDataset
end
def amazon_mwsobject_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSObjectDataset
end
def dataset_zip_deflate_compression
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetZipDeflateCompression
end
def dataset_deflate_compression
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetDeflateCompression
end
def dataset_gzip_compression
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetGZipCompression
end
def dataset_bzip2_compression
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatasetBZip2Compression
end
def parquet_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetFormat
end
def orc_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::OrcFormat
end
def avro_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::AvroFormat
end
def json_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::JsonFormat
end
def text_format
Azure::DataFactory::Mgmt::V2018_06_01::Models::TextFormat
end
def http_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpDataset
end
def azure_search_index_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexDataset
end
def web_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebTableDataset
end
def sap_table_resource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableResourceDataset
end
def rest_resource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::RestResourceDataset
end
def sql_server_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerTableDataset
end
def sap_open_hub_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubTableDataset
end
def sap_hana_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaTableDataset
end
def sap_ecc_resource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccResourceDataset
end
def sap_cloud_for_customer_resource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerResourceDataset
end
def salesforce_object_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceObjectDataset
end
def relational_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::RelationalTableDataset
end
def azure_my_sql_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlTableDataset
end
def oracle_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleTableDataset
end
def odata_resource_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataResourceDataset
end
def cosmos_db_mongo_db_api_collection_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiCollectionDataset
end
def mongo_db_v2_collection_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2CollectionDataset
end
def mongo_db_collection_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbCollectionDataset
end
def file_share_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileShareDataset
end
def office365_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365Dataset
end
def azure_blob_fsdataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSDataset
end
def azure_data_lake_store_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreDataset
end
def dynamics_entity_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsEntityDataset
end
def document_db_collection_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionDataset
end
def custom_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomDataset
end
def cassandra_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraTableDataset
end
def azure_sql_dwtable_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlDWTableDataset
end
def azure_sql_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlTableDataset
end
def azure_table_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableDataset
end
def azure_blob_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobDataset
end
def hdfs_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsLocation
end
def http_server_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpServerLocation
end
def sftp_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpLocation
end
def ftp_server_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpServerLocation
end
def file_server_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerLocation
end
def amazon_s3_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3Location
end
def azure_data_lake_store_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreLocation
end
def azure_blob_fslocation
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSLocation
end
def azure_blob_storage_location
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageLocation
end
def delimited_text_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextDataset
end
def parquet_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetDataset
end
def amazon_s3_dataset
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3Dataset
end
def azure_function_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionActivity
end
def databricks_spark_python_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksSparkPythonActivity
end
def databricks_spark_jar_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksSparkJarActivity
end
def databricks_notebook_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::DatabricksNotebookActivity
end
def data_lake_analytics_usqlactivity
Azure::DataFactory::Mgmt::V2018_06_01::Models::DataLakeAnalyticsUSQLActivity
end
def azure_mlupdate_resource_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLUpdateResourceActivity
end
def azure_mlbatch_execution_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMLBatchExecutionActivity
end
def get_metadata_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::GetMetadataActivity
end
def web_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivity
end
def amazon_redshift_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonRedshiftSource
end
def google_ad_words_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsSource
end
def oracle_service_cloud_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleServiceCloudSource
end
def dynamics_axsource
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsAXSource
end
def responsys_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ResponsysSource
end
def salesforce_marketing_cloud_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceMarketingCloudSource
end
def vertica_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::VerticaSource
end
def netezza_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::NetezzaSource
end
def zoho_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ZohoSource
end
def xero_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::XeroSource
end
def square_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SquareSource
end
def spark_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkSource
end
def shopify_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ShopifySource
end
def service_now_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowSource
end
def quick_books_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::QuickBooksSource
end
def presto_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoSource
end
def phoenix_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixSource
end
def paypal_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::PaypalSource
end
def marketo_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::MarketoSource
end
def maria_dbsource
Azure::DataFactory::Mgmt::V2018_06_01::Models::MariaDBSource
end
def magento_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::MagentoSource
end
def jira_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::JiraSource
end
def impala_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaSource
end
def hubspot_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::HubspotSource
end
def hive_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveSource
end
def hbase_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseSource
end
def greenplum_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::GreenplumSource
end
def google_big_query_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQuerySource
end
def eloqua_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::EloquaSource
end
def drill_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::DrillSource
end
def couchbase_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::CouchbaseSource
end
def concur_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ConcurSource
end
def azure_postgre_sql_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzurePostgreSqlSource
end
def amazon_mwssource
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonMWSSource
end
def http_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpSource
end
def azure_blob_fssource
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSSource
end
def azure_data_lake_store_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreSource
end
def office365_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::Office365Source
end
def cosmos_db_mongo_db_api_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiSource
end
def mongo_db_v2_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbV2Source
end
def mongo_db_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbSource
end
def cassandra_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraSource
end
def web_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebSource
end
def oracle_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleSource
end
def azure_data_explorer_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerSource
end
def azure_my_sql_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureMySqlSource
end
def hdfs_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsSource
end
def file_system_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileSystemSource
end
def sql_dwsource
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlDWSource
end
def azure_sql_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlSource
end
def sql_server_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerSource
end
def sql_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlSource
end
def rest_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::RestSource
end
def sap_table_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapTableSource
end
def sap_open_hub_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapOpenHubSource
end
def sap_hana_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaSource
end
def sap_ecc_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapEccSource
end
def sap_cloud_for_customer_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSource
end
def salesforce_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSource
end
def relational_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::RelationalSource
end
def dynamics_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsSource
end
def document_db_collection_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionSource
end
def blob_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobSource
end
def azure_table_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableSource
end
def delimited_text_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextReadSettings
end
def hdfs_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::HdfsReadSettings
end
def http_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpReadSettings
end
def sftp_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpReadSettings
end
def ftp_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpReadSettings
end
def file_server_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerReadSettings
end
def amazon_s3_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AmazonS3ReadSettings
end
def azure_data_lake_store_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreReadSettings
end
def azure_blob_fsread_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSReadSettings
end
def azure_blob_storage_read_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageReadSettings
end
def delimited_text_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextSource
end
def parquet_source
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetSource
end
def execution_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecutionActivity
end
def lookup_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::LookupActivity
end
def delete_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::DeleteActivity
end
def sql_server_stored_procedure_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerStoredProcedureActivity
end
def custom_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::CustomActivity
end
def execute_ssispackage_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecuteSSISPackageActivity
end
def hdinsight_spark_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightSparkActivity
end
def hdinsight_streaming_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightStreamingActivity
end
def hdinsight_map_reduce_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightMapReduceActivity
end
def hdinsight_pig_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightPigActivity
end
def hdinsight_hive_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightHiveActivity
end
def cosmos_db_mongo_db_api_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::CosmosDbMongoDbApiSink
end
def salesforce_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSink
end
def azure_data_explorer_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataExplorerSink
end
def dynamics_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::DynamicsSink
end
def odbc_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::OdbcSink
end
def azure_search_index_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexSink
end
def azure_blob_fssink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSSink
end
def azure_data_lake_store_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreSink
end
def oracle_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::OracleSink
end
def sql_dwsink
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlDWSink
end
def azure_sql_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSqlSink
end
def sql_server_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlServerSink
end
def sql_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::SqlSink
end
def document_db_collection_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::DocumentDbCollectionSink
end
def file_system_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileSystemSink
end
def blob_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobSink
end
def file_server_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::FileServerWriteSettings
end
def azure_data_lake_store_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureDataLakeStoreWriteSettings
end
def azure_blob_fswrite_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobFSWriteSettings
end
def azure_blob_storage_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureBlobStorageWriteSettings
end
def parquet_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParquetSink
end
def azure_table_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureTableSink
end
def azure_queue_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureQueueSink
end
def sap_cloud_for_customer_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSink
end
def delimited_text_write_settings
Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextWriteSettings
end
def delimited_text_sink
Azure::DataFactory::Mgmt::V2018_06_01::Models::DelimitedTextSink
end
def copy_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::CopyActivity
end
def control_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ControlActivity
end
def web_hook_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebHookActivity
end
def append_variable_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::AppendVariableActivity
end
def set_variable_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::SetVariableActivity
end
def filter_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::FilterActivity
end
def validation_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ValidationActivity
end
def until_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::UntilActivity
end
def wait_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::WaitActivity
end
def for_each_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ForEachActivity
end
def if_condition_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::IfConditionActivity
end
def execute_pipeline_activity
Azure::DataFactory::Mgmt::V2018_06_01::Models::ExecutePipelineActivity
end
def self_hosted_integration_runtime_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeStatus
end
def managed_integration_runtime_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeStatus
end
def linked_integration_runtime_rbac_authorization
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeRbacAuthorization
end
def linked_integration_runtime_key_authorization
Azure::DataFactory::Mgmt::V2018_06_01::Models::LinkedIntegrationRuntimeKeyAuthorization
end
def self_hosted_integration_runtime
Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntime
end
def managed_integration_runtime
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntime
end
def ssis_environment
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisEnvironment
end
def ssis_package
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisPackage
end
def ssis_project
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisProject
end
def ssis_folder
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisFolder
end
def integration_runtime_state
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeState
end
def integration_runtime_auto_update
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAutoUpdate
end
def parameter_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::ParameterType
end
def dependency_condition
Azure::DataFactory::Mgmt::V2018_06_01::Models::DependencyCondition
end
def variable_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::VariableType
end
def trigger_runtime_state
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRuntimeState
end
def run_query_filter_operand
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilterOperand
end
def run_query_filter_operator
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryFilterOperator
end
def run_query_order_by_field
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrderByField
end
def run_query_order
Azure::DataFactory::Mgmt::V2018_06_01::Models::RunQueryOrder
end
def trigger_run_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::TriggerRunStatus
end
def tumbling_window_frequency
Azure::DataFactory::Mgmt::V2018_06_01::Models::TumblingWindowFrequency
end
def blob_event_types
Azure::DataFactory::Mgmt::V2018_06_01::Models::BlobEventTypes
end
def day_of_week
Azure::DataFactory::Mgmt::V2018_06_01::Models::DayOfWeek
end
def days_of_week
Azure::DataFactory::Mgmt::V2018_06_01::Models::DaysOfWeek
end
def recurrence_frequency
Azure::DataFactory::Mgmt::V2018_06_01::Models::RecurrenceFrequency
end
def google_ad_words_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleAdWordsAuthenticationType
end
def spark_server_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkServerType
end
def spark_thrift_transport_protocol
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkThriftTransportProtocol
end
def spark_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SparkAuthenticationType
end
def service_now_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::ServiceNowAuthenticationType
end
def presto_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::PrestoAuthenticationType
end
def phoenix_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::PhoenixAuthenticationType
end
def impala_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::ImpalaAuthenticationType
end
def hive_server_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveServerType
end
def hive_thrift_transport_protocol
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveThriftTransportProtocol
end
def hive_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::HiveAuthenticationType
end
def hbase_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::HBaseAuthenticationType
end
def google_big_query_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::GoogleBigQueryAuthenticationType
end
def sap_hana_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapHanaAuthenticationType
end
def sftp_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SftpAuthenticationType
end
def ftp_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::FtpAuthenticationType
end
def http_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::HttpAuthenticationType
end
def rest_service_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::RestServiceAuthenticationType
end
def mongo_db_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::MongoDbAuthenticationType
end
def odata_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataAuthenticationType
end
def odata_aad_service_principal_credential_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::ODataAadServicePrincipalCredentialType
end
def teradata_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::TeradataAuthenticationType
end
def db2_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::Db2AuthenticationType
end
def sybase_authentication_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SybaseAuthenticationType
end
def azure_function_activity_method
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureFunctionActivityMethod
end
def web_activity_method
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebActivityMethod
end
def cassandra_source_read_consistency_levels
Azure::DataFactory::Mgmt::V2018_06_01::Models::CassandraSourceReadConsistencyLevels
end
def stored_procedure_parameter_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::StoredProcedureParameterType
end
def salesforce_source_read_behavior
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSourceReadBehavior
end
def hdinsight_activity_debug_info_option
Azure::DataFactory::Mgmt::V2018_06_01::Models::HDInsightActivityDebugInfoOption
end
def salesforce_sink_write_behavior
Azure::DataFactory::Mgmt::V2018_06_01::Models::SalesforceSinkWriteBehavior
end
def azure_search_index_write_behavior_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::AzureSearchIndexWriteBehaviorType
end
def polybase_settings_reject_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::PolybaseSettingsRejectType
end
def sap_cloud_for_customer_sink_write_behavior
Azure::DataFactory::Mgmt::V2018_06_01::Models::SapCloudForCustomerSinkWriteBehavior
end
def web_hook_activity_method
Azure::DataFactory::Mgmt::V2018_06_01::Models::WebHookActivityMethod
end
def integration_runtime_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeType
end
def self_hosted_integration_runtime_node_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::SelfHostedIntegrationRuntimeNodeStatus
end
def integration_runtime_update_result
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeUpdateResult
end
def integration_runtime_internal_channel_encryption_mode
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeInternalChannelEncryptionMode
end
def managed_integration_runtime_node_status
Azure::DataFactory::Mgmt::V2018_06_01::Models::ManagedIntegrationRuntimeNodeStatus
end
def integration_runtime_entity_reference_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeEntityReferenceType
end
def integration_runtime_ssis_catalog_pricing_tier
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeSsisCatalogPricingTier
end
def integration_runtime_license_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeLicenseType
end
def integration_runtime_edition
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeEdition
end
def ssis_object_metadata_type
Azure::DataFactory::Mgmt::V2018_06_01::Models::SsisObjectMetadataType
end
def integration_runtime_auth_key_name
Azure::DataFactory::Mgmt::V2018_06_01::Models::IntegrationRuntimeAuthKeyName
end
end
end
end
end
end
|
mindsnacks/Zinc-ObjC | Zinc/Private/ZincBundleDeleteTask.h | <reponame>mindsnacks/Zinc-ObjC<gh_stars>0
//
// ZincBundleDeleteTask.h
// Zinc-iOS
//
// Created by <NAME> on 1/11/12.
// Copyright (c) 2012 MindSnacks. All rights reserved.
//
#import "ZincTask.h"
#import "ZincGlobals.h"
/**
 * Task that removes a specific version of a bundle from local storage.
 * Concrete behavior lives in the corresponding .m file; this header only
 * declares the identifying properties of the deletion target.
 */
@interface ZincBundleDeleteTask : ZincTask
/** Identifier of the bundle to delete. */
@property (readonly) NSString* bundleID;
/** Version of the bundle to delete (ZincVersion is declared in ZincGlobals.h). */
@property (readonly) ZincVersion version;
@end
|
anetczuk/ReinforcedAgent | src/agents/general/policy/mc/BoxCart.java | /**
*
*/
package agents.general.policy.mc;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.rlcommunity.rlglue.codec.types.Observation;
import agents.general.AgentAction;
import agents.general.policy.PolicyControl;
import agents.general.state.DiscreteState;
import agents.general.state.StateTranslator;
/**
* @author bob
*
*/
/**
 * BOXES-style cart-pole policy: discretises each observation into a "box"
 * via the supplied {@link StateTranslator} and delegates the per-box action
 * bookkeeping to {@link Boxes}.
 *
 * NOTE(review): the {@code reward} arguments from the RL-Glue callbacks are
 * ignored; {@code rewardSum} counts elapsed steps instead — presumably the
 * survival-time signal the BOXES scheme expects. Confirm with the env spec.
 */
public class BoxCart implements PolicyControl {

    // Translator from raw observations to discrete states (set once in ctor).
    StateTranslator range = null;

    // Steps elapsed in the current episode; fed to boxes as the visit "time".
    double rewardSum = 0;

    // Per-discrete-state action statistics.
    Boxes boxes = new Boxes();

    public BoxCart(StateTranslator range) {
        this.range = range;
    }

    /** Not supported by this policy; logs a marker and does nothing. */
    @Override
    public void setExplore(boolean state) {
        System.out.println("........... Not implemented ..........");
    }

    /** Not supported by this policy; logs a marker and does nothing. */
    @Override
    public void setPrintData(boolean state) {
        System.out.println("........... Not implemented ..........");
    }

    /** Not supported by this policy; logs a marker and does nothing. */
    @Override
    public void setLogStep(boolean state) {
        System.out.println("........... Not implemented ..........");
    }

    /** Episode start: reset the step counter and box visit logs, pick an action. */
    @Override
    public AgentAction start(Observation nextState) {
        rewardSum = 0;
        boxes.clear();
        DiscreteState state = range.convert(nextState);
        int action = boxes.add(state, rewardSum);
        return new AgentAction(action);
    }

    /** Per-step callback: advance the step counter and pick the box's action. */
    @Override
    public AgentAction step(double reward, Observation nextState) {
        rewardSum += 1;
        DiscreteState state = range.convert(nextState);
        int action = boxes.add(state, rewardSum);
        return new AgentAction(action);
    }

    /** Episode end: run the BOXES update and report state-space coverage. */
    @Override
    public void end(double reward) {
        rewardSum += 1;
        boxes.calculate(rewardSum);
        System.out.println(String.format(
                "known states: %d of %d\n",
                boxes.size(), range.totalObservations()));
    }
}
/**
*
* @author bob
*
*/
/**
 * Box store for the BOXES-style controller: maps each {@link DiscreteState}
 * ("box") to decayed action statistics and derives a binary (0/1) action
 * recommendation per box.
 *
 * NOTE(review): the update rule resembles the Michie/Chambers BOXES
 * algorithm (decayed per-action life/usage sums compared against a global
 * performance target) — confirm against the original design notes.
 */
class Boxes {

    static class Box {
        // Step counts at which this box was visited during the current episode.
        ArrayList<Double> times = new ArrayList<Double>();
        // Decayed statistics for action 0: ll = life sum, lu = usage count.
        double ll = 1.0;
        double lu = 1.0;
        // Decayed statistics for action 1: rl = life sum, ru = usage count.
        // NOTE(review): rl/ru are only ever decayed in update(), never
        // reinforced with visit data — verify this is intentional.
        double rl = 1.0;
        double ru = 1.0;
        // Action (0 or 1) currently recommended for this box.
        int action = 0;

        public Box() {
        }

        // Record a visit at the given episode time.
        public void add(double value) {
            times.add(value);
        }

        public void clear() {
            times.clear();
        }

        /**
         * End-of-episode update: decay the accumulated statistics by dk,
         * credit this episode's visits, then pick whichever action has the
         * larger target-smoothed expected life.
         */
        public void update(final double totalReward, final double dk, final double target) {
            ll = ll * dk + rewardSum(totalReward);
            lu = lu * dk + times.size();
            rl = rl * dk;
            ru = ru * dk;

            // k is a smoothing weight pulling sparse boxes toward the global target.
            final double k = 1.0;
            double ktarget = k * target;
            double luk = lu + k;
            double lval = (ll + ktarget) / luk;
            double ruk = ru + k;
            double rval = (rl + ktarget) / ruk;
            if (lval > rval)
                action = 0;
            else
                action = 1;
        }

        // Sum of remaining lifetimes: for each visit, how long the episode
        // lasted after that visit.
        double rewardSum(double totalReward) {
            double sum = 0.0;
            int num = times.size();
            for( int i=0; i<num; ++i) {
                sum += ( totalReward - times.get(i) );
            }
            return sum;
        }
    }

    // Known boxes, keyed by discrete state.
    Map<DiscreteState, Box > map = new HashMap<DiscreteState, Box >();
    // Decayed global life sum / episode count; metris = their ratio
    // (running performance metric; "metris" is the original spelling).
    double gl = 0.0;
    double gu = 0.0;
    double metris = 0.0;
    // Global performance target used when updating every box.
    double target = 0.0;

    public Boxes() {
    }

    public int size() {
        return map.size();
    }

    /**
     * End-of-episode pass: refresh the global performance metric and target,
     * then run the BOXES update on every known box.
     */
    public void calculate(double totalReward) {
        final double dk = 0.5;		// less than 1
        gl = gl * dk + totalReward;
        gu = gu * dk + 1.0;
        metris = gl / gu;

        final double c0 = 0.0;		// greater equal 0
        final double c1 = 1.0;		// greater equal 1
        target = c0 + c1 * metris;

        for( Box box : map.values()) {
            box.update( totalReward, dk, target );
        }
    }

    // Clear per-episode visit logs (box statistics themselves are kept).
    public void clear() {
        for( Box box : map.values()) {
            box.clear();
        }
    }

    /**
     * Record a visit to the box for {@code state} at episode time
     * {@code value}, creating the box on first sight, and return the box's
     * currently recommended action.
     */
    public int add(DiscreteState state, double value) {
        Box box = map.get(state);
        if (box == null) {
            box = new Box();
            map.put(state, box);
        }
        box.add(value);
        return box.action;
    }
}
|
azrobles/recipe-planner | src/test/java/com/ara/recipeplanner/repository/LocationRepositoryTest.java | package com.ara.recipeplanner.repository;
import static org.junit.jupiter.api.Assertions.assertEquals;
import com.ara.recipeplanner.model.Location;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest;
import org.springframework.boot.test.autoconfigure.orm.jpa.TestEntityManager;
@DataJpaTest
class LocationRepositoryTest {

  @Autowired
  private TestEntityManager entityManager;

  @Autowired
  private LocationRepository repository;

  /**
   * Persists a location and checks that findOneByName retrieves it by its name.
   */
  @Test
  void findOneByNameTest() {
    final String locationName = "name";
    entityManager.persist(new Location(null, locationName));
    entityManager.flush();

    Location result = repository.findOneByName(locationName);

    assertEquals(locationName, result.getName());
  }

}
|
ManjeetMehta/spring-boot-1.5 | mehta-applications-service/src/main/java/com/mehta/applications/service/impl/TestServiceImpl.java | package com.mehta.applications.service.impl;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.mehta.applications.repository.TestRepository;
import com.mehta.applications.service.TestService;
@Service
public class TestServiceImpl implements TestService {

	@Autowired
	TestRepository testRepository;

	/**
	 * Returns all users from the repository.
	 *
	 * @return whatever {@link TestRepository#findAllUser()} yields
	 *         (presumably a collection of users — the repository's return
	 *         type is not visible here; confirm before narrowing Object)
	 */
	@Override
	public Object testList() {
		// Return directly; the intermediate local added nothing.
		return testRepository.findAllUser();
	}

}
|
MothOnMars/search-gov | spec/models/rtu_date_range_spec.rb | require 'spec_helper'
# Specs for RtuDateRange, which derives the span of dates for which
# analytics data exists (backed by an Elasticsearch date-range query).
describe RtuDateRange do
  let(:rtu_date_range) { described_class.new('some affiliate', 'search or click type here') }

  # Stubs the query builder and the ES client so the model receives a canned
  # aggregation response covering 2014-05-20 through 2014-05-28.
  shared_context 'when dates are available' do
    let(:json_response) do
      JSON.parse(
        read_fixture_file('/json/rtu_dashboard/rtu_date_range.json')
      )
    end
    let(:search_opts) do
      {
        index: 'logstash-*',
        body: 'query_body',
        size: 0
      }
    end

    before do
      allow(RtuDateRangeQuery).to receive(:new).
        with('some affiliate', 'search or click type here').
        and_return(instance_double(RtuDateRangeQuery, body: 'query_body'))
      allow(Es::ELK.client_reader).to receive(:search).
        with(search_opts).and_return json_response
    end
  end

  describe '#available_dates_range' do
    context 'when dates are available' do
      include_context 'when dates are available'

      it 'should return the range of available dates' do
        expect(rtu_date_range.available_dates_range).to eq(Date.parse('2014-05-20')..Date.parse('2014-05-28'))
      end
    end

    # ES responds successfully but its stats aggregation is empty.
    context 'when no dates are available' do
      let(:json_response) do
        JSON.parse(read_fixture_file('/json/rtu_dashboard/rtu_date_range_no_stats.json'))
      end

      before do
        allow(Es::ELK.client_reader).to receive(:search).and_return json_response
      end

      # Degenerate single-day range anchored on today.
      it 'should return the range of available dates bounded by current day' do
        expect(rtu_date_range.available_dates_range).to eq(Date.current..Date.current)
      end
    end

    # Any failure talking to ES must degrade to the same today..today range.
    context 'when there is a problem getting the data' do
      before do
        allow(Es::ELK.client_reader).to receive(:search).and_raise StandardError
      end

      it 'should return the range of available dates bounded by current day' do
        expect(rtu_date_range.available_dates_range).to eq(Date.current..Date.current)
      end
    end
  end

  describe '#default_start' do
    context 'when dates are available' do
      include_context 'when dates are available'

      it 'is the first day of the most recent month with results' do
        expect(rtu_date_range.default_start).to eq '2014-05-01'.to_date
      end
    end
  end

  describe '#default_end' do
    context 'when dates are available' do
      include_context 'when dates are available'

      it 'is the last day of the available dates' do
        expect(rtu_date_range.default_end).to eq '2014-05-28'.to_date
      end
    end
  end
end
|
narimenhadjkacem/E-Reputation-mean-stack-project | server/public/angular/app/campaign/campaign.factory.js | /**
* Created by HP on 20/03/2017.
*/
// AngularJS $resource factory for the campaign REST endpoints
// (api/campaigns/:id). Provides the default $resource verbs plus a PUT
// 'update' action and a by-id GET.
(function () {
    'use strict';

    angular
        .module('ATSApp.campaign')
        .factory('CampaignFactory', CampaignFactory);

    CampaignFactory.$inject = ['$resource'];

    /* @ngInject */
    function CampaignFactory($resource) {
        /** Change The Link To your Rest URL From the JAVA EE APP*/
        return $resource('api/campaigns/:id',
            {id: '@id'},
            {
                'update': {method: 'PUT'},
                'getCampaignById': {
                    url: 'api/campaigns/:id',
                    method: 'GET',
                    params: {
                        id: '@id',
                    },
                    // NOTE(review): isArray:true on a fetch-by-id action is
                    // unusual — $resource will throw if the backend returns a
                    // single object. Confirm the endpoint really responds
                    // with a JSON array.
                    isArray: true
                },
            }
        );
    }
})();
|
krattai/AEBL | blades/xbmc/xbmc/android/jni/Cursor.h | #pragma once
/*
* Copyright (C) 2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "JNIBase.h"
#include "MediaStore.h"
class CJNIURI;
// JNI wrapper exposing an Android database cursor to C++ code. Method and
// constant names mirror the android.database.Cursor API — presumably each
// call forwards over JNI to the wrapped Java object (implementation not
// visible here).
class CJNICursor : public CJNIBase
{
public:
  // Wraps an existing Java cursor object; does not take ownership semantics
  // beyond the jhobject handle.
  CJNICursor(const jni::jhobject &object) : CJNIBase(object){};
  ~CJNICursor(){};

  // --- row navigation ---
  int getCount();
  int getPosition();
  bool move(int offset);
  bool moveToPosition(int position);
  bool moveToFirst();
  bool moveToLast();
  bool moveToNext();
  bool moveToPrevious();
  bool isFirst();
  bool isLast();
  bool isBeforeFirst();
  bool isAfterLast();

  // --- column metadata ---
  int getColumnIndex(const std::string &columnName);
  std::string getColumnName(int columnIndex);
  std::vector<std::string> getColumnNames();
  int getColumnCount();

  // --- typed accessors for the current row ---
  std::string getString(int columnIndex);
  short getShort(int columnIndex);
  int getInt(int columnIndex);
  int64_t getLong(int columnIndex);
  float getFloat(int columnIndex);
  double getDouble(int columnIndex);
  int getType(int columnIndex);
  bool isNull(int columnIndex);

  // --- lifecycle ---
  void deactivate();
  bool requery();
  void close();
  bool isClosed();

  // Resolves the FIELD_TYPE_* constants below from the Java class; must be
  // called before they are read.
  static void PopulateStaticFields();
  // Column type constants (counterparts of android.database.Cursor FIELD_TYPE_*).
  static int FIELD_TYPE_NULL;
  static int FIELD_TYPE_INTEGER;
  static int FIELD_TYPE_FLOAT;
  static int FIELD_TYPE_STRING;
  static int FIELD_TYPE_BLOB;

private:
  CJNICursor();
};
|
chiendarrendor/AlbertsAdalogicalAenigmas | Ada56/src/TriangleNotOnPathLogicStep.java | import grid.logic.LogicStatus;
import grid.logic.LogicStep;
import java.awt.Point;
/**
 * Logic step enforcing that the cell at a fixed point must never lie on the
 * solution path: finding it on the path is a contradiction; otherwise this
 * step makes no deduction.
 */
public class TriangleNotOnPathLogicStep implements LogicStep<Board> {
    Point p;

    public TriangleNotOnPathLogicStep(Point point) {
        this.p = point;
    }

    @Override
    public LogicStatus apply(Board board) {
        if (board.isOnPath(p.x, p.y)) {
            return LogicStatus.CONTRADICTION;
        }
        return LogicStatus.STYMIED;
    }
}
|
bopopescu/drawquest-web | deploy/ec2/snapshot_rds.py | #!/usr/bin/python
from datetime import datetime, date, timedelta
import os
import sys; sys.path += ['/var/canvas/common', '../../common']
import yaml
import datetime
from collections import defaultdict
from boto.rds import RDSConnection
from configuration import aws
def snapshot_rds():
"""
dumb script that cleans up all the duplicate ebs snapshots our two cron servers
create while backing up redis
"""
(key, secret) = aws
conn = RDSConnection(key, secret)
for db in conn.get_all_dbinstances():
print "backing up rds", db.id, "..."
now = datetime.datetime.now()
conn.create_dbsnapshot("snapshot-backup-{0}".format(now.strftime("%Y-%m-%d")), db.id)
if __name__ == '__main__':
snapshot_rds() |
flexiooss/poom-ci | poom-ci-stages/src/main/java/org/codingmatters/poom/ci/pipeline/PipelineScript.java | <reponame>flexiooss/poom-ci
package org.codingmatters.poom.ci.pipeline;
import org.codingmatters.poom.ci.pipeline.descriptors.Pipeline;
import org.codingmatters.poom.ci.pipeline.descriptors.Stage;
import org.codingmatters.poom.ci.pipeline.descriptors.StageHolder;
import org.codingmatters.value.objects.values.ObjectValue;
import java.io.IOException;
import java.io.OutputStream;
/**
 * Renders a {@link Pipeline} descriptor as an executable bash script, either
 * for a single stage ({@link #forStage}) or for the whole pipeline
 * ({@link #forPipeline}). The generated script expects the workspace
 * directory as its first argument and an optional source directory as its
 * second.
 */
public class PipelineScript {
    private final Pipeline pipeline;

    public PipelineScript(Pipeline pipeline) {
        this.pipeline = pipeline;
    }

    /**
     * Writes a script that runs only the given stage.
     *
     * @param stg holder of the stage to render
     * @param out destination for the script text; not closed by this method
     * @throws IOException if writing to {@code out} fails
     */
    public void forStage(StageHolder stg, OutputStream out) throws IOException {
        Stage stage = stg.stage();
        this.header(out);
        this.env(out);
        this.stage(stage, out);
        this.stageResult(stage, out);
    }

    /**
     * Writes a script that runs every stage of the pipeline in order.
     *
     * @param out destination for the script text; not closed by this method
     * @throws IOException if writing to {@code out} fails
     */
    public void forPipeline(OutputStream out) throws IOException {
        this.header(out);
        this.env(out);
        for (Stage stage : this.pipeline.stages()) {
            this.stage(stage, out);
        }
        this.pipelineResult(out);
    }

    /** Emits the bash preamble: argument checks, WORKSPACE/SRC setup, log dir reset. */
    private void header(OutputStream out) throws IOException {
        String header =
                "#!/usr/bin/env bash\n" +
                "\n" +
                "if [[ $# -eq 0 ]] ; then\n" +
                " echo 'must provide a workspace as argument'\n" +
                " exit 1\n" +
                "fi\n" +
                "\n" +
                "WORKSPACE=$1\n" +
                "SRC=$(dirname $(readlink -f $0))\n" +
                "if [[ $# -gt 1 ]] ; then\n" +
                " SRC=$(readlink -f $2)\n" +
                " echo \"running $0 on $SRC\"\n" +
                "fi\n" +
                "\n" +
                "rm -rf $WORKSPACE/logs\n" +
                "mkdir -p $WORKSPACE/logs\n\n" +
                "export WORKSPACE=$WORKSPACE\n" +
                "export SRC=$SRC\n\n";
        // NOTE(review): getBytes() uses the platform default charset
        // (pre-Java-18); consider StandardCharsets.UTF_8 explicitly.
        out.write(header.getBytes());
    }

    /** Exports the pipeline's declared environment variables, if any. */
    private void env(OutputStream out) throws IOException {
        // Removed a stray debug System.out.println of the env values here.
        if (this.pipeline.opt().env().isPresent()) {
            for (ObjectValue envValues : this.pipeline.env()) {
                if (envValues != null) {
                    for (String variable : envValues.propertyNames()) {
                        String variableLine = String.format("export %s=\"%s\"\n", variable, envValues.property(variable).single().stringValue());
                        out.write(variableLine.getBytes());
                    }
                }
                out.write("\n".getBytes());
            }
        }
    }

    /** Renders one stage: its STAGE variable followed by its exec commands. */
    private void stage(Stage stage, OutputStream out) throws IOException {
        this.stageVars(stage, out);
        this.exec(stage, out);
    }

    /** Emits the STAGE variable assignment for the given stage. */
    private void stageVars(Stage stage, OutputStream out) throws IOException {
        String vars = String.format(
                "STAGE=%s\n" + "\n",
                stage.name()
        );
        out.write(vars.getBytes());
    }

    /**
     * Emits each exec command of the stage, each followed by a check that
     * aborts the script (propagating the exit code) on failure.
     */
    private void exec(Stage stage, OutputStream out) throws IOException {
        int i = 0;
        for (String exec : stage.exec()) {
            i++;
            String call = String.format(
                    "%s\n" +
                    "RESULT=$?\n" +
                    "if [ \"$RESULT\" -ne 0 ]\n" +
                    "then\n" +
                    " echo \"stage $STAGE exec %s failure\"\n" +
                    " exit $RESULT\n" +
                    "fi\n\n",
                    exec, i
            );
            out.write(call.getBytes());
        }
    }

    /** Emits the trailing stage status report. The stage parameter is kept for symmetry. */
    private void stageResult(Stage stage, OutputStream out) throws IOException {
        String result =
                "echo \"$STAGE STAGE EXIT : $RESULT\"\n" +
                "exit $RESULT";
        out.write(result.getBytes());
    }

    /** Emits the trailing pipeline status report. */
    private void pipelineResult(OutputStream out) throws IOException {
        String result =
                "echo \"PIPELINE EXIT : $RESULT\"\n" +
                "exit $RESULT";
        out.write(result.getBytes());
    }
}
|
summonFox/unified | Plugins/Tweaks/FixTriggerEnterDetection.cpp | #include "nwnx.hpp"
#include "API/CNWSTrigger.hpp"
#include "API/CScriptEvent.hpp"
namespace Tweaks {
using namespace NWNXLib;
using namespace NWNXLib::API;
using namespace NWNXLib::API::Constants;
// Even-odd rule (ray casting) point-in-polygon test: returns true when
// `point` lies inside the 2D polygon formed by the trigger's vertices
// (the z component is ignored).
bool VectorInTriggerBounds(CNWSTrigger* trigger, Vector point)
{
    Vector* vertices = trigger->m_pvVertices;
    int numVertices = trigger->m_nVertices;
    int i, j;
    bool inside = false;
    // Walk every edge (j -> i, with j trailing i by one); toggle `inside`
    // each time a leftward horizontal ray from `point` crosses the edge.
    for (i = 0, j = numVertices - 1; i < numVertices; j = i++)
    {
        if ((vertices[i].y >= point.y ) != (vertices[j].y >= point.y))
        {
            // NOTE(review): uses >= / <= rather than the strict comparisons
            // of the canonical PNPOLY test, so points exactly on an edge or
            // vertex may classify differently — presumably intentional.
            if ((point.x <= (vertices[j].x - vertices[i].x) * (point.y - vertices[i].y) / (vertices[j].y - vertices[i].y) + vertices[i].x))
            {
                inside = !inside;
            }
        }
    }
    return inside;
}
// Installed at plugin load time via the constructor attribute on the
// declaration below; registers an early hook on CNWSTrigger::EventHandler
// that re-validates "object entered" signals against the trigger geometry.
void FixTriggerEnterDetection() __attribute__((constructor));
void FixTriggerEnterDetection()
{
    // Tweak is opt-in: do nothing unless the config flag is set.
    if (!Config::Get<bool>("FIX_TRIGGER_ENTER_DETECTION", false))
        return;

    LOG_INFO("Will additionally validate trigger enter events to fix a trigger enter detection bug.");

    static Hooks::Hook s_TriggerEventHandlerHook = Hooks::HookFunction(Functions::_ZN11CNWSTrigger12EventHandlerEjjPvjj,
        (void*)+[](CNWSTrigger *thisPtr, uint32_t nEventId, OBJECT_ID nCallerObjectId, void* pScript, uint32_t nCalendarDay, uint32_t nTimeOfDay) -> void
        {
            // Only SignalEvent dispatches wrapping an OnObjectEnter script
            // event are candidates for suppression.
            if (nEventId == AIMasterEvent::SignalEvent)
            {
                auto* pScriptEvent = static_cast<CScriptEvent*>(pScript);
                if (pScriptEvent->m_nType == ScriptEvent::OnObjectEnter)
                {
                    auto* pEntered = Utils::AsNWSObject(Utils::GetGameObject(pScriptEvent->GetObjectID(0)));
                    // Suppress the enter event when the entering object's
                    // position is not actually inside the trigger polygon.
                    // The event is deleted here since the original handler,
                    // which would normally consume it, is never called.
                    if (pEntered != nullptr && !VectorInTriggerBounds(thisPtr, pEntered->m_vPosition))
                    {
                        delete pScriptEvent;
                        return;
                    }
                }
            }
            s_TriggerEventHandlerHook->CallOriginal<void>(thisPtr, nEventId, nCallerObjectId, pScript, nCalendarDay, nTimeOfDay);
        }, Hooks::Order::Early);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.