repo_name stringlengths 6 101 | path stringlengths 4 300 | text stringlengths 7 1.31M |
|---|---|---|
metehkaya/Algo-Archive | Problems/HackerRank/Contests/Week_of_Codes/Week_of_Code_38/A_Full.Which_Section.cpp | <gh_stars>1-10
#include <bits/stdc++.h>
#define maxm 500
using namespace std;
// T: number of test cases; k: target cumulative count; m: number of sections.
// NOTE(review): n is read from input but never used afterwards.
int T,n,k,m;
// Per-section counts for the current test case; maxm (500) bounds m.
int ar[maxm];
// For each test case, reads m section counts and prints the 1-based index of
// the first section at which the running total reaches k. Streams the input
// instead of buffering it first; output is identical to the two-pass version.
int main() {
    scanf("%d", &T);
    while (T-- > 0) {
        scanf("%d%d%d", &n, &k, &m);  // n is consumed but unused, as before
        int running = 0;
        bool reported = false;
        for (int i = 0; i < m; ++i) {
            scanf("%d", &ar[i]);
            running += ar[i];
            // Report only the first section where the cumulative sum reaches k.
            if (!reported && running >= k) {
                printf("%d\n", i + 1);
                reported = true;
            }
        }
    }
    return 0;
}
|
NathanHouwaart/Ipass | code/hwlib/html/hwlib-port-direct_8hpp.js | var hwlib_port_direct_8hpp =
[
[ "port_direct_from_in_out_t", "classhwlib_1_1port__direct__from__in__out__t.html", "classhwlib_1_1port__direct__from__in__out__t" ],
[ "port_direct_from_in_t", "classhwlib_1_1port__direct__from__in__t.html", "classhwlib_1_1port__direct__from__in__t" ],
[ "port_direct_from_out_t", "classhwlib_1_1port__direct__from__out__t.html", "classhwlib_1_1port__direct__from__out__t" ],
[ "port_direct_from_oc_t", "classhwlib_1_1port__direct__from__oc__t.html", "classhwlib_1_1port__direct__from__oc__t" ],
[ "direct", "hwlib-port-direct_8hpp.html#a68091f0a605af40f86570f100909f6b5", null ],
[ "direct", "hwlib-port-direct_8hpp.html#a77c7d15d33676d70ade4edebdd484f02", null ],
[ "direct", "hwlib-port-direct_8hpp.html#a0adb355aaa7d356168fe6f14efbf2276", null ],
[ "direct", "hwlib-port-direct_8hpp.html#a8c35e5521540c609bb0021dce2cd9c35", null ]
]; |
BRAINTOK/oraculi-react | src/states/SettingsState.js | import React, {Component, Fragment} from "react";
import BasicState from "../layouts/BasicState";
import { NavLink, Link } from 'react-router-dom';
import {__} from "../layouts/utilities/i18n";
import {styles, currentStyles} from "../layouts/template";
// Settings screen: lets the user pick an interface style (theme) and toggle a
// fluid container. Extends BasicState, which presumably supplies
// this.state.route and consumes basic_state_data() for initial state --
// TODO confirm against BasicState.
class SettingsState extends BasicState
{
    // Builds the initial state: `active` is the _id of the style whose url
    // matches the currently applied style (default 1), `fluid` starts at 1.
    basic_state_data ()
    {
        let active = 1;
        const styles1 = styles();
        for(var i in styles1)
        {
            // Loose == comparison of style urls; both sides look like strings.
            if(styles1[i].url == currentStyles())
            {
                console.log(i); // NOTE(review): leftover debug output
                active = styles1[i]._id;
                break;
            }
        }
        return { active, fluid: 1 };
    }
    render()
    {
        const styles1 = styles();
        // One clickable square per available style. The style url is stashed in
        // a custom lowercase "e" attribute (passed through to the DOM) and read
        // back in onChange via getAttribute.
        const divs = styles1.map((e, i) =>
        {
            return <div
                className={"btn square btn-secondary m-1" + (this.state.active == e._id ? " active" : "")}
                onClick={this.onChange}
                e={ e.url }
                key={i}
            >
                <svg version="1.1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px" viewBox="0 0 1 1">
                    <rect x="0" y="0" width="1" height="1" />
                </svg>
                <div>
                    {__( e.title )}
                </div>
            </div>
        });
        return <div className="layout-state">
            <div className="layout-state-head">
                <span className={ "layout-state-logo " + this.state.route.icon } />
                <div className="layout-state-title">
                    { __( this.state.route.title ) }
                </div>
            </div>
            <div className="row text-center">
                <div className="col-12 my-2">
                    <div className="lead">{__("Interface styles")}</div>
                </div>
                <div className="col-12 my-2">
                    <div className="btn-group d-flex flex-wrap" style={{justifyContent: "center", alignItems: "center"}}>
                        { divs }
                    </div>
                </div>
                <div className="col-12 my-2">
                    <div className="lead">{__("Container")}</div>
                </div>
                <div className="col-12 my-2">
                    <label className="_check_">
                        <input
                            type="checkbox"
                            checked={ this.state.fluid }
                            onChange={ this.onFluid }
                        />
                    </label>
                </div>
            </div>
        </div>
    }
    // Style square clicked: resolve the clicked style's _id from its url,
    // update local state and notify the parent via onChangeStyle.
    onChange = evt =>
    {
        const a = evt.currentTarget.getAttribute("e");
        let active = 1;
        const styles1 = styles();
        for(var i in styles1)
        {
            if(styles1[i].url == a)
            {
                active = styles1[i]._id;
                break;
            }
        }
        this.setState({ active });
        // Uses the pre-setState fluid value, which is the current one.
        this.props.onChangeStyle({ fluid:this.state.fluid, style: a });
    }
    // Fluid checkbox toggled. Both the setState and the callback use
    // !this.state.fluid computed from the pre-update value, so they agree.
    // NOTE(review): filter(...)[0] throws if no style matches this.state.active
    // -- presumably active always maps to a known style; verify.
    onFluid = () =>
    {
        this.setState({ fluid : !this.state.fluid });
        const styles1 = styles();
        this.props.onChangeStyle({ fluid : !this.state.fluid, style : styles1.filter(e => e._id == this.state.active)[0].url });
    }
    // Icon name for this state's route (consumed by BasicState -- TODO confirm).
    getRoute = () =>
    {
        return "cog";
    }
}
export default SettingsState; |
jonayad-khan/Online-shpoping | public/admin/bower_components/select2/dist/js/i18n/zh-CN.js | <reponame>jonayad-khan/Online-shpoping
//! moment.js locale configuration
//! locale : Belarusian [be]
//! author : <NAME> : https://github.com/demidov91
//! author: Praleska: http://praleska.pro/
//! Author : <NAME> : https://github.com/Oire
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
function plural(word, num) {
var forms = word.split('_');
return num % 10 === 1 && num % 100 !== 11 ? forms[0] : (num % 10 >= 2 && num % 10 <= 4 && (num % 100 < 10 || num % 100 >= 20) ? forms[1] : forms[2]);
}
funct |
techsur-solutions/vets-website | src/applications/static-pages/analytics/addButtonLinkListeners.js | import recordEvent from 'platform/monitoring/record-event';
// Attaches a click-analytics listener to every default (non-primary) button
// link on the page, skipping anchors inside React widget templates.
export default function addButtonLinkListeners() {
  const notReactWidget = ':not([data-template="paragraphs/react_widget"])';
  const buttonAnchor = 'a.usa-button';
  const notPrimary = ':not(.usa-button-primary)';
  const selector = `${notReactWidget} ${buttonAnchor}${notPrimary}`;
  for (const link of document.querySelectorAll(selector)) {
    link.addEventListener('click', event => {
      // Record the visible anchor text so the analytics event identifies
      // which CTA was clicked.
      recordEvent({
        event: 'cta-default-button-click',
        buttonText: event.target.text,
      });
    });
  }
}
|
mythoss/midpoint | repo/repo-api/src/main/java/com/evolveum/midpoint/repo/api/SystemConfigurationChangeDispatcher.java | /*
* Copyright (c) 2010-2018 Evolveum and contributors
*
* This work is dual-licensed under the Apache License 2.0
* and European Union Public License. See LICENSE file for details.
*/
package com.evolveum.midpoint.repo.api;
import com.evolveum.midpoint.schema.result.OperationResult;
import com.evolveum.midpoint.util.exception.SchemaException;
/**
* Central point of dispatching notifications about changes to the system configuration object.
*/
public interface SystemConfigurationChangeDispatcher {
    /**
     * Dispatches information on system configuration object change.
     *
     * Basically this directly pushes information to lower layers (prism, schema, repo, etc), and calls registered
     * listeners that originate in upper layers.
     *
     * @param ignoreVersion If false, the information is dispatched unconditionally. If true, we dispatch the notification only
     *                      if the system configuration version was really changed. This is to easily support sources that
     *                      "ping" sysconfig object in regular intervals, e.g. the cluster manager thread.
     * @param allowNotFound If true, we take non-existence of sysconfig object more easily. To be used e.g. on system init or
     *                      during tests execution.
     * @param result Operation result under which the dispatch is recorded.
     * @throws SchemaException if the system configuration object cannot be processed schema-wise.
     */
    void dispatch(boolean ignoreVersion, boolean allowNotFound, OperationResult result) throws SchemaException;
    /**
     * Registers a listener that will be updated on system configuration object changes.
     */
    void registerListener(SystemConfigurationChangeListener listener);
    /**
     * Unregisters a listener.
     */
    void unregisterListener(SystemConfigurationChangeListener listener);
}
|
HarshCasper/software | bobby/domain/structures/sets/elements.go | <filename>bobby/domain/structures/sets/elements.go
package sets
import "github.com/deepvalue-network/software/libs/hash"
// elements wraps either a ranked or an unranked element collection. Exactly
// one of the two fields is expected to be non-nil -- the internal constructor
// does not enforce this, callers are trusted (TODO confirm no caller passes
// both nil).
type elements struct {
	ranked   RankedElements
	unranked UnrankedElements
}

// createElementsWithRanked builds an Elements backed by ranked elements.
func createElementsWithRanked(
	ranked RankedElements,
) Elements {
	return createElementsInternally(ranked, nil)
}

// createElementsWithUnranked builds an Elements backed by unranked elements.
func createElementsWithUnranked(
	unranked UnrankedElements,
) Elements {
	return createElementsInternally(nil, unranked)
}

// createElementsInternally is the shared constructor; one of ranked/unranked
// should be nil.
func createElementsInternally(
	ranked RankedElements,
	unranked UnrankedElements,
) Elements {
	out := elements{
		ranked:   ranked,
		unranked: unranked,
	}
	return &out
}
// Hash returns the hash
func (obj *elements) Hash() hash.Hash {
if obj.IsUnranked() {
return obj.UnRanked().Hash()
}
return obj.Ranked().Hash()
}
// IsUnique returns true if the elements are unique, false otherwise
func (obj *elements) IsUnique() bool {
if obj.IsUnranked() {
return obj.UnRanked().IsUnique()
}
return obj.Ranked().IsUnique()
}
// IsRanked returns true if the elements are ranked, false otherwise
func (obj *elements) IsRanked() bool {
return obj.ranked != nil
}
// Ranked returns the ranked elements, if any
func (obj *elements) Ranked() RankedElements {
return obj.ranked
}
// IsUnranked returns true if the elements are unranked, false otherwise
func (obj *elements) IsUnranked() bool {
return obj.unranked != nil
}
// UnRanked returns the unranked elements, if any
func (obj *elements) UnRanked() UnrankedElements {
return obj.unranked
}
|
bmampaey/SPoCA | classes/SegmentationStats.cpp | <reponame>bmampaey/SPoCA
#include "SegmentationStats.h"
using namespace std;
// Builds an empty statistics accumulator for one segmentation class.
// Moments (m2..m4) and intensity extrema start as NAN so the first add() can
// initialise them; counters, totals and areas start at zero.
SegmentationStats::SegmentationStats(const time_t& observationTime, const unsigned id)
:id(id),observationTime(observationTime), numberPixels(0), m2(NAN), m3(NAN), m4(NAN), minIntensity(NAN), maxIntensity(NAN), totalIntensity(0), area_Raw(0), area_AtDiskCenter(0), fillingFactor(0)
{}
// Accumulates one pixel into the statistics.
// NAN/inf intensities are excluded from the intensity statistics but the
// pixel still contributes to the pixel count and area computations.
// sunCenter/sun_radius are in pixel coordinates; SUN_RADIUS and
// HIGGINS_FACTOR are presumably project constants (Mm radius and a limb
// cutoff) -- TODO confirm against the header.
void SegmentationStats::add(const PixLoc& coordinate, const EUVPixelType& pixelIntensity, const RealPixLoc& sunCenter, const Real& sun_radius)
{
	// If the intensity is not a number, the event is said to be clipped spatially
	if(not (isnan(pixelIntensity) || isinf(pixelIntensity)))
	{
		if( isnan(maxIntensity) || maxIntensity < pixelIntensity )
			maxIntensity = pixelIntensity;
		if( isnan(minIntensity) || pixelIntensity < minIntensity)
			minIntensity = pixelIntensity;
		totalIntensity += pixelIntensity;
		// We keep a vector of intensities to compute the variance, the skewness and the kurtosis
		intensities.push_back(pixelIntensity);
		// If I add an intensity, the previously computed moment of order > 1 are invalid
		m2 = NAN;
		m3 = NAN;
		m4 = NAN;
	}
	Real dx = fabs(coordinate.x - sunCenter.x);
	Real dy = fabs(coordinate.y - sunCenter.y);
	Real radius_squared = sun_radius * sun_radius;
	// sigma > 0 iff the pixel lies inside the solar disk
	Real sigma = radius_squared - (dx * dx) - (dy * dy);
	++numberPixels;
	// We compute the contribution of the pixel to the raw area in Mm², and it's uncertainity
	const Real raw_pixel_area = (SUN_RADIUS) * (SUN_RADIUS) / radius_squared;
	area_Raw += raw_pixel_area;
	// We compute the contribution of the pixel to the area at disk center in Mm², and it's uncertainity
	// Off-disk pixels get a factor above HIGGINS_FACTOR so they are skipped below.
	Real area_correction_factor = HIGGINS_FACTOR + 1;
	if(sigma > 0)
	{
		area_correction_factor = sun_radius/sqrt(sigma);
		// We compute the filling factor
		fillingFactor += 1./(PI*radius_squared);
	}
	// If the area correction factor is more than some value (i.e. the pixel is near the limb)
	// the pixel is excluded from the disk-center area.
	if (area_correction_factor <= HIGGINS_FACTOR)
	{
		area_AtDiskCenter += raw_pixel_area * area_correction_factor;
	}
}
// Identifier (segmentation class) accessors.
unsigned SegmentationStats::Id() const
{
	return id;
}
void SegmentationStats::setId(const unsigned& id)
{
	this->id = id;
}
time_t SegmentationStats::ObservationTime() const
{
	return observationTime;
}
// Formats the observation time as an ISO-8601-like UTC string
// (YYYY-MM-DDTHH:MM:SS), using gmtime.
string SegmentationStats::ObservationDate() const
{
	tm* date_obs;
	date_obs = gmtime(&observationTime);
	ostringstream ss;
	ss<<setfill('0')<<setw(4)<<date_obs->tm_year+1900<<"-"<<setw(2)<<date_obs->tm_mon + 1<<"-"<<setw(2)<<date_obs->tm_mday<<"T"<<setw(2)<<date_obs->tm_hour<<":"<<setw(2)<<date_obs->tm_min<<":"<<setw(2)<<date_obs->tm_sec;
	return ss.str();
}
unsigned SegmentationStats::NumberPixels() const
{
	return numberPixels;
}
// Extrema stay NAN until the first finite pixel is added; infinite values
// are normalised to NAN on read.
Real SegmentationStats::MinIntensity() const
{
	if (isinf(minIntensity))
		return NAN;
	else
		return minIntensity;
}
Real SegmentationStats::MaxIntensity() const
{
	if (isinf(maxIntensity))
		return NAN;
	else
		return maxIntensity;
}
// Mean over the finite intensities only (NAN when none were recorded).
Real SegmentationStats::Mean() const
{
	if (intensities.size() == 0 || isinf(totalIntensity) || isnan(totalIntensity))
		return NAN;
	else
		return totalIntensity / intensities.size();
}
// Median and quartiles via quickselect over the stored intensities.
Real SegmentationStats::Median() const
{
	if (intensities.size() == 0)
		return NAN;
	else
		return quickselect(intensities, 0.5);
}
Real SegmentationStats::LowerQuartile() const
{
	if (intensities.size() == 0)
		return NAN;
	else
		return quickselect(intensities, 0.25);
}
Real SegmentationStats::UpperQuartile() const
{
	if (intensities.size() == 0)
		return NAN;
	else
		return quickselect(intensities, 0.75);
}
void SegmentationStats::computeMoments()
{
Real mean = Mean();
if(isnan(mean) || isinf(mean))
{
m2 = m3 = m4 = NAN;
}
else
{
m2 = m3 = m4 = 0;
for (unsigned i = 0; i < intensities.size(); ++i)
{
Real delta = intensities[i] - mean;
Real delta2 = delta * delta;
m2 += delta2;
m4 += delta2 * delta2;
m3 += delta2 * delta;
}
}
}
// Variance = m2/n. The cached moments are recomputed lazily; const_cast is
// used because m2..m4 act as a mutable cache inside a const accessor.
Real SegmentationStats::Variance() const
{
	if (intensities.size() == 0)
		return NAN;
	if(isnan(m2))
	{
		const_cast<SegmentationStats*>(this)->computeMoments();
	}
	if (isinf(m2) || isnan(m2))
		return NAN;
	else
		return m2 / intensities.size();
}
// Skewness = sqrt(n) * m3 / m2^(3/2); NAN when m2 is degenerate.
Real SegmentationStats::Skewness() const
{
	if (intensities.size() == 0)
		return NAN;
	if(isnan(m2) || isnan(m3))
	{
		const_cast<SegmentationStats*>(this)->computeMoments();
	}
	if (isinf(m3) || isnan(m3) || isinf(m2) || isnan(m2) || m2 <= 0)
		return NAN;
	else
		return sqrt(intensities.size()) * m3 / sqrt(m2 * m2 * m2);
}
// Excess kurtosis = n * m4 / m2^2 - 3 (0 for a normal distribution).
Real SegmentationStats::Kurtosis() const
{
	if(intensities.size() == 0)
		return NAN;
	if(isnan(m2) || isnan(m4))
	{
		const_cast<SegmentationStats*>(this)->computeMoments();
	}
	if (isinf(m4) || isnan(m4) || isinf(m2) || isnan(m2) || m2 <= 0)
		return NAN;
	else
		return (intensities.size() * m4 / (m2 * m2) ) - 3;
}
// Sum of the finite intensities (NAN if it overflowed to infinity).
Real SegmentationStats::TotalIntensity() const
{
	if (isinf(totalIntensity))
		return NAN;
	else
		return totalIntensity;
}
// Accumulated raw area (Mm², uncorrected for projection).
Real SegmentationStats::Area_Raw() const
{
	return area_Raw;
}
// Accumulated projection-corrected area at disk center (Mm²).
Real SegmentationStats::Area_AtDiskCenter() const
{
	return area_AtDiskCenter;
}
// Fraction of non-null pixels belonging to this class (see
// getSegmentationStats, which overwrites the incremental value).
Real SegmentationStats::FillingFactor() const
{
	return fillingFactor;
}
// Serialises either the CSV-style header row (header=true) or the value row
// for this class, using the given field separator.
string SegmentationStats::toString(const string& separator, bool header) const
{
	if (header)
	{
		return "Id"+separator+"ObservationDate"+separator+"NumberPixels"+separator+"MinIntensity"+separator+"MaxIntensity"+separator+"Mean"+separator+"Median"+separator+"LowerQuartile"+separator+"UpperQuartile"+separator+"Variance"+separator+"Skewness"+separator+"Kurtosis"+separator+"TotalIntensity"+separator+"Area_Raw"+separator+"Area_AtDiskCenter"+separator+"FillingFactor";
	}
	else
	{
		ostringstream out;
		out<<setiosflags(ios::fixed)<<Id()<<separator<<ObservationDate()<<separator<<NumberPixels()<<separator<<MinIntensity()<<separator<<MaxIntensity()<<separator<<Mean()<<separator<<Median()<<separator<<LowerQuartile()<<separator<<UpperQuartile()<<separator<<Variance()<<separator<<Skewness()<<separator<<Kurtosis()<<separator<<TotalIntensity()<<separator<<Area_Raw()<<separator<<Area_AtDiskCenter()<<separator<<FillingFactor();
		return out.str();
	}
}
// Computes per-class statistics of `image`, restricted to the given classes
// of `coloredMap`. Pixels whose color is not in `classes` still count toward
// the total used for the filling factor. Caller owns the returned pointers.
vector<SegmentationStats*> getSegmentationStats(const ColorMap* coloredMap, const EUVImage* image, const set<ColorType>& classes)
{
	map<ColorType,SegmentationStats*> segmentation_stats;
	for(set<ColorType>::iterator c=classes.begin(); c!=classes.end(); ++c)
	{
		// Fix: pass the image observation time explicitly. Previously *c (a
		// color value) was forwarded as the first constructor argument, i.e. as
		// observationTime; this now matches the two-argument construction used
		// by the other getSegmentationStats overload.
		segmentation_stats[*c] = new SegmentationStats(image->ObservationTime(), *c);
	}
	RealPixLoc sunCenter = image->SunCenter();
	Real sunRadius = image->SunRadius();
	unsigned totalNonNullPixels = 0;
	for (unsigned y = 0; y < coloredMap->Yaxes(); ++y)
	{
		for (unsigned x = 0; x < coloredMap->Xaxes(); ++x)
		{
			if(coloredMap->pixel(x,y) != coloredMap->null())
			{
				const ColorType& color = coloredMap->pixel(x,y);
				// We only compute the class stats for the given classes
				if (segmentation_stats.count(color) > 0)
				{
					// We add the pixel to the class
					segmentation_stats[color]->add(PixLoc(x,y), image->pixel(x, y), sunCenter, sunRadius);
				}
				++totalNonNullPixels;
			}
		}
	}
	// We correct the filling factors (class pixels over all non-null pixels)
	for (std::map<ColorType,SegmentationStats*>::iterator it=segmentation_stats.begin(); it!=segmentation_stats.end(); ++it)
		it->second->fillingFactor = Real(it->second->NumberPixels()) / Real(totalNonNullPixels);
	return values(segmentation_stats);
}
// Same as the class-restricted overload above, but computes statistics for
// every color present in the map, creating a SegmentationStats lazily on the
// first occurrence of each color. Caller owns the returned pointers.
vector<SegmentationStats*> getSegmentationStats(const ColorMap* coloredMap, const EUVImage* image)
{
	map<ColorType,SegmentationStats*> segmentation_stats;
	RealPixLoc sunCenter = image->SunCenter();
	Real sunRadius = image->SunRadius();
	unsigned totalNonNullPixels = 0;
	for (unsigned y = 0; y < coloredMap->Yaxes(); ++y)
	{
		for (unsigned x = 0; x < coloredMap->Xaxes(); ++x)
		{
			if(coloredMap->pixel(x,y) != coloredMap->null())
			{
				const ColorType& color = coloredMap->pixel(x,y);
				// If the segmentation_stats does not yet exist we create it
				if (segmentation_stats.count(color) == 0)
				{
					segmentation_stats[color] = new SegmentationStats(image->ObservationTime(), color);
				}
				// We add the pixel to the class
				segmentation_stats[color]->add(PixLoc(x,y), image->pixel(x, y), sunCenter, sunRadius);
				++totalNonNullPixels;
			}
		}
	}
	// We correct the filling factors (class pixels over all non-null pixels)
	for (std::map<ColorType,SegmentationStats*>::iterator it=segmentation_stats.begin(); it!=segmentation_stats.end(); ++it)
		it->second->fillingFactor = Real(it->second->NumberPixels())/ Real(totalNonNullPixels);;
	return values(segmentation_stats);
}
FitsFile& writeRegions(FitsFile& file, const vector<SegmentationStats*>& segmentation_stats)
{
{
vector<unsigned> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Id();
file.writeColumn("ID", data);
}
{
vector<string> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->ObservationDate();
file.writeColumn("DATE_OBS", data);
}
{
vector<unsigned> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->NumberPixels();
file.writeColumn("NUMBER_PIXELS", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->MinIntensity();
file.writeColumn("MIN_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->MaxIntensity();
file.writeColumn("MAX_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Mean();
file.writeColumn("MEAN_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Median();
file.writeColumn("MEDIAN_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->LowerQuartile();
file.writeColumn("LOWERQUARTILE_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->UpperQuartile();
file.writeColumn("UPPERQUARTILE_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Variance();
file.writeColumn("VARIANCE", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Skewness();
file.writeColumn("SKEWNESS", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Kurtosis();
file.writeColumn("KURTOSIS", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->TotalIntensity();
file.writeColumn("TOTAL_INTENSITY", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Area_Raw();
file.writeColumn("RAW_AREA", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->Area_AtDiskCenter();
file.writeColumn("AREA_ATDISKCENTER", data);
}
{
vector<Real> data(segmentation_stats.size());
for(unsigned r = 0; r < segmentation_stats.size(); ++r)
data[r] = segmentation_stats[r]->FillingFactor();
file.writeColumn("FILLING_FACTOR", data);
}
return file;
}
|
Kampbell/ISODE | isode++/code/iso/itu/osi/als/base/util/NetworkBuffer.h | /*
* NetworkBuffer.h
*
* Created on: 14 juil. 2015
* Author: FrancisANDRE
*/
#ifndef ALS_BASE_UTIL_Buffer_H_
#define ALS_BASE_UTIL_Buffer_H_
#include <string>
using std::to_string;
using std::string;
#include <memory>
using std::shared_ptr;
#include "Poco/Exception.h"
using Poco::InvalidArgumentException;
using Poco::RangeException;
#include "als/base/base.h"
namespace ALS {
namespace BASE {
namespace UTIL {
// Byte buffer for network I/O, modelled on java.nio.ByteBuffer:
// mark <= position <= limit <= capacity. Provides absolute and relative
// reads/writes of integral types plus raw byte ranges.
// NOTE(review): _mark is an unsigned nat2 but is assigned -1 as a "no mark"
// sentinel elsewhere (clear/flip/rewind/discardMark), which wraps to 0xFFFF;
// signed checks against it cannot work as written -- verify nat2's definition.
class BASE_API NetworkBuffer {
	friend class BinaryInputStream;
	friend class BinaryOutputStream;
private:
	static const int OFFSET;
	// Invariants: mark <= position <= limit <= capacity
	nat2 _position;
	nat2 _limit;
	nat2 _capacity;
	nat2 _mark;
	char* _buffer;
	bool _owned;   // true when this object owns (and must free) _buffer
	// Creates a new buffer with the given mark, position, limit, and capacity,
	// after checking invariants.
	//
	// NetworkBuffer(int mark, int pos, int lim, int cap) { // package-private
	//  if (cap < 0)
	//      throw new IllegalArgumentException("Negative capacity: " + cap);
	//  this._capacity = cap;
	//  limit(lim);
	//  position(pos);
	//  if (mark >= 0) {
	//      if (mark > pos)
	//          throw new IllegalArgumentException(
	//              "mark > position: (" + mark + " > " + pos + ")");
	//      this._mark = mark;
	//  }
	// }
public:
	// Mutable accessors (expose internal fields by reference).
	nat2& position() { return _position; }
	nat2& limit() { return _limit; }
	// nat2& mark() { return _mark; }
private:
	const nat2& mark() const { return _mark; }
public:
	// Read-only accessors.
	const nat2& capacity() const { return _capacity; }
	const nat2& position() const { return _position; }
	const nat2& limit() const { return _limit; }
public:
	NetworkBuffer() = delete;
	NetworkBuffer(int length);                 // allocates an owned buffer
	NetworkBuffer(char* buffer, int length);   // wraps an external buffer
	NetworkBuffer(const NetworkBuffer&) = delete;
	NetworkBuffer& operator = (const NetworkBuffer&) = delete;
	virtual ~NetworkBuffer();
	// use: borrow an external buffer; take: presumably assume ownership of it
	// -- TODO confirm against the .cpp.
	void use(char* buffer, int length);
	void take(char* buffer, int length);
	nat2 remaining() const { return _limit - _position; }
	bool hasRemaining() const { return _position < _limit; }
	nat2 position(int position) { return _position = position; }
private:
	NetworkBuffer& newPosition(nat2 newPosition);
	NetworkBuffer& newLimit(nat2 newLimit);
public:
	// ByteBuffer-style cursor operations (defined inline below).
	NetworkBuffer& markit();
	NetworkBuffer& reset();
	NetworkBuffer& clear();
	NetworkBuffer& flip();
	NetworkBuffer& rewind();
	NetworkBuffer& shrink(int size);
	NetworkBuffer& skip(int size);
	inline byte get() { return getByte(); }
	inline NetworkBuffer& put(byte b) { return putByte(b); }
	// Relative typed reads/writes (advance the position).
	byte getByte();
	int2 getShort();
	int4 getInt();
	int8 getLong();
	NetworkBuffer& putByte(byte b);
	NetworkBuffer& putShort(int2 s);
	NetworkBuffer& putInt(int4 i);
	NetworkBuffer& putLong(int8 l);
	// Absolute typed writes at an explicit position.
	NetworkBuffer& putByte(nat2 position, byte b);
	NetworkBuffer& putShort(nat2 position, int2 s);
	NetworkBuffer& putInt(nat2 position, int4 i);
	NetworkBuffer& putLong(nat2 position, int8 l);
	// Bulk transfers of raw bytes.
	nat2 getBytes(nat2 length, byte* bytes);
	NetworkBuffer& putBytes(nat2 length, const byte*bytes);
	nat2 getBytes(nat2 length, char* bytes);
	NetworkBuffer& putBytes(nat2 length, const char*bytes);
private:
	void truncate();
public:
#ifdef KEEP_THIS_CODE
	void* operator new(size_t size, unsigned short length){
		return new char[size + length];
	}
	void operator delete(void* where, unsigned short length) {
		delete where;
	}
	void operator delete(void* where) {
		delete where;
	}
	inline char* data() const { return ((char*)this) + OFFSET + position(); }
	inline byte* here() const { return ((byte*)this) + OFFSET + position(); }
	inline byte* here(int position) const { return ((byte*)this) + OFFSET + position; }
#endif
	// Raw pointer/byte access helpers (defined inline below).
	byte* buffer(int position) const;
	char* begin() const;
	byte* bytes() const;
	char* data() const;
	char* chars() const;
	byte at(int no) const;
	byte operator[](int no) const;
	void dump(Printer& printer) const;
	const string info() const;
private:
	// Bounds checks: index must leave room for the given type before limit.
	int checkByteIndex(nat2 index);
	int checkShortIndex(nat2 index);
	int checkIntIndex(nat2 index);
	int checkLongIndex(nat2 index);
	void discardMark();
};
// begin() ignores the cursor; bytes()/data()/chars() are offset by position().
inline char* NetworkBuffer::begin() const { return ((char*)_buffer); }
inline byte*NetworkBuffer::bytes() const { return ((byte*)_buffer) + position(); }
inline char* NetworkBuffer::data() const { return ((char*)_buffer) + position(); }
inline char* NetworkBuffer::chars() const { return ((char*)_buffer) + position(); }
// Random access relative to the current position (no bounds check).
inline byte NetworkBuffer::at(int no) const { return *(_buffer + position() + no); }
inline byte NetworkBuffer::operator[](int no) const { return *(_buffer + position() + no); }
inline byte*NetworkBuffer::buffer(int position) const{ return ((byte*)_buffer) + position; }
// Records the current position so reset() can return to it.
inline NetworkBuffer& NetworkBuffer::markit() {
	_mark = position();
	return *this;
}
// Restores the position to the last markit() mark; throws if no mark is set.
// Fix: _mark is an unsigned nat2, so the "-1" sentinel stored by
// clear()/flip()/rewind()/discardMark() wraps to 0xFFFF. The previous check
// `int m = _mark; if (m < 0)` could therefore never fire and reset() silently
// jumped the position to 65535. Compare against the wrapped sentinel instead.
inline NetworkBuffer& NetworkBuffer::reset() {
	if (_mark == static_cast<nat2>(-1))
		throw InvalidArgumentException("");
	_position = _mark;
	return *this;
}
// Resets the cursor for a fresh write pass (contents are untouched).
// NOTE(review): -1 wraps in the unsigned nat2 _mark (see class comment).
inline NetworkBuffer& NetworkBuffer::clear() {
	_position = 0;
	_limit = _capacity;
	_mark = -1;
	return *this;
}
// Switches from writing to reading: limit = what was written, position = 0.
inline NetworkBuffer& NetworkBuffer::flip() {
	_limit = _position;
	_position = 0;
	_mark = -1;
	return *this;
}
// Rewinds for re-reading the same window; limit is preserved.
inline NetworkBuffer& NetworkBuffer::rewind() {
	_position = 0;
	_mark = -1;
	return *this;
}
// Narrows the readable window to `size` bytes from the current position.
inline NetworkBuffer& NetworkBuffer::shrink(int size) {
	if (size > remaining())
		throw InvalidArgumentException(to_string(size));
	_limit = _position + size;
	return *this;
}
// Advances the position by `size` (validated by newPosition).
inline NetworkBuffer& NetworkBuffer::skip(int size) {
	return newPosition(_position + size);
}
// Empties the buffer bookkeeping entirely (used on release).
inline void NetworkBuffer::truncate() {
	_mark = -1;
	_position = 0;
	_limit = 0;
	_capacity = 0;
}
// Absolute-access bounds checks: the index must leave room for one value of
// the given width before limit(); throw RangeException otherwise.
// NOTE(review): `0 <= index` is always true for unsigned nat2, and
// `limit() - sizeof(T)` wraps when limit() < sizeof(T) -- verify intent.
inline int NetworkBuffer::checkByteIndex(nat2 index) {
	if (!(0 <= index && index <= limit() - sizeof(byte)))
		throw RangeException(to_string(index));
	return index;
}
inline int NetworkBuffer::checkShortIndex(nat2 index) {
	if (!(0 <= index && index <= limit() - sizeof(nat2)))
		throw RangeException(to_string(index));
	return index;
}
inline int NetworkBuffer::checkIntIndex(nat2 index) {
	if (!(0 <= index && index <= limit() - sizeof(nat4)))
		throw RangeException(to_string(index));
	return index;
}
inline int NetworkBuffer::checkLongIndex(nat2 index) {
	if (!(0 <= index && index <= limit() - sizeof(nat8)))
		throw RangeException(to_string(index));
	return index;
}
// Drops the mark (sentinel -1; wraps in unsigned nat2, see class comment).
inline void NetworkBuffer::discardMark() {
	_mark = -1;
}
}
}
}
#endif
|
persona-id/blockscore-ruby | lib/blockscore/errors/api_connection_error.rb | require 'blockscore/errors/error'
module BlockScore
  # Raised when the HTTP connection to the BlockScore API itself fails
  # (as opposed to an error response from the API). Inherits all behavior
  # from BlockScore::Error.
  class APIConnectionError < Error
  end
end
|
anunez97/centipede | TEAL/Scene.cpp | <gh_stars>0
// Scene.h
// <NAME> 2012
//
// Base scene class
#include "SceneManager.h"
#include "Scene.h"
#include "TEALShow.h"
#include "WindowManager.h"
#include "GameObject.h"
#include "Game.h"
using namespace std;
// Adapter callbacks for OperateOnAllGameObjects: apply one step to one object.
void Scene::UpdateOneGameObject( GameObject* go){ go->Update();}
void Scene::DrawOneGameObject( GameObject* go){ go->Draw();}
// Applies f to every GameObject in every priority bucket of the map.
// Buckets are visited in key order (the update/draw order), objects within a
// bucket in insertion order.
void Scene::OperateOnAllGameObjects( MapOfGameObjectLists& map, void (*f)(GameObject*) )
{
	MapOfGameObjectLists::iterator mapPair;
	GameObjectList::iterator listItem;
	for(mapPair = map.begin(); mapPair != map.end(); ++mapPair)
	{
		for( listItem = mapPair->second.begin(); listItem != mapPair->second.end(); ++listItem)
		{
			(*f)(*listItem);
		}
	}
}
// Destroys every remaining GameObject and then runs the scene-specific
// Terminate hook; called when the scene is torn down.
void Scene::FinalCleanUp()
{
	// Deleting all objects from update list is sufficient to clean up those in the other two lists
	OperateOnAllGameObjects(updateListMap, DeleteOneGameObject);
	Terminate();
}
Scene::~Scene()
{
}
// Default operation when cleaning up an unmanaged GameObjects marked for destruction
void Scene::DeleteGameObject(GameObject* go)
{
	delete go;
}
// Used by the ~Scene: calls Destroy on each remaining GameObject to allow proper housekeeping,
// and clears any alarms still registered for the object before freeing it.
void Scene::DeleteOneGameObject( GameObject* go)
{
	go->Destroy();
	SceneManager::GetCurrentScene()->Alarms().ClearAllAlarms(*go);
	FinalDeleteOrRelease(go);
}
// Either deletes a GameObject or uses its stated Managed operation
void Scene::FinalDeleteOrRelease( GameObject* go )
{
	if ( ! go->GetExternalManagement() )
	{
		DeleteGameObject( go );
	}
	else
	{
		// Externally-managed objects are handed back to their owner's callback.
		(go->ManagedDeleteOperation)(go);
	}
}
// Default no-op lifecycle hooks; concrete scenes override as needed.
void Scene::Initialize() {}
void Scene::Terminate() {}
// Adds a GameObject to the update and draw buckets matching its orders.
void Scene::RegisterGameObject( GameObject* go)
{
	updateListMap[ go->GetUpdateOrder() ].push_back( go );
	drawListMap[ go->GetDrawOrder()].push_back( go );
}
// Queues a GameObject for removal; actual unlinking and deletion happen at
// end of frame in DeleteMarkedObjects.
void Scene::DeregisterGameObject( GameObject* go)
{
	GameObjectstoBeRemoved[ go->GetUpdateOrder() ].push_back( go );
}
// One full frame: events -> update -> collisions -> draw -> cleanup.
void Scene::ProcessOneFrame()
{
	// Various events
	MyAlarmManager.ProcessTimeEvents();
	MyInputEventMgr.ProcessInputEvents();
	UpdateAllObjects();
	// Process collisions
	MyCollisionMgr.ProcessCollisionEvents();
	// draw
	WindowManager::Clear();
	// FPS display
	WindowManager::SetCaptionMessage( "FPS " + Tools::ToString( 1/Game::FrameTime() ));
	DrawAllObjects();
	TEALShow::Draw(); // Draw the debug markers, if any...
	WindowManager::Display();
	// Cleaning up the deceased...
	DeleteMarkedObjects();
}
// Accessors for the scene-owned event managers.
AlarmManager& Scene::Alarms(){ return MyAlarmManager; }
InputEventManager& Scene::InputEvents(){ return MyInputEventMgr; }
void Scene::UpdateAllObjects()
{
	OperateOnAllGameObjects( updateListMap, UpdateOneGameObject );
}
void Scene::DrawAllObjects()
{
	OperateOnAllGameObjects( drawListMap, DrawOneGameObject );
}
// Flushes the removal queue built up by DeregisterGameObject this frame.
void Scene::DeleteMarkedObjects()
{
	OperateOnAllGameObjects( GameObjectstoBeRemoved, DeleteOneMarkedGameObject );
	GameObjectstoBeRemoved.clear();
}
// Unlinks one object from the live update/draw lists, then deletes or
// releases it depending on its management mode.
void Scene::DeleteOneMarkedGameObject( GameObject* go)
{
	SceneManager::GetCurrentScene()->updateListMap[ go->GetUpdateOrder() ].remove(go);
	SceneManager::GetCurrentScene()->drawListMap[ go->GetDrawOrder() ].remove(go);
	FinalDeleteOrRelease(go);
}
// Moves an object between priority buckets.
// NOTE(review): assumes go->GetUpdateOrder()/GetDrawOrder() still returns the
// bucket the object currently lives in -- verify callers update it after.
void Scene::ChangeUpdateOrder(GameObject* go, int neworder)
{
	updateListMap[go->GetUpdateOrder()].remove(go);
	updateListMap[ neworder ].push_back( go );
}
void Scene::ChangeDrawOrder(GameObject* go, int neworder)
{
	drawListMap[go->GetDrawOrder()].remove(go);
	drawListMap[ neworder ].push_back( go );
}
|
Asadullah-Dal17/learn-opencv-python | source-code/Basic stuff/croping_region.py | <reponame>Asadullah-Dal17/learn-opencv-python
import cv2 as cv

# Load the previously saved image. cv.imread does not raise on a bad path --
# it returns None -- so fail fast with a clear error instead of crashing
# later inside cv.imshow.
img = cv.imread('../../saved_image/save_image.png')
if img is None:
    raise FileNotFoundError('could not read ../../saved_image/save_image.png')
cv.imshow('img', img)
cv.waitKey(0)

# Crop rectangle in pixel coordinates (top-left inclusive, bottom-right
# exclusive, as with any Python slice).
starting_x = 10
ending_x = 200
starting_y = 10
ending_y = 250

# NumPy images are indexed [rows, cols] == [y, x]; slicing returns a view of
# the original image, not a copy.
crop_region = img[starting_y:ending_y, starting_x:ending_x]
cv.imshow('cropped region', crop_region)
cv.waitKey(0)
embeddedartistry/embeddedartistry.github.io | framework/search/pages_7.js | <reponame>embeddedartistry/embeddedartistry.github.io
// Doxygen-generated search index fragment (pages starting with 'm').
// Generated file: do not edit by hand.
var searchData=
[
  ['mozilla_20public_20license_20version_202_2e0',['Mozilla Public License Version 2.0',['../df/d27/md_src_utilities_modm__l_i_c_e_n_s_e.html',1,'']]]
];
|
StavonJoy/podcast-network-1 | extensions/shoutem.firebase/app/services/handlers.js | <filename>extensions/shoutem.firebase/app/services/handlers.js
import _ from 'lodash';
// Module-level registries keyed by extension owner: one token callback per
// extension for APNS and FCM, and a map of notification lifecycle handlers
// (e.g. onNotificationTapped) per extension.
let APNSTokenReceivedHandlers = {};
let FCMTokenReceivedhandlers = {};
let notificationReceivedHandlers = {};
// Gathers, across all registered extensions, the handler each registered for
// the given lifecycle event (e.g. 'onNotificationTapped'); extensions without
// that handler are skipped.
function collectHandlers(targetEvent) {
  const collected = [];
  _.forEach(notificationReceivedHandlers, handlers => {
    const handler = _.get(handlers, targetEvent);
    if (handler) {
      collected.push(handler);
    }
  });
  return collected;
}
// Indexes an APNS token callback by the extension that owns it, so each
// extension holds at most one handler. Silently ignores malformed input
// (missing owner or callback), as before.
function registerAPNSTokenReceivedHandler(tokenHandler) {
  const owner = _.get(tokenHandler, 'owner');
  const onTokenReceived = _.get(tokenHandler, 'onTokenReceived');
  if (owner && onTokenReceived) {
    APNSTokenReceivedHandlers[owner] = onTokenReceived;
  }
}
// Same as the APNS variant: stores one FCM token callback per owning
// extension; ignores entries lacking an owner or callback.
function registerFCMTokenReceivedHandler(tokenHandler) {
  const extensionOwner = _.get(tokenHandler, 'owner');
  const handler = _.get(tokenHandler, 'onTokenReceived');
  if (extensionOwner && handler) {
    FCMTokenReceivedhandlers[extensionOwner] = handler;
  }
}
// Stores an extension's map of notification lifecycle handlers (keys such as
// onNotificationTapped), keyed by the owning extension.
function registerNotificationReceivedHandlers(notificationHandlers) {
  const extensionOwner = _.get(notificationHandlers, 'owner');
  const handlers = _.get(notificationHandlers, 'notificationHandlers');
  if (extensionOwner && handlers) {
    notificationReceivedHandlers[extensionOwner] = handlers;
  }
}
// Dispatchers: fan a token or notification out to every registered handler,
// passing the redux dispatch along so handlers can update app state.
export function handleFCMTokenReceived(token, dispatch) {
  _.forEach(FCMTokenReceivedhandlers, handler => handler(token, dispatch));
}
export function handleAPNSTokenReceived(token, dispatch) {
  _.forEach(APNSTokenReceivedHandlers, handler => handler(token, dispatch));
}
// Notification lifecycle dispatchers: each collects the handlers registered
// for one specific event across all extensions, then invokes them in turn.
export function handleNotificationReceivedBackground(notification, dispatch) {
  const mappedHandlers = collectHandlers('onNotificationReceivedBackground');
  _.forEach(mappedHandlers, handler => handler(notification, dispatch));
}
export function handleNotificationReceivedForeground(notification, dispatch) {
  const mappedHandlers = collectHandlers('onNotificationReceivedForeground');
  _.forEach(mappedHandlers, handler => handler(notification, dispatch));
}
export function handleNotificationTapped(notification, dispatch) {
  const mappedHandlers = collectHandlers('onNotificationTapped');
  _.forEach(mappedHandlers, handler => handler(notification, dispatch));
}
export function handlePendingNotification(notification, dispatch) {
  const mappedHandlers = collectHandlers('onPendingNotificationDispatched');
  _.forEach(mappedHandlers, handler => handler(notification, dispatch));
}
export default {
registerAPNSTokenReceivedHandler,
registerNotificationReceivedHandlers,
registerFCMTokenReceivedHandler,
};
|
echisMOH/echisCommCareMOH | app/src/org/commcare/android/logging/ForceCloseLogEntry.java | package org.commcare.android.logging;
import android.os.Build;
import org.commcare.CommCareApplication;
import org.commcare.android.javarosa.AndroidLogEntry;
import org.commcare.preferences.DevSessionRestorer;
import org.commcare.util.LogTypes;
import org.javarosa.core.util.externalizable.DeserializationException;
import org.javarosa.core.util.externalizable.ExtUtil;
import org.javarosa.core.util.externalizable.PrototypeFactory;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Date;
/**
* Log entry for force closes, capturing the app build number, android version, device model,
* readable session string, and serialized session string.
*
* @author <NAME>
*/
public class ForceCloseLogEntry extends AndroidLogEntry {
public static final String STORAGE_KEY = "forcecloses";
private int appBuildNumber;
private String androidVersion;
private String deviceModel;
private String readableSessionString;
private String serializedSessionString;
private String appId;
private String userId;
/**
* Serialization only
*/
public ForceCloseLogEntry() {
}
public ForceCloseLogEntry(String stackTrace) {
super(LogTypes.TYPE_FORCECLOSE, stackTrace, new Date());
appBuildNumber = ReportingUtils.getAppBuildNumber();
androidVersion = Build.VERSION.RELEASE;
deviceModel = Build.MODEL;
readableSessionString = ReportingUtils.getCurrentSession();
serializedSessionString = DevSessionRestorer.getSerializedSessionString();
appId = ReportingUtils.getAppId();
userId = CommCareApplication.instance().getCurrentUserId();
}
public int getAppBuildNumber() {
return appBuildNumber;
}
public String getAndroidVersion() {
return androidVersion;
}
public String getDeviceModel() {
return deviceModel;
}
public String getReadableSession() {
return readableSessionString;
}
public String getSerializedSessionString() {
return serializedSessionString;
}
public String getAppId() {
return appId;
}
public String getUserId() {
return userId;
}
@Override
public void readExternal(DataInputStream in, PrototypeFactory pf)
throws IOException, DeserializationException {
super.readExternal(in, pf);
appBuildNumber = ExtUtil.readInt(in);
androidVersion = ExtUtil.readString(in);
deviceModel = ExtUtil.readString(in);
readableSessionString = ExtUtil.readString(in);
serializedSessionString = ExtUtil.readString(in);
appId = ExtUtil.readString(in);
userId = ExtUtil.readString(in);
}
@Override
public void writeExternal(DataOutputStream out) throws IOException {
super.writeExternal(out);
ExtUtil.writeNumeric(out, appBuildNumber);
ExtUtil.writeString(out, androidVersion);
ExtUtil.writeString(out, deviceModel);
ExtUtil.writeString(out, readableSessionString);
ExtUtil.writeString(out, serializedSessionString);
ExtUtil.writeString(out, appId);
ExtUtil.writeString(out, userId);
}
}
|
cdot/Squirrel | js/GoogleDriveStore.js | <reponame>cdot/Squirrel
/*@preserve Copyright (C) 2015-2019 <NAME> http://c-dot.co.uk license MIT*/
/* eslint-env browser */
/* global gapi */
var gapi_is_loaded = false;
var gapi_loader;
// Redirect target after gapi loading
/* eslint-disable no-unused-vars */
function gapi_on_load() {
if (this.debug) this.debug("gapi is loaded");
gapi_is_loaded = true;
if (gapi_loader)
gapi_loader();
}
/* eslint-enable no-unused-vars */
define("js/GoogleDriveStore", [
'js/Utils', 'js/Translator', 'js/HttpServerStore', 'js/Serror'
], (Utils, Translator, HttpServerStore, Serror) => {
const TX = Translator.TX;
// Client ID from Google APi dashboard. Note this is only valid
// for requests from specific URLs, so if you want to host your
// own Squirrel version - for example, to host a test version on
// localhost - you will have to
// change it. You can do so on https://console.developers.google.com
const CLIENT_ID = "985219699584-mt1do7j28ifm2vt821d498emarmdukbt.apps.googleusercontent.com";
// While the appfolder would seem to make sense for Squirrel, it does make
// it absolutely clear to an attacker where to look for Squirrel data files.
// By granting full drive access, we open up the whole drive for possible
// places to hoard.
const SCOPE = "https://www.googleapis.com/auth/drive";
const BOUNDARY = "-------314159265358979323846";
const DELIMITER = `\r\n--${BOUNDARY}\r\n`;
const RETIMILED = `\r\n--${BOUNDARY}--`;
const DISCOVERY_DOCS = ["https://www.googleapis.com/discovery/v1/apis/drive/v3/rest"];
/**
* A store using Google Drive
* @extends HttpServerStore
*/
class GoogleDriveStore extends HttpServerStore {
/**
* See {@link HttpServerStore} for other constructor options
* Sets `options.needs_url` and `options.url`
*/
constructor(p) {
super(p);
this.type = "GoogleDriveStore";
// Override HttpServerStore
this.option("needs_url", false);
this.option("url", "");
}
/**
* @Override
*/
init() {
if (gapi_is_loaded) {
if (this.debug) this.debug("gapi is already loaded");
return this._init();
}
return new Promise(resolve => {
gapi_loader = () => {
if (this.debug) this.debug("Loading GoogleDriveStore");
resolve(this._init());
};
return $.getScript("https://apis.google.com/js/client.js?onload=gapi_on_load");
});
}
/**
* Analyse an error returned by a Google promise
* @private
*/
_gError(r, context) {
let mess = `${context} ` + TX.tx("failed") + ": ";
if (typeof r.details !== 'undefined')
mess += r.details;
else if (typeof r.error !== 'undefined')
mess += r.error;
else if (r.status === 401) {
mess +=
TX.tx("Your access token has expired, or you are not logged in.") +
' ' +
TX.tx("Please refresh the page in order to save in Google Drive");
} else if (r.result && r.result.error) {
mess += r.result.error.message;
} else {
mess += r.body;
}
if (this.debug) this.debug(mess);
return ` ${mess}`;
}
_init() {
// Timeout after 20 seconds of waiting for auth
const tid = window.setTimeout(function () {
window.clearTimeout(tid);
throw new Serror(
408,
TX.tx("Timeout trying to authorise access to Google Drive.")
+ ' '
+ TX.tx("Are popups blocked in your browser?"));
}, 20000);
if (this.debug) this.debug("authorising");
return new Promise((resolve, reject) => {
gapi.load("client:auth2", () => {
let gauth;
gapi.client.init({
//immediate: true,
client_id: CLIENT_ID,
discoveryDocs: DISCOVERY_DOCS,
scope: SCOPE
})
.then(
() => {
window.clearTimeout(tid);
gauth = gapi.auth2.getAuthInstance();
let promise;
if (gauth.isSignedIn.get()) {
// User is signed in, requests can be sent to
// the API.
promise = Promise.resolve();
} else {
// User is not signed in. Promise to auth.
promise = new Promise(resolve => {
gauth.isSignedIn.listen(function(sin) {
if (sin)
resolve();
else
reject();
});
gauth.signIn();
});
}
return promise.then(() => {
const guser = gauth.currentUser.get();
const gprofile = guser.getBasicProfile();
const name = gprofile.getName();
if (this.debug) this.debug(`auth OK, user ${name}`);
this.option("user", name);
return gapi.client.load("drive", "v3");
});
},
gerror => { throw new Serror(403, gerror); }
)
.then(() => {
if (this.debug) this.debug("drive/v3 loaded");
resolve();
})
.catch(r => {
throw new Serror(
500, this._gError(r, TX.tx("Google Drive load")));
});
});
});
}
/**
* @Override
*/
addAuth(headers) {
headers.Authorization = `Bearer ${gapi.auth2.getToken().access_token}`;
}
/**
* Promise to get the id of the folder at the end of the given path, optionally creating
* the folders if they don't exist.
* Any errors thrown will be from Google
* @private
*/
_follow_path(parentid, path, create) {
if (path.length === 0)
return Promise.resolve(parentid);
const p = path.slice();
const pathel = p.shift();
function create_folder() {
const metadata = {
title: pathel,
mimeType: "application/vnd.google-apps.folder"
};
if (parentid !== "root")
// Don't think we want this for a root file?
metadata.parents = [{
id: parentid
}];
if (this.debug) this.debug(`Creating folder ${pathel} under ${parentid}`);
return gapi.client.drive.files
.insert(metadata)
.then(response =>
this._follow_path(response.result.id, p, true));
}
const query = `title='${pathel}' and '${parentid}' in parents` +
" and mimeType='application/vnd.google-apps.folder' and trashed=false";
return gapi.client.drive.files
.list({
q: query,
fields: "files/id"
})
.then(response => {
const files = response.result.files;
if (files.length > 0) {
const id = files[0].id;
if (this.debug) this.debug(`found ${query} at ${id}`);
return this._follow_path(id, p, create);
}
if (this.debug) this.debug(`could not find ${query}`);
if (create)
return create_folder();
this.status(404);
return undefined;
});
}
/**
* Promise to put data at the given path, optionally creating
* intermediate folders if they don't exist.
* Any errors thrown will be from Google
* @private
*/
// id is a (string) id or a { parentid: name: structure }
_putfile(parentid, name, data, id) {
let url = "/upload/drive/v2/files";
let method = "POST";
const params = {
uploadType: "multipart",
visibility: "PRIVATE"
};
const metadata = {
title: name,
mimeType: "application/octet-stream"
};
if (typeof parentid !== 'undefined') {
metadata.parents = [{
id: parentid
}];
}
if (typeof id !== 'undefined') {
// Known fileId, we're updating an existing file
url += `/${id}`;
method = "PUT";
}
let multipartRequestBody =
DELIMITER +
"Content-Type: application/json\r\n\r\n" +
JSON.stringify(metadata) +
DELIMITER +
"Content-Type: application/octet-stream\r\n" +
"Content-Transfer-Encoding: base64\r\n" +
"\r\n" +
Utils.Uint8ArrayToBase64(data) +
RETIMILED;
return gapi.client
.request({
path: url,
method: method,
params: params,
headers: {
"Content-Type": `multipart/related; boundary="${BOUNDARY}"`
},
body: multipartRequestBody
})
.then((/*response*/) => true)
.catch(e => {
this.status(e.code);
return false;
});
}
/**
* @Override
*/
write(path, data) {
if (this.debug) this.debug("write", path);
const p = path.split("/");
const name = p.pop();
let parentId;
return this
._follow_path("root", p, true)
.then(pid => {
if (typeof pid === 'undefined')
return false;
parentId = pid;
// See if the file already exists, if it does then use it's id
if (this.debug) this.debug(`checking existance of ${name}`);
return gapi.client.drive.files
.list({
q: `name='${name}' and '${parentId}' in parents and trashed=false`,
fields: "files/id"
});
})
.then(response => {
const files = response.result.files;
let id;
if (files.length > 0) {
id = files[0].id;
if (this.debug) this.debug(`updating ${name} ${id}`);
} else
if (this.debug) this.debug(`creating ${name} in ${parentId}`);
return this._putfile(parentId, name, data, id);
})
.catch(r => {
throw new Serror(400, path + this._gError(r, TX.tx("Write")));
});
}
/**
* @Override
*/
read(path) {
if (this.debug) this.debug("read", path);
const p = path.split("/");
const name = p.pop();
return this
._follow_path("root", p, false)
.then(parentId => {
if (typeof parentId === 'undefined')
return undefined;
if (this.debug) this.debug(
`listing files called ${name} in ${parentId}`);
return gapi.client.drive.files
.list({
q: `name='${name}' and '${parentId}' in parents and trashed=false`,
// "*" shows all fields. We only need the id for matched files.
fields: "files/id"
});
})
.then(response => {
const files = response.result.files;
if (files === null || files.length === 0) {
if (this.debug) this.debug(`could not find ${name}`);
throw new Serror(401, `${path} not found`);
}
const id = files[0].id;
if (this.debug) this.debug(`found '${name}' id ${id}`);
return gapi.client.drive.files.get(
{
fileId: id,
alt: "media"
})
.then(res => {
// alt=media requests content-type=text/plain. AFAICT the
// file comes in base64-encoded, and is simply converted
// to a 'string' by concatenating the bytes,
// one per code point, without any decoding (thankfully!)
const a = new Uint8Array(res.body.length);
for (let i = 0; i < a.length; i++)
a[i] = res.body.codePointAt(i);
return a;
});
})
.catch(r => {
throw new Serror(400, path + this._gError(r, TX.tx("Read")));
});
}
}
return GoogleDriveStore;
});
|
jeffaustin81/cropcompass-frontend | src/components/TopCrops/index.js | import TopCrops from './TopCrops'
export default TopCrops
|
Welchd1/resolve-intellij-plugin-v4 | src/edu/clemson/resolve/jetbrains/completion/QualifierInsertHandler.java | package edu.clemson.resolve.jetbrains.completion;
import com.intellij.codeInsight.AutoPopupController;
import com.intellij.codeInsight.completion.BasicInsertHandler;
import com.intellij.codeInsight.completion.InsertionContext;
import com.intellij.codeInsight.lookup.LookupElement;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import org.jetbrains.annotations.NotNull;
class QualifierInsertHandler extends BasicInsertHandler<LookupElement> {
private final String insertStr;
//TODO: Get rid of pad.
QualifierInsertHandler(String aStr, boolean pad) {
this.insertStr = aStr;
}
@Override
public void handleInsert(@NotNull InsertionContext context, LookupElement item) {
Editor editor = context.getEditor();
int tailOffset = context.getTailOffset();
Document document = editor.getDocument();
context.commitDocument();
boolean staysAtChar = document.getTextLength() > tailOffset &&
String.valueOf(document.getCharsSequence().charAt(tailOffset)).equals(insertStr);
context.setAddCompletionChar(false);
if (!staysAtChar) {
document.insertString(tailOffset, insertStr);
}
editor.getCaretModel().moveToOffset(tailOffset + insertStr.length());
AutoPopupController.getInstance(context.getProject()).scheduleAutoPopup(editor);
}
}
|
epam-debrecen-rft-2015/atsy | web/src/test/java/com/epam/rft/atsy/web/controllers/rest/CandidateApplicationControllerTest.java | package com.epam.rft.atsy.web.controllers.rest;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.mockito.BDDMockito.given;
import static org.mockito.BDDMockito.then;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
import com.epam.rft.atsy.service.ApplicationsService;
import com.epam.rft.atsy.service.domain.CandidateApplicationDTO;
import com.epam.rft.atsy.service.response.PagingResponse;
import com.epam.rft.atsy.web.MediaTypes;
import com.epam.rft.atsy.web.controllers.AbstractControllerTest;
import com.epam.rft.atsy.web.messageresolution.MessageKeyResolver;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.springframework.test.web.servlet.ResultActions;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
@RunWith(MockitoJUnitRunner.class)
public class CandidateApplicationControllerTest extends AbstractControllerTest {
private static final String REQUEST_URL = "/secure/applications/";
private static final String URL_PAGE_REQUEST_ENDING = "?pageNumber=1&pageSize=10";
private static final String APPLICATION_STATE = "candidate.table.state.";
private static final Long CANDIDATE_ID = 1L;
private static final Long LAST_STATE_ID = 2L;
private static final Long APPLICATION_ID = 3L;
private static final String DEVELOPER_POSITION = "Developer";
private static final String SYSADMIN_POSITION = "Developer";
private static final Date CREATION_DATE = asDate(LocalDate.of(2016, 8, 1));
private static final Date MODIFICATION_DATE = asDate(LocalDate.of(2016, 8, 2));
private static final String RAW_STATE_TYPE = "raw";
private static final String LOCALIZED_STATE_TYPE = "localized";
private static final int PAGE_NUMBER_ZERO = 0;
private static final int PAGE_SIZE_TEN = 10;
@Mock
private ApplicationsService applicationsService;
@Mock
private MessageKeyResolver messageKeyResolver;
@InjectMocks
private CandidateApplicationController candidateApplicationController;
private CandidateApplicationDTO developerApplicationDto;
private CandidateApplicationDTO sysadminApplicationDto;
@Override
protected Object[] controllersUnderTest() {
return new Object[]{candidateApplicationController};
}
@Before
public void setUpTestData() {
developerApplicationDto =
CandidateApplicationDTO.builder().lastStateId(LAST_STATE_ID)
.id(APPLICATION_ID)
.name(DEVELOPER_POSITION).creationDate(CREATION_DATE)
.modificationDate(MODIFICATION_DATE)
.stateType(RAW_STATE_TYPE)
.build();
sysadminApplicationDto =
CandidateApplicationDTO.builder().lastStateId(LAST_STATE_ID)
.id(APPLICATION_ID)
.name(SYSADMIN_POSITION).creationDate(CREATION_DATE)
.modificationDate(MODIFICATION_DATE)
.stateType(RAW_STATE_TYPE)
.build();
given(messageKeyResolver
.resolveMessageOrDefault(Matchers.anyString(), Matchers.any(Object[].class)))
.willReturn(LOCALIZED_STATE_TYPE);
}
@Test
public void loadApplicationsShouldRespondWithEmptyCollectionWhenThereAreNoApplications()
throws Exception {
given(applicationsService
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN))
.willReturn(new PagingResponse<CandidateApplicationDTO>(0L, Collections.emptyList()));
mockMvc.perform(
get(REQUEST_URL + CANDIDATE_ID.toString() + URL_PAGE_REQUEST_ENDING)
.accept(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(jsonPath("$.total").isNumber())
.andExpect(jsonPath("$.total").value(0))
.andExpect(jsonPath("$.rows").isArray())
.andExpect(jsonPath("$.rows").isEmpty());
then(applicationsService).should()
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN);
verifyZeroInteractions(messageKeyResolver);
}
@Test
public void loadApplicationsShouldRespondWithSingleElementArrayWhenThereIsOnlyOneApplication()
throws Exception {
given(applicationsService
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN)).willReturn(
new PagingResponse<CandidateApplicationDTO>(1L,
Collections.singletonList(developerApplicationDto)));
ResultActions resultActions = mockMvc.perform(
get(REQUEST_URL + CANDIDATE_ID.toString() + URL_PAGE_REQUEST_ENDING)
.accept(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(jsonPath("$.total").isNumber())
.andExpect(jsonPath("$.total").value(1))
.andExpect(jsonPath("$.rows").isArray())
.andExpect(jsonPath("$.rows[0]").exists())
.andExpect(jsonPath("$.rows[1]").doesNotExist());
assertApplicationResponse(resultActions, 0, developerApplicationDto);
then(applicationsService).should()
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN);
then(messageKeyResolver).should()
.resolveMessageOrDefault(APPLICATION_STATE + RAW_STATE_TYPE, RAW_STATE_TYPE);
}
@Test
public void loadApplicationsShouldRespondWithTwoElementArrayWhenThereAreTwoApplications()
throws Exception {
given(applicationsService
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN)).willReturn(
new PagingResponse<CandidateApplicationDTO>(2L,
Arrays.asList(developerApplicationDto, sysadminApplicationDto)));
ResultActions resultActions = mockMvc.perform(
get(REQUEST_URL + CANDIDATE_ID.toString() + URL_PAGE_REQUEST_ENDING)
.accept(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaTypes.APPLICATION_JSON_UTF8))
.andExpect(jsonPath("$.total").isNumber())
.andExpect(jsonPath("$.total").value(2))
.andExpect(jsonPath("$.rows").isArray())
.andExpect(jsonPath("$.rows[0]").exists())
.andExpect(jsonPath("$.rows[1]").exists())
.andExpect(jsonPath("$.rows[2]").doesNotExist());
assertApplicationResponse(resultActions, 0, developerApplicationDto);
assertApplicationResponse(resultActions, 1, sysadminApplicationDto);
then(applicationsService).should()
.getApplicationsByCandidateId(CANDIDATE_ID, PAGE_NUMBER_ZERO, PAGE_SIZE_TEN);
then(messageKeyResolver).should(times(2))
.resolveMessageOrDefault(APPLICATION_STATE + RAW_STATE_TYPE, RAW_STATE_TYPE);
}
private void assertApplicationResponse(ResultActions resultActions, int index,
CandidateApplicationDTO applicationDto) throws Exception {
String basePath = "$.rows[" + index + "].";
resultActions
.andExpect(jsonPath(basePath + "lastStateId",
equalTo(applicationDto.getLastStateId().intValue())))
.andExpect(jsonPath(basePath + "id",
equalTo(applicationDto.getId().intValue())))
.andExpect(jsonPath(basePath + "name", equalTo(applicationDto.getName())))
.andExpect(jsonPath(basePath + "creationDate",
equalTo(applicationDto.getCreationDate().getTime())))
.andExpect(jsonPath(basePath + "modificationDate",
equalTo(applicationDto.getModificationDate().getTime())))
.andExpect(jsonPath(basePath + "stateType", equalTo(LOCALIZED_STATE_TYPE)));
}
public static Date asDate(LocalDate localDate) {
return Date.from(localDate.atStartOfDay().atZone(ZoneId.systemDefault()).toInstant());
}
}
|
pervasivesolutions/synth | synth/devices/cluster.py | <filename>synth/devices/cluster.py
"""
cluster
=====
Simulates sites which are clusters, e.g. bike/scooter sharing docs, multiple charging points on a single site,
A device is a cluster. Each cluster has a number of "slots", each of which are available/unavailable at any moment in time, giving rise to a total number of used/available slots.
Configurable parameters::
{
"min_slots_per_cluster" : 4,
"max_slots_per_cluster" : 32
}
Device properties created::
{
<TODO>
}
"""
from .device import Device
from .helpers import opening_times as opening_times
import random
import isodate
import logging
MINUTES = 60
HOURS = 60*60
DAYS = HOURS*24
TICK_INTERVAL_S = 15 * MINUTES
DEFAULT_MIN_SLOTS_PER_CLUSTER = 4
DEFAULT_MAX_SLOTS_PER_CLUSTER = 32
DEFAULT_OCCUPANCY_PATTERN = "rushhour"
DEFAULT_OCCUPANCY_RANDOMNESS = 0.3
DEFAULT_TIME_SKEW_RANDOMNESS = 3 * HOURS
class Cluster(Device):
myRandom = random.Random() # Use our own private random-number generator, so we will repeatably generate the same device ID's regardless of who else is asking for random numbers
myRandom.seed(1234)
def __init__(self, instance_name, time, engine, update_callback, context, params):
super(Cluster,self).__init__(instance_name, time, engine, update_callback, context, params)
min_slots = params["cluster"].get("min_slots_per_cluster", DEFAULT_MIN_SLOTS_PER_CLUSTER)
max_slots = params["cluster"].get("max_slots_per_cluster", DEFAULT_MAX_SLOTS_PER_CLUSTER)
self.occupancy_pattern = params["cluster"].get("occupancy_pattern", DEFAULT_OCCUPANCY_PATTERN)
self.occupancy_randomness = params["cluster"].get("occupancy_randomness", DEFAULT_OCCUPANCY_RANDOMNESS)
time_skew_randomness = params["cluster"].get("time_skew_randomness", DEFAULT_TIME_SKEW_RANDOMNESS)
self.num_slots = Cluster.myRandom.randrange(min_slots, max_slots)
self.time_skew = time_skew_randomness / 2.0 + Cluster.myRandom.random() * time_skew_randomness / 2.0
self.available_slots = self.calc_occupancy()
self.set_properties({"num_slots" : self.num_slots, "available_slots" : self.available_slots})
engine.register_event_in(TICK_INTERVAL_S, self.tick_availability, self, self)
def comms_ok(self):
return super(Cluster,self).comms_ok()
def external_event(self, event_name, arg):
super(Cluster,self).external_event(event_name, arg)
pass
def close(self):
super(Cluster,self).close()
# Private methods
def tick_availability(self, _):
self.available_slots = self.calc_occupancy()
self.set_property("available_slots", self.available_slots)
self.engine.register_event_in(TICK_INTERVAL_S, self.tick_availability, self, self)
def calc_occupancy(self):
occupancy = opening_times.chance_of_occupied(self.engine.get_now() + self.time_skew, self.occupancy_pattern)
occupancy = occupancy - self.occupancy_randomness / 2.0 + Cluster.myRandom.random() * self.occupancy_randomness / 2.0
return occupancy
|
victoryw/LADE_Analysis | scan_plsql/src/main/java/me/analysis/pl/visitor/PlSqlRuleVisitor.java | package me.analysis.pl.visitor;
import me.analysis.pl.PlSqlParserTree;
import me.analysis.pl.generated.PlSqlBaseVisitor;
import org.antlr.v4.runtime.ANTLRInputStream;
import org.antlr.v4.runtime.TokenStreamRewriter;
import org.antlr.v4.runtime.tree.ParseTree;
/**
* Базовый класс для наследования всех новых *Visitor
*/
public class PlSqlRuleVisitor extends PlSqlBaseVisitor<Void> {
protected ANTLRInputStream input;
protected TokenStreamRewriter rewriter;
protected ParseTree parseTree;
@SuppressWarnings("unused")
private PlSqlRuleVisitor() {
// нельзя создавать без параметров
}
public PlSqlRuleVisitor(PlSqlParserTree tree) {
input = tree.getInput();
rewriter = tree.getRewriter();
parseTree = tree.getParseTree();
}
public Void visit() {
return super.visit(parseTree);
}
}
|
FedorSmirnov89/EE-Control | src/main/java/at/uibk/dps/ee/control/modules/EnactmentVerticleModule.java | package at.uibk.dps.ee.control.modules;
import org.opt4j.core.config.annotations.Info;
import org.opt4j.core.config.annotations.Order;
import org.opt4j.core.start.Constant;
import at.uibk.dps.ee.control.command.Control;
import at.uibk.dps.ee.control.enactment.WorkerEnactment;
import at.uibk.dps.ee.control.extraction.WorkerExtraction;
import at.uibk.dps.ee.control.init.InitializerDelayedExecution;
import at.uibk.dps.ee.control.scheduling.WorkerScheduling;
import at.uibk.dps.ee.control.transformation.WorkerTransformation;
import at.uibk.dps.ee.control.transmission.WorkerTransmission;
import at.uibk.dps.ee.control.verticles.VerticleFunction;
import at.uibk.dps.ee.control.verticles.VerticleManager;
import at.uibk.dps.ee.core.CoreFunction;
import io.vertx.core.impl.cpu.CpuCoreSensor;
/**
* The {@link EnactmentVerticleModule} is used to configure the binding of the
* Apollo handlers used to process the messages on the VertX event bus.
*
* @author <NAME>
*/
public class EnactmentVerticleModule extends VerticleModule {
@Order(1)
@Info("If checked, the EE will be initially in the PAUSED state.")
@Constant(namespace = Control.class, value = "pauseOnStart")
protected boolean pauseOnStart;
@Order(2)
@Info("Number of verticles deployed for each verticle type.")
@Constant(namespace = VerticleManager.class, value = "deploymentNumber")
protected int deploymentNumber = 2 * CpuCoreSensor.availableProcessors();
@Order(3)
@Info("Delays the enactment by a time interval.")
@Constant(namespace = InitializerDelayedExecution.class, value = "delayInSeconds")
protected int delayInSeconds;
@Override
protected void config() {
bind(CoreFunction.class).to(VerticleFunction.class);
// worker handlers
addEBusVerticle(WorkerTransmission.class);
addEBusVerticle(WorkerScheduling.class);
addEBusVerticle(WorkerEnactment.class);
addEBusVerticle(WorkerExtraction.class);
addEBusVerticle(WorkerTransformation.class);
// probably remove this and remove enactment listener
addEnactmentStateListener(Control.class);
addInitializer(InitializerDelayedExecution.class);
}
public boolean isPauseOnStart() {
return pauseOnStart;
}
public void setPauseOnStart(final boolean pauseOnStart) {
this.pauseOnStart = pauseOnStart;
}
public int getDeploymentNumber() {
return deploymentNumber;
}
public void setDeploymentNumber(final int deploymentNumber) {
this.deploymentNumber = deploymentNumber;
}
public int getDelayInSeconds() {
return delayInSeconds;
}
public void setDelayInSeconds(final int delayInSeconds) {
this.delayInSeconds = delayInSeconds;
}
}
|
anoop1984/python_sdk | ucsmsdk/utils/ucstechsupport.py | # Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the APIs used to create and download tech_support file.
"""
import os
import time
import datetime
import logging
from ..ucsexception import UcsValidationException, UcsWarning
log = logging.getLogger('ucs')
def get_ucs_tech_support(handle,
ucs_manager=False,
ucs_mgmt=False,
chassis_id=None, cimc_id=None,
adapter_id=None, iom_id=None,
fex_id=None,
rack_server_id=None, rack_adapter_id=None,
remove_from_ucs=False,
download_techsupp=True, file_dir=None, file_name=None,
timeout_in_sec=600):
"""
This operation creates and downloads the technical support file for
the specified Ucs server.
Args:
handle (UcsHandle): Ucs connection handle
ucs_manager (bool): True/False,
False - by default
Create and download TechSupport for UCSM, if true
ucs_mgmt (bool): True/False,
False - by default
Create and download TechSupport for UCSM Management
services(excluding Fabric interconnects), if true
chassis_id (int): chassis id
cimc_id (int/string): for a specific chassis. Can be 'all'.
adapter_id (int/string): for a specific chassis. Can be 'all'.
iom_id (int/string): for a specific chassis. Can be 'all'.
fex_id (int): id of a fabric extender.
rack_server_id (int): id of a rack server.
rack_adapter_id (int/string): adaptor_id for a specific rack server.
Can be 'all'.
remove_from_ucs (bool): True/False,
False - by default
TechSupport will be removed from server, if True
download_techsupp (bool): True/False,
True - by default
Download the TechSupport file, if True
file_dir (str): directory to download tech support file to
file_name (str): name of the download tech support file
timeout_in_sec (int): specifies the time in seconds after which
the operation times-out.
Example:
* M - Manadatory, O - Optional
* Note:
Mandatory in ALL param sets: file_dir, file_name
Optional in ALL param sets: timeout_in_sec, remove_from_ucs,
download_techsupp
* param set 1:
* M - ucs_manager
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_manager.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
ucs_manager=True)
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
ucs_manager=True,
timeout_in_sec=300,
remove_from_ucs=True)
* param set 2:
* M - ucs_manager
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_mgmt.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
ucs_mgmt=True)
* param set 3:
* M - chassis_id, cimc_id
* O - adapter_id
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_mgmt.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
chassis_id=1,
cimc_id=1,
adapter_id=1)
* param set 4:
* M - chassis_id, iom_id
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_mgmt.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
chassis_id=1,
iom_id=1)
* param set 5:
* M - fex_id
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_mgmt.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
fex_id=1)
* param set 6:
* M - rack_server_id
* O - rack_adapter_id
---------------------------------------------------------------
file_dir = "/home/user/techsupp"
file_name = "techsupp_ucs_mgmt.tar"
get_ucs_tech_support(handle,
file_dir=file_dir,
file_name=file_name,
rack_server_id=1,
rack_adapter_id=1)
"""
from ..mometa.top.TopSystem import TopSystem
from ..mometa.sysdebug.SysdebugTechSupport import SysdebugTechSupport, \
SysdebugTechSupportConsts
from ..mometa.sysdebug.SysdebugTechSupFileRepository import \
SysdebugTechSupFileRepository
from ..mometa.sysdebug.SysdebugTechSupportCmdOpt import \
SysdebugTechSupportCmdOpt, SysdebugTechSupportCmdOptConsts
if download_techsupp:
if file_name is None:
raise UcsValidationException('provide file_name')
if file_dir is None:
raise UcsValidationException('provide dir_name')
if not file_name.endswith('.tar'):
raise UcsValidationException('file_name should end with .tar')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
# Converting timedelta in to total seconds for Python version compatibility
dt1 = datetime.datetime(1970, 1, 1, 12, 0, 0, 0)
dt2 = datetime.datetime.utcnow()
creation_ts = int((dt2 - dt1).total_seconds())
# create SysdebugTechSupport
top_system = TopSystem()
sysdebug_techsup_file_repo = SysdebugTechSupFileRepository(
parent_mo_or_dn=top_system)
sys_debug_tech_support = SysdebugTechSupport(
parent_mo_or_dn=sysdebug_techsup_file_repo,
creation_ts=str(creation_ts),
admin_state=SysdebugTechSupportConsts.ADMIN_STATE_START)
sys_debug_tech_support_cmd_opt = SysdebugTechSupportCmdOpt(
parent_mo_or_dn=sys_debug_tech_support)
# Parameter Set UCSM
if ucs_manager:
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_UCSM
elif ucs_mgmt:
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_UCSM_MGMT
elif chassis_id is not None:
if cimc_id is not None:
sys_debug_tech_support_cmd_opt.chassis_cimc_id = str(cimc_id)
sys_debug_tech_support_cmd_opt.chassis_id = str(chassis_id)
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_CHASSIS
if adapter_id is None:
sys_debug_tech_support_cmd_opt.cimc_adapter_id = \
SysdebugTechSupportCmdOptConsts.CIMC_ADAPTER_ID_ALL
else:
sys_debug_tech_support_cmd_opt.cimc_adapter_id = \
str(adapter_id)
elif iom_id is not None:
sys_debug_tech_support_cmd_opt.chassis_iom_id = str(iom_id)
sys_debug_tech_support_cmd_opt.chassis_id = str(chassis_id)
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_CHASSIS
elif rack_server_id is not None:
sys_debug_tech_support_cmd_opt.rack_server_id = str(iom_id)
if rack_adapter_id is None:
sys_debug_tech_support_cmd_opt.rack_server_adapter_id = \
SysdebugTechSupportCmdOptConsts.RACK_SERVER_ADAPTER_ID_ALL
else:
sys_debug_tech_support_cmd_opt.rack_server_adapter_id = \
str(rack_adapter_id)
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_SERVER
elif fex_id is not None:
sys_debug_tech_support_cmd_opt.fab_ext_id = str(iom_id)
sys_debug_tech_support_cmd_opt.major_opt_type = \
SysdebugTechSupportCmdOptConsts.MAJOR_OPT_TYPE_FEX
handle.add_mo(sys_debug_tech_support)
handle.commit()
# poll for tech support to complete
duration = timeout_in_sec
poll_interval = 2
status = False
while True:
tech_support = handle.query_dn(sys_debug_tech_support.dn)
if tech_support.oper_state == \
SysdebugTechSupportConsts.OPER_STATE_AVAILABLE:
status = True
if status:
break
time.sleep(min(duration, poll_interval))
duration = max(0, (duration - poll_interval))
if duration == 0:
handle.remove_mo(tech_support)
handle.commit()
raise UcsValidationException('TechSupport file creation timed out')
# download tech support file
if download_techsupp:
url_suffix = "techsupport/" + tech_support.name
try:
handle.file_download(url_suffix=url_suffix,
file_dir=file_dir,
file_name=file_name)
except Exception as err:
UcsWarning(str(err))
# remove tech support file from ucs
if remove_from_ucs:
tech_support.admin_state = "delete"
handle.set_mo(tech_support)
handle.commit()
return tech_support
|
Gayath1/project-hrm-client-new | src/views/dashboard/Home Page/rfid.js | <filename>src/views/dashboard/Home Page/rfid.js
import React, { useState } from 'react';
import {Link} from 'react-router-dom';
import axios from 'axios';
import {
CCard,
CCardBody,
CCardHeader,
CCol,
CButton,
CForm,
CFormGroup,
CFormText,
CInput,
CLabel,
CRow, CAlert,CSpinner
} from '@coreui/react'
// Default avatar used when the employee record has no photo of its own.
var imageName = require('src/assets/img_avatar.png')
// Module-level latch: after a completed scan one more key-up event fires,
// and this flag makes that follow-up invocation a no-op.
// NOTE(review): module-level state is shared by all instances of this
// component — confirm that only one kiosk screen is ever mounted.
var isEffect = false;

// Kiosk-style RFID check-in screen: reads a card number, POSTs a movement
// to the HRM backend and shows the matched employee's name/shift/type/photo.
const Tables = () => {
  // Employee details rendered after a successful lookup.
  const [firstName, setfirstName] = useState();
  const [ShiftName, setShiftName] = useState([]);
  const [Image, setImage] = useState();
  const [employeeTypeId, setemployeeTypeId] = useState([]);
  // In-flight progress message ("Please Wait...").
  const [succ, setSucc] = useState();
  // Current contents of the RFID input field.
  const [rfid, setrfid] = useState();
  // Disables the input while a request is running.
  const [disabled, setDisabled] = useState(false);
  // NOTE(review): hard-coded bearer-token placeholder — should come from
  // login/auth state or configuration, not a source literal.
  const token = '<KEY>' ;
  // Error message from the backend, if any.
  const [err, setErr] = useState();
  const headers = {
    headers: {
      "Authorization":`Bearer ${token}`
    }
  };

  // Fired on every key-up in the RFID field; triggers the lookup once the
  // scanned value reaches 10 characters.
  const testIt = async (num) => {
    // Clear any previously displayed employee before the new lookup.
    setErr("");
    setfirstName("");
    setShiftName("");
    setImage("");
    setemployeeTypeId("");
    setrfid(num)
    // Swallow the duplicate key-up that follows a completed scan.
    if (isEffect) { isEffect = false; return;}
    if (num.length >= 10) {
      try{
        setDisabled(true)
        setSucc("Please Wait...");
        // NOTE(review): `rfid` here is the state value from the previous
        // render, not the `num` just scanned — setrfid(num) above has not
        // re-rendered yet. Looks like `num` was intended; confirm.
        const body = ({rfid} );
        const loginResponse = await axios.post("https://hrm-innovigent.herokuapp.com/api/v1/movements", body,headers);
        setfirstName(loginResponse.data.data.employee.firstName);
        setShiftName(loginResponse.data.data.ShiftName);
        // Use the employee's own photo when present, else the bundled avatar.
        {loginResponse.data.data.employeeImage?(
          setImage('https://hrm-innovigent.herokuapp.com/'+loginResponse.data.data.employeeImage.imagePath)
        ):(
          setImage(imageName.default)
        )}
        setemployeeTypeId(loginResponse.data.data.employee.employeeTypeId);
        console.log(loginResponse);
        // Clear RFID field
        setrfid('');
        setSucc("");
        setDisabled(false);
      } catch(err) {
        setSucc("");
        // Show the backend's message when one is provided.
        err.response.data.message && setErr(err.response.data.message)
        setDisabled(false);
        setrfid('');
      }
      isEffect = true;
      setrfid('');
    }
  }

  return (
    <>
      <CRow className="justify-content-center">
        <CCol xs="12" sm="8" className="col-3 text-center">
          <h1 className="text-center" style={{ padding: "10px 20px", textAlign: "center", color: "black"}}>
            SMART People Management System
          </h1>
          <div>
            <br></br>
          </div>
        </CCol>
        <CCol xs="12" sm="3" className=" text-right">
          <Link to="/login" style={{ padding: "10px 20px", textAlign: "center"}}>
            <CButton color="primary" className="mt-3" active tabIndex={-1}>Go to Login </CButton>
          </Link>
        </CCol>
      </CRow>
      <CRow className="justify-content-center">
        <CCol xs="12" sm="8">
          <CCard>
            <CCardHeader>
              Employee Data
            </CCardHeader>
            <CCardBody>
              {/* Spinner shown while the movement POST is in flight. */}
              <div style={{ padding: "10px 20px", textAlign: "center"}}>
                {succ ? (
                  <CButton disabled size="lg" >
                    <CSpinner component="span" size="m" aria-hidden="true" color="success" />
                    {succ}
                  </CButton>
                ) : null}
              </div>
              {err ? (
                <CAlert color="info" closeButton fade={5}>
                  {err}
                </CAlert>
              ) : null}
              <CForm action="submit" method="post" className="form-horizontal">
                <CFormGroup row>
                  <CCol md="3">
                    <CLabel htmlFor="text-input">RFID Number</CLabel>
                  </CCol>
                  <CCol xs="12" md="9">
                    <CInput disabled={disabled} id="password" name="text-input" placeholder="RFID No" autoFocus value={rfid} onChange={(e) => setrfid(e.target.value)} onKeyUp={(e) =>testIt(e.target.value)} />
                    <CFormText>Scan RFID Card</CFormText>
                  </CCol>
                </CFormGroup>
                <CFormGroup row>
                  <CCol md="3">
                    <CLabel>Name</CLabel>
                  </CCol>
                  <CCol xs="12" md="9">
                    <p className="form-control-static">{firstName}</p>
                    {/* <label>First Name :</label> <input type="text" value={firstName} onChange={(e) => setfirstName(e.target.value)}></input> */}
                  </CCol>
                </CFormGroup>
                <CFormGroup row>
                  <CCol md="3">
                    <CLabel>Shift</CLabel>
                  </CCol>
                  <CCol xs="12" md="9">
                    <p className="form-control-static">{ShiftName}</p>
                  </CCol>
                </CFormGroup>
                <CFormGroup row>
                  <CCol md="3">
                    <CLabel>Employee Type</CLabel>
                  </CCol>
                  <CCol xs="12" md="9">
                    <p className="form-control-static">{employeeTypeId} </p>
                  </CCol>
                </CFormGroup>
              </CForm>
            </CCardBody>
          </CCard>
        </CCol>
        <CCol xs="12" sm="3">
          <CCard>
            <CCardHeader>
              Employee
              Image
            </CCardHeader>
            <CCol xs="12" sm="10" md="9">
              <CCard >
                <CCardBody>
                  {Image?(
                    <img src={Image} height="180px" alt={"img"}/>
                  ):(
                    <img src={imageName.default} height="180px" alt={"img"}/>
                  )}
                </CCardBody>
              </CCard>
            </CCol>
          </CCard>
        </CCol>
      </CRow>
    </>
  )
}

export default Tables
|
homobel/makebird-node | test/projects/large/b855.js | //~ name b855
alert(b855);
//~ component b856.js
|
HywTony/stetho | stetho-urlconnection/src/main/java/com/facebook/stetho/urlconnection/URLConnectionInspectorRequest.java | package com.facebook.stetho.urlconnection;
import com.facebook.stetho.inspector.network.NetworkEventReporter;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
/**
 * Adapts a configured {@link HttpURLConnection} (plus an optional request
 * body entity) to Stetho's {@link NetworkEventReporter.InspectorRequest}
 * so the outgoing request can be displayed in the DevTools Network tab.
 */
class URLConnectionInspectorRequest
    extends URLConnectionInspectorHeaders
    implements NetworkEventReporter.InspectorRequest {
  // Unique id correlating this request with its response events.
  private final String mRequestId;
  // Human-readable label for the caller issuing the request.
  private final String mFriendlyName;
  @Nullable private final SimpleRequestEntity mRequestEntity;
  private final String mUrl;
  private final String mMethod;
  // Lazy body cache: the entity is serialized at most once (see body()).
  private boolean mBodyRead;
  @Nullable private byte[] mBody;

  /**
   * Snapshots headers, URL and method from {@code configuredRequest} at
   * construction time, before the connection's state can change.
   */
  public URLConnectionInspectorRequest(
      String requestId,
      String friendlyName,
      HttpURLConnection configuredRequest,
      @Nullable SimpleRequestEntity requestEntity) {
    super(Util.convertHeaders(configuredRequest.getRequestProperties()));
    mRequestId = requestId;
    mFriendlyName = friendlyName;
    mRequestEntity = requestEntity;
    mUrl = configuredRequest.getURL().toString();
    mMethod = configuredRequest.getRequestMethod();
  }

  @Override
  public String id() {
    return mRequestId;
  }

  @Override
  public String friendlyName() {
    return mFriendlyName;
  }

  /** No extra disambiguator is used for URLConnection requests. */
  @Override
  public Integer friendlyNameExtra() {
    return null;
  }

  @Override
  public String url() {
    return mUrl;
  }

  @Override
  public String method() {
    return mMethod;
  }

  /**
   * Serializes the request entity to bytes, caching the result so repeated
   * calls do not re-serialize. Returns null when there is no body.
   */
  @Nullable
  @Override
  public byte[] body() throws IOException {
    if (mRequestEntity != null) {
      if (!mBodyRead) {
        mBodyRead = true;
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        mRequestEntity.writeTo(out);
        mBody = out.toByteArray();
      }
      return mBody;
    } else {
      return null;
    }
  }
}
|
sophiemarceau/qtxzs_iOS | wecoo/Main/My/Controller/PlatformfeedbackViewController.h | <gh_stars>0
//
// PlatformfeedbackViewController.h
// wecoo
//
// Created by 屈小波 on 2017/4/25.
// Copyright © 2017年 屈小波. All rights reserved.
//
#import "BaseViewController.h"
/// Screen for submitting platform feedback.
@interface PlatformfeedbackViewController : BaseViewController

/// Identifier carried over from the originating search/context.
/// NOTE(review): exact semantics not visible in this header — confirm
/// against the callers that set it.
@property(nonatomic,strong)NSString *search_id;

@end
|
ThiliniF/marqeta-api-sandbox-testing | src/main/java/com/nimi/qe/api/marqeta/request/Transactions.java | <gh_stars>0
package com.nimi.qe.api.marqeta.request;
import com.nimi.qe.api.util.RequestUtil;
import io.restassured.response.Response;
/**
 * Thin request helper for Marqeta transaction endpoints.
 */
public class Transactions {

    /** Utility class — not meant to be instantiated. */
    private Transactions() {
    }

    /**
     * Creates a transaction by POSTing the given payload to the given URL.
     *
     * @param requestBody payload to send in the POST body
     * @param url         endpoint to post to
     * @return the REST-assured {@link Response}
     */
    public static Response createTransaction(Object requestBody, String url) {
        return RequestUtil.sendPOSTRequest(requestBody, url);
    }
}
|
geirivarjerstad/WhoOwesWhat-Ionic | www/test/PersonStore/PersonStore_SavePerson.tests.js | describe("Unittest.PersonStore: When adding a Person", function () {
var $scope, $q, PersonStore;
beforeEach(function () {
module("whooweswhat");
module(MockProvideFactory.CordovaSQLiteWrapperMock);
module(MockProvideFactory.SQLiteDataProviderMock);
});
it('should create a Person successfully', inject(function ($rootScope, CordovaSQLiteWrapper, SQLiteDataProvider, PersonStore) {
$scope = $rootScope.$new();
// call through to the Mock implementation
spyOn(SQLiteDataProvider, "getDatabase").and.callThrough();
spyOn(CordovaSQLiteWrapper, "execute").and.callThrough();
var personId = null,
personGuid = "01cebb3d-53fa-4768-9561-168c6f2b63be",
displayname = "GeirTest",
email = "<EMAIL>",
mobil = "123456789",
username = "myusername";
var person = new Person(personId, personGuid, displayname, username, email, mobil, false, false);
var expectedBinding = [personId, personGuid, displayname, username, email, mobil, false, false];
var expectSuccess = false;
PersonStore.createPerson(person)
.then(function () {
expectSuccess = true;
})
.catch(function (reason) {
// expect no errors
expect(reason).toBeNull();
});
$scope.$digest();
expect(expectSuccess).toBeTruthy();
expect(SQLiteDataProvider.getDatabase).toHaveBeenCalled();
expect(CordovaSQLiteWrapper.execute).toHaveBeenCalledWith(
"wow_unittest.db",
'INSERT INTO Person (personId, personGuid, displayname, username, email, mobil, existsOnServer, isDeleted) VALUES (?,?,?,?,?,?,?,?)',
expectedBinding)
}));
it('should update a Person successfully', inject(function ($rootScope, CordovaSQLiteWrapper, SQLiteDataProvider, PersonStore) {
$scope = $rootScope.$new();
// call through to the Mock implementation
spyOn(SQLiteDataProvider, "getDatabase").and.callThrough();
spyOn(CordovaSQLiteWrapper, "execute").and.callThrough();
var personId = 1,
personGuid = "01cebb3d-53fa-4768-9561-168c6f2b63be",
displayname = "GeirTest",
email = "<EMAIL>",
mobil = "123456789",
username = "myusername";
var person = new Person(personId, personGuid, displayname, username, email, mobil, false, false);
var expectedBinding = [personGuid, displayname, username, email, mobil, false, false];
var expectSuccess = false;
PersonStore.updatePerson(person)
.then(function () {
expectSuccess = true;
})
.catch(function (reason) {
// expect no errors
expect(reason).toBeNull();
});
$scope.$digest();
expect(expectSuccess).toBeTruthy();
expect(SQLiteDataProvider.getDatabase).toHaveBeenCalled();
expect(CordovaSQLiteWrapper.execute).toHaveBeenCalledWith(
"wow_unittest.db",
'UPDATE Person SET personGuid = ?, displayname = ?, username = ?, email = ?, mobil = ?, existsOnServer = ?, isDeleted = ? WHERE personId = ' + personId,
expectedBinding)
}));
}); |
PetAdote/petAdote_REST_API | api/models/Anuncio.js | <reponame>PetAdote/petAdote_REST_API
// Importações.
const {DataTypes, Model, Sequelize} = require('sequelize');
// Instância da conexão com a Database.
const {connection} = require('../../configs/database');
// Models das Associações (Chaves Estrangeiras).
const Animal = require('./Animal');
const FotoAnimal = require('./FotoAnimal');
const Usuario = require('./Usuario');
// Definição do Model 'Anuncio' para 'tbl_anuncio'.
// Model 'Anuncio' (advert) mapped onto 'tbl_anuncio'.
// Fix: the `references.model` options previously used `Model.Animal`,
// `Model.Usuario` and `Model.FotoAnimal` — nonexistent properties on
// Sequelize's base `Model` class (all `undefined`). They now reference the
// models imported at the top of this file, as the Sequelize docs allow.
const Anuncio = connection.define('Anuncio', {
    cod_anuncio: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false, unique: true, autoIncrement: true,
        primaryKey: true
    },
    // FK -> tbl_animal; `unique` means one advert per animal.
    cod_animal: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false, unique: true,
        references: { model: Animal, key: 'cod_animal' }
    },
    // FK -> tbl_usuario (the advertiser).
    cod_anunciante: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false,
        references: { model: Usuario, key: 'cod_usuario' }
    },
    // FK -> the animal photo shown on the advert.
    uid_foto_animal: { type: DataTypes.STRING(255), allowNull: false,
        references: { model: FotoAnimal, key: 'cod_foto' }
    },
    qtd_visualizacoes: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false, defaultValue: 0 },
    qtd_avaliacoes: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false, defaultValue: 0 },
    qtd_candidaturas: { type: DataTypes.INTEGER.UNSIGNED, allowNull: false, defaultValue: 0 },
    estado_anuncio: { type: DataTypes.ENUM('Aberto', 'Concluido', 'Fechado'), allowNull: false, defaultValue: 'Aberto' },
    data_criacao: { type: DataTypes.DATE, allowNull: false, defaultValue: Sequelize.NOW },
    data_modificacao: { type: DataTypes.DATE, allowNull: false, defaultValue: Sequelize.NOW }
}, {
    tableName: 'tbl_anuncio',
});

// Associations (FKs) — these drive eager loading and instance helpers.
Anuncio.belongsTo(Animal, {
    foreignKey: {
        name: 'cod_animal',
        allowNull: false
    }
});

Animal.hasOne(Anuncio, {
    foreignKey: {
        name: 'cod_animal',
        allowNull: false
    }
});

Anuncio.belongsTo(FotoAnimal, {
    foreignKey: {
        name: 'uid_foto_animal',
        allowNull: false
    }
});

FotoAnimal.hasOne(Anuncio, {
    foreignKey: {
        name: 'uid_foto_animal',
        allowNull: false
    }
})

Anuncio.belongsTo(Usuario, {
    foreignKey: {
        name: 'cod_anunciante',
        allowNull: false
    }
});

Usuario.hasMany(Anuncio, {
    foreignKey: {
        name: 'cod_anunciante',
        allowNull: false
    }
})

// Exportação.
module.exports = Anuncio;
mowenGithub/Clutch | clutchrpc/db.py | <filename>clutchrpc/db.py
# Copyright 2012 Twitter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import uuid
import psycopg2
import pytz
import simplejson
from gevent.pool import Pool
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'clutch.settings')
from django.contrib.auth.hashers import check_password
from clutchrpc import utils
from clutchrpc.pg2 import db
def get_app_from_key(key):
    """Look up the app that owns the given API key.

    Only keys whose status is 'active' resolve; returns the app row
    (dict) or None for unknown/revoked keys.
    """
    query = """
    SELECT A.*
    FROM dashboard_app A
    LEFT JOIN dashboard_appkey K ON (K.app_id = A.id)
    WHERE K.key = %s AND K.status = 'active'
    """
    return db.fetchone(query, [key])
def get_user_from_creds(username, password):
    """Authenticate by case-insensitive username and password.

    Returns the user row on success, otherwise None (unknown user or
    bad password).
    """
    query = "SELECT U.* FROM auth_user U WHERE UPPER(U.username) = UPPER(%s)"
    row = db.fetchone(query, [username])
    if row is not None and check_password(password, row['password']):
        return row
    return None
def get_user_from_id(user_id):
    """Fetch a user row by primary key; None when absent."""
    return db.fetchone("SELECT U.* FROM auth_user U WHERE U.id = %s",
                       [user_id])
def get_app_from_user_and_slug(user_id, slug):
    """Return the app with `slug` (case-insensitive) among the apps the
    user is a member of, or None."""
    query = """
    SELECT A.*
    FROM dashboard_app A
    WHERE UPPER(A.slug) = UPPER(%s) AND A.id IN (
        SELECT M.app_id FROM dashboard_member M where M.user_id = %s
    )
    """
    return db.fetchone(query, [slug, user_id])
def get_latest_app_version(app_id):
    """Highest version number recorded for the app, or None if the app
    has no versions yet."""
    query = """
    SELECT V.version
    FROM dashboard_version V
    WHERE V.app_id = %s
    ORDER BY v.version DESC
    LIMIT 1
    """
    row = db.fetchone(query, [app_id])
    return None if row is None else row['version']
def get_app_version_for_bundle_version(app_id, bundle_version):
    """Map a client bundle version (e.g. "1.2.3") to the newest matching
    app version number, or 0 when nothing matches.

    Each dotted component is zero-padded to five digits
    ("00001.00002.00003") so lexicographic comparison in SQL behaves
    like numeric comparison. Malformed bundle versions normalize to ''
    and only match rows with unset bounds.
    """
    # NOTE(review): the comparison directions (max_bundle <= norm and
    # min_bundle >= norm) look inverted for a [min, max] range check —
    # confirm against the dashboard_version schema/population before
    # relying on this.
    SQL = """
    SELECT V.version
    FROM dashboard_version V
    WHERE
        app_id = %s AND
        (V.max_bundle <= %s OR V.max_bundle = '') AND
        (V.min_bundle >= %s OR V.min_bundle = '')
    ORDER BY V.version DESC
    """
    # Normalize the bundle version
    try:
        split_bundle = bundle_version.split('.')
        if not len(split_bundle) == 3:
            raise ValueError('Bundle is not three points')
        # int() raises ValueError for non-numeric components, which the
        # except below converts into the "no bounds" sentinel ''.
        split_bundle = map(int, split_bundle)
        norm = '.'.join([str(i).zfill(5) for i in split_bundle])
    except ValueError:
        norm = ''
    resp = db.fetchone(SQL, [app_id, norm, norm])
    if resp:
        return resp['version']
    return 0
def create_app_version(app_id, app_version):
    """Record a new version row for the given app."""
    query = """
    INSERT INTO dashboard_version (app_id, version) VALUES (%s, %s)
    """
    db.execute(query, [app_id, app_version])
def get_device_for_udid_and_app(udid, app_id):
    """Find the device with `udid` owned by any member of `app_id`.

    The row includes the owner's username and email; returns None when
    no member of the app owns that device.
    """
    query = """
    SELECT
        D.*,
        U.username,
        U.email
    FROM dashboard_device D
    LEFT JOIN auth_user U ON (U.id = D.user_id)
    WHERE D.udid = %s AND D.user_id IN (
        SELECT M.user_id FROM dashboard_member M WHERE M.app_id = %s
    )
    LIMIT 1
    """
    return db.fetchone(query, [udid, app_id])
def get_dev_mode(app_id, user_id, date_updated):
    """Development-mode row for (app, user) updated strictly after
    `date_updated`, or None."""
    query = """
    SELECT D.*
    FROM dashboard_developmentmode D
    WHERE D.app_id = %s AND D.user_id = %s AND D.date_updated > %s
    """
    return db.fetchone(query, [app_id, user_id, date_updated])
def delete_dev_modes_for_user_and_app(user_id, app_id):
    """Remove every development-mode row for the given user/app pair."""
    query = """
    DELETE
    FROM dashboard_developmentmode D
    WHERE D.user_id = %s AND D.app_id = %s
    """
    db.execute(query, [user_id, app_id])
def create_or_update_dev_mode(app_id, user_id, url, toolbar):
    """Upsert the development-mode settings for (app_id, user_id).

    The INSERT is attempted first; a unique-constraint violation means
    the row already exists, and it is UPDATEd instead. This avoids the
    race inherent in a SELECT-then-write upsert.
    """
    UPDATE_SQL = """
        UPDATE dashboard_developmentmode
        SET url = %s, toolbar = %s, date_updated = %s
        WHERE app_id = %s AND user_id = %s
    """
    INSERT_SQL = """
        INSERT INTO dashboard_developmentmode
        (app_id, user_id, url, toolbar, date_updated, date_created)
        VALUES (%s, %s, %s, %s, %s, %s)
    """
    now = utils.get_now()
    try:
        db.execute(INSERT_SQL, [app_id, user_id, url, toolbar, now, now])
    except psycopg2.IntegrityError:
        # Row exists: refresh url/toolbar and bump date_updated only.
        db.execute(UPDATE_SQL, [url, toolbar, now, app_id, user_id])
def get_experiment(app_id, slug):
    """A/B experiment for the app matching `slug` case-insensitively,
    or None."""
    query = """
    SELECT E.*
    FROM ab_experiment E
    WHERE E.app_id = %s AND UPPER(E.slug) = UPPER(%s)
    """
    return db.fetchone(query, [app_id, slug])
def get_experiments_for_app(app_id):
    """All A/B experiments belonging to the app."""
    return db.fetchall("""SELECT E.* FROM ab_experiment E WHERE E.app_id = %s""",
                       [app_id])
def get_variations_for_app(app_id):
    """All variations across every experiment owned by the app."""
    query = """
    SELECT V.*
    FROM ab_variation V
    WHERE V.experiment_id IN (
        SELECT E.id FROM ab_experiment E WHERE E.app_id = %s
    )
    """
    return db.fetchall(query, [app_id])
def add_bulk_stats_logs(udid, api_version, app_version, bundle_version,
                        app_key, platform, logs):
    """
    Adds bulk stats logs to the database.

    For every log entry this inserts the raw row into stats_log, then
    fans out (via a gevent pool) per-period aggregate updates for
    hour/day/month/year unique-visitor and view counters. Duplicate
    logs (same uuid) are silently skipped via the unique constraint.
    """
    LOG_INSERT_SQL = """
        INSERT INTO stats_log (
            timestamp, action, data, udid, api_version, app_version,
            bundle_version, app_key, uuid, platform
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    """
    # The bare %s in the table name is filled with the period suffix
    # ('hour'/'day'/'month'/'year'); %%s escapes to %s for the driver's
    # parameter binding.
    UNIQUE_INSERT_SQL = """
        INSERT INTO stats_unique%s (app_id, udid, platform, new, timestamp)
        VALUES (%%s, %%s, %%s, %%s, %%s)
    """
    UNIQUE_ALLTIME_INSERT_SQL = """
        INSERT INTO stats_uniquealltime (app_id, udid, platform)
        VALUES (%s, %s, %s)
    """
    VIEW_UPDATE_SQL = """
        UPDATE stats_view%s
        SET views = views + 1
        WHERE app_id = %%s AND platform = %%s AND timestamp = %%s
    """
    VIEW_INSERT_SQL = """
        INSERT INTO stats_view%s (app_id, platform, timestamp, views)
        VALUES (%%s, %%s, %%s, 1)
    """
    VIEW_SLUG_UPDATE_SQL = """
        UPDATE stats_viewslug%s
        SET views = views + 1
        WHERE app_id = %%s AND platform = %%s AND timestamp = %%s AND slug = %%s
    """
    VIEW_SLUG_INSERT_SQL = """
        INSERT INTO stats_viewslug%s (app_id, platform, timestamp, views, slug)
        VALUES (%%s, %%s, %%s, 1, %%s)
    """
    app = get_app_from_key(app_key)
    if not app:
        return
    app_id = app['id']
    pool = Pool(10)

    def _coro0(log):
        # One coroutine per log entry: insert the raw row first; an
        # IntegrityError on the uuid means this log was already recorded
        # and the whole entry is skipped.
        try:
            db.execute(LOG_INSERT_SQL, [
                log['ts'],
                log['action'],
                simplejson.dumps(log['data']),
                udid,
                api_version,
                app_version,
                bundle_version,
                app_key,
                log['uuid'],
                platform,
            ])
        except psycopg2.IntegrityError:
            return
        # Don't care about disappearing in aggregate yet
        if log['action'] == 'viewDidDisappear':
            return
        slug = log['data']['slug']
        # Truncate the timestamp to each aggregation bucket boundary.
        ts = datetime.datetime.utcfromtimestamp(log['ts']).replace(
            tzinfo=pytz.utc)
        hour = ts.replace(minute=0, second=0, microsecond=0)
        day = hour.replace(hour=0)
        month = day.replace(day=1)
        year = month.replace(month=1)
        # First-ever sighting of this device for the app? The insert's
        # unique constraint answers atomically.
        try:
            db.execute(UNIQUE_ALLTIME_INSERT_SQL,
                       [app_id, udid, platform])
            new = True
        except psycopg2.IntegrityError:
            new = False
        pt = zip(('hour', 'day', 'month', 'year'), (hour, day, month, year))
        for period, timestamp in pt:
            pool.spawn_link_exception(_coro1, period, timestamp, slug, new)
            pool.spawn_link_exception(_coro2, period, timestamp, slug)
            pool.spawn_link_exception(_coro3, period, timestamp, slug)

    def _coro1(period, timestamp, slug, new):
        # Per-period unique visitor; duplicate insert means already counted.
        try:
            db.execute(UNIQUE_INSERT_SQL % (period,),
                       [app_id, udid, platform, new, timestamp])
        except psycopg2.IntegrityError:
            pass

    def _coro2(period, timestamp, slug):
        # Per-period view counter: insert-then-update upsert.
        args = [app_id, platform, timestamp]
        try:
            db.execute(VIEW_INSERT_SQL % (period,), args)
        except psycopg2.IntegrityError:
            db.execute(VIEW_UPDATE_SQL % (period,), args)

    def _coro3(period, timestamp, slug):
        # Per-period per-slug view counter: insert-then-update upsert.
        args = [app_id, platform, timestamp, slug]
        try:
            db.execute(VIEW_SLUG_INSERT_SQL % (period,), args)
        except psycopg2.IntegrityError:
            db.execute(VIEW_SLUG_UPDATE_SQL % (period,), args)

    for log in logs:
        pool.spawn_link_exception(_coro0, log)
    # Block until every spawned coroutine (including fan-outs) finishes.
    pool.join()
def add_bulk_ab_logs(udid, api_version, app_version, bundle_version, app_key,
                     platform, logs):
    """
    Adds bulk ab testing logs to the database.

    Inserts each raw log row, tracks per-month uniques, lazily creates
    Experiment/Variation rows the first time a client reports them, and
    records trial start ('test') and completion ('goal') events.

    Fix: auto-created variation names previously used
    ``'Test ' + ('ABC...'[i],)`` — string + tuple, a TypeError raised the
    first time a new experiment's variations were created. Now formatted
    with the %% operator.
    """
    LOG_INSERT_SQL = """
        INSERT INTO ab_log (
            timestamp, action, data, udid, api_version, app_version,
            bundle_version, app_key, uuid, platform
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    """
    UNIQUE_INSERT_SQL = """
        INSERT INTO ab_uniquemonth (uuid, app_id, udid, month, date_created)
        VALUES (%s, %s, %s, %s, %s)
    """
    EXP_INSERT_SQL = """
        INSERT INTO ab_experiment
        (app_id, name, slug, has_data, num_choices, enabled, date_created)
        VALUES (%s, %s, %s, %s, %s, %s, %s)
    """
    EXP_UPDATE_SQL = """
        UPDATE ab_experiment SET num_choices = %s WHERE id = %s
    """
    VARIATION_INSERT_SQL = """
        INSERT INTO ab_variation
        (experiment_id, weight, num, name, data, date_created)
        VALUES (%s, %s, %s, %s, %s, %s)
    """
    TRIAL_INSERT_SQL = """
        INSERT INTO ab_trial
        (uuid, udid, app_id, experiment_id, date_created, date_started,
        date_completed, choice, goal_reached)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
    """
    TRIAL_UPDATE_SQL = """
        UPDATE ab_trial
        SET date_completed = %s, goal_reached = %s
        WHERE udid = %s AND experiment_id = %s
    """
    app = get_app_from_key(app_key)
    if not app:
        return
    app_id = app['id']
    now = utils.get_now()

    def insert_ab_log(log):
        try:
            data = log['data']
        except KeyError:
            # TODO: Log error somewhere?
            return
        # Raw log row; a duplicate (uuid) insert means this log was
        # already processed, so skip the entry entirely.
        try:
            db.execute(LOG_INSERT_SQL, [
                log['ts'],
                data['action'],
                simplejson.dumps(log['data']),
                udid,
                api_version,
                app_version,
                bundle_version,
                app_key,
                log['uuid'],
                platform,
            ])
        except psycopg2.IntegrityError:
            return
        # Track monthly uniques; the unique constraint dedupes.
        ts = datetime.datetime.utcfromtimestamp(log['ts']).replace(
            tzinfo=pytz.utc)
        month = ts.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        try:
            db.execute(UNIQUE_INSERT_SQL,
                       [str(uuid.uuid1()), app_id, udid, month, now])
        except psycopg2.IntegrityError:
            pass
        # Don't care about disappearing in aggregate yet
        if data['action'] == 'failure':
            return
        # Lazily create the experiment the first time a client reports it.
        experiment = get_experiment(app_id, data['name'])
        if experiment is None:
            if 'has_data' not in data:
                return
            db.execute(EXP_INSERT_SQL, [
                app_id,
                'Experiment for ' + data['name'],
                data['name'],
                data['has_data'],
                0,
                True,
                now,
            ])
            experiment = get_experiment(app_id, data['name'])
        if 'num_choices' in data:
            if data['num_choices'] != experiment['num_choices']:
                # First update the experiment object
                db.execute(EXP_UPDATE_SQL,
                           [data['num_choices'], experiment['id']])
                # Now create any un-created variation objects
                for i in xrange(data['num_choices']):
                    try:
                        # BUG FIX: was 'Test ' + (<letter>,) which raised
                        # TypeError (str + tuple).
                        name = 'Test %s' % ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[i],)
                        db.execute(VARIATION_INSERT_SQL, [
                            experiment['id'],
                            0.5 / data['num_choices'],
                            i + 1,
                            name,
                            '{\n}' if experiment['has_data'] else '',
                            now,
                        ])
                    except psycopg2.IntegrityError:
                        pass
            # If it's one of the 'num-choices' actions, we've done that
            # already so we can continue on.
            if data['action'] == 'num-choices':
                return
        if data['action'] == 'test':
            try:
                dt = datetime.datetime.utcfromtimestamp(log['ts']).replace(
                    tzinfo=pytz.utc)
                db.execute(TRIAL_INSERT_SQL, [
                    str(uuid.uuid1()),
                    udid,
                    app_id,
                    experiment['id'],
                    now,
                    dt,
                    None,
                    data['choice'],
                    False,
                ])
            except psycopg2.IntegrityError:
                # What is the expected behavior here? If a trial is
                # already started for this user, then do we discard the old
                # one, or do we start a new one with a new timestamp and
                # choice? Do we update the started timestamp on the
                # current one? Not sure. For now we just continue and do
                # nothing.
                pass
            return
        if data['action'] == 'goal':
            dt = datetime.datetime.utcfromtimestamp(log['ts']).replace(
                tzinfo=pytz.utc)
            db.execute(TRIAL_UPDATE_SQL, [
                dt,
                True,
                udid,
                experiment['id'],
            ])

    for log in logs:
        insert_ab_log(log)
|
ruby-on-rust/syntax | src/grammar/__tests__/grammar-symbol-test.js | /**
* The MIT License (MIT)
* Copyright (c) 2015-present <NAME> <<EMAIL>>
*/
import GrammarSymbol from '../grammar-symbol';
import {EOF, EPSILON} from '../../special-symbols';
// Unit tests for GrammarSymbol: caching, terminal/non-terminal detection,
// quoting, comparison and special symbols.
// Fix: the original file contained two byte-identical 'raw symbol' test
// blocks; the copy-paste duplicate has been removed.
describe('grammar-symbol', () => {
  // get() memoizes instances per symbol text; the constructor does not.
  it('singleton', () => {
    expect(GrammarSymbol.get('A')).toBe(GrammarSymbol.get('A'));
  });

  it('instance', () => {
    expect(new GrammarSymbol('A')).not.toBe(new GrammarSymbol('A'));
  });

  // Quoted symbols (either quote style) are terminals; bare names are not.
  it('terminal', () => {
    expect(new GrammarSymbol(`"a"`).isTerminal()).toBe(true);
    expect(new GrammarSymbol(`'a'`).isTerminal()).toBe(true);
    expect(new GrammarSymbol(`A`).isTerminal()).toBe(false);
  });

  it('terminal value', () => {
    expect(new GrammarSymbol(`"a"`).getTerminalValue()).toBe('a');
  });

  // quotedTerminal() wraps the terminal in the opposite quote style.
  it('quoted terminal', () => {
    expect(new GrammarSymbol(`"a"`).quotedTerminal()).toBe(`'"a"'`);
    expect(new GrammarSymbol(`'a'`).quotedTerminal()).toBe(`"'a'"`);
  });

  it('non-terminal', () => {
    expect(new GrammarSymbol(`A`).isNonTerminal()).toBe(true);
    expect(new GrammarSymbol(`"a"`).isNonTerminal()).toBe(false);
    expect(new GrammarSymbol(`'a'`).isNonTerminal()).toBe(false);
  });

  // getSymbol() returns the raw text, quotes included for terminals.
  it('raw symbol', () => {
    expect(new GrammarSymbol(`A`).getSymbol()).toBe('A');
    expect(new GrammarSymbol(`"a"`).getSymbol()).toBe(`"a"`);
    expect(new GrammarSymbol(`'a'`).getSymbol()).toBe(`'a'`);
  });

  // isSymbol() compares raw text, so quote style matters.
  it('compare symbol', () => {
    expect(new GrammarSymbol(`A`).isSymbol('A')).toBe(true);
    expect(new GrammarSymbol(`A`).isSymbol('B')).toBe(false);
    expect(new GrammarSymbol(`A`).isSymbol(`'a'`)).toBe(false);
    expect(new GrammarSymbol(`"a"`).isSymbol(`"a"`)).toBe(true);
    expect(new GrammarSymbol(`'a'`).isSymbol(`'a'`)).toBe(true);
    expect(new GrammarSymbol(`'a'`).isSymbol(`'b'`)).toBe(false);
    expect(new GrammarSymbol(`'a'`).isSymbol(`"b"`)).toBe(false);
    expect(new GrammarSymbol(`'a'`).isSymbol('A')).toBe(false);
  });

  it('special symbols', () => {
    // EOF.
    expect(new GrammarSymbol(EOF).isEOF()).toBe(true);
    expect(GrammarSymbol.isEOF(EOF)).toBe(true);

    // Epsilon.
    expect(new GrammarSymbol(EPSILON).isEpsilon()).toBe(true);
    expect(GrammarSymbol.isEpsilon(EPSILON)).toBe(true);
  });
});
|
Limmen/chinook | java_backend/chinook_rest/src/main/java/limmen/business/representations/entity_representation/EmployeeRepresentation.java | <reponame>Limmen/chinook
package limmen.business.representations.entity_representation;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import limmen.integration.entities.Employee;
import org.springframework.hateoas.ResourceSupport;
/**
* JSON-Representation of a Employee entity.
*
* @author <NAME> on 2016-03-22.
*/
public class EmployeeRepresentation extends ResourceSupport {
    // The wrapped entity; serialized under the "employee" JSON property.
    private final Employee employee;

    /**
     * Class constructor. Initializes the "employee" property of the JSON representation.
     *
     * @param employee value of the employee property.
     */
    @JsonCreator
    public EmployeeRepresentation(@JsonProperty("employee") Employee employee) {
        this.employee = employee;
    }

    /**
     * Returns the wrapped Employee entity.
     *
     * @return the employee carried by this representation.
     */
    public Employee getEmployee() {
        return employee;
    }
}
|
UQ-RCC/nimrodg | nimrodg-master/src/main/java/au/edu/uq/rcc/nimrodg/master/ConfigListener.java | package au.edu.uq.rcc.nimrodg.master;
/**
 * Listener for master configuration changes, plus small static helpers
 * for clamping and parsing configuration values.
 */
public interface ConfigListener {

    /**
     * Invoked when a configuration value changes.
     *
     * @param key      the configuration key
     * @param oldValue the previous value
     * @param newValue the new value
     */
    void onConfigChange(String key, String oldValue, String newValue);

    /** Clamps {@code val} into the range [min, max]. */
    static long clamp(long val, long min, long max) {
        return Math.max(min, Math.min(max, val));
    }

    /** Clamps {@code val} into the range [min, max]. */
    static int clamp(int val, int min, int max) {
        return Math.max(min, Math.min(max, val));
    }

    /** Clamps {@code val} into the range [min, max]. */
    static float clamp(float val, float min, float max) {
        return Math.max(min, Math.min(max, val));
    }

    /**
     * Parses an unsigned long from a config string: {@code def} when the
     * value is unset (null), {@code old} when it fails to parse.
     */
    static long get(String val, long old, long def) {
        if (val != null) {
            try {
                return Long.parseUnsignedLong(val);
            } catch (NumberFormatException ignored) {
                return old;
            }
        }
        return def;
    }

    /** Same as {@link #get(String, long, long)}, clamped to [min, max]. */
    static long get(String val, long old, long def, long min, long max) {
        return clamp(get(val, old, def), min, max);
    }

    /**
     * Parses an unsigned int from a config string: {@code def} when the
     * value is unset (null), {@code old} when it fails to parse.
     */
    static int get(String val, int old, int def) {
        if (val != null) {
            try {
                return Integer.parseUnsignedInt(val);
            } catch (NumberFormatException ignored) {
                return old;
            }
        }
        return def;
    }

    /** Same as {@link #get(String, int, int)}, clamped to [min, max]. */
    static int get(String val, int old, int def, int min, int max) {
        return clamp(get(val, old, def), min, max);
    }
}
|
alexandermerritt/dragonfly | crypto/openssh/sshbuf-getput-basic.c | <gh_stars>0
/* $OpenBSD: sshbuf-getput-basic.c,v 1.1 2014/04/30 05:29:56 djm Exp $ */
/*
* Copyright (c) 2011 <NAME>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define SSHBUF_INTERNAL
#include "includes.h"
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "ssherr.h"
#include "sshbuf.h"
/*
 * Remove the next 'len' bytes from the front of 'buf', copying them into
 * 'v' when v is non-NULL (a NULL v simply discards the bytes).
 * Returns 0 on success or a negative SSH_ERR_* value on failure.
 */
int
sshbuf_get(struct sshbuf *buf, void *v, size_t len)
{
	const u_char *p = sshbuf_ptr(buf);
	int r;

	if ((r = sshbuf_consume(buf, len)) < 0)
		return r;
	if (v != NULL)
		/* relies on sshbuf_consume() leaving the consumed bytes readable */
		memcpy(v, p, len);
	return 0;
}
/*
 * sshbuf_get_u64/u32/u16/u8: decode a big-endian unsigned integer of the
 * given width from the front of 'buf' and advance past it.  A NULL valp
 * discards the value.  Each returns 0 on success or a negative SSH_ERR_*
 * value when fewer bytes than the integer width remain.
 */
int
sshbuf_get_u64(struct sshbuf *buf, u_int64_t *valp)
{
	const u_char *p = sshbuf_ptr(buf);
	int r;

	if ((r = sshbuf_consume(buf, 8)) < 0)
		return r;
	if (valp != NULL)
		*valp = PEEK_U64(p);
	return 0;
}

int
sshbuf_get_u32(struct sshbuf *buf, u_int32_t *valp)
{
	const u_char *p = sshbuf_ptr(buf);
	int r;

	if ((r = sshbuf_consume(buf, 4)) < 0)
		return r;
	if (valp != NULL)
		*valp = PEEK_U32(p);
	return 0;
}

int
sshbuf_get_u16(struct sshbuf *buf, u_int16_t *valp)
{
	const u_char *p = sshbuf_ptr(buf);
	int r;

	if ((r = sshbuf_consume(buf, 2)) < 0)
		return r;
	if (valp != NULL)
		*valp = PEEK_U16(p);
	return 0;
}

int
sshbuf_get_u8(struct sshbuf *buf, u_char *valp)
{
	const u_char *p = sshbuf_ptr(buf);
	int r;

	if ((r = sshbuf_consume(buf, 1)) < 0)
		return r;
	if (valp != NULL)
		*valp = (u_int8_t)*p;
	return 0;
}
/*
 * Extract an SSH wire-format string (u32 length prefix + data) from 'buf'.
 * On success *valp receives a freshly allocated, NUL-terminated copy of
 * the data (caller must free) and *lenp the length excluding the added
 * terminator.  Either output may be NULL; both are zeroed up front so
 * they hold well-defined values on error.
 */
int
sshbuf_get_string(struct sshbuf *buf, u_char **valp, size_t *lenp)
{
	const u_char *val;
	size_t len;
	int r;

	if (valp != NULL)
		*valp = NULL;
	if (lenp != NULL)
		*lenp = 0;
	if ((r = sshbuf_get_string_direct(buf, &val, &len)) < 0)
		return r;
	if (valp != NULL) {
		/* +1 for the NUL terminator appended below */
		if ((*valp = malloc(len + 1)) == NULL) {
			SSHBUF_DBG(("SSH_ERR_ALLOC_FAIL"));
			return SSH_ERR_ALLOC_FAIL;
		}
		memcpy(*valp, val, len);
		(*valp)[len] = '\0';
	}
	if (lenp != NULL)
		*lenp = len;
	return 0;
}
/*
 * Extract an SSH wire-format string from 'buf', returning a pointer into
 * the buffer's own storage instead of a copy.  *valp is only valid until
 * the buffer is next modified.  Either output may be NULL; both are
 * zeroed up front so they hold well-defined values on error.
 */
int
sshbuf_get_string_direct(struct sshbuf *buf, const u_char **valp, size_t *lenp)
{
	size_t len;
	const u_char *p;
	int r;

	if (valp != NULL)
		*valp = NULL;
	if (lenp != NULL)
		*lenp = 0;
	if ((r = sshbuf_peek_string_direct(buf, &p, &len)) < 0)
		return r;
	if (valp != NULL)	/* was "valp != 0": compare pointers against NULL */
		*valp = p;
	if (lenp != NULL)
		*lenp = len;
	/* consume the length prefix and the data now that p/len are recorded */
	if (sshbuf_consume(buf, len + 4) != 0) {
		/* Shouldn't happen: peek above already validated the length */
		SSHBUF_DBG(("SSH_ERR_INTERNAL_ERROR"));
		SSHBUF_ABORT();
		return SSH_ERR_INTERNAL_ERROR;
	}
	return 0;
}
/*
 * Inspect the SSH wire-format string at the front of 'buf' without
 * consuming it.  On success *valp points at the string payload inside the
 * buffer and *lenp holds its length.  Returns SSH_ERR_MESSAGE_INCOMPLETE
 * when the length prefix or payload is not fully present, and
 * SSH_ERR_STRING_TOO_LARGE for lengths that could overflow the buffer's
 * maximum size.  Either output may be NULL.
 */
int
sshbuf_peek_string_direct(const struct sshbuf *buf, const u_char **valp,
    size_t *lenp)
{
	u_int32_t len;
	const u_char *p = sshbuf_ptr(buf);

	if (valp != NULL)
		*valp = NULL;
	if (lenp != NULL)
		*lenp = 0;
	if (sshbuf_len(buf) < 4) {
		SSHBUF_DBG(("SSH_ERR_MESSAGE_INCOMPLETE"));
		return SSH_ERR_MESSAGE_INCOMPLETE;
	}
	len = PEEK_U32(p);
	/* reject lengths where len + 4 would exceed SSHBUF_SIZE_MAX */
	if (len > SSHBUF_SIZE_MAX - 4) {
		SSHBUF_DBG(("SSH_ERR_STRING_TOO_LARGE"));
		return SSH_ERR_STRING_TOO_LARGE;
	}
	if (sshbuf_len(buf) - 4 < len) {
		SSHBUF_DBG(("SSH_ERR_MESSAGE_INCOMPLETE"));
		return SSH_ERR_MESSAGE_INCOMPLETE;
	}
	if (valp != NULL)	/* was "valp != 0": compare pointers against NULL */
		*valp = p + 4;
	if (lenp != NULL)
		*lenp = len;
	return 0;
}
/*
 * Extract an SSH wire-format string as a C string.  Rejects strings that
 * contain an embedded NUL anywhere other than the final byte
 * (SSH_ERR_INVALID_FORMAT).  On success *valp receives a freshly
 * allocated NUL-terminated copy (caller frees) and *lenp its length.
 * Either output may be NULL.
 */
int
sshbuf_get_cstring(struct sshbuf *buf, char **valp, size_t *lenp)
{
	size_t len;
	const u_char *p, *z;
	int r;

	if (valp != NULL)
		*valp = NULL;
	if (lenp != NULL)
		*lenp = 0;
	if ((r = sshbuf_peek_string_direct(buf, &p, &len)) != 0)
		return r;
	/* Allow a \0 only at the end of the string */
	if (len > 0 &&
	    (z = memchr(p , '\0', len)) != NULL && z < p + len - 1) {
		SSHBUF_DBG(("SSH_ERR_INVALID_FORMAT"));
		return SSH_ERR_INVALID_FORMAT;
	}
	if ((r = sshbuf_skip_string(buf)) != 0)
		return r;	/* propagate the real error (was a bare -1) */
	if (valp != NULL) {
		if ((*valp = malloc(len + 1)) == NULL) {
			SSHBUF_DBG(("SSH_ERR_ALLOC_FAIL"));
			return SSH_ERR_ALLOC_FAIL;
		}
		memcpy(*valp, p, len);
		(*valp)[len] = '\0';
	}
	if (lenp != NULL)
		*lenp = (size_t)len;
	return 0;
}
/*
 * Extract an SSH wire-format string from 'buf' and append its payload to
 * the sshbuf 'v'.  Returns 0 on success or a negative SSH_ERR_* value.
 */
int
sshbuf_get_stringb(struct sshbuf *buf, struct sshbuf *v)
{
	u_int32_t len;
	u_char *p;
	int r;

	/*
	 * Use sshbuf_peek_string_direct() to figure out if there is
	 * a complete string in 'buf' and copy the string directly
	 * into 'v'.
	 */
	if ((r = sshbuf_peek_string_direct(buf, NULL, NULL)) != 0 ||
	    (r = sshbuf_get_u32(buf, &len)) != 0 ||
	    (r = sshbuf_reserve(v, len, &p)) != 0 ||
	    (r = sshbuf_get(buf, p, len)) != 0)
		return r;
	return 0;
}
/*
 * Append 'len' raw bytes from 'v' to the end of 'buf'.
 * Returns 0 on success or a negative SSH_ERR_* value on failure.
 */
int
sshbuf_put(struct sshbuf *buf, const void *v, size_t len)
{
	u_char *p;
	int r;

	if ((r = sshbuf_reserve(buf, len, &p)) < 0)
		return r;
	memcpy(p, v, len);
	return 0;
}
/* Append the entire contents of sshbuf 'v' to the end of 'buf'. */
int
sshbuf_putb(struct sshbuf *buf, const struct sshbuf *v)
{
	return sshbuf_put(buf, sshbuf_ptr(v), sshbuf_len(v));
}
/*
 * printf-style append: format the arguments and append the resulting text
 * (without a trailing NUL) to 'buf'.  Thin varargs wrapper around
 * sshbuf_putfv().
 */
int
sshbuf_putf(struct sshbuf *buf, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = sshbuf_putfv(buf, fmt, ap);
	va_end(ap);
	return r;
}
/*
 * vprintf-style append.  Makes two vsnprintf passes: the first (on a
 * va_copy, since a va_list may not be reused) measures the output, the
 * second writes it into space reserved in the buffer.  The terminating
 * NUL written by vsnprintf is trimmed off afterwards so only the text
 * itself remains in 'buf'.
 */
int
sshbuf_putfv(struct sshbuf *buf, const char *fmt, va_list ap)
{
	va_list ap2;
	int r, len;
	u_char *p;

	va_copy(ap2, ap);
	if ((len = vsnprintf(NULL, 0, fmt, ap2)) < 0) {
		r = SSH_ERR_INVALID_ARGUMENT;
		goto out;
	}
	if (len == 0) {
		r = 0;
		goto out; /* Nothing to do */
	}
	va_end(ap2);
	va_copy(ap2, ap);	/* fresh copy for the writing pass */
	if ((r = sshbuf_reserve(buf, (size_t)len + 1, &p)) < 0)
		goto out;
	if ((r = vsnprintf((char *)p, len + 1, fmt, ap2)) != len) {
		r = SSH_ERR_INTERNAL_ERROR;
		goto out; /* Shouldn't happen */
	}
	/* Consume terminating \0 */
	if ((r = sshbuf_consume_end(buf, 1)) != 0)
		goto out;
	r = 0;
 out:
	va_end(ap2);
	return r;
}
/*
 * sshbuf_put_u64/u32/u16/u8: append an unsigned integer of the given
 * width in big-endian (network) byte order to the end of 'buf'.
 * Each returns 0 on success or a negative SSH_ERR_* value on failure.
 */
int
sshbuf_put_u64(struct sshbuf *buf, u_int64_t val)
{
	u_char *p;
	int r;

	if ((r = sshbuf_reserve(buf, 8, &p)) < 0)
		return r;
	POKE_U64(p, val);
	return 0;
}

int
sshbuf_put_u32(struct sshbuf *buf, u_int32_t val)
{
	u_char *p;
	int r;

	if ((r = sshbuf_reserve(buf, 4, &p)) < 0)
		return r;
	POKE_U32(p, val);
	return 0;
}

int
sshbuf_put_u16(struct sshbuf *buf, u_int16_t val)
{
	u_char *p;
	int r;

	if ((r = sshbuf_reserve(buf, 2, &p)) < 0)
		return r;
	POKE_U16(p, val);
	return 0;
}

int
sshbuf_put_u8(struct sshbuf *buf, u_char val)
{
	u_char *p;
	int r;

	if ((r = sshbuf_reserve(buf, 1, &p)) < 0)
		return r;
	p[0] = val;
	return 0;
}
/*
 * Append 'len' bytes from 'v' as an SSH wire-format string (u32 length
 * prefix followed by the data).  The explicit length check guards the
 * 'len + 4' arithmetic against overflowing the buffer's maximum size.
 */
int
sshbuf_put_string(struct sshbuf *buf, const void *v, size_t len)
{
	u_char *d;
	int r;

	if (len > SSHBUF_SIZE_MAX - 4) {
		SSHBUF_DBG(("SSH_ERR_NO_BUFFER_SPACE"));
		return SSH_ERR_NO_BUFFER_SPACE;
	}
	if ((r = sshbuf_reserve(buf, len + 4, &d)) < 0)
		return r;
	POKE_U32(d, len);
	memcpy(d + 4, v, len);
	return 0;
}
/* Append a NUL-terminated C string as an SSH wire-format string (no NUL). */
int
sshbuf_put_cstring(struct sshbuf *buf, const char *v)
{
	return sshbuf_put_string(buf, (u_char *)v, strlen(v));
}

/* Append the contents of sshbuf 'v' as an SSH wire-format string. */
int
sshbuf_put_stringb(struct sshbuf *buf, const struct sshbuf *v)
{
	return sshbuf_put_string(buf, sshbuf_ptr(v), sshbuf_len(v));
}
/*
 * Extract an SSH wire-format string from 'buf' and expose its payload as
 * a new child sshbuf in *bufp.  The child is linked to 'buf' as its
 * parent (sshbuf_set_parent), so it shares the parent's storage rather
 * than copying.  On any failure the child is freed and an error returned.
 */
int
sshbuf_froms(struct sshbuf *buf, struct sshbuf **bufp)
{
	const u_char *p;
	size_t len;
	struct sshbuf *ret;
	int r;

	if (buf == NULL || bufp == NULL)
		return SSH_ERR_INVALID_ARGUMENT;
	*bufp = NULL;
	if ((r = sshbuf_peek_string_direct(buf, &p, &len)) != 0)
		return r;
	if ((ret = sshbuf_from(p, len)) == NULL)
		return SSH_ERR_ALLOC_FAIL;
	if ((r = sshbuf_consume(buf, len + 4)) != 0 ||	/* Shouldn't happen */
	    (r = sshbuf_set_parent(ret, buf)) != 0) {
		sshbuf_free(ret);
		return r;
	}
	*bufp = ret;
	return 0;
}
/*
 * Append 'len' big-endian magnitude bytes from 'v' as an SSH2 "mpint":
 * leading zero bytes are stripped, and a single 0x00 byte is prepended
 * when the most significant bit is set so the value is not read back as
 * negative.  The length check guards 'len + 4 + prepend' against
 * overflowing the buffer's maximum size.
 */
int
sshbuf_put_bignum2_bytes(struct sshbuf *buf, const void *v, size_t len)
{
	u_char *d;
	const u_char *s = (const u_char *)v;
	int r, prepend;

	if (len > SSHBUF_SIZE_MAX - 5) {
		SSHBUF_DBG(("SSH_ERR_NO_BUFFER_SPACE"));
		return SSH_ERR_NO_BUFFER_SPACE;
	}
	/* Skip leading zero bytes */
	for (; len > 0 && *s == 0; len--, s++)
		;
	/*
	 * If most significant bit is set then prepend a zero byte to
	 * avoid interpretation as a negative number.
	 */
	prepend = len > 0 && (s[0] & 0x80) != 0;
	if ((r = sshbuf_reserve(buf, len + 4 + prepend, &d)) < 0)
		return r;
	POKE_U32(d, len + prepend);
	if (prepend)
		d[4] = 0;
	memcpy(d + 4 + prepend, s, len);
	return 0;
}
|
AllysonWindell/Kattis | Java/mosquito.java | import java.util.*;
import java.io.*;
/**
 * Kattis "mosquito": simulates N weeks of a mosquito life cycle and prints
 * the final adult count for each test case read from standard input.
 */
public class mosquito {
	// M adults, P pupae, L larvae; E eggs per adult, R larvae per pupa,
	// S pupae per adult; N weeks to simulate.
	static int M, P, L, E, R, S, N;

	public static void main(String[] args) throws Exception {
		Scanner in = new Scanner(System.in);
		while (in.hasNextInt()) {
			M = in.nextInt();
			P = in.nextInt();
			L = in.nextInt();
			E = in.nextInt();
			R = in.nextInt();
			S = in.nextInt();
			N = in.nextInt();
			while (N-- > 0) {
				// Compute the whole next generation before overwriting state.
				int nextPupae = L / R;
				int nextAdults = P / S;
				int nextLarvae = M * E;
				M = nextAdults;
				P = nextPupae;
				L = nextLarvae;
			}
			System.out.println(M);
		}
	}
}
kimjand/cxf | tools/wsdlto/frontend/jaxws/src/main/java/org/apache/cxf/tools/wsdlto/frontend/jaxws/generators/ServerGenerator.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.tools.wsdlto.frontend.jaxws.generators;
import java.util.HashMap;
import java.util.Map;
import javax.xml.namespace.QName;
import org.apache.cxf.common.i18n.Message;
import org.apache.cxf.common.util.StringUtils;
import org.apache.cxf.helpers.CastUtils;
import org.apache.cxf.service.model.ServiceInfo;
import org.apache.cxf.tools.common.ToolConstants;
import org.apache.cxf.tools.common.ToolContext;
import org.apache.cxf.tools.common.ToolException;
import org.apache.cxf.tools.common.model.JavaInterface;
import org.apache.cxf.tools.common.model.JavaModel;
import org.apache.cxf.tools.common.model.JavaPort;
import org.apache.cxf.tools.common.model.JavaServiceClass;
import org.apache.cxf.tools.util.ClassCollector;
import org.apache.cxf.tools.util.NameUtil;
import org.apache.cxf.tools.wsdlto.frontend.jaxws.processor.WSDLToJavaProcessor;
/**
 * wsdl2java generator that emits a standalone "_Server" bootstrap class
 * (from the server.vm Velocity template) for every port of every service
 * in the processed WSDL model.  Only active when the -server, -gen-server
 * or -all tool option is set.
 */
public class ServerGenerator extends AbstractJAXWSGenerator {
    private static final String SRV_TEMPLATE = TEMPLATE_BASE + "/server.vm";

    public ServerGenerator() {
        this.name = ToolConstants.SVR_GENERATOR;
    }

    /** Skip this generator unless one of the server-generation options is set. */
    public boolean passthrough() {
        return !(env.optionSet(ToolConstants.CFG_GEN_SERVER) || env.optionSet(ToolConstants.CFG_SERVER)
                 || env.optionSet(ToolConstants.CFG_ALL));
    }

    /**
     * Walks every JavaModel produced by the WSDL processor and writes one
     * server bootstrap class per service port.
     *
     * @param penv tool context holding options and the processed models
     * @throws ToolException on template or output failure
     */
    public void generate(ToolContext penv) throws ToolException {
        this.env = penv;
        if (passthrough()) {
            return;
        }
        Map<QName, JavaModel> map = CastUtils.cast((Map<?, ?>)penv.get(WSDLToJavaProcessor.MODEL_MAP));
        for (JavaModel javaModel : map.values()) {
            // Placeholder emitted into the template when no binding address is known.
            String address = "CHANGE_ME";
            Map<String, JavaInterface> interfaces = javaModel.getInterfaces();
            if (javaModel.getServiceClasses().isEmpty()) {
                // No services: report (verbosely) and bail out of the whole run.
                ServiceInfo serviceInfo = env.get(ServiceInfo.class);
                String wsdl = serviceInfo.getDescription().getBaseURI();
                Message msg = new Message("CAN_NOT_GEN_SRV", LOG, wsdl);
                if (penv.isVerbose()) {
                    System.out.println(msg.toString());
                }
                return;
            }
            for (JavaServiceClass js : javaModel.getServiceClasses().values()) {
                for (JavaPort jp : js.getPorts()) {
                    String interfaceName = jp.getInterfaceClass();
                    JavaInterface intf = interfaces.get(interfaceName);
                    if (intf == null) {
                        // Fall back to the port type name when no SEI class matched.
                        interfaceName = jp.getPortType();
                        intf = interfaces.get(interfaceName);
                    }
                    // NOTE(review): 'address' is not reset per port, so a port
                    // without a binding address inherits the previous port's
                    // address — confirm this carry-over is intended.
                    address = StringUtils.isEmpty(jp.getBindingAdress()) ? address : jp.getBindingAdress();
                    String serverClassName = interfaceName + "_"
                                             + NameUtil.mangleNameToClassName(jp.getPortName()) + "_Server";
                    // De-duplicate against previously generated server classes.
                    serverClassName = mapClassName(intf.getPackageName(), serverClassName, penv);
                    clearAttributes();
                    setAttributes("serverClassName", serverClassName);
                    setAttributes("intf", intf);
                    // NB: local 'name' shadows the generator-name field set in the ctor.
                    String name = getImplName(jp.getPortName(), js.getServiceName(), intf, penv);
                    setAttributes("impl", name);
                    setAttributes("address", address);
                    setCommonAttributes();
                    doWrite(SRV_TEMPLATE, parseOutputName(intf.getPackageName(), serverClassName));
                }
            }
        }
    }

    /**
     * Returns (and caches in the tool context under CFG_IMPL_CLASS) the
     * implementation class name for the given service/port pair.
     */
    private String getImplName(String port, String service, JavaInterface intf, ToolContext penv) {
        Map<String, String> nm = CastUtils.cast((Map<?, ?>)penv.get(ToolConstants.CFG_IMPL_CLASS));
        if (nm == null) {
            nm = new HashMap<>();
            penv.put(ToolConstants.CFG_IMPL_CLASS, nm);
        }
        String name = nm.get(service + "/" + port);
        if (name == null) {
            name = NameUtil.mangleNameToClassName(port + "Impl", true);
            name = mapClassName(intf.getPackageName(), name, penv);
            nm.put(service + "/" + port, name);
        }
        return name;
    }

    /**
     * Registers 'name' with the class collector, appending a numeric
     * suffix until the name is unique within 'packageName'.
     */
    private String mapClassName(String packageName, String name, ToolContext context) {
        ClassCollector collector = context.get(ClassCollector.class);
        int count = 0;
        String checkName = name;
        while (collector.containServerClass(packageName, checkName)) {
            checkName = name + (++count);
        }
        collector.addServerClassName(packageName, checkName,
                                     packageName + "." + checkName);
        return checkName;
    }

    /** Registers a generated server class name with the class collector. */
    public void register(final ClassCollector collector, String packageName, String fileName) {
        collector.addServerClassName(packageName, fileName, packageName + "." + fileName);
    }
}
|
Eluinhost/hosts.uhc | src/main/scala/gg/uhc/hosts/endpoints/alerts/CreateAlertRule.scala | package gg.uhc.hosts.endpoints.alerts
import java.time.Instant
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives.{as, complete, entity, handleRejections, provide, validate}
import akka.http.scaladsl.server.{Directive0, Directive1, Route}
import gg.uhc.hosts.{Alerts, CustomJsonCodec}
import gg.uhc.hosts.database.{AlertRuleRow, Database}
import gg.uhc.hosts.endpoints.{CustomDirectives, EndpointRejectionHandler}
/**
 * HTTP endpoint that creates a new alert rule.  Requires an authenticated
 * session with the "hosting advisor" permission; validates the payload,
 * inserts the rule and responds 201 Created with the stored row.
 */
class CreateAlertRule(customDirectives: CustomDirectives, database: Database) {
  import CustomJsonCodec._
  import customDirectives._

  /** JSON request body: which field to watch, the value to match, and whether the match is exact. */
  case class CreateAlertRulePayload(field: String, alertOn: String, exact: Boolean)

  // Builds a DB row from the payload; id = -1 is a placeholder replaced by the
  // generated key on insert, and created/createdBy are stamped server-side.
  private[this] def convertPayload(payload: CreateAlertRulePayload, author: String): Directive1[AlertRuleRow] =
    provide(
      AlertRuleRow(
        field = payload.field.trim,
        alertOn = payload.alertOn.trim,
        exact = payload.exact,
        id = -1,
        createdBy = author,
        created = Instant.now()
      ))

  // Rejects rows whose field is not a known alert field or whose value is empty.
  private[this] def validateRow(row: AlertRuleRow): Directive0 =
    validate(Alerts.allAlertFields.contains(row.field), "Invalid Field") &
      validate(row.alertOn.nonEmpty, "Value cannot be empty")

  /** Route: auth -> permission check -> parse -> validate -> insert -> 201 with row (real id filled in). */
  def apply(): Route =
    handleRejections(EndpointRejectionHandler()) {
      requireAuthentication { session =>
        requirePermission("hosting advisor", session.username) {
          entity(as[CreateAlertRulePayload]) { entity =>
            convertPayload(entity, session.username) { row =>
              (validateRow(row) & requireSucessfulQuery(database.createAlertRule(row))) { id =>
                complete(StatusCodes.Created -> row.copy(id = id))
              }
            }
          }
        }
      }
    }
}
|
TetraSomia/liblapin | src/hardware/new_hardware.cpp | // <NAME> "Damdoshi"
// Hanged Bunny Studio 2014-2016
//
// <NAME>
#include <new>
#include <string.h>
#include "PVM110N.hpp"
#include "lapin_private.h"
// Internal state backing the opaque t_bunny_hardware handle: the Velleman
// PVM110N board driver plus cached I/O channel values.
struct bunny_hardware
{
  hbs::PVM110N *board;        // driver instance, owned by this struct
  int id;                     // card id (0-3) the board was opened with
  bool digital_inputs[5];     // last-read digital input states
  double analog_inputs[2];    // last-read analog input values
  bool digital_outputs[8];    // pending/last-written digital output states
  double analog_outputs[2];   // pending/last-written analog output values
};
// Allocates a hardware handle bound to PVM110N card 'id' (valid ids: 0-3).
// Returns NULL when the id is out of range, allocation fails, or the board
// cannot be opened; all partially acquired resources are released on failure.
t_bunny_hardware *bunny_new_hardware(size_t id)
{
  struct bunny_hardware *hw;

  if (id > 3)
    return (NULL);
  hw = (struct bunny_hardware*)bunny_malloc(sizeof(*hw));
  if (hw == NULL)
    return (NULL);
  memset(hw, 0, sizeof(*hw));
  hw->board = new (std::nothrow) hbs::PVM110N;
  if (hw->board == NULL)
    {
      bunny_free(hw);
      return (NULL);
    }
  if (hw->board->Open((hbs::PVM110N::CardId)id) == false)
    {
      delete hw->board;
      bunny_free(hw);
      return (NULL);
    }
  hw->id = id;
  return ((t_bunny_hardware*)hw);
}
|
ModRealms-Network/Bewitchment | src/main/java/com/bewitchment/client/model/block/ModelMolochIdol.java | package com.bewitchment.client.model.block;
import net.minecraft.client.model.ModelBase;
import net.minecraft.client.model.ModelRenderer;
import net.minecraft.entity.Entity;
/**
* idol_moloch - sunconure11
* Created using Tabula 7.1.0
*/
/**
 * Tabula-exported model for the Moloch idol block: a horned figure standing
 * on a pedestal and plinth.  All box offsets/rotations are generated values
 * from Tabula 7.1.0 — do not hand-tune without re-exporting.
 *
 * idol_moloch - sunconure11
 * Created using Tabula 7.1.0
 */
public class ModelMolochIdol extends ModelBase {
	// Base and body parts.
	public ModelRenderer plinth;
	public ModelRenderer pedestal;
	public ModelRenderer lLeg0;
	public ModelRenderer rLeg0;
	public ModelRenderer ass;
	public ModelRenderer lArm0;
	public ModelRenderer lArm1;
	public ModelRenderer belly;
	// Head and facial parts.
	public ModelRenderer head;
	public ModelRenderer snout1;
	public ModelRenderer snout2;
	public ModelRenderer torso;
	public ModelRenderer rArm0;
	public ModelRenderer rArm1;
	public ModelRenderer rHorn0;
	public ModelRenderer lHorn0;
	public ModelRenderer lHorn1;
	public ModelRenderer rHorn1;
	public ModelRenderer ear1;
	public ModelRenderer ear2;

	// Builds every part with Tabula-generated texture offsets, positions,
	// box dimensions and rotations.
	public ModelMolochIdol() {
		this.textureWidth = 64;
		this.textureHeight = 64;
		this.rHorn0 = new ModelRenderer(this, 13, 22);
		this.rHorn0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.rHorn0.addBox(6.6F, -10.5F, 2.5F, 2, 1, 1, 0.0F);
		this.setRotateAngle(rHorn0, 0.0F, 0.2617993877991494F, -0.5235987755982988F);
		this.plinth = new ModelRenderer(this, 0, 43);
		this.plinth.setRotationPoint(0.0F, 24.1F, 0.0F);
		this.plinth.addBox(-3.8F, -4.0F, -4.1F, 8, 4, 8, 0.0F);
		this.rArm1 = new ModelRenderer(this, 40, 20);
		this.rArm1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.rArm1.addBox(2.31F, -5.1F, -5.3F, 1, 4, 1, 0.0F);
		this.setRotateAngle(rArm1, -0.7438593271999832F, 0.0F, 0.0F);
		this.lArm0 = new ModelRenderer(this, 40, 11);
		this.lArm0.mirror = true;
		this.lArm0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.lArm0.addBox(-2.71F, -9.6F, -2.0F, 1, 3, 1, 0.0F);
		this.setRotateAngle(lArm0, -0.20245819323134223F, 0.0F, 0.0F);
		this.lHorn1 = new ModelRenderer(this, 13, 22);
		this.lHorn1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.lHorn1.addBox(-5.3F, -10.7F, 7.1F, 2, 1, 1, 0.0F);
		this.setRotateAngle(lHorn1, 0.0F, -1.0471975511965976F, 0.5235987755982988F);
		this.snout1 = new ModelRenderer(this, 34, 43);
		this.snout1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.snout1.addBox(-0.6F, -12.65F, 0.3F, 2, 1, 3, 0.0F);
		this.setRotateAngle(snout1, 0.3490658503988659F, 0.0F, 0.0F);
		this.lHorn0 = new ModelRenderer(this, 13, 22);
		this.lHorn0.mirror = true;
		this.lHorn0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.lHorn0.addBox(-8.1F, -10.7F, 2.35F, 2, 1, 1, 0.0F);
		this.setRotateAngle(lHorn0, 0.0F, -0.2617993877991494F, 0.5235987755982988F);
		this.rArm0 = new ModelRenderer(this, 40, 11);
		this.rArm0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.rArm0.addBox(2.31F, -9.6F, -2.0F, 1, 3, 1, 0.0F);
		this.setRotateAngle(rArm0, -0.20245819323134223F, 0.0F, 0.0F);
		this.ear1 = new ModelRenderer(this, 28, 26);
		this.ear1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.ear1.addBox(1.9F, -12.0F, -0.1F, 2, 1, 1, 0.0F);
		this.setRotateAngle(ear1, 0.0F, 0.10995574287564275F, -0.02617993877991494F);
		this.pedestal = new ModelRenderer(this, 4, 31);
		this.pedestal.setRotationPoint(0.0F, 21.1F, 0.0F);
		this.pedestal.addBox(-2.7F, -6.0F, -3.2F, 6, 5, 6, 0.0F);
		this.snout2 = new ModelRenderer(this, 34, 50);
		this.snout2.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.snout2.addBox(-0.61F, -11.95F, -3.55F, 2, 2, 3, 0.0F);
		this.belly = new ModelRenderer(this, 27, 9);
		this.belly.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.belly.addBox(-1.6F, -7.5F, -2.3F, 4, 2, 1, 0.0F);
		this.lArm1 = new ModelRenderer(this, 40, 20);
		this.lArm1.mirror = true;
		this.lArm1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.lArm1.addBox(-2.71F, -5.1F, -5.3F, 1, 4, 1, 0.0F);
		this.setRotateAngle(lArm1, -0.7438593271999832F, 0.0F, 0.0F);
		this.torso = new ModelRenderer(this, 0, 0);
		this.torso.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.torso.addBox(-2.2F, -9.9F, -1.3F, 5, 5, 3, 0.0F);
		this.rHorn1 = new ModelRenderer(this, 13, 22);
		this.rHorn1.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.rHorn1.addBox(3.6F, -10.5F, 7.6F, 2, 1, 1, 0.0F);
		this.setRotateAngle(rHorn1, 0.0F, 1.0471975511965976F, -0.5235987755982988F);
		this.rLeg0 = new ModelRenderer(this, 0, 20);
		this.rLeg0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.rLeg0.addBox(1.4F, -4.8F, -4.3F, 1, 4, 1, 0.0F);
		this.ear2 = new ModelRenderer(this, 28, 26);
		this.ear2.mirror = true;
		this.ear2.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.ear2.addBox(-3.1F, -12.0F, -0.3F, 2, 1, 1, 0.0F);
		this.setRotateAngle(ear2, 0.0F, -0.10995574287564275F, 0.02617993877991494F);
		this.lLeg0 = new ModelRenderer(this, 0, 20);
		this.lLeg0.mirror = true;
		this.lLeg0.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.lLeg0.addBox(-1.6F, -4.7F, -4.3F, 1, 4, 1, 0.0F);
		this.ass = new ModelRenderer(this, 0, 11);
		this.ass.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.ass.addBox(-1.6F, -5.7F, -4.31F, 4, 1, 6, 0.0F);
		this.head = new ModelRenderer(this, 34, 34);
		this.head.setRotationPoint(0.0F, 20.0F, 0.0F);
		this.head.addBox(-1.1F, -12.9F, -1.3F, 3, 3, 3, 0.0F);
	}

	// Renders every part at the given scale (f5); other args follow the
	// standard ModelBase.render signature and are unused here.
	@Override
	public void render(Entity entity, float f, float f1, float f2, float f3, float f4, float f5) {
		this.rHorn0.render(f5);
		this.plinth.render(f5);
		this.rArm1.render(f5);
		this.lArm0.render(f5);
		this.lHorn1.render(f5);
		this.snout1.render(f5);
		this.lHorn0.render(f5);
		this.rArm0.render(f5);
		this.ear1.render(f5);
		this.pedestal.render(f5);
		this.snout2.render(f5);
		this.belly.render(f5);
		this.lArm1.render(f5);
		this.torso.render(f5);
		this.rHorn1.render(f5);
		this.rLeg0.render(f5);
		this.ear2.render(f5);
		this.lLeg0.render(f5);
		this.ass.render(f5);
		this.head.render(f5);
	}

	/**
	 * This is a helper function from Tabula to set the rotation of model parts
	 */
	public void setRotateAngle(ModelRenderer modelRenderer, float x, float y, float z) {
		modelRenderer.rotateAngleX = x;
		modelRenderer.rotateAngleY = y;
		modelRenderer.rotateAngleZ = z;
	}
}
|
Sagar1711/ISIS3 | isis/src/base/apps/map2map/map2map.cpp | #include "ProcessRubberSheet.h"
#include "ProjectionFactory.h"
#include "TProjection.h"
#include "map2map.h"
using namespace std;
namespace Isis {
void map2map(UserInterface &ui, Pvl *log) {
Cube cube;
CubeAttributeInput inAtt = ui.GetInputAttribute("FROM");
if (inAtt.bands().size() != 0) {
cube.setVirtualBands(inAtt.bands());
}
cube.open(ui.GetFileName("FROM"));
map2map(&cube, ui);
}
void map2map(Cube *icube, UserInterface &ui, Pvl *log) {
// We will be warping a cube
ProcessRubberSheet p;
// Get the map projection file provided by the user
Pvl userPvl(ui.GetFileName("MAP"));
PvlGroup &userMappingGrp = userPvl.findGroup("Mapping", Pvl::Traverse);
CubeAttributeInput &inputAtt =ui.GetInputAttribute("FROM");
p.SetInputCube(ui.GetFileName("FROM"), inputAtt);
// Get the mapping group
PvlGroup fromMappingGrp = icube->group("Mapping");
TProjection *inproj = (TProjection *) icube->projection();
PvlGroup outMappingGrp = fromMappingGrp;
// If the default range is FROM, then wipe out any range data in user mapping file
if(ui.GetString("DEFAULTRANGE").compare("FROM") == 0 && !ui.GetBoolean("MATCHMAP")) {
if(userMappingGrp.hasKeyword("MinimumLatitude")) {
userMappingGrp.deleteKeyword("MinimumLatitude");
}
if(userMappingGrp.hasKeyword("MaximumLatitude")) {
userMappingGrp.deleteKeyword("MaximumLatitude");
}
if(userMappingGrp.hasKeyword("MinimumLongitude")) {
userMappingGrp.deleteKeyword("MinimumLongitude");
}
if(userMappingGrp.hasKeyword("MaximumLongitude")) {
userMappingGrp.deleteKeyword("MaximumLongitude");
}
}
// Deal with user overrides entered in the GUI. Do this by changing the user's mapping group, which
// will then overlay anything in the output mapping group.
if(ui.WasEntered("MINLAT") && !ui.GetBoolean("MATCHMAP")) {
userMappingGrp.addKeyword(PvlKeyword("MinimumLatitude", toString(ui.GetDouble("MINLAT"))), Pvl::Replace);
}
if(ui.WasEntered("MAXLAT") && !ui.GetBoolean("MATCHMAP")) {
userMappingGrp.addKeyword(PvlKeyword("MaximumLatitude", toString(ui.GetDouble("MAXLAT"))), Pvl::Replace);
}
if(ui.WasEntered("MINLON") && !ui.GetBoolean("MATCHMAP")) {
userMappingGrp.addKeyword(PvlKeyword("MinimumLongitude", toString(ui.GetDouble("MINLON"))), Pvl::Replace);
}
if(ui.WasEntered("MAXLON") && !ui.GetBoolean("MATCHMAP")) {
userMappingGrp.addKeyword(PvlKeyword("MaximumLongitude", toString(ui.GetDouble("MAXLON"))), Pvl::Replace);
}
/**
* If the user is changing from positive east to positive west, or vice-versa, the output minimum is really
* the input maximum. However, the user mapping group must be left unaffected (an input minimum must be the
* output minimum). To accomplish this, we swap the minimums/maximums in the output group ahead of time. This
* causes the minimums and maximums to correlate to the output minimums and maximums. That way when we copy
* the user mapping group into the output group a mimimum overrides a minimum and a maximum overrides a maximum.
*/
bool sameDirection = true;
if(userMappingGrp.hasKeyword("LongitudeDirection")) {
if(((QString)userMappingGrp["LongitudeDirection"]).compare(fromMappingGrp["LongitudeDirection"]) != 0) {
sameDirection = false;
}
}
// Since the out mapping group came from the from mapping group, which came from a valid cube,
// we can assume both min/max lon exists if min longitude exists.
if(!sameDirection && outMappingGrp.hasKeyword("MinimumLongitude")) {
double minLon = outMappingGrp["MinimumLongitude"];
double maxLon = outMappingGrp["MaximumLongitude"];
outMappingGrp["MaximumLongitude"] = toString(minLon);
outMappingGrp["MinimumLongitude"] = toString(maxLon);
}
if(ui.GetString("PIXRES").compare("FROM") == 0 && !ui.GetBoolean("MATCHMAP")) {
// Resolution will be in fromMappingGrp and outMappingGrp at this time
// delete from user mapping grp
if(userMappingGrp.hasKeyword("Scale")) {
userMappingGrp.deleteKeyword("Scale");
}
if(userMappingGrp.hasKeyword("PixelResolution")) {
userMappingGrp.deleteKeyword("PixelResolution");
}
}
else if(ui.GetString("PIXRES").compare("MAP") == 0 || ui.GetBoolean("MATCHMAP")) {
// Resolution will be in userMappingGrp - delete all others
if(outMappingGrp.hasKeyword("Scale")) {
outMappingGrp.deleteKeyword("Scale");
}
if(outMappingGrp.hasKeyword("PixelResolution")) {
outMappingGrp.deleteKeyword("PixelResolution");
}
if(fromMappingGrp.hasKeyword("Scale")) {
fromMappingGrp.deleteKeyword("Scale");
}
if(fromMappingGrp.hasKeyword("PixelResolution")) {
fromMappingGrp.deleteKeyword("PixelResolution");
}
}
else if(ui.GetString("PIXRES").compare("MPP") == 0) {
// Resolution specified - delete all and add to outMappingGrp
if(outMappingGrp.hasKeyword("Scale")) {
outMappingGrp.deleteKeyword("Scale");
}
if(outMappingGrp.hasKeyword("PixelResolution")) {
outMappingGrp.deleteKeyword("PixelResolution");
}
if(fromMappingGrp.hasKeyword("Scale")) {
fromMappingGrp.deleteKeyword("Scale");
}
if(fromMappingGrp.hasKeyword("PixelResolution")) {
fromMappingGrp.deleteKeyword("PixelResolution");
}
if(userMappingGrp.hasKeyword("Scale")) {
userMappingGrp.deleteKeyword("Scale");
}
if(userMappingGrp.hasKeyword("PixelResolution")) {
userMappingGrp.deleteKeyword("PixelResolution");
}
outMappingGrp.addKeyword(PvlKeyword("PixelResolution", toString(ui.GetDouble("RESOLUTION")), "meters/pixel"), Pvl::Replace);
}
else if(ui.GetString("PIXRES").compare("PPD") == 0) {
// Resolution specified - delete all and add to outMappingGrp
if(outMappingGrp.hasKeyword("Scale")) {
outMappingGrp.deleteKeyword("Scale");
}
if(outMappingGrp.hasKeyword("PixelResolution")) {
outMappingGrp.deleteKeyword("PixelResolution");
}
if(fromMappingGrp.hasKeyword("Scale")) {
fromMappingGrp.deleteKeyword("Scale");
}
if(fromMappingGrp.hasKeyword("PixelResolution")) {
fromMappingGrp.deleteKeyword("PixelResolution");
}
if(userMappingGrp.hasKeyword("Scale")) {
userMappingGrp.deleteKeyword("Scale");
}
if(userMappingGrp.hasKeyword("PixelResolution")) {
userMappingGrp.deleteKeyword("PixelResolution");
}
outMappingGrp.addKeyword(PvlKeyword("Scale", toString(ui.GetDouble("RESOLUTION")), "pixels/degree"), Pvl::Replace);
}
// Rotation will NOT Propagate
if(outMappingGrp.hasKeyword("Rotation")) {
outMappingGrp.deleteKeyword("Rotation");
}
/**
* The user specified map template file overrides what ever is in the
* cube's mapping group.
*/
for(int keyword = 0; keyword < userMappingGrp.keywords(); keyword ++) {
outMappingGrp.addKeyword(userMappingGrp[keyword], Pvl::Replace);
}
/**
* Now, we have to deal with unit conversions. We convert only if the following are true:
* 1) We used values from the input cube
* 2) The values are longitudes or latitudes
* 3) The map file or user-specified information uses a different measurement system than
* the input cube for said values.
*
* The data is corrected for:
* 1) Positive east/positive west
* 2) Longitude domain
* 3) planetographic/planetocentric.
*/
// First, the longitude direction
if(!sameDirection) {
PvlGroup longitudes = inproj->MappingLongitudes();
for(int index = 0; index < longitudes.keywords(); index ++) {
if(!userMappingGrp.hasKeyword(longitudes[index].name())) {
// use the from domain because that's where our values are coming from
if(((QString)userMappingGrp["LongitudeDirection"]).compare("PositiveEast") == 0) {
outMappingGrp[longitudes[index].name()] = toString(
TProjection::ToPositiveEast(outMappingGrp[longitudes[index].name()],
outMappingGrp["LongitudeDomain"]));
}
else {
outMappingGrp[longitudes[index].name()] = toString(
TProjection::ToPositiveWest(outMappingGrp[longitudes[index].name()],
outMappingGrp["LongitudeDomain"]));
}
}
}
}
// Second, longitude domain
if(userMappingGrp.hasKeyword("LongitudeDomain")) { // user set a new domain?
if((int)userMappingGrp["LongitudeDomain"] != (int)fromMappingGrp["LongitudeDomain"]) { // new domain different?
PvlGroup longitudes = inproj->MappingLongitudes();
for(int index = 0; index < longitudes.keywords(); index ++) {
if(!userMappingGrp.hasKeyword(longitudes[index].name())) {
if((int)userMappingGrp["LongitudeDomain"] == 180) {
outMappingGrp[longitudes[index].name()] = toString(
TProjection::To180Domain(outMappingGrp[longitudes[index].name()]));
}
else {
outMappingGrp[longitudes[index].name()] = toString(
TProjection::To360Domain(outMappingGrp[longitudes[index].name()]));
}
}
}
}
}
// Third, planetographic/planetocentric
if(userMappingGrp.hasKeyword("LatitudeType")) { // user set a new domain?
if(((QString)userMappingGrp["LatitudeType"]).compare(fromMappingGrp["LatitudeType"]) != 0) { // new lat type different?
PvlGroup latitudes = inproj->MappingLatitudes();
for(int index = 0; index < latitudes.keywords(); index ++) {
if(!userMappingGrp.hasKeyword(latitudes[index].name())) {
if(((QString)userMappingGrp["LatitudeType"]).compare("Planetographic") == 0) {
outMappingGrp[latitudes[index].name()] = toString(TProjection::ToPlanetographic(
(double)fromMappingGrp[latitudes[index].name()],
(double)fromMappingGrp["EquatorialRadius"],
(double)fromMappingGrp["PolarRadius"]));
}
else {
outMappingGrp[latitudes[index].name()] = toString(TProjection::ToPlanetocentric(
(double)fromMappingGrp[latitudes[index].name()],
(double)fromMappingGrp["EquatorialRadius"],
(double)fromMappingGrp["PolarRadius"]));
}
}
}
}
}
// Try a couple equivalent longitudes to fix the ordering of min,max for border cases
if ((double)outMappingGrp["MinimumLongitude"] >=
(double)outMappingGrp["MaximumLongitude"]) {
if ((QString)outMappingGrp["MinimumLongitude"] == "180.0" &&
(int)userMappingGrp["LongitudeDomain"] == 180)
outMappingGrp["MinimumLongitude"] = "-180";
if ((QString)outMappingGrp["MaximumLongitude"] == "-180.0" &&
(int)userMappingGrp["LongitudeDomain"] == 180)
outMappingGrp["MaximumLongitude"] = "180";
if ((QString)outMappingGrp["MinimumLongitude"] == "360.0" &&
(int)userMappingGrp["LongitudeDomain"] == 360)
outMappingGrp["MinimumLongitude"] = "0";
if ((QString)outMappingGrp["MaximumLongitude"] == "0.0" &&
(int)userMappingGrp["LongitudeDomain"] == 360)
outMappingGrp["MaximumLongitude"] = "360";
}
// If MinLon/MaxLon out of order, we weren't able to calculate the correct values
if((double)outMappingGrp["MinimumLongitude"] >= (double)outMappingGrp["MaximumLongitude"]) {
if(!ui.WasEntered("MINLON") || !ui.WasEntered("MAXLON")) {
QString msg = "Unable to determine the correct [MinimumLongitude,MaximumLongitude].";
msg += " Please specify these values in the [MINLON,MAXLON] parameters";
throw IException(IException::Unknown, msg, _FILEINFO_);
}
}
int samples, lines;
Pvl mapData;
// Copy to preserve cube labels so we can match cube size
if(userPvl.hasObject("IsisCube")) {
mapData = userPvl;
mapData.findObject("IsisCube").deleteGroup("Mapping");
mapData.findObject("IsisCube").addGroup(outMappingGrp);
}
else {
mapData.addGroup(outMappingGrp);
}
// *NOTE: The UpperLeftX,UpperLeftY keywords will not be used in the CreateForCube
// method, and they will instead be recalculated. This is correct.
TProjection *outproj = (TProjection *) ProjectionFactory::CreateForCube(mapData, samples, lines,
ui.GetBoolean("MATCHMAP"));
// Set up the transform object which will simply map
// output line/samps -> output lat/lons -> input line/samps
Transform *transform = new Map2map(icube->sampleCount(),
icube->lineCount(),
(TProjection *) icube->projection(),
samples,
lines,
outproj,
ui.GetBoolean("TRIM"));
// Allocate the output cube and add the mapping labels
CubeAttributeOutput & att = ui.GetOutputAttribute("TO");
Cube *ocube = p.SetOutputCube(ui.GetFileName("TO"), att, transform->OutputSamples(),
transform->OutputLines(),
icube->bandCount());
PvlGroup cleanOutGrp = outproj->Mapping();
// ProjectionFactory::CreateForCube updated mapData to have the correct
// upperleftcornerx, upperleftcornery, scale and resolution. Use these
// updated numbers.
cleanOutGrp.addKeyword(mapData.findGroup("Mapping", Pvl::Traverse)["UpperLeftCornerX"], Pvl::Replace);
cleanOutGrp.addKeyword(mapData.findGroup("Mapping", Pvl::Traverse)["UpperLeftCornerY"], Pvl::Replace);
cleanOutGrp.addKeyword(mapData.findGroup("Mapping", Pvl::Traverse)["Scale"], Pvl::Replace);
cleanOutGrp.addKeyword(mapData.findGroup("Mapping", Pvl::Traverse)["PixelResolution"], Pvl::Replace);
ocube->putGroup(cleanOutGrp);
// Set up the interpolator
Interpolator *interp;
if(ui.GetString("INTERP") == "NEARESTNEIGHBOR") {
interp = new Interpolator(Interpolator::NearestNeighborType);
}
else if(ui.GetString("INTERP") == "BILINEAR") {
interp = new Interpolator(Interpolator::BiLinearType);
}
else if(ui.GetString("INTERP") == "CUBICCONVOLUTION") {
interp = new Interpolator(Interpolator::CubicConvolutionType);
}
else {
QString msg = "Unknow value for INTERP [" + ui.GetString("INTERP") + "]";
throw IException(IException::Programmer, msg, _FILEINFO_);
}
// Warp the cube
p.StartProcess(*transform, *interp);
p.EndProcess();
if (log){
log->addGroup(cleanOutGrp);
}
// Cleanup
delete transform;
delete interp;
}
// Transform object constructor
Map2map::Map2map(const int inputSamples, const int inputLines, TProjection *inmap,
const int outputSamples, const int outputLines, TProjection *outmap,
bool trim) {
p_inputSamples = inputSamples;
p_inputLines = inputLines;
p_inmap = inmap;
p_outputSamples = outputSamples;
p_outputLines = outputLines;
p_outmap = outmap;
p_trim = trim;
p_inputWorldSize = 0;
bool wrapPossible = inmap->IsEquatorialCylindrical();
if(inmap->IsEquatorialCylindrical()) {
// Figure out how many samples 360 degrees is
wrapPossible = wrapPossible && inmap->SetUniversalGround(0, 0);
int worldStart = (int)(inmap->WorldX() + 0.5);
wrapPossible = wrapPossible && inmap->SetUniversalGround(0, 180);
int worldEnd = (int)(inmap->WorldX() + 0.5);
p_inputWorldSize = abs(worldEnd - worldStart) * 2;
}
}
// Transform method mapping output line/samps to lat/lons to input line/samps
bool Map2map::Xform(double &inSample, double &inLine,
const double outSample, const double outLine) {
// See if the output image coordinate converts to lat/lon
if(!p_outmap->SetWorld(outSample, outLine)) return false;
// See if we should trim
if((p_trim) && (p_outmap->HasGroundRange())) {
if(p_outmap->Latitude() < p_outmap->MinimumLatitude()) return false;
if(p_outmap->Latitude() > p_outmap->MaximumLatitude()) return false;
if(p_outmap->Longitude() < p_outmap->MinimumLongitude()) return false;
if(p_outmap->Longitude() > p_outmap->MaximumLongitude()) return false;
}
// Get the universal lat/lon and see if it can be converted to input line/samp
double lat = p_outmap->UniversalLatitude();
double lon = p_outmap->UniversalLongitude();
if(!p_inmap->SetUniversalGround(lat, lon)) return false;
inSample = p_inmap->WorldX();
inLine = p_inmap->WorldY();
if(p_inputWorldSize != 0) {
// Try to correct the sample if we can,
// this is the simplest way to code the
// translation although it probably could
// be done in one "if"
while(inSample < 0.5) {
inSample += p_inputWorldSize;
}
while(inSample > p_inputSamples + 0.5) {
inSample -= p_inputWorldSize;
}
}
// Make sure the point is inside the input image
if(inSample < 0.5) return false;
if(inLine < 0.5) return false;
if(inSample > p_inputSamples + 0.5) return false;
if(inLine > p_inputLines + 0.5) return false;
// Everything is good
return true;
}
// Number of samples in the projected output cube (fixed at construction).
int Map2map::OutputSamples() const {
  return p_outputSamples;
}
// Number of lines in the projected output cube (fixed at construction).
int Map2map::OutputLines() const {
  return p_outputLines;
}
}
|
jonoforbes/ether-app | node_modules/angular-pipes/src/string/wrap.pipe.js | // idea from https://github.com/a8m/angular-filter
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var core_1 = require("@angular/core");
var utils_1 = require("../utils/utils");
// Angular pipe `wrap` (compiled TypeScript output): surrounds a string with a
// prefix (`wrap`) and an optional suffix (`ends`); when `ends` is falsy the
// prefix is reused on both sides. Non-string inputs, or a missing `wrap`
// argument, pass through unchanged.
var WrapPipe = (function () {
    function WrapPipe() {
    }
    WrapPipe.prototype.transform = function (input, wrap, ends) {
        // [prefix, value, suffix].join('') builds the wrapped string in one pass
        return (utils_1.isString(input) && !utils_1.isUndefined(wrap)) ? [wrap, input, ends || wrap].join('') : input;
    };
    return WrapPipe;
}());
// Angular metadata: registers the pipe under the template name 'wrap'.
WrapPipe.decorators = [
    { type: core_1.Pipe, args: [{
                name: 'wrap'
            },] },
];
/** @nocollapse */
WrapPipe.ctorParameters = function () { return []; };
exports.WrapPipe = WrapPipe;
//# sourceMappingURL=wrap.pipe.js.map |
dgant/PurpleWave | src/Information/Geography/Types/Base.scala | <filename>src/Information/Geography/Types/Base.scala
package Information.Geography.Types
import Lifecycle.With
import Mathematics.Points.{PixelRay, SpecificPoints, Tile, TileRectangle}
import Mathematics.Maff
import Performance.Cache
import Planning.UnitMatchers.{MatchBuilding, MatchWorker}
import ProxyBwapi.Players.PlayerInfo
import ProxyBwapi.Races.Protoss
import ProxyBwapi.UnitInfo.UnitInfo
import Utilities.Time.{Forever, Minutes}
import scala.collection.mutable
/**
  * A resource base: a town-hall location plus the minerals/geysers around it,
  * ownership state, and derived geometry (heart, harvesting area, worker paths).
  */
class Base(val townHallTile: Tile)
{
  val townHallArea: TileRectangle = Protoss.Nexus.tileArea.add(townHallTile)
  lazy val zone: Zone = With.geography.zoneByTile(townHallTile)
  lazy val metro: Metro = With.geography.metros.find(_.bases.contains(this)).get
  lazy val isStartLocation: Boolean = With.geography.startLocations.contains(townHallTile)
  lazy val isOurMain: Boolean = With.geography.ourMain == this
  // Tiles of the zone that are nearer to this base's heart than to any other base's heart (capped at tile distance 50)
  lazy val tiles: Set[Tile] = zone.tiles.view.filter(t => t.tileDistanceSlow(heart) < 50 && ! zone.bases.view.filter(_.heart != heart).exists(_.heart.pixelDistanceGround(t) < heart.pixelDistanceGround(t))).toSet
  lazy val economicValue: Cache[Double] = new Cache(() => units.view.filter(_.isAny(MatchBuilding, MatchWorker)).map(_.subjectiveValue).sum)
  // True when this (neutral) base looks like it is about to become our expansion
  lazy val plannedExpo: Cache[Boolean] = new Cache(() => owner.isNeutral && (
    With.units.ours.exists(u => u.intent.toBuildTile.exists(t => t.base.contains(this) && (! townHallArea.contains(t) || u.intent.toBuild.exists(_.isTownHall))))
    || units.exists(u => u.isOurs && u.unitClass.isBuilding && ! townHallArea.contains(u.tileTopLeft))))
  lazy val radians: Double = SpecificPoints.middle.radiansTo(townHallArea.center)
  var isNaturalOf: Option[Base] = None
  var townHall: Option[UnitInfo] = None
  var units: Vector[UnitInfo] = Vector.empty
  var gas: Vector[UnitInfo] = Vector.empty
  var minerals: Vector[UnitInfo] = Vector.empty
  var owner: PlayerInfo = With.neutral
  var lastOwnerChangeFrame: Int = 0
  var name: String = "Nowhere"
  var defenseValue: Double = _
  var workerCount: Int = _
  // Worker count relative to an ideal of 2 per mineral patch and 3 per geyser
  val saturation: Cache[Double] = new Cache(() => workerCount.toDouble / (1 + 3 * gas.size + 2 * minerals.size))

  private val _initialResources = With.units.all.filter(u => u.mineralsLeft > With.configuration.blockerMineralThreshold || u.gasLeft > 0).filter(_.pixelDistanceCenter(townHallTile.topLeftPixel.add(64, 48)) < 32 * 9).toVector
  val harvestingArea = new TileRectangle(_initialResources.view.flatMap(_.tiles) ++ townHallArea.tiles)
  // Reference tile adjacent to the town hall on the resource-facing side
  val heart: Tile = {
    val centroid = if (_initialResources.isEmpty) townHallArea.center.subtract(SpecificPoints.middle) else Maff.centroid(_initialResources.view.map(_.pixel))
    val direction = centroid.subtract(townHallArea.center)
    val xDominant = Math.abs(direction.x) > Math.abs(direction.y)
    if (xDominant)
      if (direction.x < 0) townHallTile.add(-2, 1) else townHallTile.add(5, 1)
    else if (direction.y < 0) townHallTile.add(1, -2) else townHallTile.add(1, 4)
  }

  private def resourcePathTiles(resource: UnitInfo): Iterable[Tile] = {
    // Draw a shortest-path line from each resource to the town hall.
    // Where multiple equally-short lines are available, take the one closest to the heart.
    // Count all tiles in that line.
    val townHallTiles = townHallArea.tiles
    def hallDistanceSquared(resourceTile: Tile): Double = townHallTiles.map(resourceTile.tileDistanceSquared).min
    val resourceTiles = resource.tileArea.tiles
    val bestDistance = resourceTiles.map(hallDistanceSquared).min
    val from = resourceTiles.filter(hallDistanceSquared(_) <= bestDistance).minBy(_.tileDistanceSquared(heart))
    val to = townHallArea.tiles.minBy(_.tileDistanceSquared(from))
    val route = PixelRay(from.center, to.center)
    route
  }

  // One worker-travel path per resource
  lazy val resourcePaths: Map[UnitInfo, Iterable[Tile]] = {
    resources.map(resource => (resource, resourcePathTiles(resource))).toMap
  }

  // All tiles that must stay unobstructed for workers to reach resources
  lazy val resourcePathTiles: Set[Tile] = {
    val output = new mutable.ArrayBuffer[Tile]
    output ++= resourcePaths.values.flatten
    // Avoid blocking the path where workers are likely to pop out of gas
    // by blocking tiles adjacent to the Nexus that could be along the critical path
    val townHallAdjacentTiles = townHallArea.expand(1, 1).tiles
    gas.foreach(_.tileArea.tilesSurrounding.foreach(gasTile => {
      output += townHallAdjacentTiles.minBy(_.tileDistanceSquared(gasTile))
    }))
    // Avoid trapping workers into the mining area by banning tiles which are adjacent to the resource
    output ++= minerals.flatMap(_.tileArea.tilesSurrounding)
    output --= townHallArea.tiles
    output.filter(_.valid).toSet
  }

  var mineralsLeft = 0
  var gasLeft = 0
  var lastPlannedExpo = - Forever()
  var lastScoutedFrame = 0
  var lastScoutedByEnemyFrame = 0

  def plannedExpoRecently = plannedExpo() || With.framesSince(lastPlannedExpo) < Minutes(1)()
  def scouted: Boolean = lastScoutedFrame > 0
  // BUG FIX: previously tested lastScoutedFrame, making this identical to scouted
  def scoutedByEnemy: Boolean = lastScoutedByEnemyFrame > 0
  def resources: Vector[UnitInfo] = minerals ++ gas
  def natural: Option[Base] = With.geography.bases.find(_.isNaturalOf.contains(this))

  override def toString: String = f"$description $name, ${zone.name} $heart"

  def description: String = (
    if (this == With.geography.ourMain) "Our main"
    else if (this == With.geography.ourNatural) "Our natural"
    else (
      (if (owner.isEnemy) "Enemy" else if (owner.isUs) "Our" else if (owner.isAlly) "Ally" else "Neutral")
      + " "
      + (if (isStartLocation && ! owner.isUs) "main" else if (isNaturalOf.isDefined && ! owner.isUs) "natural" else "base")))
}
|
ztlevi/perfetto | include/perfetto/protozero/field_writer.h | <gh_stars>100-1000
/*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "perfetto/protozero/message.h"
#include "perfetto/protozero/proto_utils.h"
#ifndef INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
#define INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
namespace protozero {
namespace internal {

// Maps a proto schema type to the protozero Message call that serializes a
// field of that type. Only the specializations below are usable; this primary
// template exists solely to reject nested-message types at compile time.
template <proto_utils::ProtoSchemaType proto_schema_type>
struct FieldWriter {
  static_assert(proto_schema_type != proto_utils::ProtoSchemaType::kMessage,
                "FieldWriter can't be used with nested messages");
};

// double / float: fixed-width payloads via AppendFixed.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kDouble> {
  inline static void Append(Message& message, uint32_t field_id, double value) {
    message.AppendFixed(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kFloat> {
  inline static void Append(Message& message, uint32_t field_id, float value) {
    message.AppendFixed(field_id, value);
  }
};

// bool: single-byte varint.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kBool> {
  inline static void Append(Message& message, uint32_t field_id, bool value) {
    message.AppendTinyVarInt(field_id, value);
  }
};

// int32 / int64 / uint32 / uint64: plain varint encoding.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kInt32> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int32_t value) {
    message.AppendVarInt(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kInt64> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int64_t value) {
    message.AppendVarInt(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kUint32> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            uint32_t value) {
    message.AppendVarInt(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kUint64> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            uint64_t value) {
    message.AppendVarInt(field_id, value);
  }
};

// sint32 / sint64: signed varint encoding (AppendSignedVarInt).
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kSint32> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int32_t value) {
    message.AppendSignedVarInt(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kSint64> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int64_t value) {
    message.AppendSignedVarInt(field_id, value);
  }
};

// fixed32 / fixed64 / sfixed32 / sfixed64: fixed-width payloads.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kFixed32> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            uint32_t value) {
    message.AppendFixed(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kFixed64> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            uint64_t value) {
    message.AppendFixed(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed32> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int32_t value) {
    message.AppendFixed(field_id, value);
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed64> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            int64_t value) {
    message.AppendFixed(field_id, value);
  }
};

// enum: encoded as a plain varint of the enum's value.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kEnum> {
  template <typename EnumType>
  inline static void Append(Message& message,
                            uint32_t field_id,
                            EnumType value) {
    message.AppendVarInt(field_id, value);
  }
};

// string / bytes: length-delimited payloads via AppendBytes; both a raw
// pointer+size overload and a std::string convenience overload are provided.
template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kString> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            const char* data,
                            size_t size) {
    message.AppendBytes(field_id, data, size);
  }

  inline static void Append(Message& message,
                            uint32_t field_id,
                            const std::string& value) {
    message.AppendBytes(field_id, value.data(), value.size());
  }
};

template <>
struct FieldWriter<proto_utils::ProtoSchemaType::kBytes> {
  inline static void Append(Message& message,
                            uint32_t field_id,
                            const uint8_t* data,
                            size_t size) {
    message.AppendBytes(field_id, data, size);
  }

  inline static void Append(Message& message,
                            uint32_t field_id,
                            const std::string& value) {
    message.AppendBytes(field_id, value.data(), value.size());
  }
};

}  // namespace internal
}  // namespace protozero
#endif // INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
|
jxg01713/zstack | header/src/main/java/org/zstack/header/storage/primary/ImageCacheInventory.java | <reponame>jxg01713/zstack
package org.zstack.header.storage.primary;
import java.sql.Timestamp;
import java.util.Date;
/**
 * API-facing view (inventory) of an {@code ImageCacheVO} database record:
 * one image cached on a primary storage.
 */
public class ImageCacheInventory {
    private long id;                      // database id of the cache record
    private String primaryStorageUuid;    // uuid of the primary storage holding the cache
    private String imageUuid;             // uuid of the cached image
    private String installUrl;            // location of the cached copy
    private long size;                    // cached image size -- presumably bytes; confirm against ImageCacheVO
    private String md5sum;                // checksum of the cached image
    private String state;                 // cache state, stored as a string
    private Timestamp createDate;
    private Timestamp lastOpDate;

    /**
     * Builds an inventory from its database value object.
     *
     * @param vo source value object (must be non-null; its state must be set)
     * @return a new inventory populated from {@code vo}
     */
    public static ImageCacheInventory valueOf(ImageCacheVO vo) {
        ImageCacheInventory inv = new ImageCacheInventory();
        inv.setCreateDate(vo.getCreateDate());
        inv.setId(vo.getId());
        inv.setImageUuid(vo.getImageUuid());
        inv.setInstallUrl(vo.getInstallUrl());
        inv.setLastOpDate(vo.getLastOpDate());
        inv.setMd5sum(vo.getMd5sum());
        inv.setPrimaryStorageUuid(vo.getPrimaryStorageUuid());
        inv.setSize(vo.getSize());
        // state is copied via toString() of the VO's state object
        inv.setState(vo.getState().toString());
        return inv;
    }

    // --- plain JavaBean accessors ---

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public String getPrimaryStorageUuid() {
        return primaryStorageUuid;
    }

    public void setPrimaryStorageUuid(String primaryStorageUuid) {
        this.primaryStorageUuid = primaryStorageUuid;
    }

    public String getImageUuid() {
        return imageUuid;
    }

    public void setImageUuid(String imageUuid) {
        this.imageUuid = imageUuid;
    }

    public String getInstallUrl() {
        return installUrl;
    }

    public void setInstallUrl(String installUrl) {
        this.installUrl = installUrl;
    }

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public String getMd5sum() {
        return md5sum;
    }

    public void setMd5sum(String md5sum) {
        this.md5sum = md5sum;
    }

    public Timestamp getCreateDate() {
        return createDate;
    }

    public void setCreateDate(Timestamp createDate) {
        this.createDate = createDate;
    }

    public Timestamp getLastOpDate() {
        return lastOpDate;
    }

    public void setLastOpDate(Timestamp lastOpDate) {
        this.lastOpDate = lastOpDate;
    }

    public String getState() {
        return state;
    }

    public void setState(String state) {
        this.state = state;
    }
}
|
Nan1t/Scheduler | api/src/main/java/edu/zieit/scheduler/api/render/RenderException.java | <filename>api/src/main/java/edu/zieit/scheduler/api/render/RenderException.java<gh_stars>0
package edu.zieit.scheduler.api.render;
/**
 * Unchecked exception signalling a failure while rendering (package
 * {@code edu.zieit.scheduler.api.render}).
 */
public class RenderException extends RuntimeException {

    /**
     * @param message detail message describing the rendering failure
     */
    public RenderException(String message) {
        super(message);
    }

    /**
     * @param cause underlying cause of the rendering failure
     */
    public RenderException(Throwable cause) {
        super(cause);
    }

    /**
     * Added for consistency with the standard exception constructor set:
     * carries both a detail message and the underlying cause.
     *
     * @param message detail message describing the rendering failure
     * @param cause   underlying cause of the rendering failure
     */
    public RenderException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
kongyin0921/open-cloud-platform | ocp-auth/src/main/java/com/ocp/auth/provider/token/CustomTokenEnhancer.java | package com.ocp.auth.provider.token;
import com.ocp.auth.entity.Client;
import com.ocp.auth.service.IClientService;
import com.ocp.auth.util.OidcIdTokenBuilder;
import com.ocp.common.constant.SecurityConstants;
import com.ocp.common.entity.SysUser;
import com.ocp.common.security.constants.IdTokenClaimNames;
import com.ocp.common.security.porperties.TokenStoreProperties;
import com.ocp.common.security.util.AuthUtils;
import org.springframework.cloud.bootstrap.encrypt.KeyProperties;
import org.springframework.security.oauth2.common.DefaultOAuth2AccessToken;
import org.springframework.security.oauth2.common.OAuth2AccessToken;
import org.springframework.security.oauth2.provider.OAuth2Authentication;
import org.springframework.security.oauth2.provider.token.TokenEnhancer;
import org.springframework.stereotype.Component;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * Custom token enhancer: attaches the account type, the user id (when tokens
 * are stored as JWTs), and an OIDC id_token to issued OAuth2 access tokens.
 *
 * @author kong
 * @date 2021/08/17 19:56
 * blog: http://blog.kongyin.ltd
 */
@Component
public class CustomTokenEnhancer implements TokenEnhancer {

    private final KeyProperties keyProperties;          // signing key material for the id_token
    private final IClientService clientService;         // looks up OAuth2 client settings
    private final TokenStoreProperties tokenStoreProperties;  // token storage configuration

    public CustomTokenEnhancer(KeyProperties keyProperties, IClientService clientService, TokenStoreProperties tokenStoreProperties) {
        this.keyProperties = keyProperties;
        this.clientService = clientService;
        this.tokenStoreProperties = tokenStoreProperties;
    }

    @Override
    public OAuth2AccessToken enhance(OAuth2AccessToken accessToken, OAuth2Authentication authentication) {
        Set<String> responseTypes = authentication.getOAuth2Request().getResponseTypes();
        Map<String, Object> additionalInfo = new HashMap<>(3);
        String accountType = AuthUtils.getAccountType(authentication.getUserAuthentication());
        additionalInfo.put(SecurityConstants.ACCOUNT_TYPE_PARAM_NAME, accountType);
        // Extra claims are only needed for OIDC requests (response_type contains
        // id_token) or when access tokens themselves are stored as JWTs.
        if (responseTypes.contains(SecurityConstants.ID_TOKEN)
                || "authJwt".equals(tokenStoreProperties.getType())) {
            Object principal = authentication.getPrincipal();
            // add the user id parameter
            if (principal instanceof SysUser) {
                SysUser user = (SysUser) principal;
                if (responseTypes.contains(SecurityConstants.ID_TOKEN)) {
                    // generate the id_token
                    setIdToken(additionalInfo, authentication, keyProperties, clientService, user);
                }
                if ("authJwt".equals(tokenStoreProperties.getType())) {
                    additionalInfo.put("id", user.getId());
                }
            }
        }
        ((DefaultOAuth2AccessToken) accessToken).setAdditionalInformation(additionalInfo);
        return accessToken;
    }

    /**
     * Generates the OIDC id_token (if the client supports it) and stores it in
     * the additional-information map under {@code SecurityConstants.ID_TOKEN}.
     *
     * @param additionalInfo map of extra claims attached to the access token
     * @param authentication the current authorization
     * @param keyProperties  signing key material
     * @param clientService  client (application) service
     * @param user           the authenticated user
     */
    private void setIdToken(Map<String, Object> additionalInfo, OAuth2Authentication authentication
            , KeyProperties keyProperties, IClientService clientService, SysUser user) {
        String clientId = authentication.getOAuth2Request().getClientId();
        Client client = clientService.loadClientByClientId(clientId);
        if (client.getSupportIdToken()) {
            String nonce = authentication.getOAuth2Request().getRequestParameters().get(IdTokenClaimNames.NONCE);
            long now = System.currentTimeMillis();
            // NOTE(review): if getIdTokenValiditySeconds() returns an int, the
            // "* 1000" below can overflow before widening -- consider "* 1000L".
            long expiresAt = System.currentTimeMillis() + client.getIdTokenValiditySeconds() * 1000;
            String idToken = OidcIdTokenBuilder.builder(keyProperties)
                    .issuer(SecurityConstants.ISS)
                    .issuedAt(now)
                    .expiresAt(expiresAt)
                    .subject(String.valueOf(user.getId()))
                    .name(user.getNickname())
                    .loginName(user.getUsername())
                    .picture(user.getHeadImgUrl())
                    .audience(clientId)
                    .nonce(nonce)
                    .build();
            additionalInfo.put(SecurityConstants.ID_TOKEN, idToken);
        }
    }
}
|
mastermind88/jerryscript | tests/jerry/array-prototype-unshift.js | <filename>tests/jerry/array-prototype-unshift.js
// Copyright JS Foundation and other contributors, http://js.foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Conformance tests for Array.prototype.unshift: basic shifting, generic
// (array-like) receivers, exception propagation from length/element
// accessors, and frozen/non-extensible edge cases.
var array = []
assert(array.length === 0);

array.unshift("foo");
assert(array.length === 1);
assert(array[0] === "foo");

array.unshift(new Array())
assert(array.length === 2);
assert(array[0] instanceof Array);
assert(array[1] === "foo")

array.unshift(Infinity);
assert(array.length === 3);
assert(array[0] === Infinity);
assert(array[1] instanceof Array);
assert(array[2] === "foo")

// Multiple arguments are inserted in order at the front
array.unshift("bar", 0);
assert(array.length === 5);
assert(array[0] === "bar");
assert(array[1] === 0);
assert(array[2] === Infinity);
assert(array[3] instanceof Array);
assert(array[4] === "foo")

// Checking behavior when no length property is defined
var obj = { unshift : Array.prototype.unshift };
assert(obj.length === undefined);
obj.unshift(1,2,3);
assert(obj.length === 3);

// Checking behavior when unable to get length
var obj = { unshift : Array.prototype.unshift };
Object.defineProperty(obj, 'length', { 'get' : function () {throw new ReferenceError ("foo"); } });

try {
  obj.unshift(1);
  assert(false)
} catch (e) {
  assert(e.message === "foo");
  assert(e instanceof ReferenceError);
}

// Checking behavior when unable to set length
var obj = { unshift : Array.prototype.unshift };
Object.defineProperty(obj, 'length', { 'set' : function () {throw new ReferenceError ("foo"); } });

try {
  obj.unshift(2);
  assert(false)
} catch (e) {
  assert(e.message === "foo");
  assert(e instanceof ReferenceError);
}

// Checking behavior when unable to shift elements (throwing element getter)
var obj = { unshift : Array.prototype.unshift, length : 1 };
Object.defineProperty(obj, '0', { 'get' : function () {throw new ReferenceError ("foo"); } });

try {
  obj.unshift(3);
  assert(false);
} catch (e) {
  assert(e.message === "foo");
  assert(e instanceof ReferenceError);
}

// Checking behavior when unable to shift elements (throwing element setter)
var obj = { unshift : Array.prototype.unshift, length : 1 };
Object.defineProperty(obj, '0', { 'set' : function () {throw new ReferenceError ("foo"); } });

try {
  obj.unshift(4);
  assert(false);
} catch (e) {
  assert(e.message === "foo");
  assert(e instanceof ReferenceError);
}

// Checking behavior when a property is not defined (hole at index 1
// propagates: after shifting, index 2 must be deleted, not copied)
var obj = { '0' : "foo", '2' : "bar", length : 3, unshift : Array.prototype.unshift };

assert(obj.unshift("baz") === 4);
assert(obj[0] === "baz");
assert(obj[1] === "foo");
assert(obj[2] === undefined);
assert(obj[3] === "bar");

/* ES v5.1 15.4.4.13.6.d.ii.
   Checking behavior when the array is frozen */
try {
  var arr = [0, 1];
  Object.freeze(arr);
  Array.prototype.unshift.call(arr, 2, 3);
  assert(false);
} catch (e) {
  assert(e instanceof TypeError);
}

/* ES v5.1 15.4.4.13.6.e.i.
   Checking behavior when the array has only one property and a bigger length */
try {
  var arr = { length : 9 };
  Object.defineProperty(arr, '6', { value : 2 });
  Array.prototype.unshift.call(arr, 2, 3);
  assert(false);
} catch (e) {
  assert(e instanceof TypeError);
}

// With zero arguments the element-shifting loop must be skipped entirely,
// so the getter at index 5 must never be invoked.
var arrayLike = {get 5() { throw "shouldn't throw"; }};
arrayLike.length = 10;
Array.prototype.unshift.call(arrayLike);
|
osman-demirci/digital-guard-app-ipcamera | node_modules/opencv4nodejs/cc/modules/features2d/detectors/BRISKDetector.h | #include "macros.h"
#include "../FeatureDetector.h"
#ifndef __FF_BRISKDETECTOR_H__
#define __FF_BRISKDETECTOR_H__
// Node.js (NAN/V8) binding wrapping OpenCV's cv::BRISK keypoint detector.
// Stores the construction parameters and exposes them as JS-side getters.
class BRISKDetector : public FeatureDetector {
public:
  cv::Ptr<cv::BRISK> detector;  // underlying OpenCV detector instance

  // Construction parameters, mirrored for the FF_GETTER accessors below.
  int thresh;
  int octaves;
  double patternScale;

  static NAN_MODULE_INIT(Init);   // registers the JS class on the module
  static NAN_METHOD(New);         // JS constructor

  static FF_GETTER(BRISKDetector, GetThresh, thresh);
  static FF_GETTER(BRISKDetector, GetOctaves, octaves);
  static FF_GETTER(BRISKDetector, GetPatternScale, patternScale);

  static Nan::Persistent<v8::FunctionTemplate> constructor;

  // FeatureDetector hook: hands back the wrapped cv::BRISK instance.
  cv::Ptr<cv::FeatureDetector> getDetector() {
    return detector;
  }
};
#endif |
r4b3rt/lindb | replication/sequence_test.go | // Licensed to LinDB under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. LinDB licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package replication
import (
"fmt"
"path"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/lindb/lindb/pkg/fileutil"
"github.com/lindb/lindb/pkg/queue/page"
)
// Root directory for test artifacts; removed by each test's deferred cleanup.
var testPath = "test"

// TestSequence_new_err exercises the error paths of NewSequence by stubbing
// newPageFactoryFunc: factory construction failure, page acquisition failure,
// and page sync failure.
func TestSequence_new_err(t *testing.T) {
	ctrl := gomock.NewController(t)
	tmp := path.Join(testPath, "sequence_test")
	defer func() {
		// restore the real page factory and remove test artifacts
		newPageFactoryFunc = page.NewFactory
		_ = fileutil.RemoveDir(testPath)
		ctrl.Finish()
	}()
	// case 1: new page factory err
	newPageFactoryFunc = func(path string, pageSize int) (page.Factory, error) {
		return nil, fmt.Errorf("err")
	}
	seq, err := NewSequence(tmp)
	assert.Error(t, err)
	assert.Nil(t, seq)
	// case 2: AcquirePage err
	fct := page.NewMockFactory(ctrl)
	newPageFactoryFunc = func(path string, pageSize int) (page.Factory, error) {
		return fct, nil
	}
	fct.EXPECT().GetPage(int64(metaPageID)).Return(nil, false)
	fct.EXPECT().Close().Return(fmt.Errorf("err"))
	fct.EXPECT().AcquirePage(gomock.Any()).Return(nil, fmt.Errorf("err"))
	seq, err = NewSequence(tmp)
	assert.Error(t, err)
	assert.Nil(t, seq)
	// case 3: sync err (page acquired, but flushing the meta page fails)
	fct.EXPECT().GetPage(int64(metaPageID)).Return(nil, false)
	fct.EXPECT().Close().Return(fmt.Errorf("err"))
	mockPage := page.NewMockMappedPage(ctrl)
	mockPage.EXPECT().PutUint64(gomock.Any(), gomock.Any())
	mockPage.EXPECT().Sync().Return(fmt.Errorf("err"))
	fct.EXPECT().AcquirePage(gomock.Any()).Return(mockPage, nil)
	seq, err = NewSequence(tmp)
	assert.Error(t, err)
	assert.Nil(t, seq)
}
// TestSequence verifies the happy path: a fresh sequence starts at -1 for
// both head and ack, and head/ack values survive Sync + reopen.
func TestSequence(t *testing.T) {
	tmp := path.Join(testPath, "sequence_test")
	defer func() {
		_ = fileutil.RemoveDir(testPath)
	}()
	seq, err := NewSequence(tmp)
	assert.NoError(t, err)
	assert.NotNil(t, seq)
	// fresh sequence: both pointers start at -1
	assert.Equal(t, seq.GetHeadSeq(), int64(-1))
	assert.Equal(t, seq.GetAckSeq(), int64(-1))
	err = seq.Close()
	assert.NoError(t, err)
	// reopen without Sync: still the initial values
	seq, err = NewSequence(tmp)
	assert.NoError(t, err)
	assert.NotNil(t, seq)
	assert.Equal(t, seq.GetHeadSeq(), int64(-1))
	assert.Equal(t, seq.GetAckSeq(), int64(-1))
	seq.SetHeadSeq(int64(10))
	seq.SetAckSeq(int64(5))
	assert.Equal(t, seq.GetHeadSeq(), int64(10))
	assert.Equal(t, seq.GetAckSeq(), int64(5))
	err = seq.Sync()
	assert.NoError(t, err)
	// new sequence: after Sync, a reopened sequence recovers the ack value;
	// head is restored from the persisted ack, not the in-memory head (10)
	newSeq, err := NewSequence(tmp)
	assert.NoError(t, err)
	assert.Equal(t, newSeq.GetAckSeq(), int64(5))
	assert.Equal(t, newSeq.GetHeadSeq(), int64(5))
}
|
ppartarr/azure-sdk-for-java | sdk/peering/mgmt-v2019_08_01_preview/src/main/java/com/microsoft/azure/management/peering/v2019_08_01_preview/implementation/PeeringLocationsImpl.java | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*
 * Code generated by Microsoft (R) AutoRest Code Generator.
 */
package com.microsoft.azure.management.peering.v2019_08_01_preview.implementation;
import com.microsoft.azure.arm.model.implementation.WrapperImpl;
import com.microsoft.azure.management.peering.v2019_08_01_preview.PeeringLocations;
import rx.functions.Func1;
import rx.Observable;
import com.microsoft.azure.Page;
import com.microsoft.azure.management.peering.v2019_08_01_preview.PeeringLocation;
/**
 * Implementation of {@code PeeringLocations}: pages through the inner
 * client's results and wraps each inner model in its fluent facade.
 */
class PeeringLocationsImpl extends WrapperImpl<PeeringLocationsInner> implements PeeringLocations {
    private final PeeringManager manager;

    PeeringLocationsImpl(PeeringManager manager) {
        super(manager.inner().peeringLocations());
        this.manager = manager;
    }

    public PeeringManager manager() {
        return this.manager;
    }

    @Override
    public Observable<PeeringLocation> listAsync(final String kind) {
        PeeringLocationsInner client = this.inner();
        // Flatten the paged response into a stream of inner models...
        return client.listAsync(kind)
        .flatMapIterable(new Func1<Page<PeeringLocationInner>, Iterable<PeeringLocationInner>>() {
            @Override
            public Iterable<PeeringLocationInner> call(Page<PeeringLocationInner> page) {
                return page.items();
            }
        })
        // ...then wrap each inner model as a PeeringLocation.
        .map(new Func1<PeeringLocationInner, PeeringLocation>() {
            @Override
            public PeeringLocation call(PeeringLocationInner inner) {
                return new PeeringLocationImpl(inner, manager());
            }
        });
    }
}
|
Reno-Greenleaf/bsd-games | dab/ttyscrn.h | /* $NetBSD: ttyscrn.h,v 1.2 2003/12/28 17:49:10 thorpej Exp $ */
/*-
* Copyright (c) 2003 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by <NAME>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ttyscrn.h: Curses based screen for dots
*/
#ifndef _H_TTYSCRN
#define _H_TTYSCRN
#include "gamescreen.h"
// Curses-based implementation of the abstract GAMESCREEN interface for the
// dots game.  Instances are obtained through the create() factory because
// terminal/curses initialization can fail (presumably create() returns NULL
// in that case -- confirm in ttyscrn.cc).
class TTYSCRN : public GAMESCREEN {
  public:
    // Constructor that can fail
    // `acs` selects use of the curses alternate character set; y/x give the
    // board dimensions in squares.
    static TTYSCRN* create(int acs, size_t y, size_t x);
    ~TTYSCRN();

    // Screen virtuals (see gamescreen.h for the contract of each method).
    void clean(void);
    void moveto(size_t y, size_t x);
    void addsym(const int sym);
    void addedge(const int sym);
    void redraw(void);
    void bell(void);
    int getinput(void);
    void score(size_t s, const PLAYER& p);
    void games(size_t s, const PLAYER& p);
    void total(size_t s, const PLAYER& p);
    void ties(const PLAYER& p);

  private:
    // Fixed layout offsets for the board and the status lines next to it.
    enum {
        offsx = 2,      // board x offset from top left corner
        offsy = 2,      // board y offset from top left corner
        offsscore = 0,  // score y offset from top of the board
        offstotal = 3,  // total y offset from top of the board
        offsgames = 6,  // games y offset from top of the board
        offsties = 8    // ties y offset from top of the board
    };
    size_t _sx, _sy;    // board size
    size_t _tx, _ty;    // tty size
    int _acs;           // do we want acs?
};
#endif
|
santiagozky/sling | bundles/commons/log-webconsole/src/test/java/org/apache/sling/commons/log/webconsole/ITWebConsoleRemote.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sling.commons.log.webconsole;
import java.io.File;
import java.io.IOException;
import com.gargoylesoftware.htmlunit.DefaultCredentialsProvider;
import com.gargoylesoftware.htmlunit.Page;
import com.gargoylesoftware.htmlunit.TextPage;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.HtmlPage;
import org.apache.commons.io.FilenameUtils;
import org.apache.sling.commons.log.webconsole.remote.WebConsoleTestActivator;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.ops4j.pax.exam.ExamSystem;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.TestContainer;
import org.ops4j.pax.exam.spi.DefaultExamSystem;
import org.ops4j.pax.exam.spi.PaxExamRuntime;
import org.ops4j.pax.tinybundles.core.TinyBundle;
import org.osgi.framework.Constants;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.ops4j.pax.exam.CoreOptions.composite;
import static org.ops4j.pax.exam.CoreOptions.frameworkProperty;
import static org.ops4j.pax.exam.CoreOptions.provision;
import static org.ops4j.pax.tinybundles.core.TinyBundles.bundle;
import static org.ops4j.pax.tinybundles.core.TinyBundles.withBnd;
/**
 * Integration test that boots an OSGi container (pax-exam) containing the
 * Sling log bundle plus a generated helper bundle, then drives the Felix
 * web console over HTTP with HtmlUnit to verify the log plugin page, the
 * configuration printer and the log tailer endpoint.
 */
public class ITWebConsoleRemote extends LogTestBase {

    // URL suffixes of the web console pages under test.
    private static final String PLUGIN_SUFFIX = "slinglog";
    private static final String PRINTER_SUFFIX = "status-slinglogs";

    // Shared container: started lazily by the first test (see setUp) and
    // torn down once in tearDownClass.
    private static TestContainer testContainer;

    private WebClient webClient;

    @Override
    protected Option addPaxExamSpecificOptions() {
        // No extra pax-exam options needed for this test.
        return null;
    }

    @Override
    protected Option addExtraOptions() {
        // Point the log bundle at the test log configuration and provision
        // the helper bundle that registers the test appender/filter.
        return composite(
                frameworkProperty("org.apache.sling.commons.log.configurationFile").value(
                        FilenameUtils.concat(new File(".").getAbsolutePath(), "src/test/resources/test-webconsole-remote.xml")),
                createWebConsoleTestBundle()
        );
    }

    /** Builds an in-memory (TinyBundles) bundle around WebConsoleTestActivator. */
    private Option createWebConsoleTestBundle() {
        TinyBundle bundle = bundle();
        for (Class c : WebConsoleTestActivator.BUNDLE_CLASS_NAMES) {
            bundle.add(c);
        }
        bundle.set(Constants.BUNDLE_SYMBOLICNAME, "org.apache.sling.common.log.testbundle")
                .set(Constants.BUNDLE_ACTIVATOR, WebConsoleTestActivator.class.getName());
        return provision(bundle.build(withBnd()));
    }

    @Before
    public void setUp() throws IOException {
        // Had to use a @Before instead of @BeforeClass as that requires a
        // static method
        if (testContainer == null) {
            ExamSystem system = DefaultExamSystem.create(config());
            testContainer = PaxExamRuntime.createContainer(system);
            testContainer.start();
        }
    }

    @Before
    public void prepareWebClient() {
        // The web console requires basic auth; admin/admin is the default.
        webClient = new WebClient();
        ((DefaultCredentialsProvider) webClient.getCredentialsProvider()).addCredentials("admin", "admin");
    }

    @Test
    public void testWebConsolePlugin() throws IOException {
        final HtmlPage page = webClient.getPage(prepareUrl(PLUGIN_SUFFIX));
        String text = page.asText();
        // Filter name should be part of Filter table
        assertTrue(text.contains("WebConsoleTestTurboFilter"));
        // Console name should be part of console table
        assertTrue(text.contains("WebConsoleTestAppender"));
        // Should show file name testremote.log
        assertTrue(text.contains("testremote.log"));
    }

    @Test
    public void testPrinter() throws IOException {
        final HtmlPage page = webClient.getPage(prepareUrl(PRINTER_SUFFIX));
        String text = page.asText();
        // Should dump content of configured file testremote.log
        // with its name
        assertTrue(text.contains("testremote.log"));
    }

    @Test
    public void tailerHeader() throws Exception {
        // The tailer must mark its plain-text response as non-sniffable.
        Page page = webClient.getPage(prepareUrl("slinglog/tailer.txt?name=webconsoletest1.log"));
        String nosniffHeader = page.getWebResponse().getResponseHeaderValue("X-Content-Type-Options");
        assertEquals("nosniff", nosniffHeader);
    }

    @Test
    public void tailerGrep() throws Exception {
        // Without a grep pattern both log lines appear in the tail output.
        TextPage page = webClient.getPage(prepareUrl("slinglog/tailer.txt?name=FILE&tail=-1"));
        String text = page.getContent();
        assertThat(text, containsString(WebConsoleTestActivator.FOO_LOG));
        assertThat(text, containsString(WebConsoleTestActivator.BAR_LOG));

        page = webClient.getPage(prepareUrl("slinglog/tailer.txt?name=FILE&tail=1000&grep=" + WebConsoleTestActivator.FOO_LOG));
        text = page.getContent();
        // With grep pattern specified we should only see foo and not bar
        assertThat(text, containsString(WebConsoleTestActivator.FOO_LOG));
        assertThat(text, not(containsString(WebConsoleTestActivator.BAR_LOG)));
    }

    @AfterClass
    public static void tearDownClass() {
        if (testContainer != null) {
            testContainer.stop();
            testContainer = null;
        }
    }

    /** Absolute web console URL for the given page suffix on the test server. */
    private static String prepareUrl(String suffix) {
        return String.format("http://localhost:%s/system/console/%s", LogTestBase.getServerPort(), suffix);
    }
}
|
myoukaku/bksge | libs/core/render/include/bksge/core/render/vulkan/detail/inl/rasterizer_state_inl.hpp | /**
* @file rasterizer_state_inl.hpp
*
* @brief RasterizerState クラスの実装
*
* @author myoukaku
*/
#ifndef BKSGE_CORE_RENDER_VULKAN_DETAIL_INL_RASTERIZER_STATE_INL_HPP
#define BKSGE_CORE_RENDER_VULKAN_DETAIL_INL_RASTERIZER_STATE_INL_HPP
#include <bksge/core/render/config.hpp>
#if BKSGE_CORE_RENDER_HAS_VULKAN_RENDERER
#include <bksge/core/render/vulkan/detail/rasterizer_state.hpp>
#include <bksge/core/render/vulkan/detail/fill_mode.hpp>
#include <bksge/core/render/vulkan/detail/front_face.hpp>
#include <bksge/core/render/vulkan/detail/cull_mode.hpp>
#include <bksge/core/render/vulkan/detail/vulkan.hpp>
#include <bksge/core/render/rasterizer_state.hpp>
namespace bksge
{
namespace render
{
namespace vulkan
{
// Translate the engine-level rasterizer settings into the Vulkan
// VkPipelineRasterizationStateCreateInfo held by this wrapper.
// NOTE(review): sType/pNext/flags are not assigned here -- presumably they
// are initialised by m_rasterization_state's default constructor; confirm.
BKSGE_INLINE
RasterizerState::RasterizerState(bksge::RasterizerState const& state)
{
	auto& rs = m_rasterization_state;
	// The three configurable settings come straight from the source state.
	rs.polygonMode             = FillMode(state.fill_mode());
	rs.cullMode                = CullMode(state.cull_mode());
	rs.frontFace               = FrontFace(state.front_face());
	// Depth clamp/bias and rasterizer discard are currently fixed off.
	rs.depthClampEnable        = VK_FALSE;
	rs.rasterizerDiscardEnable = VK_FALSE;
	rs.depthBiasEnable         = VK_FALSE;
	rs.depthBiasConstantFactor = 0;
	rs.depthBiasClamp          = 0;
	rs.depthBiasSlopeFactor    = 0;
	rs.lineWidth               = 1.0f;
}
// Returns the address of the wrapped Vulkan create-info structure; the
// pointer stays valid for the lifetime of this RasterizerState.
BKSGE_INLINE ::VkPipelineRasterizationStateCreateInfo const*
RasterizerState::GetAddressOf(void) const
{
	return &m_rasterization_state;
}
} // namespace vulkan
} // namespace render
} // namespace bksge
#endif // BKSGE_CORE_RENDER_HAS_VULKAN_RENDERER
#endif // BKSGE_CORE_RENDER_VULKAN_DETAIL_INL_RASTERIZER_STATE_INL_HPP
|
yujinishioka/computacional-thinking-python | listas/gabarito/lista2CT/exe-12.py | # Exercicio 12 - Lista 2
def soma_digitos(n):
    """Return the sum of the decimal digits of ``n`` (sign ignored).

    Generalizes the original unrolled five-digit computation: any number of
    digits is handled, and the result is identical for the five-digit RMs
    the exercise expects.  (The old code's fifth term added the whole
    remaining prefix, so inputs longer than five digits were mis-summed.)
    """
    n = abs(n)
    total = 0
    while True:
        n, digito = divmod(n, 10)  # peel off the least significant digit
        total += digito
        if n == 0:
            return total


if __name__ == "__main__":
    rm = int(input("RM: "))
    print(soma_digitos(rm))
Hronom/AIF2 | src/test/unit/java/io/aif/language/token/TokenSplitterTest.java | package io.aif.language.token;
import io.aif.language.common.ISplitter;
import io.aif.language.common.RegexpCooker;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import static junit.framework.Assert.assertEquals;
import static junit.framework.Assert.assertNotNull;
import static junit.framework.Assert.assertTrue;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@code TokenSplitter}: end-to-end splitting with the default
 * collaborators, construction, the behaviour with mocked
 * {@code ITokenSeparatorExtractor}/{@code RegexpCooker} collaborators, and
 * filtering of empty tokens.
 */
public class TokenSplitterTest {

    @Test(groups = "unit-tests")
    public void testExtract() throws Exception {
        // End-to-end split on whitespace with the default collaborators.
        final String inputText = "test1 test2\ntest3";

        final List<String> expectedResult = new ArrayList<>();
        expectedResult.add("test1");
        expectedResult.add("test2");
        expectedResult.add("test3");

        final ISplitter<String, String> tokenSeparatorExtractor = new TokenSplitter();
        final List<String> actualResult = tokenSeparatorExtractor.split(inputText);

        assertEquals(expectedResult, actualResult);
    }

    @Test(groups = "unit-tests")
    public void testConstructor() throws Exception {
        // The extractor-only constructor must accept a mock without failing.
        final ITokenSeparatorExtractor mockTokenSeparatorExtractor = mock(ITokenSeparatorExtractor.class);
        new TokenSplitter(mockTokenSeparatorExtractor);
    }

    @Test(groups = "unit-tests")
    public void testSplitWhenSplittersNotFound() throws Exception {
        // input parameter
        final String inputText = "token1 token2";

        // mocks -- the extractor reports no separator characters.
        // (Optional.empty() replaces the former Optional.ofNullable(null).)
        final Optional<List<Character>> mockSplitCharacters = Optional.empty();
        final ITokenSeparatorExtractor mockTokenSeparatorExtractor = mock(ITokenSeparatorExtractor.class);
        when(mockTokenSeparatorExtractor.extract(eq(inputText))).thenReturn(mockSplitCharacters);

        // expected result: the whole input comes back as a single token
        final List<String> expectedResult = Arrays.asList(inputText);

        // creating instances
        final ISplitter<String, String> tokenSplitter = new TokenSplitter(mockTokenSeparatorExtractor);

        // execution test
        final List<String> actualResult = tokenSplitter.split(inputText);

        // asserts
        assertEquals(expectedResult, actualResult);

        // verify
        verify(mockTokenSeparatorExtractor, times(1)).extract(inputText);
    }

    @Test(groups = "unit-tests")
    public void testSplitWhenSplittersFound() throws Exception {
        // input parameter
        final String inputText = "token1 token2";

        // mocks -- extractor reports ' ' and '\n' as separators; the cooker
        // turns them into the regexp actually used for splitting.
        final Optional<List<Character>> mockOptionalsSplitCharacters = Optional.of(Arrays.asList(' ', '\n'));
        final ITokenSeparatorExtractor mockTokenSeparatorExtractor = mock(ITokenSeparatorExtractor.class);
        when(mockTokenSeparatorExtractor.extract(eq(inputText))).thenReturn(mockOptionalsSplitCharacters);

        final RegexpCooker mockRegexpCooker = mock(RegexpCooker.class);
        when(mockRegexpCooker.prepareRegexp(eq(Arrays.asList(' ', '\n')))).thenReturn("[ \n]+");

        // expected result
        final List<String> expectedResult = Arrays.asList("token1", "token2");

        // creating instances
        final ISplitter<String, String> tokenSplitter = new TokenSplitter(mockTokenSeparatorExtractor, mockRegexpCooker);

        // execution test
        final List<String> actualResult = tokenSplitter.split(inputText);

        // asserts
        assertEquals(expectedResult, actualResult);

        // verify
        verify(mockTokenSeparatorExtractor, times(1)).extract(inputText);
        verify(mockRegexpCooker, times(1)).prepareRegexp(Arrays.asList(' ', '\n'));
    }

    @Test(groups = "unit-tests")
    public void testFilterIncorrectTokens() throws Exception {
        // Empty strings produced by the split must be filtered out.
        final List<String> inputTokens = Arrays.asList("token1", "", "token2");

        final List<String> expectedResult = Arrays.asList("token1", "token2");

        final List<String> actualResult = TokenSplitter.filterIncorrectTokens(inputTokens);

        assertEquals(expectedResult, actualResult);
    }
}
|
Factris/data-anonymization | lib/strategy/strategies.rb | require 'strategy/base'
require 'strategy/whitelist'
require 'strategy/blacklist'
require 'strategy/field/fields'

begin
  require 'mongo'
  require 'strategy/mongodb/anonymize_field'
  require 'strategy/mongodb/whitelist'
  require 'strategy/mongodb/blacklist'
rescue LoadError
  # MongoDB support is optional: skip the mongo-specific strategies when the
  # mongo driver gem is not installed.  (Previously this was "documented"
  # with a bare string literal, which was evaluated and silently discarded.)
end
|
shaunakpp/voices-of-consent | db/migrate/20190727221539_create_attendances.rb | <reponame>shaunakpp/voices-of-consent
# Creates the attendances join table linking users to the meetings they
# attended, plus per-attendance bookkeeping columns.
class CreateAttendances < ActiveRecord::Migration[5.2]
  def change
    create_table :attendances do |t|
      t.references :meeting, foreign_key: true  # meeting that was attended
      t.references :user, foreign_key: true     # attending user
      t.string :special_duties                  # free-text duties taken on, if any
      t.boolean :completed_hours                # presumably: required hours completed -- confirm with callers
      t.timestamps
    end
  end
end
|
Chris1904/vds-elements | src/utils/time.js | <reponame>Chris1904/vds-elements
/**
* 🤖 This section was generously ~stolen from~... err... donated by Furf. Cheers!
*
* @see https://github.snooguts.net/david-furfero/reddit-media-player/blob/main/src/lib/formatTime/index.ts
*/
/**
 * Casts a number to a string and left-pads it with zeroes up to
 * `expectedLength` characters.
 *
 * Strings already at least `expectedLength` long are returned unchanged —
 * exactly the semantics of `String.prototype.padStart`, so the manual
 * repeat/concatenate implementation is replaced with the built-in.
 *
 * @param {number} num - A number to pad.
 * @param {number} expectedLength - The expected length of the number as a string.
 * @returns {string}
 */
export function padNumberWithZeroes(num, expectedLength) {
  return String(num).padStart(expectedLength, '0');
}
/**
 * Unit keys used by `parseTime` for the components of a duration.
 *
 * @readonly
 * @enum {string}
 */
export const TimeUnit = {
  Hours: 'hours',
  Minutes: 'minutes',
  Seconds: 'seconds',
  /**
   * Represents a fraction of a second in decimal form.
   */
  Fraction: 'fraction'
};
/**
* @typedef {{ [P in TimeUnit]: number }} ParsedTime
*/
/**
 * Splits the given `duration` (in seconds) into its components: hours,
 * minutes, seconds and the fraction of a second.
 *
 * @param {number} duration - The length of time to parse in seconds.
 * @returns {ParsedTime}
 */
export function parseTime(duration) {
  // Whole seconds, used to isolate the sub-second fraction below.
  const wholeSeconds = Math.trunc(duration);
  return {
    [TimeUnit.Hours]: Math.trunc(duration / 3600),
    [TimeUnit.Minutes]: Math.trunc((duration % 3600) / 60),
    [TimeUnit.Seconds]: Math.trunc(duration % 60),
    // Keep three significant digits of the fractional part.
    [TimeUnit.Fraction]: Number((duration - wholeSeconds).toPrecision(3))
  };
}
/**
 * Formats the given `duration` into a human readable clock string.
 *
 * @param {number} duration - The length of time to parse in seconds.
 * @param {boolean} [shouldPadHours=false] - Whether to pad the hours to a length of 2.
 * @param {boolean} [shouldAlwaysShowHours=false] - Whether to always show the hours unit.
 * @returns {string}
 * @example `01:20` -> `minutes:seconds`
 * @example `3:01:20` -> `hours:minutes:seconds`
 * @example `03:01:20` -> If `shouldPadHours` is `true`
 * @example `0:01:20` -> If `shouldAlwaysShowHours` is `true`
 */
export function formatTime(
  duration,
  shouldPadHours = false,
  shouldAlwaysShowHours = false
) {
  const { hours, minutes, seconds } = parseTime(duration);
  const paddedSeconds = padNumberWithZeroes(seconds, 2);

  // Short form: no hours component at all.
  if (hours <= 0 && !shouldAlwaysShowHours) {
    return `${minutes}:${paddedSeconds}`;
  }

  const displayedHours = shouldPadHours ? padNumberWithZeroes(hours, 2) : hours;
  const paddedMinutes = padNumberWithZeroes(minutes, 2);
  return `${displayedHours}:${paddedMinutes}:${paddedSeconds}`;
}
/**
 * Formats the given `duration` into human spoken form.
 *
 * @param {number} duration - The length of time to parse in seconds.
 * @returns {string}
 * @example `2 hours, 3 minutes, 4 seconds`
 */
export function formatSpokenTime(duration) {
  const { hours, minutes, seconds } = parseTime(duration);

  // "1 hour" vs "2 hours" etc.
  const withUnit = (count, unit) =>
    `${count} ${count === 1 ? unit : `${unit}s`}`;

  const spokenParts = [];
  if (hours > 0) {
    spokenParts.push(withUnit(hours, 'hour'));
  }
  if (minutes > 0) {
    spokenParts.push(withUnit(minutes, 'minute'));
  }
  // Always emit at least the seconds component, even when zero.
  if (seconds > 0 || spokenParts.length === 0) {
    spokenParts.push(withUnit(seconds, 'second'));
  }
  return spokenParts.join(', ');
}
/**
 * Formats the given `duration` into a valid HTML5 duration string
 * (`PT<h>H<m>M<s>S`).
 *
 * @param {number} duration - The length of time to parse in seconds.
 * @returns {string}
 * @see https://www.w3.org/TR/2014/REC-html5-20141028/infrastructure.html#valid-duration-string
 */
export function formatHtml5Duration(duration) {
  const parsed = parseTime(duration);
  // Seconds carry the sub-second fraction in this representation.
  const secondsWithFraction = parsed.seconds + parsed.fraction;
  return `PT${parsed.hours}H${parsed.minutes}M${secondsWithFraction}S`;
}
|
uibcdf/OpenKinNet | openktn_old/forms/classes/api_pandas_KineticTransitionNetwork.py | from os.path import basename as _basename
from openktn.foreign import Pandas_KineticTransitionNetwork as pandas_KineticTransitionNetwork
from openktn.forms.classes import api_pandas_MicrostatesDataFrame as api_microstates
from openktn.forms.classes import api_pandas_TransitionsDataFrame as api_transitions
from simtk.unit import kelvin, nanoseconds
import numpy as np
# Name of this API form, derived from the file name:
# "api_pandas_KineticTransitionNetwork" -> "pandas.KineticTransitionNetwork".
form_name=_basename(__file__).split('.')[0].replace('api_','').replace('_','.')

# Mapping used by the dispatcher to recognise objects handled by this form,
# keyed both by class and by its string name.
is_form={
    pandas_KineticTransitionNetwork : form_name,
    'pandas.KineticTransitionNetwork' : form_name
    }

info=["",""]  # [short description, long description] -- currently empty
# Multitool

def new(temperature=None, time_step=None):
    """Create an empty pandas-backed kinetic transition network."""
    ktn = pandas_KineticTransitionNetwork(temperature=temperature, time_step=time_step)
    return ktn

def add_microstate(ktn, name=None, index=None):
    """Add a microstate (by name and/or index); delegates to the microstates API."""
    return api_microstates.add_microstate(ktn.microstates, name=name, index=index)
def _resolve_microstate_index(ktn, microstate, is_index):
    """Return the integer index for ``microstate``, creating it if missing.

    ``microstate`` is an index when ``is_index`` is True, otherwise a name
    that is converted through the microstates API.
    """
    if is_index:
        if not microstate_is_in(ktn, index=microstate):
            add_microstate(ktn, index=microstate)
        return microstate
    if not microstate_is_in(ktn, name=microstate):
        add_microstate(ktn, name=microstate)
    # The converter takes a list of names and returns a list of indices.
    return api_microstates.microstate_name_to_index(ktn.microstates, [microstate])[0]

def add_transition(ktn, origin, end, weight=0.0, origin_index=False, end_index=False):
    """Add a weighted transition origin -> end, creating missing microstates.

    ``origin``/``end`` are microstate names unless ``origin_index``/``end_index``
    are True.  ``weight`` is added both to the transition and to the origin
    microstate's accumulated weight.
    """
    origin = _resolve_microstate_index(ktn, origin, origin_index)
    end = _resolve_microstate_index(ktn, end, end_index)
    api_transitions.add_transition(ktn.transitions, origin, end, weight=weight,
                                   origin_index=True, end_index=True)
    # Keep the per-microstate weight in sync with its outgoing transitions.
    ktn.microstates.at[origin,'weight']+=weight
def microstate_is_in(ktn, name=None, index=None):
    """True when a microstate with the given name or index is in the network."""
    return api_microstates.microstate_is_in(ktn.microstates, name=name, index=index)
def transition_is_in(ktn, origin, end, origin_index=False, end_index=False):
    """True when the transition origin -> end exists in the network.

    ``origin``/``end`` are names unless ``origin_index``/``end_index`` are
    True; returns False immediately when either endpoint is unknown.
    """
    if origin_index:
        if not microstate_is_in(ktn, index=origin):
            return False
    else:
        if not microstate_is_in(ktn, name=origin):
            return False
        # Bug fix: the converter takes a list of names and returns a list of
        # indices (same convention as add_transition), so the scalar has to
        # be wrapped on the way in and unwrapped on the way out.
        origin = api_microstates.microstate_name_to_index(ktn.microstates, [origin])[0]
    if end_index:
        if not microstate_is_in(ktn, index=end):
            return False
    else:
        if not microstate_is_in(ktn, name=end):
            return False
        end = api_microstates.microstate_name_to_index(ktn.microstates, [end])[0]
    return api_transitions.transition_is_in(ktn.transitions, origin, end, origin_index=True,
                                            end_index=True)
def update_weights(ktn):
    """Recompute every microstate weight as the sum of its outgoing transition weights."""
    ktn.microstates['weight']=0.0
    # Vectorized sum of transition weights grouped by source microstate.
    aux = ktn.transitions.groupby(by='origin_index')['weight'].sum()
    for index, weight in aux.items():
        ktn.microstates.at[index, 'weight']=weight
def update_probabilities(ktn):
    """Refresh microstate weights, then transition and microstate probabilities."""
    update_weights(ktn)
    api_transitions.update_probabilities(ktn.transitions)
    api_microstates.update_probabilities(ktn.microstates)

def symmetrize(ktn):
    """Symmetrize the transition weights and refresh all probabilities."""
    api_transitions.symmetrize(ktn.transitions)
    update_probabilities(ktn)

def select(ktn, selection):
    # Selection syntax is not implemented for this form yet.
    raise NotImplementedError
# Convert

# Get

## Aux

## from microstate

# Short aliases that forward to the fully-named ``get_microstate_*`` accessors
# below; ``indices`` selects which microstates to read ('all' by default).

def get_index_from_microstate(ktn, indices='all'):
    return get_microstate_index_from_microstate(ktn, indices=indices)

def get_name_from_microstate(ktn, indices='all'):
    return get_microstate_name_from_microstate(ktn, indices=indices)

def get_weight_from_microstate(ktn, indices='all'):
    return get_microstate_weight_from_microstate(ktn, indices=indices)

def get_probability_from_microstate(ktn, indices='all'):
    return get_microstate_probability_from_microstate(ktn, indices=indices)

# "degree" defaults to the out-degree.
def get_degree_from_microstate(ktn, indices='all'):
    return get_microstate_out_degree_from_microstate(ktn, indices=indices)

def get_out_degree_from_microstate(ktn, indices='all'):
    return get_microstate_out_degree_from_microstate(ktn, indices=indices)

def get_in_degree_from_microstate(ktn, indices='all'):
    return get_microstate_in_degree_from_microstate(ktn, indices=indices)

# Delegators over the microstates DataFrame API.

def get_microstate_index_from_microstate(ktn, indices='all'):
    return api_microstates.get_microstate_index_from_microstate(ktn.microstates, indices=indices)

def get_microstate_name_from_microstate(ktn, indices='all'):
    return api_microstates.get_microstate_name_from_microstate(ktn.microstates, indices=indices)

def get_microstate_weight_from_microstate(ktn, indices='all'):
    return api_microstates.get_microstate_weight_from_microstate(ktn.microstates, indices=indices)

def get_microstate_probability_from_microstate(ktn, indices='all'):
    return api_microstates.get_microstate_probability_from_microstate(ktn.microstates, indices=indices)

def get_microstate_degree_from_microstate(ktn, indices='all'):
    return get_microstate_out_degree_from_microstate(ktn, indices=indices)
def get_microstate_out_degree_from_microstate(ktn, indices='all'):
    """Out-degree (number of outgoing transitions) of the selected microstates.

    For ``indices == 'all'`` every microstate's degree is computed in a single
    vectorized pass over the transitions table; otherwise the per-selection
    helper in the transitions API is used.
    """
    # Bug fix: ``indices is 'all'`` compared object identity (fragile, and a
    # SyntaxWarning on modern Python); equality is the intended check.
    if indices == 'all':
        output = np.zeros(ktn.microstates.shape[0], dtype=int)
        aux = ktn.transitions.groupby(by='origin_index')['end_index'].count()
        output[aux.keys()] = aux
    else:
        output = api_transitions.get_microstate_out_degree_from_microstate(ktn.transitions, indices)
    return output
def get_microstate_in_degree_from_microstate(ktn, indices='all'):
    """In-degree (number of incoming transitions) of the selected microstates.

    For ``indices == 'all'`` every microstate's degree is computed in a single
    vectorized pass over the transitions table; otherwise the per-selection
    helper in the transitions API is used.
    """
    # Bug fix: ``indices is 'all'`` compared object identity (fragile, and a
    # SyntaxWarning on modern Python); equality is the intended check.
    if indices == 'all':
        output = np.zeros(ktn.microstates.shape[0], dtype=int)
        aux = ktn.transitions.groupby(by='end_index')['origin_index'].count()
        output[aux.keys()] = aux
    else:
        output = api_transitions.get_microstate_in_degree_from_microstate(ktn.transitions, indices)
    return output
# Further per-microstate delegators over the microstates DataFrame API.

def get_component_index_from_microstate(ktn, indices='all'):
    return api_microstates.get_component_index_from_microstate(ktn.microstates, indices=indices)

def get_basin_index_from_microstate(ktn, indices='all'):
    return api_microstates.get_basin_index_from_microstate(ktn.microstates, indices=indices)

def get_n_microstates_from_microstate(ktn, indices='all'):
    return api_microstates.get_n_microstates_from_microstate(ktn.microstates, indices=indices)
## from transition

# Short aliases over the fully-named ``get_transition_*`` accessors below.
# Bug fix: these used to hard-code ``indices='all'`` in the forwarded call,
# silently ignoring the caller's selection; they now forward ``indices``
# exactly like their ``from_microstate`` counterparts.

def get_index_from_transition(ktn, indices='all'):
    return get_transition_index_from_transition(ktn, indices=indices)

def get_origin_index_from_transition(ktn, indices='all'):
    return get_transition_origin_index_from_transition(ktn, indices=indices)

def get_end_index_from_transition(ktn, indices='all'):
    return get_transition_end_index_from_transition(ktn, indices=indices)

def get_weight_from_transition(ktn, indices='all'):
    return get_transition_weight_from_transition(ktn, indices=indices)

def get_probability_from_transition(ktn, indices='all'):
    return get_transition_probability_from_transition(ktn, indices=indices)

def get_symmetrized_from_transition(ktn, indices='all'):
    return get_transition_symmetrized_from_transition(ktn, indices=indices)
# Delegators over the transitions DataFrame API; ``indices`` selects which
# transitions to read ('all' by default).

def get_transition_index_from_transition(ktn, indices='all'):
    return api_transitions.get_transition_index_from_transition(ktn.transitions, indices=indices)

def get_transition_origin_index_from_transition(ktn, indices='all'):
    return api_transitions.get_origin_index_from_transition(ktn.transitions, indices=indices)

def get_transition_end_index_from_transition(ktn, indices='all'):
    return api_transitions.get_end_index_from_transition(ktn.transitions, indices=indices)

def get_transition_weight_from_transition(ktn, indices='all'):
    return api_transitions.get_transition_weight_from_transition(ktn.transitions, indices=indices)

def get_transition_probability_from_transition(ktn, indices='all'):
    return api_transitions.get_transition_probability_from_transition(ktn.transitions, indices=indices)

def get_transition_symmetrized_from_transition(ktn, indices='all'):
    return api_transitions.get_transition_symmetrized_from_transition(ktn.transitions, indices=indices)
## from network

# Network-level attributes.  ``indices`` is accepted for interface uniformity
# with the other getters but is not used by any of these accessors.

def get_microstate_index_from_network(ktn, indices='all'):
    return api_microstates.get_microstate_index_from_network(ktn.microstates)

def get_microstate_name_from_network(ktn, indices='all'):
    return api_microstates.get_microstate_name_from_network(ktn.microstates)

def get_component_index_from_network(ktn, indices='all'):
    return api_microstates.get_component_index_from_network(ktn.microstates)

def get_basin_index_from_network(ktn, indices='all'):
    return api_microstates.get_basin_index_from_network(ktn.microstates)

def get_symmetrized_from_network(ktn, indices='all'):
    return api_transitions.get_symmetrized_from_network(ktn.transitions)

def get_weight_from_network(ktn, indices='all'):
    return api_transitions.get_weight_from_network(ktn.transitions)

def get_temperature_from_network(ktn, indices='all'):
    return ktn.temperature

def get_time_step_from_network(ktn, indices='all'):
    return ktn.time_step

def get_n_microstates_from_network(ktn, indices='all'):
    return api_microstates.get_n_microstates_from_network(ktn.microstates)

def get_n_transitions_from_network(ktn, indices='all'):
    return api_transitions.get_n_transitions_from_network(ktn.transitions)

def get_n_components_from_network(ktn, indices='all'):
    return api_microstates.get_n_components_from_network(ktn.microstates)

def get_n_basins_from_network(ktn, indices='all'):
    return api_microstates.get_n_basins_from_network(ktn.microstates)

# Name of the API form handling this network ("pandas.KineticTransitionNetwork").
def get_form_from_network(ktn, indices='all'):
    return form_name
|
cltl/voc-missives | src/main/java/utils/naf/BaseEntity.java | package utils.naf;
import utils.common.Span;
import xjc.naf.Entity;
import java.util.Objects;
/**
 * Value object pairing a token-index span with an entity type.
 * Ordering is by span only, while equality and hashing also take the
 * type into account.
 */
public class BaseEntity implements Comparable<BaseEntity> {
    Span indexSpan;
    String type;

    public BaseEntity(Span indexSpan, String type) {
        this.indexSpan = indexSpan;
        this.type = type;
    }

    /** Factory building a BaseEntity from a NAF entity element. */
    public static BaseEntity create(Entity e) {
        return new BaseEntity(NafUnits.indexSpan(e), e.getType());
    }

    public Span getIndexSpan() {
        return indexSpan;
    }

    public String getType() {
        return type;
    }

    public int begin() {
        return indexSpan.getFirstIndex();
    }

    public int end() {
        return indexSpan.getEnd();
    }

    @Override
    public int compareTo(BaseEntity other) {
        // NOTE(review): ordering looks only at the span, so compareTo == 0
        // does not imply equals() -- confirm callers do not rely on
        // consistency with equals (e.g. sorted sets).
        return indexSpan.compareTo(other.getIndexSpan());
    }

    @Override
    public int hashCode() {
        return Objects.hash(indexSpan, type);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || this.getClass() != o.getClass()) {
            return false;
        }
        BaseEntity other = (BaseEntity) o;
        return indexSpan.equals(other.getIndexSpan()) && type.equals(other.getType());
    }
}
|
jenniferlynparsons/cuppa-webpack | client/src/selectors/teaSelectors.js | <gh_stars>1-10
import { createSelector } from "reselect";
// Memoized selector returning the brand of every tea, in teaIDs order.
export const selectBrands = createSelector(
  state => state.teas.teaIDs,
  state => state.teas.allTeas,
  (ids, teasById) => ids.map(teaId => teasById[teaId].brand)
);
|
TinkerBoard-Android/external-strace | chmod.c | #include "defs.h"
/* Print the (path, mode) argument pair shared by the chmod family.
 * `offset` is the index of the pathname in the tracee's syscall argument
 * array; the mode argument that follows it is printed in octal. */
static void
decode_chmod(struct tcb *tcp, const int offset)
{
	printpath(tcp, tcp->u_arg[offset]);
	tprintf(", %#lo", tcp->u_arg[offset + 1]);
}
/* chmod(path, mode): path/mode start at argument index 0. */
SYS_FUNC(chmod)
{
	decode_chmod(tcp, 0);

	return RVAL_DECODED;
}
/* fchmodat(dirfd, path, mode): the directory fd is printed first, then the
 * path/mode pair starting at argument index 1. */
SYS_FUNC(fchmodat)
{
	print_dirfd(tcp, tcp->u_arg[0]);
	decode_chmod(tcp, 1);

	return RVAL_DECODED;
}
/* fchmod(fd, mode): file descriptor, then the mode in octal. */
SYS_FUNC(fchmod)
{
	printfd(tcp, tcp->u_arg[0]);
	tprintf(", %#lo", tcp->u_arg[1]);

	return RVAL_DECODED;
}
|
gspu/Coherent | mwc/romana/relic/b/lib/libm/atan.c | /*
* Compute the inverse tangent function.
* (<NAME>, 17.24)
*/
#include <math.h>
#if EMU87
#include "emumath.h"
#endif
/*
 * Coefficients of the numerator polynomial P(x^2) for the rational
 * approximation  atan(x) ~= x * P(x^2) / Q(x^2)  on [0, 1]
 * (Hart et al., entry 17.24 -- see the file header).
 */
static readonly double tanntab[] ={
	0.12097470017580907217240715e+04,
	0.30310745956115083044212807e+04,
	0.27617198246138834959053784e+04,
	0.11141290728455183546172942e+04,
	0.19257920144815596134742860e+03,
	0.11322159411676465523624500e+02,
	0.97627215917176330369830000e-01
};

/* Coefficients of the matching denominator polynomial Q(x^2). */
static readonly double tanmtab[] ={
	0.12097470017580907287514197e+04,
	0.34343235961975351716547069e+04,
	0.36645449563283749893504796e+04,
	0.18216003392918464941509225e+04,
	0.42307164648090478045242060e+03,
	0.39917884248653798150199900e+02,
	0.10000000000000000000000000e+01
};

/*
 * atan(x): inverse tangent.
 * Range reduction: the sign is stripped (atan is odd) and arguments
 * greater than 1 are folded with atan(x) = pi/2 - atan(1/x), so the
 * rational approximation is only ever evaluated on [0, 1].
 */
double
atan(x)
double x;
{
	double r;
	register int i, s;

	s = 0;			/* set when the argument was negative */
	i = 0;			/* set when the argument was inverted */
	if (x < 0.0) {
		s = 1;
		x = -x;
	}
	if (x > 1.0) {
		i = 1;
		x = 1/x;
	}
	r = x * x;
	/* Evaluate x * P(x^2) / Q(x^2) (both polynomials of degree 6). */
	r = x * (_pol(r, tanntab, 7)/_pol(r, tanmtab, 7));
	if (i)
		r = PI/2.0 - r;	/* undo the 1/x folding */
	if (s)
		r = -r;		/* restore the sign */
	return (r);
}
|
Yang1998/lite-mall-admin | admin/src/main/java/com/yyt/axios/config/RedisConfig.java | package com.yyt.axios.config;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import java.time.Duration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@EnableCaching
@Configuration
public class RedisConfig {

    /**
     * Builds the Spring {@code CacheManager} backed by Redis (Lettuce).
     * Two cache regions are registered: "redis-cache1" with a 1 minute TTL
     * and "redis-cache2" with a 120 second TTL; null values are never cached.
     */
    @Bean
    public CacheManager cacheManager(LettuceConnectionFactory redisConnectionFactory) {
        // Start from the default configuration; it can then be customized.
        RedisCacheConfiguration redisCacheConfig = RedisCacheConfiguration.defaultCacheConfig();
        // Default entry expiry, expressed as a Duration.
        redisCacheConfig = redisCacheConfig.entryTtl(Duration.ofMinutes(1))
                .disableCachingNullValues(); // do not cache null values
        // Initial set of cache region names.
        Set<String> cacheNames = new HashSet<>();
        cacheNames.add("redis-cache1");
        cacheNames.add("redis-cache2");
        // Apply a distinct configuration to each cache region.
        Map<String, RedisCacheConfiguration> configMap = new HashMap<>();
        configMap.put("redis-cache1", redisCacheConfig);
        configMap.put("redis-cache2", redisCacheConfig.entryTtl(Duration.ofSeconds(120)));
        // Build the cache manager with the custom configurations.
        // NOTE: initialCacheNames must be called before
        // withInitialCacheConfigurations -- the call order matters here.
        RedisCacheManager cacheManager = RedisCacheManager.builder(redisConnectionFactory)
                .initialCacheNames(cacheNames)
                .withInitialCacheConfigurations(configMap)
                .build();
        return cacheManager;
    }
}
|
lilinj2000/openonload-201811 | src/driver/linux_net/sfctool.h | /*
** Copyright 2005-2018 Solarflare Communications Inc.
** 7505 Irvine Center Drive, Irvine, CA 92618, USA
** Copyright 2002-2005 Level 5 Networks Inc.
**
** This program is free software; you can redistribute it and/or modify it
** under the terms of version 2 of the GNU General Public License as
** published by the Free Software Foundation.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*/
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_SFCTOOL_H
#define EFX_SFCTOOL_H
#ifdef EFX_USE_KCOMPAT
/* Must come before other headers */
#include "kernel_compat.h"
#endif
/* Forward declaration */
struct efx_nic;
#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_FECPARAM)
/**
 * struct ethtool_fecparam - Ethernet forward error correction (FEC) parameters
 * @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM
 * @active_fec: FEC mode which is active on the port
 * @fec: Bitmask of supported/configured FEC modes
 * @reserved: Reserved for future extensions, e.g. a FEC bypass feature.
 *
 * This mirrors the kernel UAPI definition for kernels that lack it, so the
 * layout must not be changed.
 */
struct ethtool_fecparam {
	__u32	cmd;
	/* bitmask of FEC modes */
	__u32	active_fec;
	__u32	fec;
	__u32	reserved;
};
/**
 * enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration
 * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported
 * @ETHTOOL_FEC_AUTO_BIT: Default/Best FEC mode provided by driver
 * @ETHTOOL_FEC_OFF_BIT: No FEC Mode
 * @ETHTOOL_FEC_RS_BIT: Reed-Solomon Forward Error Detection mode
 * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon Forward Error Detection mode
 *
 * Each bit position corresponds to one of the ETHTOOL_FEC_* flag
 * macros below (flag = 1 << bit).
 */
enum ethtool_fec_config_bits {
	ETHTOOL_FEC_NONE_BIT,
	ETHTOOL_FEC_AUTO_BIT,
	ETHTOOL_FEC_OFF_BIT,
	ETHTOOL_FEC_RS_BIT,
	ETHTOOL_FEC_BASER_BIT,
};
#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
#define ETHTOOL_FEC_AUTO (1 << ETHTOOL_FEC_AUTO_BIT)
#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
#define ETHTOOL_GFECPARAM 0x00000050 /* Get FEC settings */
#define ETHTOOL_SFECPARAM 0x00000051 /* Set FEC settings */
#endif /* !EFX_HAVE_ETHTOOL_FECPARAM */
#if defined(EFX_USE_KCOMPAT) && !defined(EFX_HAVE_ETHTOOL_RXFH_CONTEXT)
/**
 * struct sfctool_rxfh - command to get/set RX flow hash indir or/and hash key.
 * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH
 * @rss_context: RSS context identifier.  Context 0 is the default for normal
 *	traffic; other contexts can be referenced as the destination for RX flow
 *	classification rules.  %ETH_RXFH_CONTEXT_ALLOC is used with command
 *	%ETHTOOL_SRSSH to allocate a new RSS context; on return this field will
 *	contain the ID of the newly allocated context.
 * @indir_size: On entry, the array size of the user buffer for the
 *	indirection table, which may be zero, or (for %ETHTOOL_SRSSH),
 *	%ETH_RXFH_INDIR_NO_CHANGE.  On return from %ETHTOOL_GRSSH,
 *	the array size of the hardware indirection table.
 * @key_size: On entry, the array size of the user buffer for the hash key,
 *	which may be zero.  On return from %ETHTOOL_GRSSH, the size of the
 *	hardware hash key.
 * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
 *	Valid values are one of the %ETH_RSS_HASH_*.
 * @rsvd8: Reserved for future extensions; must be zero.
 * @rsvd32: Reserved for future extensions; must be zero.
 * @rss_config: RX ring/queue index for each hash value i.e., indirection table
 *	of @indir_size __u32 elements, followed by hash key of @key_size
 *	bytes.
 *
 * Local mirror of the kernel's struct ethtool_rxfh, used on kernels
 * without RSS-context support (see the EFX_HAVE_ETHTOOL_RXFH_CONTEXT
 * guard above).
 *
 * For %ETHTOOL_GRSSH, a @indir_size and key_size of zero means that only the
 * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of
 * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested
 * and a @indir_size of zero means the indir table should be reset to default
 * values (if @rss_context == 0) or that the RSS context should be deleted.
 * An hfunc of zero means that hash function setting is not requested.
 */
struct sfctool_rxfh {
	__u32   cmd;
	__u32	rss_context;
	__u32   indir_size;
	__u32   key_size;
	__u8	hfunc;
	__u8	rsvd8[3];
	__u32	rsvd32;
	__u32   rss_config[0];
};
#define ETH_RXFH_CONTEXT_ALLOC 0xffffffff
#ifndef ETH_RXFH_INDIR_NO_CHANGE
#define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff
#endif
#ifndef ETHTOOL_GRSSH
#define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */
#define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */
#endif
#endif /* !EFX_HAVE_ETHTOOL_RXFH_CONTEXT */
int efx_sfctool(struct efx_nic *efx, u32 cmd, void __user *data);
#endif /* EFX_SFCTOOL_H */
|
jainsakshi2395/linux | drivers/infiniband/hw/irdma/i40iw_hw.c | <filename>drivers/infiniband/hw/irdma/i40iw_hw.c<gh_stars>1-10
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "i40iw_hw.h"
#include "status.h"
#include "protos.h"
/* Register addresses for each generic irdma register index
 * (IRDMA_*), as laid out on the X722 (i40iw) hardware.
 */
static u32 i40iw_regs[IRDMA_MAX_REGS] = {
	I40E_PFPE_CQPTAIL,
	I40E_PFPE_CQPDB,
	I40E_PFPE_CCQPSTATUS,
	I40E_PFPE_CCQPHIGH,
	I40E_PFPE_CCQPLOW,
	I40E_PFPE_CQARM,
	I40E_PFPE_CQACK,
	I40E_PFPE_AEQALLOC,
	I40E_PFPE_CQPERRCODES,
	I40E_PFPE_WQEALLOC,
	I40E_PFINT_DYN_CTLN(0),
	I40IW_DB_ADDR_OFFSET,
	I40E_GLPCI_LBARCTRL,
	I40E_GLPE_CPUSTATUS0,
	I40E_GLPE_CPUSTATUS1,
	I40E_GLPE_CPUSTATUS2,
	I40E_PFINT_AEQCTL,
	I40E_PFINT_CEQCTL(0),
	I40E_VSIQF_CTL(0),
	I40E_PFHMC_PDINV,
	I40E_GLHMC_VFPDINV(0),
	I40E_GLPE_CRITERR,
	0xffffffff      /* PFINT_RATEN not used in FPK */
};

/* Offsets of the 32-bit HW statistics registers, indexed by the
 * generic IRDMA_HW_STAT_INDEX_*_32 enumeration.
 */
static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
	I40E_GLPES_PFIP4RXDISCARD(0),
	I40E_GLPES_PFIP4RXTRUNC(0),
	I40E_GLPES_PFIP4TXNOROUTE(0),
	I40E_GLPES_PFIP6RXDISCARD(0),
	I40E_GLPES_PFIP6RXTRUNC(0),
	I40E_GLPES_PFIP6TXNOROUTE(0),
	I40E_GLPES_PFTCPRTXSEG(0),
	I40E_GLPES_PFTCPRXOPTERR(0),
	I40E_GLPES_PFTCPRXPROTOERR(0),
	I40E_GLPES_PFRXVLANERR(0)
};

/* Offsets of the (low halves of the) 64-bit HW statistics registers,
 * indexed by the generic IRDMA_HW_STAT_INDEX_*_64 enumeration.
 */
static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = {
	I40E_GLPES_PFIP4RXOCTSLO(0),
	I40E_GLPES_PFIP4RXPKTSLO(0),
	I40E_GLPES_PFIP4RXFRAGSLO(0),
	I40E_GLPES_PFIP4RXMCPKTSLO(0),
	I40E_GLPES_PFIP4TXOCTSLO(0),
	I40E_GLPES_PFIP4TXPKTSLO(0),
	I40E_GLPES_PFIP4TXFRAGSLO(0),
	I40E_GLPES_PFIP4TXMCPKTSLO(0),
	I40E_GLPES_PFIP6RXOCTSLO(0),
	I40E_GLPES_PFIP6RXPKTSLO(0),
	I40E_GLPES_PFIP6RXFRAGSLO(0),
	I40E_GLPES_PFIP6RXMCPKTSLO(0),
	I40E_GLPES_PFIP6TXOCTSLO(0),
	I40E_GLPES_PFIP6TXPKTSLO(0),
	I40E_GLPES_PFIP6TXFRAGSLO(0),
	I40E_GLPES_PFIP6TXMCPKTSLO(0),
	I40E_GLPES_PFTCPRXSEGSLO(0),
	I40E_GLPES_PFTCPTXSEGLO(0),
	I40E_GLPES_PFRDMARXRDSLO(0),
	I40E_GLPES_PFRDMARXSNDSLO(0),
	I40E_GLPES_PFRDMARXWRSLO(0),
	I40E_GLPES_PFRDMATXRDSLO(0),
	I40E_GLPES_PFRDMATXSNDSLO(0),
	I40E_GLPES_PFRDMATXWRSLO(0),
	I40E_GLPES_PFRDMAVBNDLO(0),
	I40E_GLPES_PFRDMAVINVLO(0),
	I40E_GLPES_PFIP4RXMCOCTSLO(0),
	I40E_GLPES_PFIP4TXMCOCTSLO(0),
	I40E_GLPES_PFIP6RXMCOCTSLO(0),
	I40E_GLPES_PFIP6TXMCOCTSLO(0),
	I40E_GLPES_PFUDPRXPKTSLO(0),
	I40E_GLPES_PFUDPTXPKTSLO(0)
};

/* Field masks for each generic IRDMA_MAX_MASKS slot; must stay in the
 * same order as i40iw_shifts below.
 */
static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
	I40E_PFPE_CCQPSTATUS_CCQP_DONE,
	I40E_PFPE_CCQPSTATUS_CCQP_ERR,
	I40E_CQPSQ_STAG_PDID,
	I40E_CQPSQ_CQ_CEQID,
	I40E_CQPSQ_CQ_CQID,
	I40E_COMMIT_FPM_CQCNT,
};

/* Bit shifts matching the masks in i40iw_masks, one per
 * IRDMA_MAX_SHIFTS slot.
 */
static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
	I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
	I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
	I40E_CQPSQ_STAG_PDID_S,
	I40E_CQPSQ_CQ_CEQID_S,
	I40E_CQPSQ_CQ_CQID_S,
	I40E_COMMIT_FPM_CQCNT_S,
};
/**
* i40iw_config_ceq- Configure CEQ interrupt
* @dev: pointer to the device structure
* @ceq_id: Completion Event Queue ID
* @idx: vector index
* @enable: Enable CEQ interrupt when true
*/
static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			     bool enable)
{
	u32 val;

	/* Link the CEQ into the interrupt linked list for this vector. */
	val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
	      FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
	wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), val);

	/* Program the dynamic control register: INTENA plus ITR index 3. */
	val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
	      FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
	wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), val);

	/* Finally program the per-CEQ control register itself. */
	val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
	      FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
	      FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
	      FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);
	wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, val);
}
/**
* i40iw_ena_irq - Enable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
	u32 reg_val;

	/* Set INTENA and CLEARPBA with ITR index 3 for vector idx. */
	reg_val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
		  FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
		  FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
	wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), reg_val);
}
/**
* i40iw_disable_irq - Disable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
	u32 reg = i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1);

	/* Writing zero clears every control bit, including INTENA. */
	wr32(dev->hw, reg, 0);
}
/* X722-specific interrupt operation dispatch table.  AEQ configuration
 * reuses the generic irdma_cfg_aeq helper; the remaining entries point
 * to the i40iw-specific implementations above.
 */
static const struct irdma_irq_ops i40iw_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = i40iw_config_ceq,
	.irdma_dis_irq = i40iw_disable_irq,
	.irdma_en_irq = i40iw_ena_irq,
};
/**
 * i40iw_init_hw - fill in the X722-specific portions of the device struct
 * @dev: pointer to the device structure being initialized
 *
 * Populates register pointers, statistics offsets, field shifts/masks,
 * doorbell pointers and hardware capability limits for X722 (i40iw).
 */
void i40iw_init_hw(struct irdma_sc_dev *dev)
{
	int i;
	u8 __iomem *hw_addr;

	/* Translate register offsets into mapped addresses; the doorbell
	 * slot is special-cased with a NULL base.
	 */
	for (i = 0; i < IRDMA_MAX_REGS; ++i) {
		hw_addr = dev->hw->hw_addr;
		if (i == IRDMA_DB_ADDR_OFFSET)
			hw_addr = NULL;
		dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
	}

	/* Statistics register offsets (32- and 64-bit counters). */
	for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)
		dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];
	for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)
		dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i];
	dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
	dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;

	/* Generic field shifts and masks used by the shared code. */
	for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
		dev->hw_shifts[i] = i40iw_shifts[i];
	for (i = 0; i < IRDMA_MAX_MASKS; ++i)
		dev->hw_masks[i] = i40iw_masks[i];

	/* Doorbell register shortcuts; ITR mask doorbells do not exist
	 * on this hardware generation.
	 */
	dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
	dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
	dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
	dev->ceq_itr_mask_db = NULL;
	dev->aeq_itr_mask_db = NULL;
	dev->irq_ops = &i40iw_irq_ops;

	/* Setup the hardware limits, hmc may limit further */
	dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
	dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
	dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
	dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
	dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
	dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
	dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
	dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
	dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
	dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
}
|
icgw/LeetCode | LeetCode/C++/1448._Count_Good_Nodes_in_Binary_Tree/solution.h | <gh_stars>1-10
/*
* solution.h
* Copyright (C) 2021 <NAME> <<EMAIL>>
*
* Distributed under terms of the Apache license.
*/
#ifndef _SOLUTION_H_
#define _SOLUTION_H_
#include <numeric>
using std::numeric_limits;
#include <algorithm>
using std::max;
#include "../data_structures.hpp"
class Solution {
private:
int goodNodes(TreeNode* root, int value) {
if (root == nullptr) {
return 0;
}
int nextVal = max(root->val, value);
return (root->val >= value ? 1 : 0) + goodNodes(root->left, nextVal) + goodNodes(root->right, nextVal);
}
public:
int goodNodes(TreeNode* root) {
return goodNodes(root, numeric_limits<int>::min());
}
};
#endif /* !_SOLUTION_H_ */
|
mycolab/ncbi-blast | blast/src/objtools/blast/seqdb_reader/seqdbbitset.cpp | <filename>blast/src/objtools/blast/seqdb_reader/seqdbbitset.cpp
/* $Id: seqdbbitset.cpp 631513 2021-05-19 13:48:10Z ivanov $
* ===========================================================================
*
* PUBLIC DOMAIN NOTICE
* National Center for Biotechnology Information
*
* This software/database is a "United States Government Work" under the
* terms of the United States Copyright Act. It was written as part of
* the author's official duties as a United States Government employee and
* thus cannot be copyrighted. This software/database is freely available
* to the public for use. The National Library of Medicine and the U.S.
* Government have not placed any restriction on its use or reproduction.
*
* Although all reasonable efforts have been taken to ensure the accuracy
* and reliability of the software and data, the NLM and the U.S.
* Government do not and cannot warrant the performance or results that
* may be obtained by using this software or data. The NLM and the U.S.
* Government disclaim all warranties, express or implied, including
* warranties of performance, merchantability or fitness for any particular
* purpose.
*
* Please cite the author in any work or product based on this material.
*
* ===========================================================================
*
* Author: <NAME>
*
*/
/// @file seqdbbitset.cpp
/// Implementation for the CSeqDB_BitSet class, a bit vector.
#include <ncbi_pch.hpp>
#include "seqdbbitset.hpp"
BEGIN_NCBI_SCOPE
/// Construct a bitset covering [start, end), initializing its contents
/// from the byte range [p1, p2).  Any bits beyond the supplied data
/// remain zero (x_Alloc() zero-fills the storage).
CSeqDB_BitSet::CSeqDB_BitSet(size_t start,
                             size_t end,
                             const TByte * p1,
                             const TByte * p2)
    : m_Start (start),
      m_End   (end),
      m_Special(eNone)
{
    _ASSERT(TByte(0) < (TByte(-1))); // must be unsigned
    // Allocation is guaranteed to zero out the bit memory.
    x_Alloc(end-start);
    // Copy no more than the caller supplied.  (The original computed
    // this minimum with an O(n) decrement loop.)
    size_t bytes = std::min(m_Bits.size(), size_t(p2 - p1));
    _ASSERT((eWordBits*m_Bits.size()) >= (bytes*8));
    memcpy(& m_Bits[0], p1, bytes);
}
void CSeqDB_BitSet::SetBit(size_t index)
{
    _ASSERT(m_Special == eNone);
    _ASSERT(index >= m_Start);
    _ASSERT(index < m_End);
    // Translate the absolute index into a byte offset plus a bit
    // position within that byte (most significant bit first).
    size_t off = index - m_Start;
    size_t byte_idx = off >> eWordShift;
    int bit_idx = off & eWordMask;
    _ASSERT(m_Bits.size() > byte_idx);
    m_Bits[byte_idx] |= TByte(0x80 >> bit_idx);
}
void CSeqDB_BitSet::ClearBit(size_t index)
{
    _ASSERT(m_Special == eNone);
    _ASSERT(index >= m_Start);
    _ASSERT(index < m_End);
    // Locate the containing byte and mask off the targeted bit
    // (most significant bit first).
    size_t off = index - m_Start;
    size_t byte_idx = off >> eWordShift;
    int bit_idx = off & eWordMask;
    _ASSERT(m_Bits.size() > byte_idx);
    m_Bits[byte_idx] &= ~(TByte(0x80 >> bit_idx));
}
// Find the first set bit at or after *index*, updating `index` in
// place.  Returns true if such a bit exists, false otherwise.  For
// all-set / all-clear bitsets the answer is immediate; otherwise the
// scan first skips whole zero bytes, then tests bit by bit.
bool CSeqDB_BitSet::CheckOrFindBit(size_t & index) const
{
    // Clamp the starting point into this bitset's range.
    if (index < m_Start)
        index = m_Start;
    if (index >= m_End)
        return false;
    if (m_Special == eAllSet) {
        return true;
    }
    if (m_Special == eAllClear) {
        return false;
    }
    size_t nwords = m_Bits.size();
    size_t ix = index - m_Start;
    size_t vx = ix >> eWordShift;
    size_t vx0 = vx;
    // Fast path: skip bytes that are entirely zero.
    while(vx < nwords && ! m_Bits[vx]) {
        vx ++;
    }
    // If bytes were skipped, restart the bit scan at the first
    // non-zero byte.
    if (vx != vx0) {
        ix = (vx << eWordShift);
    }
    _ASSERT((ix + m_Start) >= index);
    size_t bitcount = m_End - m_Start;
    // Slow path: scan bit by bit within the non-zero byte.
    while(ix < bitcount) {
        vx = ix >> eWordShift;
        int wx = ix & eWordMask;
        _ASSERT(nwords > vx);
        if (m_Bits[vx] & (TByte(0x80) >> wx))
            break;
        ix ++;
    }
    if (ix < bitcount) {
        index = (ix + m_Start);
        return true;
    }
    return false;
}
// Compute the union of this bitset with `other`, storing the result
// here.  When `consume` is true, `other` may be emptied (its storage
// stolen) as an optimization.  Special-mode bitsets (all-set /
// all-clear) are handled without touching bit storage where possible.
void CSeqDB_BitSet::UnionWith(CSeqDB_BitSet & other, bool consume)
{
    if (other.m_Special == eAllClear) {
        // Nothing to do.
        return;
    }
    if (m_Special == eAllClear) {
        // Result is just 'other'.
        x_Copy(other, consume);
        return;
    }
    // Our all-1s mask covers the other.
    if (other.m_Start >= m_Start &&
        other.m_End <= m_End &&
        m_Special == eAllSet) {
        return;
    }
    // The other all-1s mask covers ours.
    if (other.m_Start <= m_Start &&
        other.m_End >= m_End &&
        other.m_Special == eAllSet) {
        // Copy is probably better than swap here.
        x_Copy(other, consume);
        return;
    }
    // General case: widen this bitset to cover both ranges and switch
    // to explicit bit storage, then merge the other side's bits in.
    // Adjust the range if needed; convert special cases to eNone.
    x_Normalize(other.m_Start, other.m_End);
    switch(other.m_Special) {
    case eAllSet:
        AssignBitRange(other.m_Start, other.m_End, true);
        break;
    case eNone:
        x_CopyBits(other);
        break;
    case eAllClear:
        _ASSERT(false);
    }
}
// Compute the intersection of this bitset with `other`, storing the
// result here.  When `consume` is true, `other` may be emptied.  The
// method special-cases all-clear and all-set operands, uses a fast
// word-wise AND when both sides share layout, and falls back to a
// bit-by-bit scan otherwise.
void CSeqDB_BitSet::IntersectWith(CSeqDB_BitSet & other, bool consume)
{
    // All clear cases
    if (m_Special == eAllClear) {
        return;
    }
    if (other.m_Special == eAllClear) {
        x_Copy(other, consume);
        return;
    }
    // All set cases.
    if (m_Special == eAllSet && other.m_Special == eAllSet) {
        // Intersection of two all-1s ranges is the range overlap.
        size_t start = std::max(m_Start, other.m_Start);
        size_t end = std::min(m_End, other.m_End);
        if (start >= end) {
            // The intersected ranges don't overlap.
            m_Special = eAllClear;
        } else {
            m_Start = start;
            m_End = end;
        }
        return;
    }
    if (other.m_Special == eAllSet || m_Special == eAllSet) {
        // Exactly one side is an all-1s range: keep the bit-backed
        // side and clear whatever lies outside the all-1s range.
        CSeqDB_BitSet result;
        CSeqDB_BitSet range;
        if (m_Special == eAllSet) {
            result.x_Copy(other, consume);
            range.x_Copy(*this, true);
        } else {
            Swap(result);
            range.x_Copy(other, consume);
        }
        if (result.m_Start < range.m_Start)
            result.AssignBitRange(result.m_Start, range.m_Start, false);
        if (result.m_End > range.m_End)
            result.AssignBitRange(range.m_End, result.m_End, false);
        Swap(result);
        return;
    }
    // Fast path: identical start offset and storage size permit a
    // direct word-wise AND of the two byte arrays.
    if ((m_Start == other.m_Start) &&
        (m_Bits.size() == other.m_Bits.size()) &&
        (m_Special == eNone) &&
        (other.m_Special == eNone)) {
        size_t i = 0;
        size_t end1 = (m_Bits.size() / sizeof(int)) * sizeof(int);
        size_t end2 = m_Bits.size();
        // [ The first while() is only needed in the case of unaligned
        //   large-character-array allocation, which probably never
        //   happens in practice. ]
        while(i != end2 && (i & (sizeof(int)-1))) {
            unsigned char * dst = & m_Bits[i];
            unsigned char * src = & other.m_Bits[i];
            *dst &= *src;
            i ++;
        }
        // AND the bulk of the data one machine int at a time.
        while(i != end1) {
            int * dst = (int*)(& m_Bits[i]);
            int * src = (int*)(& other.m_Bits[i]);
            *dst &= *src;
            i += sizeof(int);
        }
        // Finish any trailing bytes one at a time.
        while(i != end2) {
            unsigned char * dst = & m_Bits[i];
            unsigned char * src = & other.m_Bits[i];
            *dst &= *src;
            i ++;
        }
        return;
    }
    // Intersection between unaligned or differently size bit sets.
    // Some of these cases could be split off but this is currently
    // not likely to happen in production code.
    for(size_t i=0; CheckOrFindBit(i); i++) {
        if (! other.CheckOrFindBit(i)) {
            ClearBit(i);
        }
    }
}
void CSeqDB_BitSet::x_CopyBits(const CSeqDB_BitSet & src, size_t start, size_t end)
{
for(size_t i = start; src.CheckOrFindBit(i) && i < end; i++) {
SetBit(i);
}
}
void CSeqDB_BitSet::x_CopyBits(const CSeqDB_BitSet & src)
{
for(size_t i=0; src.CheckOrFindBit(i); i++) {
SetBit(i);
}
}
// Ensure this bitset has explicit bit storage (eNone) and that its
// range covers at least [start, end), rebuilding the storage when
// either condition fails.
void CSeqDB_BitSet::x_Normalize(size_t start, size_t end)
{
    // Note: the "range change" paths are unlikely to be active for
    // SeqDB, and could be improved (i.e. this is not the efficient
    // way to move a range of bits).
    if (m_Start > start || m_End < end || m_Special != eNone) {
        // Build a fresh bitset over the union of the ranges, then
        // swap so `dup` holds the old state.
        CSeqDB_BitSet dup(std::min(m_Start, start),
                          std::max(m_End, end));
        Swap(dup);
        // NOTE(review): this switches on the post-swap m_Special (the
        // freshly constructed bitset's mode), not dup.m_Special (the
        // old state) -- confirm this is the intended behavior.
        switch(m_Special) {
        case eAllClear:
            m_Special = eNone;
            break;
        case eAllSet:
            AssignBitRange(m_Start, m_End, true);
            m_Special = eNone;
            break;
        case eNone:
            // x_CopyBits() honors dup's special mode via
            // CheckOrFindBit(), so the old bits are merged back in.
            x_CopyBits(dup);
            break;
        }
    }
}
void CSeqDB_BitSet::x_Copy(CSeqDB_BitSet & other, bool consume)
{
    // When the caller allows consuming `other` and it has explicit bit
    // storage, stealing the storage via Swap avoids copying the vector.
    if (consume && other.m_Special == eNone) {
        Swap(other);
        return;
    }
    m_Start   = other.m_Start;
    m_End     = other.m_End;
    m_Special = other.m_Special;
    m_Bits    = other.m_Bits;
}
bool CSeqDB_BitSet::GetBit(size_t index) const
{
    // All-set / all-clear bitsets answer without consulting storage.
    if (m_Special == eAllSet)
        return true;
    if (m_Special == eAllClear)
        return false;
    _ASSERT(index >= m_Start);
    _ASSERT(index < m_End);
    // Locate the containing byte and test the bit (MSB-first).
    size_t off = index - m_Start;
    size_t byte_idx = off >> eWordShift;
    int bit_idx = off & eWordMask;
    _ASSERT(m_Bits.size() > byte_idx);
    return (m_Bits[byte_idx] & (TByte(0x80) >> bit_idx)) != 0;
}
void CSeqDB_BitSet::Swap(CSeqDB_BitSet & other)
{
    // Exchange all members; the vector contents swap in O(1).
    std::swap(m_Special, other.m_Special);
    std::swap(m_Start, other.m_Start);
    std::swap(m_End, other.m_End);
    m_Bits.swap(other.m_Bits);
}
// Assign `value` to every bit in [start, end).  Small ranges are
// handled bit by bit; larger ranges align to a byte boundary, memset
// the whole bytes, then finish the tail bit by bit.
void CSeqDB_BitSet::AssignBitRange(size_t start, size_t end, bool value)
{
    _ASSERT(start >= m_Start && end <= m_End);
    if ((start + eWordBits*3) > end) {
        // Short range: per-bit assignment is cheap enough.
        for(size_t i = start; i < end; i++) {
            AssignBit(i, value);
        }
        return;
    } else {
        size_t i = start - m_Start;
        size_t e = end - m_Start;
        // Assign leading bits until the index is byte aligned.
        while(i & eWordMask) {
            AssignBit(i + m_Start, value);
            i++;
        }
        size_t vx = i >> eWordShift,
            evx = e >> eWordShift;
        // Fill all whole bytes in one memset.
        char mask = value ? 0xFF : 0;
        memset(& m_Bits[vx], mask, evx-vx);
        // Resume after the memset-covered region.  (The original
        // restarted at vx, redundantly re-assigning every bit the
        // memset had already written.)
        i = evx << eWordShift;
        // Assign the trailing bits of the final partial byte.
        while(i < e) {
            AssignBit(i + m_Start, value);
            i++;
        }
    }
}
void CSeqDB_BitSet::AssignBit(size_t i, bool value)
{
    // Dispatch to the matching single-bit mutator.
    if (! value) {
        ClearBit(i);
    } else {
        SetBit(i);
    }
}
void CSeqDB_BitSet::Normalize()
{
    // Already in explicit-storage form; nothing to convert.
    if (m_Special == eNone)
        return;
    x_Normalize(m_Start, m_End);
}
// Standard NCBI diagnostic dump: report this bitset's mode, range,
// and backing storage size under its own frame.
void CSeqDB_BitSet::DebugDump(CDebugDumpContext ddc, unsigned int depth) const
{
    ddc.SetFrame("CSeqDB_BitSet");
    CObject::DebugDump(ddc, depth);
    ddc.Log("m_Special", m_Special);
    ddc.Log("m_Start", m_Start);
    ddc.Log("m_End", m_End);
    ddc.Log("m_Bits.size", m_Bits.size());
}
END_NCBI_SCOPE
|
RevenueScotland/sets-online-portal | app/models/user_validation.rb | <reponame>RevenueScotland/sets-online-portal
# frozen_string_literal: true
# User validation methods.
# Split out into a separate class to keep User class small/keep Rubocop happy.
module UserValidation
  extend ActiveSupport::Concern

  included do
    # Validations are scoped with `on:` contexts so that only the fields
    # relevant to a given action (login, save, password change, etc.)
    # are checked.
    validates :username, length: { maximum: 30 }, presence: true, on: %i[update update_password login two_factor]
    validates :new_username, length: { minimum: 5, maximum: 30 }, on: %i[save new_username]
    validates :password, presence: true, length: { maximum: 200 }, on: %i[login update_memorable_word]
    validates :old_password, presence: true, length: { maximum: 200 }, on: :update_password
    validates :new_password, presence: true, length: { maximum: 200 }, on: %i[save update_password new_password]
    validates :new_password, confirmation: true
    # Memorable question/answer are only required when at least one of
    # them has been provided (see #check_word_or_hint_is_set?).
    validates :memorable_question, presence: true, length: { maximum: 100 }, on: :update_memorable_word,
                                   if: :check_word_or_hint_is_set?
    validates :memorable_answer, presence: true, length: { maximum: 100 }, on: :update_memorable_word,
                                 if: :check_word_or_hint_is_set?
    validates :token, presence: true, length: { maximum: 100 }, on: :two_factor
    validates :email_address, presence: true, email_address: true, on: %i[save update email_check]
    validates :email_address, confirmation: true, on: %i[save update email_check]
    validates :phone_number, phone_number: true, on: %i[save update]
    validates :forename, presence: true, length: { maximum: 50 }, on: %i[save update forename]
    validates :surname, presence: true, length: { maximum: 100 }, on: %i[save update surname]
    validates :user_is_current, presence: true, format: /\A(Y|N)\z/i, on: %i[save update]
    validates :user_is_signed_ta_cs, acceptance: { accept: ['Y'] }, on: :confirm_tcs
  end

  # Check password is expired or not.
  # @return [Boolean] true when an expiry figure exists and is <= 0
  def check_password_expired?
    !days_to_password_expiry.nil? && days_to_password_expiry <= 0
  end

  # Check memorable hint or word is required to be filled or not.
  # Returns false only when both answer and question are blank.
  def check_word_or_hint_is_set?
    return false if memorable_answer.blank? && memorable_question.blank?

    true
  end

  # If password_change_required parameter from back-office is true or password is expired
  # then the user needs to reset password to access the application
  def check_password_change_required?
    password_change_required || check_password_expired?
  end

  # Check if the user needs to read the terms and conditions again.
  # Anything other than 'Y' counts as not signed.
  def check_tcs_required?
    user_is_signed_ta_cs != 'Y'
  end

  # Return number of days remaining for password to expire.
  # Returns nil (rather than a number) when the remaining days are at or
  # beyond the configured password_due_period, i.e. no warning is needed.
  def days_to_password_expiry
    no_of_days_remaining = (password_expiry_date.to_date - Time.zone.today).to_i
    no_of_days_remaining.to_i unless no_of_days_remaining >= Rails.configuration.x.authentication.password_due_period
  end
end
|
dennisfabri/alphatimer | alphatimer-ares-serial-api/src/main/java/org/lisasp/alphatimer/api/ares/serial/events/BytesInputEvent.java | package org.lisasp.alphatimer.api.ares.serial.events;
import lombok.RequiredArgsConstructor;
import lombok.Value;
import org.lisasp.alphatimer.api.ares.serial.Characters;
import java.time.LocalDateTime;
@Value
@RequiredArgsConstructor
public class BytesInputEvent implements DataInputEvent {

    private final LocalDateTime timestamp;
    private final String competition;
    private final byte[] data;

    /**
     * A valid message must be at least three bytes long, begin with the
     * SOH (start of header) byte and end with the EOT byte.
     */
    public boolean checkIfMessage() {
        return hasMinimumLength() && hasStartMarker() && hasEndMarker();
    }

    private boolean hasMinimumLength() {
        return data.length > 2;
    }

    private boolean hasStartMarker() {
        return data[0] == Characters.SOH_StartOfHeader;
    }

    private boolean hasEndMarker() {
        return data[data.length - 1] == Characters.EOT_EndOfText;
    }
}
|
lguohan/SDKLT | src/bcmport/main/bcmport_imm.c | /*! \file bcmport_imm.c
*
* BCMPORT interface to in-memory table.
*/
/*
* Copyright: (c) 2018 Broadcom. All Rights Reserved. "Broadcom" refers to
* Broadcom Limited and/or its subsidiaries.
*
* Broadcom Switch Software License
*
* This license governs the use of the accompanying Broadcom software. Your
* use of the software indicates your acceptance of the terms and conditions
* of this license. If you do not agree to the terms and conditions of this
* license, do not use the software.
* 1. Definitions
* "Licensor" means any person or entity that distributes its Work.
* "Software" means the original work of authorship made available under
* this license.
* "Work" means the Software and any additions to or derivative works of
* the Software that are made available under this license.
* The terms "reproduce," "reproduction," "derivative works," and
* "distribution" have the meaning as provided under U.S. copyright law.
* Works, including the Software, are "made available" under this license
* by including in or with the Work either (a) a copyright notice
* referencing the applicability of this license to the Work, or (b) a copy
* of this license.
* 2. Grant of Copyright License
* Subject to the terms and conditions of this license, each Licensor
* grants to you a perpetual, worldwide, non-exclusive, and royalty-free
* copyright license to reproduce, prepare derivative works of, publicly
* display, publicly perform, sublicense and distribute its Work and any
* resulting derivative works in any form.
* 3. Grant of Patent License
* Subject to the terms and conditions of this license, each Licensor
* grants to you a perpetual, worldwide, non-exclusive, and royalty-free
* patent license to make, have made, use, offer to sell, sell, import, and
* otherwise transfer its Work, in whole or in part. This patent license
* applies only to the patent claims licensable by Licensor that would be
* infringed by Licensor's Work (or portion thereof) individually and
* excluding any combinations with any other materials or technology.
* If you institute patent litigation against any Licensor (including a
* cross-claim or counterclaim in a lawsuit) to enforce any patents that
* you allege are infringed by any Work, then your patent license from such
* Licensor to the Work shall terminate as of the date such litigation is
* filed.
* 4. Redistribution
* You may reproduce or distribute the Work only if (a) you do so under
* this License, (b) you include a complete copy of this License with your
* distribution, and (c) you retain without modification any copyright,
* patent, trademark, or attribution notices that are present in the Work.
* 5. Derivative Works
* You may specify that additional or different terms apply to the use,
* reproduction, and distribution of your derivative works of the Work
* ("Your Terms") only if (a) Your Terms provide that the limitations of
* Section 7 apply to your derivative works, and (b) you identify the
* specific derivative works that are subject to Your Terms.
* Notwithstanding Your Terms, this license (including the redistribution
* requirements in Section 4) will continue to apply to the Work itself.
* 6. Trademarks
* This license does not grant any rights to use any Licensor's or its
* affiliates' names, logos, or trademarks, except as necessary to
* reproduce the notices described in this license.
* 7. Limitations
* Platform. The Work and any derivative works thereof may only be used, or
* intended for use, with a Broadcom switch integrated circuit.
* No Reverse Engineering. You will not use the Work to disassemble,
* reverse engineer, decompile, or attempt to ascertain the underlying
* technology of a Broadcom switch integrated circuit.
* 8. Termination
* If you violate any term of this license, then your rights under this
* license (including the license grants of Sections 2 and 3) will
* terminate immediately.
* 9. Disclaimer of Warranty
* THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
* NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
* THIS LICENSE. SOME STATES' CONSUMER LAWS DO NOT ALLOW EXCLUSION OF AN
* IMPLIED WARRANTY, SO THIS DISCLAIMER MAY NOT APPLY TO YOU.
* 10. Limitation of Liability
* EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
* THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
* SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
* INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
* OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
* (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
* LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
* COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <shr/shr_error.h>
#include <shr/shr_debug.h>
#include <shr/shr_bitop.h>
#include <bcmltd/chip/bcmltd_id.h>
#include <bcmlrd/bcmlrd_map.h>
#include <bcmimm/bcmimm_int_comp.h>
#include "bcmport_internal.h"
#include <bcmport/bcmport_util.h>
#include <bcmlrd/bcmlrd_table.h>
#include <bcmlrd/bcmlrd_client.h>
/*******************************************************************************
* Local definitions
*/
/* BSL Module */
#define BSL_LOG_MODULE BSL_LS_BCMPORT_IMM
/*! PORT fields array lmm handler. */
static shr_famm_hdl_t port_fld_arr_hdl;
/*******************************************************************************
* Private functions
*/
/*!
* \brief imm PORT_ING_MIRRORt notification input fields parsing.
*
* Parse imm PORT_ING_MIRRORt input fields.
*
* \param [in] unit Unit number.
* \param [in] key IMM input key field array.
* \param [in] data IMM input data field array.
* \param [out] imirror Port ingress mirror data buffer.
*
* \retval SHR_E_NONE No errors.
* \retval SHR_E_FAIL Faild to convert \c key \c data to \c ltcfg.
*/
static int
port_imirror_lt_fields_parse(int unit,
                             const bcmltd_field_t *key,
                             const bcmltd_field_t *data,
                             port_imirror_t *imirror)
{
    const bcmltd_field_t *gen_field;
    uint32_t fid;
    uint64_t fval;
    SHR_FUNC_ENTER(unit);
    /* Start from a clean structure; fbmp records which fields were
     * actually present in the input lists.
     */
    sal_memset(imirror, 0, sizeof(*imirror));
    /* Parse key field */
    gen_field = key;
    while (gen_field) {
        fid = gen_field->id;
        fval = gen_field->data;
        switch (fid) {
        case PORT_ING_MIRRORt_PORT_IDf:
            imirror->port = fval;
            SHR_BITSET(imirror->fbmp, fid);
            break;
        case PORT_ING_MIRRORt_MIRROR_INSTANCE_IDf:
            imirror->instance_id = fval;
            SHR_BITSET(imirror->fbmp, fid);
            break;
        default:
            /* Unknown key field: reject the whole entry. */
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        gen_field = gen_field->next;
    }
    /* Parse data field */
    gen_field = data;
    while (gen_field) {
        fid = gen_field->id;
        fval = gen_field->data;
        switch (fid) {
        case PORT_ING_MIRRORt_MIRROR_ENABLEf:
            imirror->enable = fval;
            SHR_BITSET(imirror->fbmp, fid);
            break;
        default:
            /* Unknown data field: reject the whole entry. */
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        gen_field = gen_field->next;
    }
exit:
    SHR_FUNC_EXIT();
}
/*!
* \brief PORT_ING_MIRROR IMM table change callback function for staging.
*
* Handle PORT_ING_MIRROR IMM table change events.
*
* \param [in] unit Unit number.
* \param [in] sid This is the logical table ID.
* \param [in] trans_id is the transaction ID associated with this operation.
* \param [in] event_reason This is the reason for the entry event.
* \param [in] key This is a linked list of the key fields identifying
* the entry.
* \param [in] data This is a linked list of the data fields in the
* modified entry.
* \param [in] context Is a pointer that was given during registration.
* The callback can use this pointer to retrieve some context.
*
* \retval SHR_E_NONE No errors.
* \retval SHR_E_FAIL Fails to handle LT change events.
*/
static int
port_ing_mirror_imm_stage_callback(int unit,
                                   bcmltd_sid_t sid,
                                   uint32_t trans_id,
                                   bcmimm_entry_event_t event_reason,
                                   const bcmltd_field_t *key,
                                   const bcmltd_field_t *data,
                                   void *context,
                                   bcmltd_fields_t *output_fields)
{
    port_imirror_t cfg;
    bool m_en = 0;
    SHR_FUNC_ENTER(unit);
    SHR_IF_ERR_VERBOSE_EXIT
        (port_imirror_lt_fields_parse(unit, key, data, &cfg));
    /* This callback produces no read-only output fields. */
    if (output_fields) {
        output_fields->count = 0;
    }
    switch (event_reason) {
    case BCMIMM_ENTRY_INSERT:
    case BCMIMM_ENTRY_UPDATE:
        /* Nothing to program when MIRROR_ENABLE was not supplied. */
        if (!SHR_BITGET(cfg.fbmp, PORT_ING_MIRRORt_MIRROR_ENABLEf)) {
            SHR_RETURN_VAL_EXIT(SHR_E_NONE);
        }
        m_en = cfg.enable;
        break;
    case BCMIMM_ENTRY_DELETE:
        /* m_en stays 0: deleting the entry disables the mirror. */
        break;
    default:
        SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
    }
    /* Apply the ingress mirror enable state to the hardware. */
    SHR_IF_ERR_VERBOSE_EXIT
        (bcmport_ingress_mirror_set(unit,
                                    sid,
                                    trans_id,
                                    cfg.port,
                                    cfg.instance_id,
                                    m_en));
exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief PORT_ING_MIRROR In-memory event callback structure.
 *
 * This structure contains the callback functions corresponding to the
 * PORT_ING_MIRROR logical table entry commit stages. Only the staging
 * callback is implemented; the remaining stages use the IMM defaults.
 */
static bcmimm_lt_cb_t port_ing_mirror_imm_callback = {
    /*! Validate function (not used). */
    .validate = NULL,
    /*! Staging function. */
    .stage = port_ing_mirror_imm_stage_callback,
    /*! Commit function (not used). */
    .commit = NULL,
    /*! Abort function (not used). */
    .abort = NULL
};
/*!
 * \brief Parse IMM PORT_EGR_MIRRORt notification input fields.
 *
 * Converts the IMM key/data field lists into a port_emirror_t structure.
 *
 * \param [in] unit Unit number.
 * \param [in] key IMM input key field list.
 * \param [in] data IMM input data field list.
 * \param [out] emirror Port egress mirror info.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Failed to convert \c key \c data to \c emirror.
 */
static int
port_emirror_lt_fields_parse(int unit,
                             const bcmltd_field_t *key,
                             const bcmltd_field_t *data,
                             port_emirror_t *emirror)
{
    const bcmltd_field_t *fld;

    SHR_FUNC_ENTER(unit);

    sal_memset(emirror, 0, sizeof(*emirror));

    /* Key fields: port, egress port and mirror instance. */
    for (fld = key; fld != NULL; fld = fld->next) {
        switch (fld->id) {
        case PORT_EGR_MIRRORt_PORT_IDf:
            emirror->port = fld->data;
            break;
        case PORT_EGR_MIRRORt_EGR_PORT_IDf:
            emirror->egr_port = fld->data;
            break;
        case PORT_EGR_MIRRORt_MIRROR_INSTANCE_IDf:
            emirror->instance_id = fld->data;
            break;
        default:
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        /* Record which fields were present in the notification. */
        SHR_BITSET(emirror->fbmp, fld->id);
    }

    /* Data fields: only MIRROR_ENABLE is valid. */
    for (fld = data; fld != NULL; fld = fld->next) {
        if (fld->id != PORT_EGR_MIRRORt_MIRROR_ENABLEf) {
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        emirror->enable = fld->data;
        SHR_BITSET(emirror->fbmp, fld->id);
    }

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief Stage callback for PORT_EGR_MIRROR IMM table changes.
 *
 * Translates a PORT_EGR_MIRROR in-memory entry event into a call to
 * bcmport_egress_mirror_set().
 *
 * \param [in] unit Unit number.
 * \param [in] sid Logical table ID.
 * \param [in] trans_id Transaction ID associated with this operation.
 * \param [in] event_reason Reason for the entry event.
 * \param [in] key Linked list of the key fields identifying the entry.
 * \param [in] data Linked list of the data fields in the modified entry.
 * \param [in] context Opaque pointer supplied at registration time.
 * \param [out] output_fields Read-only output fields; this handler
 *              produces none.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Unsupported event reason or unexpected field.
 */
static int
port_egr_mirror_imm_stage_callback(int unit,
                                   bcmltd_sid_t sid,
                                   uint32_t trans_id,
                                   bcmimm_entry_event_t event_reason,
                                   const bcmltd_field_t *key,
                                   const bcmltd_field_t *data,
                                   void *context,
                                   bcmltd_fields_t *output_fields)
{
    port_emirror_t entry;
    bool mirror_enable = 0;

    SHR_FUNC_ENTER(unit);

    SHR_IF_ERR_VERBOSE_EXIT
        (port_emirror_lt_fields_parse(unit, key, data, &entry));

    /* This handler never returns read-only fields. */
    if (output_fields) {
        output_fields->count = 0;
    }

    if (event_reason == BCMIMM_ENTRY_INSERT ||
        event_reason == BCMIMM_ENTRY_UPDATE) {
        /* Nothing to program unless MIRROR_ENABLE was supplied. */
        if (!SHR_BITGET(entry.fbmp, PORT_EGR_MIRRORt_MIRROR_ENABLEf)) {
            SHR_RETURN_VAL_EXIT(SHR_E_NONE);
        }
        mirror_enable = entry.enable;
    } else if (event_reason != BCMIMM_ENTRY_DELETE) {
        SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
    }

    /* On delete, mirror_enable stays false, disabling egress mirroring. */
    SHR_IF_ERR_VERBOSE_EXIT
        (bcmport_egress_mirror_set(unit,
                                   sid,
                                   trans_id,
                                   entry.port,
                                   entry.egr_port,
                                   entry.instance_id,
                                   mirror_enable));

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief PORT_EGR_MIRROR In-memory event callback structure.
 *
 * This structure contains the callback functions corresponding to the
 * PORT_EGR_MIRROR logical table entry commit stages. Only the staging
 * callback is implemented; the remaining stages use the IMM defaults.
 */
static bcmimm_lt_cb_t port_egr_mirror_imm_callback = {
    /*! Validate function (not used). */
    .validate = NULL,
    /*! Staging function. */
    .stage = port_egr_mirror_imm_stage_callback,
    /*! Commit function (not used). */
    .commit = NULL,
    /*! Abort function (not used). */
    .abort = NULL
};
/*!
 * \brief Parse IMM PORT_BRIDGEt notification input fields.
 *
 * Converts the IMM key/data field lists into a port_bridge_info_t
 * structure.
 *
 * \param [in] unit Unit number.
 * \param [in] key IMM input key field list.
 * \param [in] data IMM input data field list.
 * \param [out] lt_info Port bridge info buffer.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Failed to convert \c key \c data to \c lt_info.
 */
static int
port_bridge_lt_fields_parse(int unit,
                            const bcmltd_field_t *key,
                            const bcmltd_field_t *data,
                            port_bridge_info_t *lt_info)
{
    const bcmltd_field_t *fld;

    SHR_FUNC_ENTER(unit);

    sal_memset(lt_info, 0, sizeof(port_bridge_info_t));

    /* The only valid key field is PORT_ID. */
    for (fld = key; fld != NULL; fld = fld->next) {
        if (fld->id != PORT_BRIDGEt_PORT_IDf) {
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        lt_info->member_bmp |= MEMBER_BMP_PORT_ID;
        lt_info->port = fld->data;
    }

    /* The only valid data field is BRIDGE. */
    for (fld = data; fld != NULL; fld = fld->next) {
        if (fld->id != PORT_BRIDGEt_BRIDGEf) {
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        lt_info->member_bmp |= MEMBER_BMP_BRIDGE;
        lt_info->bridge = fld->data;
    }

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief Stage callback for PORT_BRIDGE IMM table changes.
 *
 * Translates a PORT_BRIDGE in-memory entry event into a call to
 * bcmport_port_bridge_set().
 *
 * \param [in] unit Unit number.
 * \param [in] sid Logical table ID.
 * \param [in] trans_id Transaction ID associated with this operation.
 * \param [in] event_reason Reason for the entry event.
 * \param [in] key Linked list of the key fields identifying the entry.
 * \param [in] data Linked list of the data fields in the modified entry.
 * \param [in] context Opaque pointer supplied at registration time.
 * \param [out] output_fields Read-only output fields; this handler
 *              produces none.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Unsupported event reason or unexpected field.
 */
static int
port_bridge_imm_callback_stage(int unit,
                               bcmltd_sid_t sid,
                               uint32_t trans_id,
                               bcmimm_entry_event_t event_reason,
                               const bcmltd_field_t *key,
                               const bcmltd_field_t *data,
                               void *context,
                               bcmltd_fields_t *output_fields)
{
    port_bridge_info_t info;
    bool bridge_en = 0;

    SHR_FUNC_ENTER(unit);

    SHR_IF_ERR_VERBOSE_EXIT
        (port_bridge_lt_fields_parse(unit, key, data, &info));

    /* This handler never returns read-only fields. */
    if (output_fields) {
        output_fields->count = 0;
    }

    if (event_reason == BCMIMM_ENTRY_INSERT ||
        event_reason == BCMIMM_ENTRY_UPDATE) {
        /* Nothing to program unless BRIDGE was supplied. */
        if (!(info.member_bmp & MEMBER_BMP_BRIDGE)) {
            SHR_RETURN_VAL_EXIT(SHR_E_NONE);
        }
        bridge_en = info.bridge;
    } else if (event_reason == BCMIMM_ENTRY_DELETE) {
        /* Deleting the entry disables bridging on the port. */
        bridge_en = 0;
    } else {
        SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
    }

    SHR_IF_ERR_VERBOSE_EXIT(
        bcmport_port_bridge_set(unit,
                                trans_id,
                                sid,
                                info.port,
                                bridge_en));

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief PORT_BRIDGE In-memory event callback structure.
 *
 * This structure contains the callback functions corresponding to the
 * PORT_BRIDGE logical table entry commit stages. Only the staging
 * callback is implemented; the remaining stages use the IMM defaults.
 */
static bcmimm_lt_cb_t port_bridge_imm_callback = {
    /*! Validate function (not used). */
    .validate = NULL,
    /*! Staging function. */
    .stage = port_bridge_imm_callback_stage,
    /*! Commit function (not used). */
    .commit = NULL,
    /*! Abort function (not used). */
    .abort = NULL
};
/*!
 * \brief Parse IMM PORT_MEMBERSHIP_POLICYt notification input fields.
 *
 * Converts the IMM key/data field lists into a
 * port_membership_policy_info_t structure.
 *
 * \param [in] unit Unit number.
 * \param [in] key IMM input key field list.
 * \param [in] data IMM input data field list.
 * \param [out] lt_info Port membership policy info buffer.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Failed to convert \c key \c data to \c lt_info.
 */
static int
port_membership_policy_lt_fields_parse(int unit,
                                       const bcmltd_field_t *key,
                                       const bcmltd_field_t *data,
                                       port_membership_policy_info_t *lt_info)
{
    const bcmltd_field_t *fld;

    SHR_FUNC_ENTER(unit);

    sal_memset(lt_info, 0, sizeof(port_membership_policy_info_t));

    /* The only valid key field is PORT_ID. */
    for (fld = key; fld != NULL; fld = fld->next) {
        if (fld->id != PORT_MEMBERSHIP_POLICYt_PORT_IDf) {
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
        BCMPORT_LT_FIELD_SET(
            lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_PORT_ID);
        lt_info->port_id = (bcmport_id_t)fld->data;
    }

    /* Data fields: the three membership-check knobs. */
    for (fld = data; fld != NULL; fld = fld->next) {
        switch (fld->id) {
        case PORT_MEMBERSHIP_POLICYt_ING_VLAN_MEMBERSHIP_CHECKf:
            BCMPORT_LT_FIELD_SET(
                lt_info->fbmp,
                BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_ING_VLAN_MEMBERSHIP_CHECK);
            lt_info->ing_vlan_membership_check = (bool)fld->data;
            break;
        case PORT_MEMBERSHIP_POLICYt_EGR_VLAN_MEMBERSHIP_CHECKf:
            BCMPORT_LT_FIELD_SET(
                lt_info->fbmp,
                BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_EGR_VLAN_MEMBERSHIP_CHECK);
            lt_info->egr_vlan_membership_check = (bool)fld->data;
            break;
        case PORT_MEMBERSHIP_POLICYt_SKIP_VLAN_CHECKf:
            BCMPORT_LT_FIELD_SET(
                lt_info->fbmp,
                BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_SKIP_VLAN_CHECK);
            lt_info->skip_vlan_check = (bool)fld->data;
            break;
        default:
            SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
        }
    }

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief Initialize unspecified fields to their SW default values.
 *
 * Fills in every PORT_MEMBERSHIP_POLICY field that the caller did not
 * supply (per \c fbmp) with its logical-table default value, and marks
 * it as present. This initialization is only used for Insert and Delete
 * operations.
 *
 * \param [in] unit Unit number.
 * \param [in,out] lt_info PORT_MEMBERSHIP_POLICY LT entry data buffer.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM \c lt_info is NULL.
 */
static int
port_membership_policy_info_default_values_init(int unit,
                                                port_membership_policy_info_t *lt_info)
{
    /* def_val is reused by each lookup below; only valid right after the
     * bcmlrd_field_default_get() call that filled it. */
    uint64_t def_val = 0;
    uint32_t num;

    SHR_FUNC_ENTER(unit);

    /* validate input parameter. */
    SHR_NULL_CHECK(lt_info, SHR_E_PARAM);

    if (!BCMPORT_LT_FIELD_GET(
            lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_ING_VLAN_MEMBERSHIP_CHECK)) {
        SHR_IF_ERR_EXIT(
            bcmlrd_field_default_get(unit,
                                     PORT_MEMBERSHIP_POLICYt,
                                     PORT_MEMBERSHIP_POLICYt_ING_VLAN_MEMBERSHIP_CHECKf,
                                     1, &def_val, &num));
        lt_info->ing_vlan_membership_check = def_val ? true : false;
        BCMPORT_LT_FIELD_SET(lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_ING_VLAN_MEMBERSHIP_CHECK);
    }

    if (!BCMPORT_LT_FIELD_GET(
            lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_EGR_VLAN_MEMBERSHIP_CHECK)) {
        SHR_IF_ERR_EXIT(
            bcmlrd_field_default_get(unit,
                                     PORT_MEMBERSHIP_POLICYt,
                                     PORT_MEMBERSHIP_POLICYt_EGR_VLAN_MEMBERSHIP_CHECKf,
                                     1, &def_val, &num));
        lt_info->egr_vlan_membership_check = def_val ? true : false;
        BCMPORT_LT_FIELD_SET(lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_EGR_VLAN_MEMBERSHIP_CHECK);
    }

    if (!BCMPORT_LT_FIELD_GET(
            lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_SKIP_VLAN_CHECK)) {
        int rv = SHR_E_NONE;
        const bcmlrd_field_data_t *field_info;
        /* SKIP_VLAN_CHECK is probed first and skipped silently on failure.
         * NOTE(review): presumably the field is not mapped on every device;
         * confirm against the device LRD maps. */
        rv = bcmlrd_field_get(unit, PORT_MEMBERSHIP_POLICYt,
                              PORT_MEMBERSHIP_POLICYt_SKIP_VLAN_CHECKf, &field_info);
        if (rv == SHR_E_NONE) {
            SHR_IF_ERR_EXIT(
                bcmlrd_field_default_get(unit,
                                         PORT_MEMBERSHIP_POLICYt,
                                         PORT_MEMBERSHIP_POLICYt_SKIP_VLAN_CHECKf,
                                         1, &def_val, &num));
            lt_info->skip_vlan_check = def_val ? true : false;
            BCMPORT_LT_FIELD_SET(lt_info->fbmp, BCMPORT_MEMBERSHIP_POLICY_LT_FIELD_SKIP_VLAN_CHECK);
        }
    }

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief Stage callback for PORT_MEMBERSHIP_POLICY IMM table changes.
 *
 * Translates a PORT_MEMBERSHIP_POLICY in-memory entry event into a call
 * to bcmport_port_membership_policy_set().
 *
 * \param [in] unit Unit number.
 * \param [in] sid Logical table ID.
 * \param [in] trans_id Transaction ID associated with this operation.
 * \param [in] event_reason Reason for the entry event.
 * \param [in] key Linked list of the key fields identifying the entry.
 * \param [in] data Linked list of the data fields in the modified entry.
 * \param [in] context Opaque pointer supplied at registration time.
 * \param [out] output_fields Read-only output fields; this handler
 *              produces none.
 *
 * \retval SHR_E_NONE No errors.
 * \retval SHR_E_PARAM Unsupported event reason or unexpected field.
 */
static int
port_membership_policy_imm_callback_stage(int unit,
                                          bcmltd_sid_t sid,
                                          uint32_t trans_id,
                                          bcmimm_entry_event_t event_reason,
                                          const bcmltd_field_t *key,
                                          const bcmltd_field_t *data,
                                          void *context,
                                          bcmltd_fields_t *output_fields)
{
    port_membership_policy_info_t info;

    SHR_FUNC_ENTER(unit);

    SHR_IF_ERR_VERBOSE_EXIT
        (port_membership_policy_lt_fields_parse(unit, key, data, &info));

    /* This handler never returns read-only fields. */
    if (output_fields) {
        output_fields->count = 0;
    }

    if (event_reason == BCMIMM_ENTRY_INSERT ||
        event_reason == BCMIMM_ENTRY_DELETE) {
        /* Insert/Delete program LT defaults for any unspecified field. */
        SHR_IF_ERR_VERBOSE_EXIT(
            port_membership_policy_info_default_values_init(unit, &info));
    } else if (event_reason != BCMIMM_ENTRY_UPDATE) {
        SHR_RETURN_VAL_EXIT(SHR_E_PARAM);
    }

    SHR_IF_ERR_VERBOSE_EXIT(
        bcmport_port_membership_policy_set(unit,
                                           trans_id,
                                           sid, info.port_id,
                                           &info));

exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief PORT_MEMBERSHIP_POLICY In-memory event callback structure.
 *
 * This structure contains the callback functions corresponding to the
 * PORT_MEMBERSHIP_POLICY logical table entry commit stages. Only the
 * staging callback is implemented; the remaining stages use the IMM
 * defaults.
 */
static bcmimm_lt_cb_t port_membership_policy_imm_callback = {
    /*! Validate function (not used). */
    .validate = NULL,
    /*! Staging function. */
    .stage = port_membership_policy_imm_callback_stage,
    /*! Commit function (not used). */
    .commit = NULL,
    /*! Abort function (not used). */
    .abort = NULL
};
/*******************************************************************************
 * Public Functions
 */

/*!
 * \brief Initialize the PORT IMM field array memory manager.
 *
 * Creates the module-global field array handle (sized for
 * PORT_FIELD_COUNT_MAX fields) if it does not exist yet.
 * NOTE(review): the handle is shared rather than per-unit, so it is
 * created only once regardless of \c unit -- confirm this is intended.
 *
 * \param [in] unit Unit number.
 *
 * \retval SHR_E_NONE No errors.
 */
int
bcmport_imm_db_init(int unit)
{
    SHR_FUNC_ENTER(unit);
    if (!port_fld_arr_hdl) {
        SHR_IF_ERR_EXIT
            (shr_famm_hdl_init(PORT_FIELD_COUNT_MAX, &port_fld_arr_hdl));
    }
exit:
    SHR_FUNC_EXIT();
}
/*!
 * \brief Release the PORT IMM field array memory manager.
 *
 * Deletes the module-global field array handle, if allocated, and clears
 * it so a subsequent bcmport_imm_db_init() can re-create it.
 *
 * \param [in] unit Unit number (unused; the handle is module-global).
 */
void
bcmport_imm_db_cleanup(int unit)
{
    if (port_fld_arr_hdl) {
        shr_famm_hdl_delete(port_fld_arr_hdl);
        port_fld_arr_hdl = 0;
    }
}
int
bcmport_imm_register(int unit)
{
const bcmlrd_map_t *map = NULL;
int rv;
SHR_FUNC_ENTER(unit);
/*
* To register callback for PORT LTs here.
*/
rv = bcmlrd_map_get(unit, PORT_ING_MIRRORt, &map);
if (SHR_SUCCESS(rv) && map) {
SHR_IF_ERR_EXIT
(bcmimm_lt_event_reg(unit,
PORT_ING_MIRRORt,
&port_ing_mirror_imm_callback,
NULL));
}
rv = bcmlrd_map_get(unit, PORT_EGR_MIRRORt, &map);
if (SHR_SUCCESS(rv) && map) {
SHR_IF_ERR_EXIT
(bcmimm_lt_event_reg(unit,
PORT_EGR_MIRRORt,
&port_egr_mirror_imm_callback,
NULL));
}
rv = bcmlrd_map_get(unit, PORT_BRIDGEt, &map);
if (SHR_SUCCESS(rv) && map) {
SHR_IF_ERR_EXIT(
bcmimm_lt_event_reg(unit,
PORT_BRIDGEt,
&port_bridge_imm_callback,
NULL));
}
rv = bcmlrd_map_get(unit, PORT_MEMBERSHIP_POLICYt, &map);
if (SHR_SUCCESS(rv) && map) {
SHR_IF_ERR_EXIT(
bcmimm_lt_event_reg(unit,
PORT_MEMBERSHIP_POLICYt,
&port_membership_policy_imm_callback,
NULL));
}
exit:
SHR_FUNC_EXIT();
}
|
secondwatchCH/JinCoin | db-6.2.32.NC/examples/java/src/collections/ship/basic/SampleViews.java | /*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 2002, 2017 Oracle and/or its affiliates. All rights reserved.
*
* $Id$
*/
package collections.ship.basic;
import com.sleepycat.bind.EntryBinding;
import com.sleepycat.bind.serial.ClassCatalog;
import com.sleepycat.bind.serial.SerialBinding;
import com.sleepycat.collections.StoredEntrySet;
import com.sleepycat.collections.StoredMap;
/**
 * SampleViews defines the data bindings and collection views for the sample
 * database.
 */
public class SampleViews {

    private StoredMap partMap;
    private StoredMap supplierMap;
    private StoredMap shipmentMap;

    /**
     * Creates the serial bindings and map views over the sample stores.
     *
     * The stored key and data entries are used directly rather than being
     * mapped to separate objects, so plain SerialBinding instances suffice
     * and no custom binding classes are defined. StoredSortedMap is not
     * used because the stores are ordered by serialized key objects, which
     * do not provide a very useful ordering.
     */
    public SampleViews(SampleDatabase db) {

        ClassCatalog classCatalog = db.getClassCatalog();

        EntryBinding partKey =
            new SerialBinding(classCatalog, PartKey.class);
        EntryBinding partData =
            new SerialBinding(classCatalog, PartData.class);
        partMap = new StoredMap(db.getPartDatabase(),
                                partKey, partData, true);

        EntryBinding supplierKey =
            new SerialBinding(classCatalog, SupplierKey.class);
        EntryBinding supplierData =
            new SerialBinding(classCatalog, SupplierData.class);
        supplierMap = new StoredMap(db.getSupplierDatabase(),
                                    supplierKey, supplierData, true);

        EntryBinding shipmentKey =
            new SerialBinding(classCatalog, ShipmentKey.class);
        EntryBinding shipmentData =
            new SerialBinding(classCatalog, ShipmentData.class);
        shipmentMap = new StoredMap(db.getShipmentDatabase(),
                                    shipmentKey, shipmentData, true);
    }

    // The views below can be accessed through the java.util.Map or
    // java.util.Set interfaces, or through StoredMap and StoredEntrySet,
    // which provide additional methods. The entry sets are exposed here
    // pre-cast to StoredEntrySet so callers do not have to down-cast the
    // result of Map.entrySet() themselves.

    /** Returns a map view of the part storage container. */
    public final StoredMap getPartMap() {
        return this.partMap;
    }

    /** Returns a map view of the supplier storage container. */
    public final StoredMap getSupplierMap() {
        return this.supplierMap;
    }

    /** Returns a map view of the shipment storage container. */
    public final StoredMap getShipmentMap() {
        return this.shipmentMap;
    }

    /** Returns an entry set view of the part storage container. */
    public final StoredEntrySet getPartEntrySet() {
        return (StoredEntrySet) this.partMap.entrySet();
    }

    /** Returns an entry set view of the supplier storage container. */
    public final StoredEntrySet getSupplierEntrySet() {
        return (StoredEntrySet) this.supplierMap.entrySet();
    }

    /** Returns an entry set view of the shipment storage container. */
    public final StoredEntrySet getShipmentEntrySet() {
        return (StoredEntrySet) this.shipmentMap.entrySet();
    }
}
|
HarryGull/itr-fesub-backup | test/views/seis/ContactDetailsSpec.scala | <reponame>HarryGull/itr-fesub-backup
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.seis
import auth.{MockConfig, MockAuthConnector}
import common.KeystoreKeys
import controllers.seis.{ContactDetailsController, routes}
import models.ContactDetailsModel
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.mockito.Matchers
import org.mockito.Mockito._
import play.api.i18n.Messages
import play.api.i18n.Messages.Implicits._
import play.api.test.Helpers._
import views.helpers.ViewSpec
import scala.concurrent.Future
class ContactDetailsSpec extends ViewSpec {

  object TestController extends ContactDetailsController {
    override lazy val applicationConfig = MockConfig
    override lazy val authConnector = MockAuthConnector
    override lazy val s4lConnector = mockS4lConnector
    override lazy val enrolmentConnector = mockEnrolmentConnector
  }

  // Stub the keystore fetch so the controller sees the given (optional) model.
  def setupMocks(contactDetailsModel: Option[ContactDetailsModel] = None): Unit =
    when(mockS4lConnector.fetchAndGetFormData[ContactDetailsModel](Matchers.eq(KeystoreKeys.manualContactDetails))
      (Matchers.any(), Matchers.any(),Matchers.any())).thenReturn(Future.successful(contactDetailsModel))

  "The Contact Details page" should {

    "Verify that the contact details page contains the correct elements when a valid ContactDetailsModel is passed" in new SEISSetup {
      val document: Document = {
        setupMocks(Some(contactDetailsModel))
        val result = TestController.show.apply(authorisedFakeRequest)
        Jsoup.parse(contentAsString(result))
      }
      document.title() shouldBe Messages("page.contactInformation.contactDetails.title")
      document.getElementById("main-heading").text() shouldBe Messages("page.contactInformation.contactDetails.heading")
      document.getElementById("label-forename").text() shouldBe Messages("page.contactInformation.contactDetails.forename.label")
      document.getElementById("label-surname").text() shouldBe Messages("page.contactInformation.contactDetails.surname.label")
      document.getElementById("label-telephoneNumber").text() shouldBe Messages("page.contactInformation.contactDetails.phoneNumber.label")
      document.getElementById("label-mobileNumber").text() shouldBe Messages("page.contactInformation.contactDetails.mobileNumber.label")
      document.getElementById("label-email").text() shouldBe Messages("page.contactInformation.contactDetails.email.label")
      document.getElementById("next").text() shouldBe Messages("common.button.snc")
      document.body.getElementById("back-link").attr("href") shouldEqual routes.ConfirmContactDetailsController.show().url
      document.body.getElementById("progress-section").text shouldBe Messages("common.section.progress.company.details.four")
    }

    // Submitting with no mocked model renders the page with validation errors,
    // so the labels are checked with `include` (error text may be appended) and
    // the error summary must be shown.
    "Verify that the contact details page contains the correct elements when an invalid ContactDetailsModel is passed" in new SEISSetup {
      val document: Document = {
        setupMocks()
        val result = TestController.submit.apply(authorisedFakeRequest)
        Jsoup.parse(contentAsString(result))
      }
      document.title() shouldBe Messages("page.contactInformation.contactDetails.title")
      document.getElementById("main-heading").text() shouldBe Messages("page.contactInformation.contactDetails.heading")
      // These were previously bare `contains` expressions whose Boolean result
      // was discarded, so they asserted nothing. Use real matchers instead.
      document.getElementById("label-forename").text() should include(Messages("page.contactInformation.contactDetails.forename.label"))
      document.getElementById("label-surname").text() should include(Messages("page.contactInformation.contactDetails.surname.label"))
      document.getElementById("label-telephoneNumber").text() should include(Messages("page.contactInformation.contactDetails.phoneNumber.label"))
      document.getElementById("label-mobileNumber").text() shouldBe Messages("page.contactInformation.contactDetails.mobileNumber.label")
      document.getElementById("label-email").text() should include(Messages("page.contactInformation.contactDetails.email.label"))
      document.getElementById("next").text() shouldBe Messages("common.button.snc")
      document.body.getElementById("back-link").attr("href") shouldEqual routes.ConfirmContactDetailsController.show().url
      document.body.getElementById("progress-section").text shouldBe Messages("common.section.progress.company.details.four")
      document.getElementById("error-summary-display").hasClass("error-summary--show") shouldBe true
    }
  }
}
|
IBM/secrets-manager-java-sdk | modules/secrets-manager/src/main/java/com/ibm/cloud/secrets_manager_sdk/secrets_manager/v1/model/SecretResource.java | /*
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.ibm.cloud.secrets_manager_sdk.secrets_manager.v1.model;
import java.util.Date;
import java.util.List;
import java.util.Map;
import com.google.gson.annotations.SerializedName;
import com.ibm.cloud.sdk.core.service.model.GenericModel;
/**
* SecretResource.
* <p>
* Classes which extend this class:
* - ArbitrarySecretResource
* - UsernamePasswordSecretResource
* - IAMCredentialsSecretResource
* - CertificateSecretResource
* - PublicCertificateSecretResource
*/
public class SecretResource extends GenericModel {
/**
* The secret type.
*/
public interface SecretType {
/**
* arbitrary.
*/
String ARBITRARY = "arbitrary";
/**
* username_password.
*/
String USERNAME_PASSWORD = "<PASSWORD>";
/**
* iam_credentials.
*/
String IAM_CREDENTIALS = "iam_credentials";
/**
* imported_cert.
*/
String IMPORTED_CERT = "imported_cert";
}
    // --- Common secret metadata (documented by the getters below) ---
    protected String id;
    protected String name;
    protected String description;
    @SerializedName("secret_group_id")
    protected String secretGroupId;
    protected List<String> labels;
    // NIST SP 800-57 secret state and its text form.
    protected Long state;
    @SerializedName("state_description")
    protected String stateDescription;
    @SerializedName("secret_type")
    protected String secretType;
    protected String crn;
    @SerializedName("creation_date")
    protected Date creationDate;
    @SerializedName("created_by")
    protected String createdBy;
    @SerializedName("last_update_date")
    protected Date lastUpdateDate;
    @SerializedName("versions_total")
    protected Long versionsTotal;
    protected List<Map<String, Object>> versions;
    // --- Arbitrary / username_password secret properties ---
    protected Date expirationDate;
    protected String payload;
    @SerializedName("secret_data")
    protected Map<String, Object> secretData;
    protected String username;
    protected String password;
    @SerializedName("next_rotation_date")
    protected Date nextRotationDate;
    // --- iam_credentials secret properties ---
    protected Object ttl;
    @SerializedName("access_groups")
    protected List<String> accessGroups;
    @SerializedName("api_key")
    protected String apiKey;
    @SerializedName("service_id")
    protected String serviceId;
    @SerializedName("reuse_api_key")
    protected Boolean reuseApiKey;
    // --- Certificate secret properties ---
    protected String certificate;
    @SerializedName("private_key")
    protected String privateKey;
    protected String intermediate;
    @SerializedName("serial_number")
    protected String serialNumber;
    protected String algorithm;
    @SerializedName("key_algorithm")
    protected String keyAlgorithm;
    protected String issuer;
    protected CertificateValidity validity;
    @SerializedName("common_name")
    protected String commonName;
    @SerializedName("intermediate_included")
    protected Boolean intermediateIncluded;
    @SerializedName("private_key_included")
    protected Boolean privateKeyIncluded;
    @SerializedName("alt_names")
    protected List<String> altNames;
    @SerializedName("bundle_certs")
    protected Boolean bundleCerts;
    protected String ca;
    protected String dns;
    protected Rotation rotation;
    @SerializedName("issuance_info")
    protected IssuanceInfo issuanceInfo;

    // Hidden constructor: instances are created as one of the concrete
    // subclasses listed in the class javadoc.
    protected SecretResource() {
    }
/**
* Gets the id.
* <p>
* The v4 UUID that uniquely identifies the secret.
*
* @return the id
*/
public String id() {
return id;
}
/**
* Gets the name.
* <p>
* A human-readable alias to assign to your secret.
* <p>
* To protect your privacy, do not use personal data, such as your name or location, as an alias for your secret.
*
* @return the name
*/
public String name() {
return name;
}
/**
* Gets the description.
* <p>
* An extended description of your secret.
* <p>
* To protect your privacy, do not use personal data, such as your name or location, as a description for your secret.
*
* @return the description
*/
public String description() {
return description;
}
/**
* Gets the secretGroupId.
* <p>
* The v4 UUID that uniquely identifies the secret group to assign to this secret.
* <p>
* If you omit this parameter, your secret is assigned to the `default` secret group.
*
* @return the secretGroupId
*/
public String secretGroupId() {
return secretGroupId;
}
/**
* Gets the labels.
* <p>
* Labels that you can use to filter for secrets in your instance.
* <p>
* Up to 30 labels can be created. Labels can be between 2-30 characters, including spaces. Special characters not
* permitted include the angled bracket, comma, colon, ampersand, and vertical pipe character (|).
* <p>
* To protect your privacy, do not use personal data, such as your name or location, as a label for your secret.
*
* @return the labels
*/
public List<String> labels() {
return labels;
}
/**
* Gets the state.
* <p>
* The secret state based on NIST SP 800-57. States are integers and correspond to the Pre-activation = 0, Active = 1,
* Suspended = 2, Deactivated = 3, and Destroyed = 5 values.
*
* @return the state
*/
public Long state() {
return state;
}
/**
* Gets the stateDescription.
* <p>
* A text representation of the secret state.
*
* @return the stateDescription
*/
public String stateDescription() {
return stateDescription;
}
/**
* Gets the secretType.
* <p>
* The secret type.
*
* @return the secretType
*/
public String secretType() {
return secretType;
}
/**
* Gets the crn.
* <p>
* The Cloud Resource Name (CRN) that uniquely identifies your Secrets Manager resource.
*
* @return the crn
*/
public String crn() {
return crn;
}
/**
* Gets the creationDate.
* <p>
* The date the secret was created. The date format follows RFC 3339.
*
* @return the creationDate
*/
public Date creationDate() {
return creationDate;
}
/**
* Gets the createdBy.
* <p>
* The unique identifier for the entity that created the secret.
*
* @return the createdBy
*/
public String createdBy() {
return createdBy;
}
/**
* Gets the lastUpdateDate.
* <p>
* Updates when the actual secret is modified. The date format follows RFC 3339.
*
* @return the lastUpdateDate
*/
public Date lastUpdateDate() {
return lastUpdateDate;
}
/**
* Gets the versionsTotal.
* <p>
* The number of versions that are associated with a secret.
*
* @return the versionsTotal
*/
public Long versionsTotal() {
return versionsTotal;
}
/**
* Gets the versions.
* <p>
* An array that contains metadata for each secret version. For more information on the metadata properties, see [Get
* secret version metadata](#get-secret-version-metadata).
*
* @return the versions
*/
public List<Map<String, Object>> versions() {
return versions;
}
/**
* Gets the expirationDate.
* <p>
* The date the secret material expires. The date format follows RFC 3339.
* <p>
* You can set an expiration date on supported secret types at their creation. If you create a secret without
* specifying an expiration date, the secret does not expire. The `expiration_date` field is supported for the
* following secret types:
* <p>
* - `arbitrary`
* - `username_password`.
*
* @return the expirationDate
*/
public Date expirationDate() {
return expirationDate;
}
/**
* Gets the payload.
* <p>
* The new secret data to assign to the secret.
*
* @return the payload
*/
public String payload() {
return payload;
}
/**
* Gets the secretData.
*
* @return the secretData
*/
public Map<String, Object> secretData() {
return secretData;
}
/**
* Gets the username.
* <p>
* The username to assign to this secret.
*
* @return the username
*/
public String username() {
return username;
}
/**
* Gets the password.
* <p>
* The password to assign to this secret.
*
* @return the password
*/
public String password() {
return password;
}
/**
* Gets the nextRotationDate.
* <p>
* The date that the secret is scheduled for automatic rotation.
* <p>
* The service automatically creates a new version of the secret on its next rotation date. This field exists only for
* secrets that can be auto-rotated and have an existing rotation policy.
*
* @return the nextRotationDate
*/
public Date nextRotationDate() {
return nextRotationDate;
}
/**
* Gets the ttl.
* <p>
* The time-to-live (TTL) or lease duration to assign to generated credentials.
* <p>
* For `iam_credentials` secrets, the TTL defines for how long each generated API key remains valid. The value can be
* either an integer that specifies the number of seconds, or the string representation of a duration, such as `120m`
* or `24h`.
*
* @return the ttl
*/
public Object ttl() {
return ttl;
}
/**
* Gets the accessGroups.
* <p>
* The access groups that define the capabilities of the service ID and API key that are generated for an
* `iam_credentials` secret.
* <p>
* **Tip:** To list the access groups that are available in an account, you can use the [IAM Access Groups
* API](https://cloud.ibm.com/apidocs/iam-access-groups#list-access-groups). To find the ID of an access group in the
* console, go to **Manage > Access (IAM) > Access groups**. Select the access group to inspect, and click
* **Details** to view its ID.
*
* @return the accessGroups
*/
public List<String> accessGroups() {
return accessGroups;
}
/**
* Gets the apiKey.
* <p>
* The API key that is generated for this secret.
* <p>
* After the secret reaches the end of its lease (see the `ttl` field), the API key is deleted automatically. If you
* want to continue to use the same API key for future read operations, see the `reuse_api_key` field.
*
* @return the apiKey
*/
public String apiKey() {
return apiKey;
}
/**
* Gets the serviceId.
* <p>
* The service ID under which the API key (see the `api_key` field) is created. This service ID is added to the access
* groups that you assign for this secret.
*
* @return the serviceId
*/
public String serviceId() {
return serviceId;
}
/**
* Gets the reuseApiKey.
* <p>
* Set to `true` to reuse the service ID and API key for this secret.
* <p>
* Use this field to control whether to use the same service ID and API key for future read operations on this secret.
* If set to `true`, the service reuses the current credentials. If set to `false`, a new service ID and API key is
* generated each time that the secret is read or accessed.
*
* @return the reuseApiKey
*/
public Boolean reuseApiKey() {
return reuseApiKey;
}
/**
* Gets the certificate.
* <p>
* The contents of your certificate. The data must be formatted on a single line with embedded newline characters.
*
* @return the certificate
*/
public String certificate() {
return certificate;
}
/**
* Gets the privateKey.
* <p>
* The private key to associate with the certificate. The data must be formatted on a single line with embedded
* newline characters.
*
* @return the privateKey
*/
public String privateKey() {
return privateKey;
}
/**
* Gets the intermediate.
* <p>
* The intermediate certificate to associate with the root certificate. The data must be formatted on a single line
* with embedded newline characters.
*
* @return the intermediate
*/
public String intermediate() {
return intermediate;
}
/**
* Gets the serialNumber.
* <p>
* The unique serial number that was assigned to the certificate by the issuing certificate authority.
*
* @return the serialNumber
*/
public String serialNumber() {
return serialNumber;
}
  /**
   * Gets the algorithm.
   * <p>
   * The identifier for the cryptographic algorithm that was used by the issuing certificate authority to sign the
   * certificate.
   *
   * @return the algorithm
   */
  public String algorithm() {
    return algorithm;
  }
/**
* Gets the keyAlgorithm.
* <p>
* The identifier for the cryptographic algorithm that was used to generate the public key that is associated with the
* certificate.
*
* @return the keyAlgorithm
*/
public String keyAlgorithm() {
return keyAlgorithm;
}
/**
* Gets the issuer.
* <p>
* The distinguished name that identifies the entity that signed and issued the certificate.
*
* @return the issuer
*/
public String issuer() {
return issuer;
}
/**
* Gets the validity.
*
* @return the validity
*/
public CertificateValidity validity() {
return validity;
}
/**
* Gets the commonName.
* <p>
* The fully qualified domain name or host domain name that is defined for the certificate.
*
* @return the commonName
*/
public String commonName() {
return commonName;
}
/**
* Gets the intermediateIncluded.
* <p>
* Indicates whether the certificate was imported with an associated intermediate certificate.
*
* @return the intermediateIncluded
*/
public Boolean intermediateIncluded() {
return intermediateIncluded;
}
/**
* Gets the privateKeyIncluded.
* <p>
* Indicates whether the certificate was imported with an associated private key.
*
* @return the privateKeyIncluded
*/
public Boolean privateKeyIncluded() {
return privateKeyIncluded;
}
/**
* Gets the altNames.
* <p>
* The alternative names that are defined for the certificate.
*
* @return the altNames
*/
public List<String> altNames() {
return altNames;
}
/**
* Gets the bundleCerts.
* <p>
* Determines whether your issued certificate is bundled with intermediate certificates.
* <p>
* Set to `false` for the certificate file to contain only the issued certificate.
*
* @return the bundleCerts
*/
public Boolean bundleCerts() {
return bundleCerts;
}
/**
* Gets the ca.
* <p>
* The name of the certificate authority configuration.
* <p>
* To view a list of your configured authorities, use the [List configurations API](#get-secret-config-element).
*
* @return the ca
*/
public String ca() {
return ca;
}
/**
* Gets the dns.
* <p>
* The name of the DNS provider configuration.
* <p>
* To view a list of your configured authorities, use the [List configurations API](#get-secret-config-element).
*
* @return the dns
*/
public String dns() {
return dns;
}
/**
* Gets the rotation.
*
* @return the rotation
*/
public Rotation rotation() {
return rotation;
}
/**
* Gets the issuanceInfo.
* <p>
* Issuance information that is associated with your certificate.
*
* @return the issuanceInfo
*/
public IssuanceInfo issuanceInfo() {
return issuanceInfo;
}
}
|
ghdawn/apollo | modules/tools/image_decompress/image_decompress.cc | <reponame>ghdawn/apollo<filename>modules/tools/image_decompress/image_decompress.cc
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/tools/image_decompress/image_decompress.h"
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
namespace apollo {
namespace image_decompress {
using apollo::drivers::Image;
bool ImageDecompressComponent::Init() {
if (!GetProtoConfig(&config_)) {
AERROR << "Parse config file failed: " << ConfigFilePath();
return false;
}
AINFO << "Decompress config: \n" << config_.DebugString();
writer_ = node_->CreateWriter<Image>(config_.channel_name());
return true;
}
// Decodes one compressed image message and republishes it as a raw RGB8
// Image. Returns false if the payload cannot be decoded.
bool ImageDecompressComponent::Proc(
    const std::shared_ptr<apollo::drivers::CompressedImage>& compressed_image) {
  auto image = std::make_shared<Image>();
  image->mutable_header()->CopyFrom(compressed_image->header());
  if (compressed_image->has_measurement_time()) {
    image->set_measurement_time(compressed_image->measurement_time());
  } else {
    // Fall back to the header timestamp when no explicit measurement time.
    image->set_measurement_time(compressed_image->header().timestamp_sec());
  }
  std::vector<uint8_t> compressed_raw_data(compressed_image->data().begin(),
                                           compressed_image->data().end());
  cv::Mat mat_image = cv::imdecode(compressed_raw_data, CV_LOAD_IMAGE_COLOR);
  if (mat_image.empty()) {
    // imdecode returns an empty Mat on malformed or unsupported data;
    // without this guard cvtColor below would abort the process.
    AERROR << "Failed to decode compressed image";
    return false;
  }
  // imdecode yields BGR; downstream consumers expect RGB.
  cv::cvtColor(mat_image, mat_image, CV_BGR2RGB);
  image->set_width(mat_image.cols);
  image->set_height(mat_image.rows);
  // Only RGB8 output is supported for now.
  image->set_encoding("rgb8");
  image->set_step(3 * image->width());
  auto size = mat_image.step * mat_image.rows;
  image->set_data(&(mat_image.data[0]), size);
  writer_->Write(image);
  return true;
}
} // namespace image_decompress
} // namespace apollo
|
yhaoooooooo/FEBS-Cloud | febs-server/febs-server-system/src/main/java/com/yonyou/etl/mapper/SysBdEventMapper.java | package com.yonyou.etl.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.yonyou.etl.entity.SysBdEvent;
/**
 * MyBatis-Plus mapper for {@code SysBdEvent} entities. Inherits the generic
 * CRUD operations from {@link BaseMapper}; no custom queries are defined.
 *
 * @author yhao
 */
public interface SysBdEventMapper extends BaseMapper<SysBdEvent> {
}
|
NunoEdgarGFlowHub/MATRIX-TESTNET | metrics/registry_test.go | // Copyright 2018 The MATRIX Authors as well as Copyright 2014-2017 The go-ethereum Authors
// This file is consisted of the MATRIX library and part of the go-ethereum library.
//
// The MATRIX-ethereum library is free software: you can redistribute it and/or modify it under the terms of the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
//and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject tothe following conditions:
//
//The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
//WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISINGFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
//OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package metrics
import (
"testing"
)
// BenchmarkRegistry measures iteration cost over a single-entry registry.
func BenchmarkRegistry(b *testing.B) {
	reg := NewRegistry()
	reg.Register("foo", NewCounter())
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		reg.Each(func(string, interface{}) {})
	}
}
// TestRegistry verifies the Register/Each/Unregister round-trip for one metric.
func TestRegistry(t *testing.T) {
	reg := NewRegistry()
	reg.Register("foo", NewCounter())
	seen := 0
	reg.Each(func(name string, metric interface{}) {
		seen++
		if name != "foo" {
			t.Fatal(name)
		}
		if _, ok := metric.(Counter); !ok {
			t.Fatal(metric)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
	reg.Unregister("foo")
	seen = 0
	reg.Each(func(string, interface{}) { seen++ })
	if seen != 0 {
		t.Fatal(seen)
	}
}
// TestRegistryDuplicate checks that registering the same name twice fails
// and that the first registration wins.
func TestRegistryDuplicate(t *testing.T) {
	reg := NewRegistry()
	if err := reg.Register("foo", NewCounter()); err != nil {
		t.Fatal(err)
	}
	if err := reg.Register("foo", NewGauge()); err == nil {
		t.Fatal(err)
	}
	seen := 0
	reg.Each(func(name string, metric interface{}) {
		seen++
		if _, ok := metric.(Counter); !ok {
			t.Fatal(metric)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestRegistryGet checks Get returns the live metric (mutations are visible).
func TestRegistryGet(t *testing.T) {
	reg := NewRegistry()
	reg.Register("foo", NewCounter())
	if count := reg.Get("foo").(Counter).Count(); count != 0 {
		t.Fatal(count)
	}
	reg.Get("foo").(Counter).Inc(1)
	if count := reg.Get("foo").(Counter).Count(); count != 1 {
		t.Fatal(count)
	}
}
// TestRegistryGetOrRegister: the first registration wins; later calls with
// the same name return the existing metric.
func TestRegistryGetOrRegister(t *testing.T) {
	reg := NewRegistry()
	_ = reg.GetOrRegister("foo", NewCounter())
	existing := reg.GetOrRegister("foo", NewGauge())
	if _, ok := existing.(Counter); !ok {
		t.Fatal(existing)
	}
	seen := 0
	reg.Each(func(name string, metric interface{}) {
		seen++
		if name != "foo" {
			t.Fatal(name)
		}
		if _, ok := metric.(Counter); !ok {
			t.Fatal(metric)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// Same as TestRegistryGetOrRegister but passes constructor functions so the
// metric is only instantiated when actually registered.
func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
	reg := NewRegistry()
	_ = reg.GetOrRegister("foo", NewCounter)
	existing := reg.GetOrRegister("foo", NewGauge)
	if _, ok := existing.(Counter); !ok {
		t.Fatal(existing)
	}
	seen := 0
	reg.Each(func(name string, metric interface{}) {
		seen++
		if name != "foo" {
			t.Fatal(name)
		}
		if _, ok := metric.(Counter); !ok {
			t.Fatal(metric)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestRegistryUnregister checks that unregistering meters/timers removes
// their entries from the global meter arbiter.
func TestRegistryUnregister(t *testing.T) {
	l := len(arbiter.meters)
	r := NewRegistry()
	r.Register("foo", NewCounter())
	r.Register("bar", NewMeter())
	r.Register("baz", NewTimer())
	if len(arbiter.meters) != l+2 {
		t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters))
	}
	r.Unregister("foo")
	r.Unregister("bar")
	r.Unregister("baz")
	if len(arbiter.meters) != l {
		// Bug fix: the expected value here is l, not l+2 (the original
		// message reported the wrong expectation on failure).
		t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
	}
}
// TestPrefixedChildRegistryGetOrRegister: names registered through a child
// registry appear in the parent with the child's prefix applied.
func TestPrefixedChildRegistryGetOrRegister(t *testing.T) {
	parent := NewRegistry()
	child := NewPrefixedChildRegistry(parent, "prefix.")
	_ = child.GetOrRegister("foo", NewCounter())
	seen := 0
	parent.Each(func(name string, m interface{}) {
		seen++
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestPrefixedRegistryGetOrRegister: GetOrRegister stores under the prefix.
func TestPrefixedRegistryGetOrRegister(t *testing.T) {
	reg := NewPrefixedRegistry("prefix.")
	_ = reg.GetOrRegister("foo", NewCounter())
	seen := 0
	reg.Each(func(name string, m interface{}) {
		seen++
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestPrefixedRegistryRegister: Register stores under the prefix; a metric
// registered with the package-level Register does not leak in.
func TestPrefixedRegistryRegister(t *testing.T) {
	reg := NewPrefixedRegistry("prefix.")
	err := reg.Register("foo", NewCounter())
	if err != nil {
		t.Fatal(err.Error())
	}
	Register("bar", NewCounter())
	seen := 0
	reg.Each(func(name string, m interface{}) {
		seen++
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestPrefixedRegistryUnregister: Unregister with the unprefixed name
// removes the prefixed entry.
func TestPrefixedRegistryUnregister(t *testing.T) {
	reg := NewPrefixedRegistry("prefix.")
	_ = reg.Register("foo", NewCounter())
	seen := 0
	reg.Each(func(name string, m interface{}) {
		seen++
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
	reg.Unregister("foo")
	seen = 0
	reg.Each(func(name string, m interface{}) {
		seen++
	})
	if seen != 0 {
		t.Fatal(seen)
	}
}
// TestPrefixedRegistryGet: Get works with the unprefixed name.
func TestPrefixedRegistryGet(t *testing.T) {
	reg := NewPrefixedRegistry("prefix.")
	const name = "foo"
	reg.Register(name, NewCounter())
	if reg.Get(name) == nil {
		t.Fatal(name)
	}
}
// TestPrefixedChildRegistryGet: Get through a child registry resolves the
// unprefixed name.
func TestPrefixedChildRegistryGet(t *testing.T) {
	parent := NewRegistry()
	child := NewPrefixedChildRegistry(parent, "prefix.")
	const name = "foo"
	child.Register(name, NewCounter())
	if child.Get(name) == nil {
		t.Fatal(name)
	}
}
// TestChildPrefixedRegistryRegister: a child of DefaultRegistry prefixes its
// own entries; a package-level Register does not appear in the child's Each.
func TestChildPrefixedRegistryRegister(t *testing.T) {
	child := NewPrefixedChildRegistry(DefaultRegistry, "prefix.")
	err := child.Register("foo", NewCounter())
	if err != nil {
		t.Fatal(err.Error())
	}
	Register("bar", NewCounter())
	seen := 0
	child.Each(func(name string, m interface{}) {
		seen++
		if name != "prefix.foo" {
			t.Fatal(name)
		}
	})
	if seen != 1 {
		t.Fatal(seen)
	}
}
// TestChildPrefixedRegistryOfChildRegister: nesting prefixed child
// registries composes both prefixes.
func TestChildPrefixedRegistryOfChildRegister(t *testing.T) {
	r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
	r2 := NewPrefixedChildRegistry(r, "prefix2.")
	err := r.Register("foo2", NewCounter())
	if err != nil {
		t.Fatal(err.Error())
	}
	err = r2.Register("baz", NewCounter())
	if err != nil {
		// Bug fix: this error was previously assigned but never checked.
		t.Fatal(err.Error())
	}
	c := NewCounter()
	Register("bars", c)
	i := 0
	r2.Each(func(name string, m interface{}) {
		i++
		if name != "prefix.prefix2.baz" {
			// TODO(review): this assertion was disabled upstream; the
			// composed-prefix name is not verified, only the entry count.
			//t.Fatal(name)
		}
	})
	if i != 1 {
		t.Fatal(i)
	}
}
// TestWalkRegistries: findPrefix walks up nested child registries and
// concatenates their prefixes.
func TestWalkRegistries(t *testing.T) {
	r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
	r2 := NewPrefixedChildRegistry(r, "prefix2.")
	err := r.Register("foo2", NewCounter())
	if err != nil {
		t.Fatal(err.Error())
	}
	err = r2.Register("baz", NewCounter())
	if err != nil {
		// Bug fix: this error was previously assigned but never checked.
		t.Fatal(err.Error())
	}
	c := NewCounter()
	Register("bars", c)
	_, prefix := findPrefix(r2, "")
	if prefix != "prefix.prefix2." {
		t.Fatal(prefix)
	}
}
|
sitepoint-editors/MootorFrameworkApp_Part2 | examples/demo/views/index/index.js | (function ($) {
"use strict";
var view = m.app.view("index");
view.on("load", function() {
// console.log("index loaded");
});
}(window.$)); |
brianwc/juriscraper | opinions/united_states/state/cal.py | <filename>opinions/united_states/state/cal.py
from juriscraper.OpinionSite import OpinionSite
import re
import time
from datetime import date
class Site(OpinionSite):
    """Scraper for the California Supreme Court slip-opinion listing."""

    # Case names in the third column are followed either by a date
    # (e.g. " 1/23/12") or by the literal " filed"; split on whichever
    # appears first. Hoisted to class level so it is compiled once instead
    # of once per row.
    _NAME_SPLIT_REGEX = re.compile(r' \d\d?\/\d\d?\/\d\d| filed')

    def __init__(self):
        super(Site, self).__init__()
        self.url = 'http://www.courtinfo.ca.gov/cgi-bin/opinions-blank.cgi?Courts=S'
        self.court_id = self.__module__

    def _get_case_names(self):
        """Return case names, expanding the court's "P. v. X" to "People v. X"."""
        case_names = []
        for name in self.html.xpath('//table/tr/td[3]/text()'):
            case_name = self._NAME_SPLIT_REGEX.split(name)[0]
            if 'P. v. ' in case_name:
                case_name = case_name.replace("P. ", "People ")
            case_names.append(case_name)
        return case_names

    def _get_download_urls(self):
        """Return the PDF links from the second column."""
        return list(self.html.xpath("//table/tr/td[2]/a/@href[contains(.,'PDF')]"))

    def _get_case_dates(self):
        """Parse first-column dates, which appear with or without a comma."""
        dates = []
        for s in self.html.xpath('//table/tr/td[1]/text()'):
            s = s.strip()
            # `fmt` replaces the original loop variable `format`, which
            # shadowed the builtin of the same name.
            for fmt in ('%b %d %Y', '%b %d, %Y'):
                try:
                    dates.append(date.fromtimestamp(time.mktime(time.strptime(s, fmt))))
                    break  # First matching format wins; don't try the rest.
                except ValueError:
                    # NOTE(review): a string matching neither format is
                    # silently skipped, which would misalign dates against
                    # the other columns — assumes every row has a valid date.
                    pass
        return dates

    def _get_docket_numbers(self):
        """Return the docket numbers from the second column."""
        return list(self.html.xpath('//table/tr/td[2]/text()[1]'))

    def _get_precedential_statuses(self):
        """Every opinion on this page is published."""
        return ['Published'] * len(self.case_names)
|
stockalexander/newrelic-php-agent | src/newrelic/utilization/pcf.go | <filename>src/newrelic/utilization/pcf.go
//
// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
package utilization
import (
"errors"
"fmt"
"os"
)
// pcf holds the Pivotal Cloud Foundry utilization data reported to the
// collector; fields are populated from CF_* environment variables.
type pcf struct {
	InstanceGUID string `json:"cf_instance_guid,omitempty"`
	InstanceIP   string `json:"cf_instance_ip,omitempty"`
	MemoryLimit  string `json:"memory_limit,omitempty"`

	// Having a custom getter allows the unit tests to mock os.Getenv().
	environmentVariableGetter func(key string) string
}
// GatherPCF detects PCF via environment variables and, on success, attaches
// the vendor data to util. Returns an error when PCF is not detected.
func GatherPCF(util *Data) error {
	pcf := newPCF()
	if err := pcf.Gather(); err != nil {
		return fmt.Errorf("PCF not detected: %s", err)
	}
	// Idiom fix: drop the redundant `else` after the early return
	// (golint indent-error-flow).
	util.Vendors.PCF = pcf
	return nil
}
// newPCF constructs a pcf detector wired to the real process environment.
func newPCF() *pcf {
	detector := &pcf{}
	detector.environmentVariableGetter = os.Getenv
	return detector
}
// Gather reads the three CF_* / MEMORY_LIMIT environment variables and
// validates them, returning any validation error.
func (pcf *pcf) Gather() error {
	pcf.InstanceGUID = pcf.environmentVariableGetter("CF_INSTANCE_GUID")
	pcf.InstanceIP = pcf.environmentVariableGetter("CF_INSTANCE_IP")
	pcf.MemoryLimit = pcf.environmentVariableGetter("MEMORY_LIMIT")
	return pcf.validate()
}
// validate normalizes each captured value in place and returns an error if
// any value is malformed or missing. Uses a named return: the final check
// overwrites err only when one of the fields normalized to empty.
func (pcf *pcf) validate() (err error) {
	pcf.InstanceGUID, err = normalizeValue(pcf.InstanceGUID)
	if err != nil {
		return fmt.Errorf("Invalid PCF instance GUID: %v", err)
	}

	pcf.InstanceIP, err = normalizeValue(pcf.InstanceIP)
	if err != nil {
		return fmt.Errorf("Invalid PCF instance IP: %v", err)
	}

	pcf.MemoryLimit, err = normalizeValue(pcf.MemoryLimit)
	if err != nil {
		return fmt.Errorf("Invalid PCF memory limit: %v", err)
	}

	// All three variables must be present for PCF detection to succeed.
	if pcf.InstanceGUID == "" || pcf.InstanceIP == "" || pcf.MemoryLimit == "" {
		err = errors.New("One or more PCF environment variables are unavailable")
	}

	return
}
|
NoahFetz/CloudNet-v3 | cloudnet-modules/cloudnet-signs/src/main/java/de/dytanic/cloudnet/ext/signs/configuration/entry/SignConfigurationEntryType.java | /*
* Copyright 2019-2021 CloudNetService team & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dytanic.cloudnet.ext.signs.configuration.entry;
/**
 * Platform-specific factories for default sign configuration entries.
 * BUKKIT passes material names, while NUKKIT passes numeric block IDs
 * (the string arguments are forwarded to {@link SignConfigurationEntry#createDefault}).
 */
public enum SignConfigurationEntryType {

  BUKKIT {
    public SignConfigurationEntry createEntry(String targetGroup) {
      return SignConfigurationEntry
        .createDefault(targetGroup, "GOLD_BLOCK", "EMERALD_BLOCK", "BEDROCK", "REDSTONE_BLOCK");
    }
  },

  NUKKIT {
    public SignConfigurationEntry createEntry(String targetGroup) {
      return SignConfigurationEntry.createDefault(targetGroup, "41", "133", "7", "152");
    }
  };

  /**
   * Creates the default sign configuration entry for the given target group
   * on this platform.
   */
  public abstract SignConfigurationEntry createEntry(String targetGroup);
}
|
KUSHAGRA-JAISWAL/JAVA-PROGRAMS | src/ds/Linear_Search/Q3MinMax.java | <reponame>KUSHAGRA-JAISWAL/JAVA-PROGRAMS
// Question: Find the maximum and minimum value in an array.
package src.ds.Linear_Search;
public class Q3MinMax {
    public static void main(String[] args) {
        int[] arr = { 10, 20, -2, 421, 0, -9, 3, 21 };
        System.out.println("Max Num = " + max(arr));
        System.out.println("Min Num = " + min(arr));
    }

    // Returns the smallest element of the (non-empty) array.
    static int min(int[] arr) {
        int smallest = arr[0];
        for (int value : arr) {
            smallest = Math.min(smallest, value);
        }
        return smallest;
    }

    // Returns the largest element of the (non-empty) array.
    static int max(int[] arr) {
        int largest = arr[0];
        for (int value : arr) {
            largest = Math.max(largest, value);
        }
        return largest;
    }
}
|
RichardRanft/RakNet | DependentExtensions/Lobby2/Rooms/RoomsContainer.h | <reponame>RichardRanft/RakNet
/*
* Copyright (c) 2014, Oculus VR, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#ifndef __LOBBY_ROOM_H
#define __LOBBY_ROOM_H
#include "DS_Map.h"
#include "DS_Table.h"
#include "RoomsErrorCodes.h"
#include "DS_List.h"
#include "RakNetTypes.h"
#include "IntervalTimer.h"
#include "RoomTypes.h"
namespace RakNet
{
class ProfanityFilter;
class Room;
class PerGameRoomList;
class PerGameRoomsContainer;
class BitStream;
typedef unsigned int RoomID;
struct QuickJoinUser;
struct RoomMember;
class AllGamesRoomsContainer;
/// Base class for a user that can join rooms or wait in the quick-join
/// queue. A participant is in at most one room, or in quick join, or neither.
class RoomsParticipant
{
public:
	// Bug fix: perGameRoomsContainer was left uninitialized, so
	// GetPerGameRoomsContainer() could return a garbage pointer before
	// SetPerGameRoomsContainer() was called.
	RoomsParticipant() {room=0; inQuickJoin=false; perGameRoomsContainer=0;}
	~RoomsParticipant() {}
	Room * GetRoom(void) const {return room;}
	void SetPerGameRoomsContainer(PerGameRoomsContainer *p) {perGameRoomsContainer=p;}
	// Joining a room implicitly removes the participant from quick join.
	void SetRoom(Room *_room) {room=_room; inQuickJoin=false;}
	// Entering quick join implicitly removes the participant from any room.
	void SetInQuickJoin(bool b) {inQuickJoin=b; if (b) room=0;}
	// Name is used for persistent invites and bans. Name should be unique among all participants or else the invites and bans will be applied to the wrong players
	RakNet::RakString GetName(void) const {return name;}
	void SetName(const char *str) {name = str;}
	void SetSystemAddress(const SystemAddress &sa) {systemAddress=sa;}
	SystemAddress GetSystemAddress(void) const {return systemAddress;}
	void SetGUID(RakNetGUID g) {guid=g;}
	RakNetGUID GetGUID(void) const {return guid;}
	PerGameRoomsContainer *GetPerGameRoomsContainer(void) const {return perGameRoomsContainer;}
	bool GetInQuickJoin(void) const {return inQuickJoin;}
protected:
	RakNet::RakString name;
	SystemAddress systemAddress;
	RakNetGUID guid;
	Room *room;
	bool inQuickJoin;
	PerGameRoomsContainer *perGameRoomsContainer;
};
typedef RakNet::RakString GameIdentifier;
/// Lock levels that restrict who may join or change mode in a room.
enum RoomLockState
{
	// Anyone can join or leave
	RLS_NOT_LOCKED,
	// Anyone can join as spectator or become spectator. New players are not allowed. You cannot leave spectator.
	RLS_PLAYERS_LOCKED,
	// No new players are allowed, and you cannot toggle spectator
	RLS_ALL_LOCKED
};
/// Result codes for whether a participant may enter a room
/// (names suggest ban/slot/lock checks — semantics defined in the .cpp).
enum ParticipantCanJoinRoomResult
{
	PCJRR_SUCCESS,
	PCJRR_BANNED,
	PCJRR_NO_PUBLIC_SLOTS,
	PCJRR_NO_PUBLIC_OR_RESERVED_SLOTS,
	PCJRR_NO_SPECTATOR_SLOTS,
	PCJRR_LOCKED,
	PCJRR_SLOT_ALREADY_USED,
};
/// Per-room slot counts, split into public, reserved, and spectator slots.
struct Slots
{
	Slots();
	~Slots();
	unsigned int publicSlots;
	unsigned int reservedSlots;
	unsigned int spectatorSlots;
	// Sum of all three slot categories.
	unsigned int GetTotalSlots(void) const {return publicSlots+reservedSlots+spectatorSlots;}
	// Reads or writes this struct depending on writeToBitstream.
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
	// Checks the slot counts; see RoomsErrorCodes (implemented in the .cpp).
	RoomsErrorCode Validate(void) const;
};
/// A pending invitation: who sent it, who it targets, and whether it is for
/// a spectator slot.
struct InvitedUser
{
	InvitedUser() {room=0; roomId=0; invitedAsSpectator=false;}
	Room *room;
	RoomID roomId;
	RakNet::RakString invitorName;
	SystemAddress invitorSystemAddress;
	// Name of the invited participant (names key invites/bans — see RoomsParticipant::GetName).
	RakNet::RakString target;
	RakNet::RakString subject;
	RakNet::RakString body;
	bool invitedAsSpectator;
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// A ban entry: banned participant's name and the moderator-given reason.
struct BannedUser
{
	RakNet::RakString target;
	RakNet::RakString reason;
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// Output of removing a user from a room and/or the quick-join queue,
/// including side effects such as moderator transfer or room destruction.
struct RemoveUserResult
{
	RemoveUserResult();
	~RemoveUserResult();

	// Why return a deleted pointer?
	// RoomsParticipant *removedUser;
	bool removedFromQuickJoin;
	bool removedFromRoom;
	SystemAddress removedUserAddress;
	RakNet::RakString removedUserName;

	// Following members only apply if removedFromRoom==true
	Room *room;
	RoomID roomId;
	bool gotNewModerator; // If you were the moderator before, this is true
	DataStructures::List<InvitedUser> clearedInvitations; // If invitations were cleared when you leave, these are the invitations
	bool roomDestroyed; // Up to caller to deallocate
	QuickJoinUser *qju;

	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// Network-serializable snapshot of one room member's state.
struct RoomMemberDescriptor
{
	RakNet::RakString name;
	RoomMemberMode roomMemberMode;
	bool isReady;

	// Filled externally
	SystemAddress systemAddress;
	RakNetGUID guid;

	// Copies name/mode/ready state from a live RoomMember.
	void FromRoomMember(RoomMember *roomMember);
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// Room-creation options that travel over the network: slot counts,
/// visibility, invite permissions, and lifecycle flags.
struct NetworkedRoomCreationParameters
{
	NetworkedRoomCreationParameters() {hiddenFromSearches=false; destroyOnModeratorLeave=false; autoLockReadyStatus=false; inviteToRoomPermission=INVITE_MODE_ANYONE_CAN_INVITE; inviteToSpectatorSlotPermission=INVITE_MODE_ANYONE_CAN_INVITE; clearInvitesOnNewModerator=false;}

	// Checked by Validate
	Slots slots;
	bool hiddenFromSearches;
	bool destroyOnModeratorLeave;
	bool autoLockReadyStatus; // When everyone is ready and (the room is full or the room is locked), don't allow users to set unready.
	// Who may send invitations (to the room, and to spectator slots respectively).
	enum SendInvitePermission
	{
		INVITE_MODE_ANYONE_CAN_INVITE,
		INVITE_MODE_MODERATOR_CAN_INVITE,
		INVITE_MODE_PUBLIC_SLOTS_CAN_INVITE,
		INVITE_MODE_RESERVED_SLOTS_CAN_INVITE,
		INVITE_MODE_SPECTATOR_SLOTS_CAN_INVITE,
		INVITE_MODE_MODERATOR_OR_PUBLIC_SLOTS_CAN_INVITE,
		INVITE_MODE_MODERATOR_OR_PUBLIC_OR_RESERVED_SLOTS_CAN_INVITE,
	} inviteToRoomPermission, inviteToSpectatorSlotPermission;
	bool clearInvitesOnNewModerator; // Leave or change
	RakNet::RakString roomName;
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
	// Human-readable name for a SendInvitePermission value (for logging/debug).
	static const char *SendInvitePermissionToEnum(SendInvitePermission e);
};
/// Full, serializable description of a room: members, bans, lock state,
/// permissions, and a one-row property table.
struct RoomDescriptor
{
	DataStructures::List<RoomMemberDescriptor> roomMemberList;
	DataStructures::List<BannedUser> banList;
	RoomLockState roomLockState;
	RoomID lobbyRoomId;
	bool autoLockReadyStatus;
	bool hiddenFromSearches;
	NetworkedRoomCreationParameters::SendInvitePermission inviteToRoomPermission;
	NetworkedRoomCreationParameters::SendInvitePermission inviteToSpectatorSlotPermission;
	DataStructures::Table roomProperties;

	// NOTE(review): both accessors assume roomProperties has at least one row
	// and, for the named overload, that columnName exists — no bounds checks.
	DataStructures::Table::Cell *GetProperty(const char* columnName)
	{
		return roomProperties.GetRowByIndex(0,0)->cells[roomProperties.ColumnIndex(columnName)];
	}
	DataStructures::Table::Cell *GetProperty(int index)
	{
		return roomProperties.GetRowByIndex(0,0)->cells[index];
	}
	// Empties lists and the property table (lists do not own their elements here).
	void Clear(void)
	{
		roomMemberList.Clear(false, _FILE_AND_LINE_);
		banList.Clear(false, _FILE_AND_LINE_);
		roomProperties.Clear();
	}
	void FromRoom(Room *room, AllGamesRoomsContainer *agrc);
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// Result of a successful room join: the room, the member who joined, and
/// (when joining via invitation) who invited them.
struct JoinedRoomResult
{
	JoinedRoomResult() {roomOutput=0; acceptedInvitor=0; agrc=0; joiningMember=0;}
	~JoinedRoomResult() {}
	Room* roomOutput;
	RoomDescriptor roomDescriptor;
	RoomsParticipant* acceptedInvitor;
	RakNet::RakString acceptedInvitorName;
	SystemAddress acceptedInvitorAddress;
	RoomsParticipant* joiningMember;
	RakNet::RakString joiningMemberName;
	SystemAddress joiningMemberAddress;
	RakNetGUID joiningMemberGuid;

	// Needed to serialize
	AllGamesRoomsContainer *agrc;

	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream );
};
/// Inputs and outputs for creating a room; Validate() checks the networked
/// parameters before creation.
struct RoomCreationParameters
{
	RoomCreationParameters();
	~RoomCreationParameters();
	NetworkedRoomCreationParameters networkedRoomCreationParameters;

	// Not checked
	RoomsParticipant* firstUser;
	GameIdentifier gameIdentifier;

	// Output parameters:
	// Was the room created?
	bool createdRoom;
	Room *roomOutput;

	// May return REC_ROOM_CREATION_PARAMETERS_* or REC_SUCCESS
	RoomsErrorCode Validate(
		const DataStructures::List<RakNet::RakString> &otherRoomNames,
		ProfanityFilter *profanityFilter) const;
};
/// A participant's membership record inside one room.
struct RoomMember
{
	RoomMember();
	~RoomMember();
	RoomsParticipant* roomsParticipant;
	RoomMemberMode roomMemberMode;
	RakNet::TimeMS joinTime;
	bool isReady;

	// Internal - set to false when a new member is added. When the other members have been told about this member, it is set to true
	bool newMemberNotificationProcessed;
};
/// A kicked participant together with the moderator-supplied reason.
struct KickedUser
{
	RoomsParticipant* roomsParticipant;
	RakNet::RakString reason;
};
/// A set of table filter queries used to match rooms when searching or
/// quick-joining. Either point `queries` at caller-owned storage, or build
/// queries via the AddQuery_* helpers (which use shared static storage).
struct RoomQuery
{
	RoomQuery();
	~RoomQuery();

	// Point to an externally allocated array of FilterQuery, or use the helper functions below to use a static array (not threadsafe to use the static array)
	DataStructures::Table::FilterQuery *queries;
	// Size of the queries array
	unsigned int numQueries;
	// Not used
	bool queriesAllocated;

	// Helper functions
	// Easier to use, but not threadsafe
	void AddQuery_NUMERIC(const char *columnName, double numericValue, DataStructures::Table::FilterQueryType op=DataStructures::Table::QF_EQUAL);
	void AddQuery_STRING(const char *columnName, const char *charValue, DataStructures::Table::FilterQueryType op=DataStructures::Table::QF_EQUAL);
	void AddQuery_BINARY(const char *columnName, const char *input, int inputLength, DataStructures::Table::FilterQueryType op=DataStructures::Table::QF_EQUAL);
	void AddQuery_POINTER(const char *columnName, void *ptr, DataStructures::Table::FilterQueryType op=DataStructures::Table::QF_EQUAL);
	RoomsErrorCode Validate(void);
	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
	/// \internal
	void SetQueriesToStatic(void);
private:
	// Shared static storage backing the AddQuery_* helpers (max 32 queries).
	static DataStructures::Table::FilterQuery fq[32];
	static DataStructures::Table::Cell cells[32];
	void SetupNextQuery(const char *columnName,DataStructures::Table::FilterQueryType op);
};
/// Quick-join request parameters sent over the network: how long to wait,
/// what room filters to match, and the minimum acceptable player count.
struct NetworkedQuickJoinUser
{
	NetworkedQuickJoinUser() {timeout=60000; minimumPlayers=2;}

	// How long to wait for
	RakNet::TimeMS timeout;

	// What queries to join the room on.
	RoomQuery query;

	// Minimum number of slots to join
	int minimumPlayers;

	void Serialize(bool writeToBitstream, RakNet::BitStream *bitStream);
};
/// Server-side state for one participant waiting in the quick-join queue,
/// with comparators used to sort the queue (see ProcessQuickJoins).
struct QuickJoinUser
{
	QuickJoinUser();
	~QuickJoinUser();

	NetworkedQuickJoinUser networkedQuickJoinUser;

	// Total amount of time spent waiting
	RakNet::TimeMS totalTimeWaiting;

	// Which user
	RoomsParticipant* roomsParticipant;

	static int SortByTotalTimeWaiting( QuickJoinUser* const &key, QuickJoinUser* const &data );
	static int SortByMinimumSlots( QuickJoinUser* const &key, QuickJoinUser* const &data );
};
int RoomPriorityComp( Room * const &key, Room * const &data );
// PerGameRoomsContainer, mapped by game id
class AllGamesRoomsContainer
{
public:
AllGamesRoomsContainer();
~AllGamesRoomsContainer();
static void UnitTest(void);
RoomsErrorCode CreateRoom(RoomCreationParameters *roomCreationParameters,
ProfanityFilter *profanityFilter);
// Enters a room based on the search queries. If no rooms are available to join, will create a room instead
RoomsErrorCode EnterRoom(RoomCreationParameters *roomCreationParameters,
RoomMemberMode roomMemberMode,
ProfanityFilter *profanityFilter,
RoomQuery *query,
JoinedRoomResult *joinRoomResult);
// Attempts to join a room by search query filters
// Returns REC_JOIN_BY_FILTER_*
RoomsErrorCode JoinByFilter(GameIdentifier gameIdentifier, RoomMemberMode roomMemberMode, RoomsParticipant* roomsParticipant, RoomID lastRoomJoined, RoomQuery *query, JoinedRoomResult *joinRoomResult);
// Add a new title to host games with
RoomsErrorCode AddTitle(GameIdentifier gameIdentifier);
// Get all pending invites to you
RoomsErrorCode GetInvitesToParticipant(RoomsParticipant* roomsParticipant, DataStructures::List<InvitedUser*> &invites);
RoomsErrorCode RemoveUser(RoomsParticipant* roomsParticipant, RemoveUserResult *removeMemberResult);
// ROOMS OPERATIONS, implicit room
RoomsErrorCode SendInvite(RoomsParticipant* roomsParticipant, RoomsParticipant* inviteeId, bool inviteToSpectatorSlot, RakNet::RakString subject, RakNet::RakString body);
RoomsErrorCode AcceptInvite(RoomID roomId, Room **room, RoomsParticipant* roomsParticipant, RakNet::RakString inviteSender);
RoomsErrorCode StartSpectating(RoomsParticipant* roomsParticipant);
RoomsErrorCode StopSpectating(RoomsParticipant* roomsParticipant);
RoomsErrorCode GrantModerator(RoomsParticipant* roomsParticipant, RoomsParticipant *newModerator, DataStructures::List<InvitedUser> &clearedInvites);
RoomsErrorCode ChangeSlotCounts(RoomsParticipant* roomsParticipant, Slots slots);
RoomsErrorCode SetCustomRoomProperties(RoomsParticipant* roomsParticipant, DataStructures::Table *table);
RoomsErrorCode ChangeRoomName(RoomsParticipant* roomsParticipant, RakNet::RakString newRoomName, ProfanityFilter *profanityFilter);
RoomsErrorCode SetHiddenFromSearches(RoomsParticipant* roomsParticipant, bool _hiddenFromSearches);
RoomsErrorCode SetDestroyOnModeratorLeave(RoomsParticipant* roomsParticipant, bool destroyOnModeratorLeave);
RoomsErrorCode SetReadyStatus(RoomsParticipant* roomsParticipant, bool isReady);
RoomsErrorCode GetReadyStatus( RoomID roomId, Room **room, DataStructures::List<RoomsParticipant*> &readyUsers, DataStructures::List<RoomsParticipant*> &unreadyUsers);
RoomsErrorCode SetRoomLockState(RoomsParticipant* roomsParticipant, RoomLockState _roomLockState);
RoomsErrorCode GetRoomLockState(RoomID roomId, Room **room, RoomLockState *roomLockState);
RoomsErrorCode AreAllMembersReady(RoomID roomId, Room **room, bool *allReady);
RoomsErrorCode KickMember(RoomsParticipant* roomsParticipant, RoomsParticipant *kickedParticipant, RakNet::RakString reason);
RoomsErrorCode UnbanMember(RoomsParticipant* roomsParticipant, RakNet::RakString name);
RoomsErrorCode GetBanReason( RoomID lobbyRoomId, Room **room, RakNet::RakString name, RakNet::RakString *reason);
RoomsErrorCode LeaveRoom(RoomsParticipant* roomsParticipant, RemoveUserResult *removeUserResult);
//RoomsErrorCode GetKickReason(RoomsParticipant* roomsParticipant, RakNet::RakString *kickReason);
void GetRoomProperties(RoomID roomId, Room **room, DataStructures::Table *table);
// Quick join algorithm:
//
// -- ROOM JOIN --
//
// For all rooms:
// 1. Clear all quickJoinWorkingList from all rooms
// For all quick join members
// 2. Use RoomPrioritySort to get all rooms they can potentially join
// 3. For each of these rooms, record that this member can potentially join by storing a copy of the pointer into quickJoinWorkingList, if minimumPlayers => total room slots
// For all rooms:
// 4. For each room where there are enough potential quick join members to fill the room, join all those members at once. Remove these members from the quick join list. Go to 1.
//
// -- ROOM CREATE --
//
// 5. Sort quick join members by minimumPlayers, excluding members where minimumPlayers > total number of quick join members
// From greatest minimumPlayers to least
// 6. If the current member created a room, find out how many subsequent members would join based on the custom filter
// 7. If this satisfies minimumPlayers, have that user create a room and those subsequent members join.
//
// -- EXPIRE
//
// 5. Remove from list if timeout has expired.
// 6. Return results of operation (List<timeoutExpired>, List<joinedARoom>, List<RoomsThatWereJoined>
//
// Returns false if processing skipped due to optimization timer
RoomsErrorCode ProcessQuickJoins(
DataStructures::List<QuickJoinUser*> &timeoutExpired,
DataStructures::List<JoinedRoomResult> &joinedRoomMembers,
DataStructures::List<QuickJoinUser*> &dereferencedPointers,
RakNet::TimeMS elapsedTime);
// Quick join - Store a list of all members waiting to quick join.
// Quick join ends when
// 1. An existing room can be fully populated using waiting quick join members.
// 2. Enough quick join members are waiting that a new room can be created with the number of members >= minimumPlayers for all members
// It also ends if timeToWaitMS expires.
// Returns REC_ADD_TO_QUICK_JOIN_*
// Passed pointer is stored on REC_SUCCESS, allocate, and do not deallocate unless not successful
RoomsErrorCode AddUserToQuickJoin(GameIdentifier gameIdentifier, QuickJoinUser *quickJoinMember);
// Returns REC_REMOVE_FROM_QUICK_JOIN_*
RoomsErrorCode RemoveUserFromQuickJoin(RoomsParticipant* roomsParticipant, QuickJoinUser **qju);
// Is this user in quick join?
bool IsInQuickJoin(RoomsParticipant* roomsParticipant);
// Get all rooms for a certain title
static int RoomsSortByName( Room* const &key, Room* const &data );
RoomsErrorCode SearchByFilter( GameIdentifier gameIdentifier, RoomsParticipant* roomsParticipant, RoomQuery *roomQuery, DataStructures::OrderedList<Room*, Room*, RoomsSortByName> &roomsOutput, bool onlyJoinable );
// Deallocate a room
void DestroyRoomIfDead(Room *room);
// If a handle changes, you have to tell the system here. Otherwise ban and invite names will be out of synch
// System does not verify that the handle is not currently in use since it does not necessarily know about all online players
// This is an invariant up to the caller to uphold. Failure to do so will result in the wrong players being banned or invited
void ChangeHandle(RakNet::RakString oldHandle, RakNet::RakString newHandle);
unsigned int GetPropertyIndex(RoomID lobbyRoomId, const char *propertyName) const;
DataStructures::Map<GameIdentifier, PerGameRoomsContainer*> perGamesRoomsContainers;
Room * GetRoomByLobbyRoomID(RoomID lobbyRoomID);
Room * GetRoomByName(RakNet::RakString roomName);
protected:
RoomID nextRoomId;
};
// Per-title room collection. AllGamesRoomsContainer owns one instance per
// GameIdentifier (see its perGamesRoomsContainers map) and forwards most
// operations here; hence the protected interface plus friend declaration.
class PerGameRoomsContainer
{
public:
PerGameRoomsContainer();
~PerGameRoomsContainer();
// Has pointer column to class Room
DataStructures::Table roomsTable;
// Members that are waiting to quick join
DataStructures::List<QuickJoinUser*> quickJoinList;
// Comparator used by RoomPrioritySort; see the comment on RoomPrioritySort below.
static int RoomsSortByTimeThenTotalSlots( Room* const &key, Room* const &data );
protected:
// Creates a room with the given creation parameters under id lobbyRoomId.
// When 'validate' is set, parameters (e.g. room name via profanityFilter) are checked first.
RoomsErrorCode CreateRoom(RoomCreationParameters *roomCreationParameters,
ProfanityFilter *profanityFilter,
RoomID lobbyRoomId,
bool validate);
// Removes the participant from their room; *gotNewModerator reports whether moderatorship was transferred.
RoomsErrorCode LeaveRoom(RoomsParticipant* roomsParticipant, bool *gotNewModerator);
// Joins the best room matching 'query'; lastRoomJoined is used to avoid immediately rejoining the room just left.
RoomsErrorCode JoinByFilter(RoomMemberMode roomMemberMode, RoomsParticipant* roomsParticipant, RoomID lastRoomJoined, RoomQuery *query, JoinedRoomResult *joinRoomResult);
// Quick-join bookkeeping; pointer ownership rules are documented on AllGamesRoomsContainer.
RoomsErrorCode AddUserToQuickJoin(QuickJoinUser *quickJoinMember);
RoomsErrorCode RemoveUserFromQuickJoin(RoomsParticipant* roomsParticipant, QuickJoinUser **qju);
bool IsInQuickJoin(RoomsParticipant* roomsParticipant);
// Index of the participant in quickJoinList, or an out-of-range value if absent.
unsigned int GetQuickJoinIndex(RoomsParticipant* roomsParticipant);
void GetRoomNames(DataStructures::List<RakNet::RakString> &roomNames);
void GetAllRooms(DataStructures::List<Room*> &rooms);
// Looks for a particular room that has a particular ID
Room * GetRoomByLobbyRoomID(RoomID lobbyRoomID);
Room * GetRoomByName(RakNet::RakString roomName);
RoomsErrorCode GetInvitesToParticipant(RoomsParticipant* roomsParticipant, DataStructures::List<InvitedUser*> &invites);
// Deallocates the room when it is dead (see Room::IsRoomDead); returns whether it was destroyed.
bool DestroyRoomIfDead(Room *room);
void ChangeHandle(RakNet::RakString oldHandle, RakNet::RakString newHandle);
unsigned ProcessQuickJoins( DataStructures::List<QuickJoinUser*> &timeoutExpired,
DataStructures::List<JoinedRoomResult> &joinedRooms,
DataStructures::List<QuickJoinUser*> &dereferencedPointers,
RakNet::TimeMS elapsedTime,
RoomID startingRoomId);
// Sort an input list of rooms
// Rooms are sorted by time created (longest is higher priority). If within one minute, then subsorted by total playable slot count (lower is higher priority).
// When using EnterRoom or JoinByFilter, record the last room joined, and try to avoid rejoining the same room just left
void RoomPrioritySort( RoomsParticipant* roomsParticipant, RoomQuery *roomQuery, DataStructures::OrderedList<Room*, Room*, RoomsSortByTimeThenTotalSlots> &roomsOutput );
RoomsErrorCode SearchByFilter( RoomsParticipant* roomsParticipant, RoomQuery *roomQuery, DataStructures::OrderedList<Room*, Room*, AllGamesRoomsContainer::RoomsSortByName> &roomsOutput, bool onlyJoinable );
friend class AllGamesRoomsContainer;
// Rate-limits ProcessQuickJoins (see "optimization timer" note on AllGamesRoomsContainer::ProcessQuickJoins).
IntervalTimer nextQuickJoinProcess;
};
// Holds all the members of a particular room
class Room
{
public:
// Constructed with its table row and the creating participant (initial moderator).
Room( RoomID _roomId, RoomCreationParameters *roomCreationParameters, DataStructures::Table::Row *_row, RoomsParticipant* roomsParticipant );
~Room();
// --- Operations invoked by a participant already in (or joining) this room.
// Most return a RoomsErrorCode describing why the operation was rejected.
RoomsErrorCode SendInvite(RoomsParticipant* roomsParticipant, RoomsParticipant* inviteeId, bool inviteToSpectatorSlot, RakNet::RakString subject, RakNet::RakString body);
RoomsErrorCode AcceptInvite(RoomsParticipant* roomsParticipant, RakNet::RakString inviteSender);
RoomsErrorCode StartSpectating(RoomsParticipant* roomsParticipant);
RoomsErrorCode StopSpectating(RoomsParticipant* roomsParticipant);
// Transfers moderatorship; invites cleared by the transfer are returned in clearedInvites.
RoomsErrorCode GrantModerator(RoomsParticipant* roomsParticipant, RoomsParticipant *newModerator, DataStructures::List<InvitedUser> &clearedInvites);
RoomsErrorCode ChangeSlotCounts(RoomsParticipant* roomsParticipant, Slots slots);
RoomsErrorCode SetCustomRoomProperties(RoomsParticipant* roomsParticipant, DataStructures::Table *table);
RoomsErrorCode ChangeRoomName(RoomsParticipant* roomsParticipant, RakNet::RakString newRoomName, ProfanityFilter *profanityFilter);
RoomsErrorCode SetHiddenFromSearches(RoomsParticipant* roomsParticipant, bool _hiddenFromSearches);
RoomsErrorCode SetDestroyOnModeratorLeave(RoomsParticipant* roomsParticipant, bool destroyOnModeratorLeave);
RoomsErrorCode SetReadyStatus(RoomsParticipant* roomsParticipant, bool isReady);
RoomsErrorCode GetReadyStatus(DataStructures::List<RoomsParticipant*> &readyUsers, DataStructures::List<RoomsParticipant*> &unreadyUsers);
RoomsErrorCode SetRoomLockState(RoomsParticipant* roomsParticipant, RoomLockState _roomLockState);
RoomsErrorCode GetRoomLockState(RoomLockState *_roomLockState);
// 'exceptThisIndex' lets the caller exclude one member (e.g. the one being checked) from the scan.
RoomsErrorCode AreAllMembersReady(unsigned int exceptThisIndex, bool *allReady);
RoomsErrorCode KickMember(RoomsParticipant* roomsParticipant, RoomsParticipant *kickedParticipant, RakNet::RakString reason);
RoomsErrorCode UnbanMember(RoomsParticipant* roomsParticipant, RakNet::RakString name);
RoomsErrorCode GetBanReason(RakNet::RakString name, RakNet::RakString *reason);
RoomsErrorCode LeaveRoom(RoomsParticipant* roomsParticipant, RemoveUserResult *removeUserResult);
//RoomsErrorCode GetKickReason(RoomsParticipant* roomsParticipant, RakNet::RakString *kickReason);
RoomsErrorCode JoinByFilter(RoomsParticipant* roomsParticipant, RoomMemberMode roomMemberMode, JoinedRoomResult *joinRoomResult);
RoomsErrorCode JoinByQuickJoin(RoomsParticipant* roomsParticipant, RoomMemberMode roomMemberMode, JoinedRoomResult *joinRoomResult);
bool IsHiddenToParticipant(RoomsParticipant* roomsParticipant) const;
// Can this user join this room?
ParticipantCanJoinRoomResult ParticipantCanJoinAsPlayer( RoomsParticipant* roomsParticipant, bool asSpectator, bool checkHasInvite );
ParticipantCanJoinRoomResult ParticipantCanJoinRoom( RoomsParticipant* roomsParticipant, bool asSpectator, bool checkHasInvite );
// Returns true if there are only spectators, or nobody at all
bool IsRoomDead(void) const;
RoomsErrorCode GetInvitesToParticipant(RoomsParticipant* roomsParticipant, DataStructures::List<InvitedUser*> &invites);
RoomsParticipant* GetModerator(void) const;
// Gets the room ID
RoomID GetID(void) const;
// Custom property accessors; by-name variants resolve the column index first.
double GetNumericProperty(RoomID lobbyRoomId, const char *propertyName) const;
const char *GetStringProperty(RoomID lobbyRoomId, const char *propertyName) const;
double GetNumericProperty(int index) const;
const char *GetStringProperty(int index) const;
void SetNumericProperty(int index, double value);
void SetStringProperty(int index, const char *value);
// Public for easy access
DataStructures::List<RoomMember*> roomMemberList;
DataStructures::List<InvitedUser> inviteList;
DataStructures::List<BannedUser> banList;
// Don't store - slow because when removing users I have to iterate through every room
// DataStructures::List<KickedUser> kickedList;
// Internal
// Scratch list used by the quick-join algorithm (see AllGamesRoomsContainer::ProcessQuickJoins).
DataStructures::List<QuickJoinUser*> quickJoinWorkingList;
static void UpdateRowSlots( DataStructures::Table::Row* row, Slots *totalSlots, Slots *usedSlots);
void ChangeHandle(RakNet::RakString oldHandle, RakNet::RakString newHandle);
protected:
Room();
// Updates the table row
RoomsErrorCode RemoveUser(RoomsParticipant* roomsParticipant,RemoveUserResult *removeUserResult);
bool IsRoomLockedToSpectators(void) const;
bool IsRoomLockedToPlayers(void) const;
bool IsInRoom(RoomsParticipant* roomsParticipant) const;
bool HasInvite(RakNet::RakString roomsParticipant);
// Index lookups into roomMemberList / banList / inviteList.
unsigned int GetRoomIndex(RoomsParticipant* roomsParticipant) const;
unsigned int GetBannedIndex(RakNet::RakString username) const;
unsigned int GetInviteIndex(RakNet::RakString invitee, RakNet::RakString invitor) const;
unsigned int GetFirstInviteIndex(RakNet::RakString invitee) const;
bool AreAllPlayableSlotsFilled(void) const;
bool HasOpenPublicSlots(void) const;
bool HasOpenReservedSlots(void) const;
bool HasOpenSpectatorSlots(void) const;
// Recomputes slot usage and mirrors it into the table row (see UpdateRowSlots).
void UpdateUsedSlots( void );
void UpdateUsedSlots( Slots *totalSlots, Slots *usedSlots );
static void UpdateUsedSlots( DataStructures::Table::Row* tableRow, Slots *totalSlots, Slots *usedSlots );
Slots GetTotalSlots(void) const;
void SetTotalSlots(Slots *totalSlots);
Slots GetUsedSlots(void) const;
RoomLockState roomLockState;
friend struct RoomDescriptor;
friend class PerGameRoomsContainer;
friend class AllGamesRoomsContainer;
RoomID lobbyRoomId;
// Row in PerGameRoomsContainer::roomsTable that mirrors this room's searchable state.
DataStructures::Table::Row *tableRow;
bool autoLockReadyStatus;
bool hiddenFromSearches;
// bool destroyOnModeratorLeave;
bool clearInvitesOnNewModerator;
NetworkedRoomCreationParameters::SendInvitePermission inviteToRoomPermission, inviteToSpectatorSlotPermission;
bool roomDestroyed;
};
} // namespace Lobby2
#endif
|
taragu/duke-compsci | slogo_team01/src/turtle/AbstractModel.java | <gh_stars>0
package turtle;
import java.beans.PropertyChangeListener;
import java.beans.PropertyChangeSupport;
/**
 * Base class for model objects: wraps a {@link PropertyChangeSupport}
 * delegate so subclasses can register listeners and fire property-change
 * notifications to bound views.
 */
public abstract class AbstractModel {
// Delegate managing the listener list and event dispatch; event source is this model.
protected PropertyChangeSupport propertyChangeSupport;
public AbstractModel() {
propertyChangeSupport = new PropertyChangeSupport(this);
}
/** Registers a listener for all property changes of this model. */
public void addPropertyChangeListener(PropertyChangeListener listener) {
propertyChangeSupport.addPropertyChangeListener(listener);
}
/** Removes a previously registered listener. */
public void removePropertyChangeListener(PropertyChangeListener listener) {
propertyChangeSupport.removePropertyChangeListener(listener);
}
/** Notifies registered listeners that {@code propertyName} changed from {@code oldValue} to {@code newValue}. */
protected void firePropertyChange(String propertyName, Object oldValue,
Object newValue) {
propertyChangeSupport.firePropertyChange(propertyName, oldValue,
newValue);
}
// Hook for subclasses to push their initial property values to listeners;
// intentionally a no-op here.
protected void fireInitialProperties() {
}
}
|
DamieFC/chromium | third_party/blink/renderer/modules/mediastream/mock_media_stream_video_source.cc | <gh_stars>1-10
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/modules/mediastream/mock_media_stream_video_source.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "third_party/blink/public/mojom/mediastream/media_stream.mojom-blink.h"
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
namespace blink {
// Default mock: does not answer RequestRefreshFrame() (delegates to the
// bool constructor with false).
MockMediaStreamVideoSource::MockMediaStreamVideoSource()
    : MockMediaStreamVideoSource(false) {}
// Mock without a predefined capture format; max requested dimensions and
// frame rate start at zero until tracks constrain them.
MockMediaStreamVideoSource::MockMediaStreamVideoSource(
    bool respond_to_request_refresh_frame)
    : respond_to_request_refresh_frame_(respond_to_request_refresh_frame),
      max_requested_height_(0),
      max_requested_width_(0),
      max_requested_frame_rate_(0.0),
      attempted_to_start_(false) {}
// Mock with a fixed capture format; the max requested width/height/rate are
// seeded from that format.
MockMediaStreamVideoSource::MockMediaStreamVideoSource(
    const media::VideoCaptureFormat& format,
    bool respond_to_request_refresh_frame)
    : format_(format),
      respond_to_request_refresh_frame_(respond_to_request_refresh_frame),
      max_requested_height_(format.frame_size.height()),
      max_requested_width_(format.frame_size.width()),
      max_requested_frame_rate_(format.frame_rate),
      attempted_to_start_(false) {}
MockMediaStreamVideoSource::~MockMediaStreamVideoSource() {}
// Completes a pending StartSourceImpl() with success. Must only be called
// while a start attempt is outstanding (attempted_to_start_ set).
void MockMediaStreamVideoSource::StartMockedSource() {
DCHECK(attempted_to_start_);
attempted_to_start_ = false;
OnStartDone(mojom::blink::MediaStreamRequestResult::OK);
}
// Completes a pending StartSourceImpl() with TRACK_START_FAILURE_VIDEO,
// simulating a source that fails to start.
void MockMediaStreamVideoSource::FailToStartMockedSource() {
DCHECK(attempted_to_start_);
attempted_to_start_ = false;
OnStartDone(
mojom::blink::MediaStreamRequestResult::TRACK_START_FAILURE_VIDEO);
}
// If configured to respond, posts a solid-color frame of |format_|'s size to
// the registered frame callback on the IO task runner, then notifies the
// OnRequestRefreshFrame() hook either way. Requires a started source.
void MockMediaStreamVideoSource::RequestRefreshFrame() {
DCHECK(!frame_callback_.is_null());
if (respond_to_request_refresh_frame_) {
const scoped_refptr<media::VideoFrame> frame =
media::VideoFrame::CreateColorFrame(format_.frame_size, 0, 0, 0,
base::TimeDelta());
PostCrossThreadTask(
*io_task_runner(), FROM_HERE,
CrossThreadBindOnce(frame_callback_, frame,
std::vector<scoped_refptr<media::VideoFrame>>(),
base::TimeTicks()));
}
OnRequestRefreshFrame();
}
// The mock treats "no consumers" as suspended.
void MockMediaStreamVideoSource::OnHasConsumers(bool has_consumers) {
is_suspended_ = !has_consumers;
}
// Returns a repeating callback bound to OnFrameFeedback(). Uses
// WTF::Unretained, so the callback must not outlive this object.
VideoCaptureFeedbackCB MockMediaStreamVideoSource::GetFeedbackCallback() const {
return WTF::BindRepeating(&MockMediaStreamVideoSource::OnFrameFeedback,
WTF::Unretained(this));
}
// Weak pointer handed to tracks/consumers; invalidated when this mock dies.
base::WeakPtr<MediaStreamVideoSource> MockMediaStreamVideoSource::GetWeakPtr()
const {
return weak_factory_.GetWeakPtr();
}
// Test helper exposing the protected ChangeSourceImpl() to switch devices.
void MockMediaStreamVideoSource::DoChangeSource(
const MediaStreamDevice& new_device) {
ChangeSourceImpl(new_device);
}
// Records the delivery callbacks and marks a start attempt as pending.
// The start is only completed later by StartMockedSource() or
// FailToStartMockedSource(). Must not be called while callbacks are set.
void MockMediaStreamVideoSource::StartSourceImpl(
VideoCaptureDeliverFrameCB frame_callback,
EncodedVideoFrameCB encoded_frame_callback) {
DCHECK(frame_callback_.is_null());
DCHECK(encoded_frame_callback_.is_null());
attempted_to_start_ = true;
frame_callback_ = std::move(frame_callback);
encoded_frame_callback_ = std::move(encoded_frame_callback);
}
void MockMediaStreamVideoSource::StopSourceImpl() {}
// Returns the fixed mock format. Thread-affine: checked against the owning
// thread.
absl::optional<media::VideoCaptureFormat>
MockMediaStreamVideoSource::GetCurrentFormat() const {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
return absl::optional<media::VideoCaptureFormat>(format_);
}
// Wraps the mock format into default-constructed capture params.
absl::optional<media::VideoCaptureParams>
MockMediaStreamVideoSource::GetCurrentCaptureParams() const {
media::VideoCaptureParams params;
params.requested_format = format_;
return params;
}
// Posts |frame| to the registered frame callback on the IO task runner.
// Requires a started source that is not stopped-for-restart.
void MockMediaStreamVideoSource::DeliverVideoFrame(
scoped_refptr<media::VideoFrame> frame) {
DCHECK(!is_stopped_for_restart_);
DCHECK(!frame_callback_.is_null());
PostCrossThreadTask(
*io_task_runner(), FROM_HERE,
CrossThreadBindOnce(frame_callback_, std::move(frame),
std::vector<scoped_refptr<media::VideoFrame>>(),
base::TimeTicks()));
}
// Same as DeliverVideoFrame() but for the encoded-frame callback.
void MockMediaStreamVideoSource::DeliverEncodedVideoFrame(
scoped_refptr<EncodedVideoFrame> frame) {
DCHECK(!is_stopped_for_restart_);
DCHECK(!encoded_frame_callback_.is_null());
PostCrossThreadTask(*io_task_runner(), FROM_HERE,
CrossThreadBindOnce(encoded_frame_callback_,
std::move(frame), base::TimeTicks()));
}
// A source configured with can_stop_for_restart_ == false refuses the stop
// and reports failure; otherwise it transitions to the stopped-for-restart
// state and reports success.
void MockMediaStreamVideoSource::StopSourceForRestartImpl() {
  const bool stopped = is_stopped_for_restart_ || can_stop_for_restart_;
  is_stopped_for_restart_ = stopped;
  OnStopForRestartDone(stopped);
}
// Restarts a source previously stopped for restart. When restart is allowed,
// the mock adopts |new_format| and leaves the stopped state; otherwise it
// simply reports failure. Must only be called while stopped-for-restart.
void MockMediaStreamVideoSource::RestartSourceImpl(
    const media::VideoCaptureFormat& new_format) {
  DCHECK(is_stopped_for_restart_);
  if (can_restart_) {
    format_ = new_format;
    is_stopped_for_restart_ = false;
    OnRestartDone(true);
  } else {
    OnRestartDone(false);
  }
}
} // namespace blink
|
luongnvUIT/prowide-iso20022 | model-camt-types/src/generated/java/com/prowidesoftware/swift/model/mx/dic/CardAggregated2.java |
package com.prowidesoftware.swift.model.mx.dic;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
/**
* Globalised card transaction entry details.
*
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "CardAggregated2", propOrder = {
"addtlSvc",
"txCtgy",
"saleRcncltnId",
"seqNbRg",
"txDtRg"
})
// Generated JAXB binding (ISO 20022 camt message component). Fluent setters
// return this to allow chaining; equals/hashCode/toString are reflective.
public class CardAggregated2 {
// Maps to the optional "AddtlSvc" XML element (enumerated service type).
@XmlElement(name = "AddtlSvc")
@XmlSchemaType(name = "string")
protected CardPaymentServiceType2Code addtlSvc;
// Maps to the optional "TxCtgy" XML element.
@XmlElement(name = "TxCtgy")
protected String txCtgy;
// Maps to the optional "SaleRcncltnId" XML element.
@XmlElement(name = "SaleRcncltnId")
protected String saleRcncltnId;
// Maps to the optional "SeqNbRg" XML element (card sequence number range).
@XmlElement(name = "SeqNbRg")
protected CardSequenceNumberRange1 seqNbRg;
// Maps to the optional "TxDtRg" XML element (date or date-time period choice).
@XmlElement(name = "TxDtRg")
protected DateOrDateTimePeriod1Choice txDtRg;
/**
 * Gets the value of the addtlSvc property.
 *
 * @return
 *     possible object is
 *     {@link CardPaymentServiceType2Code }
 *
 */
public CardPaymentServiceType2Code getAddtlSvc() {
return addtlSvc;
}
/**
 * Sets the value of the addtlSvc property.
 *
 * @param value
 *     allowed object is
 *     {@link CardPaymentServiceType2Code }
 *
 */
public CardAggregated2 setAddtlSvc(CardPaymentServiceType2Code value) {
this.addtlSvc = value;
return this;
}
/**
 * Gets the value of the txCtgy property.
 *
 * @return
 *     possible object is
 *     {@link String }
 *
 */
public String getTxCtgy() {
return txCtgy;
}
/**
 * Sets the value of the txCtgy property.
 *
 * @param value
 *     allowed object is
 *     {@link String }
 *
 */
public CardAggregated2 setTxCtgy(String value) {
this.txCtgy = value;
return this;
}
/**
 * Gets the value of the saleRcncltnId property.
 *
 * @return
 *     possible object is
 *     {@link String }
 *
 */
public String getSaleRcncltnId() {
return saleRcncltnId;
}
/**
 * Sets the value of the saleRcncltnId property.
 *
 * @param value
 *     allowed object is
 *     {@link String }
 *
 */
public CardAggregated2 setSaleRcncltnId(String value) {
this.saleRcncltnId = value;
return this;
}
/**
 * Gets the value of the seqNbRg property.
 *
 * @return
 *     possible object is
 *     {@link CardSequenceNumberRange1 }
 *
 */
public CardSequenceNumberRange1 getSeqNbRg() {
return seqNbRg;
}
/**
 * Sets the value of the seqNbRg property.
 *
 * @param value
 *     allowed object is
 *     {@link CardSequenceNumberRange1 }
 *
 */
public CardAggregated2 setSeqNbRg(CardSequenceNumberRange1 value) {
this.seqNbRg = value;
return this;
}
/**
 * Gets the value of the txDtRg property.
 *
 * @return
 *     possible object is
 *     {@link DateOrDateTimePeriod1Choice }
 *
 */
public DateOrDateTimePeriod1Choice getTxDtRg() {
return txDtRg;
}
/**
 * Sets the value of the txDtRg property.
 *
 * @param value
 *     allowed object is
 *     {@link DateOrDateTimePeriod1Choice }
 *
 */
public CardAggregated2 setTxDtRg(DateOrDateTimePeriod1Choice value) {
this.txDtRg = value;
return this;
}
// Reflective implementations from commons-lang3; adequate for a passive DTO.
@Override
public String toString() {
return ToStringBuilder.reflectionToString(this, ToStringStyle.MULTI_LINE_STYLE);
}
@Override
public boolean equals(Object that) {
return EqualsBuilder.reflectionEquals(this, that);
}
@Override
public int hashCode() {
return HashCodeBuilder.reflectionHashCode(this);
}
}
|
crossmob/WinObjC | tools/vsimporter/xib2nib/src/UIImageView.cpp | //******************************************************************************
//
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
//******************************************************************************
#include "UIImageView.h"
#include "UICustomResource.h"
// XIB -> NIB key translation table for image views: the XIB "IBUIImage"
// property is emitted as "UIImage" in the NIB output (no converter function).
static PropertyMapper propertyMappings[] = {
"IBUIImage", "UIImage", NULL,
};
static const int numPropertyMappings = sizeof(propertyMappings) / sizeof(PropertyMapper);
// Starts with no image resource attached.
UIImageView::UIImageView() {
_image = NULL;
}
// XIB path: base-class init only; the image itself is mapped later via
// propertyMappings in ConvertStaticMappings().
void UIImageView::InitFromXIB(XIBObject* obj) {
UIView::InitFromXIB(obj);
obj->_outputClassName = "UIImageView";
}
// Storyboard path: reads the "image" attribute and, when present, wraps it in
// a UICustomResource so the image name is carried into the NIB output.
// Note: the UICustomResource is heap-allocated and owned by this object for
// the lifetime of the conversion run (never freed; tool exits afterwards).
void UIImageView::InitFromStory(XIBObject* obj) {
UIView::InitFromStory(obj);
_image = NULL;
const char* imageName = getAttrAndHandle("image");
if (imageName) {
UICustomResource* image = new UICustomResource();
image->_imageName = imageName;
_image = image;
}
obj->_outputClassName = "UIImageView";
}
// Emits the mapped XIB properties, then the base-class output, then (for the
// storyboard path) the explicit "UIImage" member captured in InitFromStory().
void UIImageView::ConvertStaticMappings(NIBWriter* writer, XIBObject* obj) {
Map(writer, obj, propertyMappings, numPropertyMappings);
UIView::ConvertStaticMappings(writer, obj);
if (_image)
AddOutputMember(writer, "UIImage", _image);
}
|
teberhardt/byps | java/bypsgen/src/byps/gen/utils/PrintContextBase.java | <gh_stars>1-10
package byps.gen.utils;
/* USE THIS FILE ACCORDING TO THE COPYRIGHT RULES IN LICENSE.TXT WHICH IS PART OF THE SOURCE CODE PACKAGE */
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import byps.BApiDescriptor;
import byps.BBinaryModel;
import byps.gen.api.GeneratorException;
import byps.gen.api.GeneratorProperties;
import byps.gen.api.MemberInfo;
import byps.gen.api.RemoteInfo;
import byps.gen.api.SerialInfo;
import byps.gen.api.TypeInfo;
import byps.gen.db.BRegistryForClassDB;
import byps.gen.db.ClassDB;
/**
 * Shared state and naming/printing helpers for the byps code generators.
 * Derives serializer class/package names from the class database and emits
 * common code fragments (struct sizes, escaped string literals, etc.).
 */
public class PrintContextBase {
/** Captures the class database and derives API name/package from its descriptor. */
public PrintContextBase(ClassDB classDB, GeneratorProperties props) {
this.classDB = classDB;
this.apiDesc = classDB.getApiDescriptor();
this.registry = classDB.getRegistry();
this.apiName = apiDesc.name;
this.apiPack = apiDesc.basePackage;
this.props = props;
}
/** Serializer class name: "JSerializer_&lt;typeId&gt;" for JSON, otherwise "BSerializer_&lt;typeId&gt;"; empty for null. */
public String getSerializerClassName(TypeInfo tinfo, BBinaryModel pformat) {
if (tinfo == null) return "";
String s = pformat == BBinaryModel.JSON ? "JSerializer_" : "BSerializer_";
return s + tinfo.typeId;
}
/**
 * Package of the generated serializer: "byps" for built-in type IDs (below the
 * user range), the API base package for arrays/collections, otherwise the
 * type's own package. Empty for null.
 */
public String getSerializerPackage(TypeInfo tinfo) {
if (tinfo == null) return "";
String s = "";
if (tinfo.typeId < registry.getMinTypeIdUser()) {
s = "byps";
}
else if (tinfo.isArrayType() || tinfo.isCollectionType()) {
s = apiPack;
}
else {
s = tinfo.pack;
}
return s;
}
/** Fully qualified serializer name: package + "." + class name. */
public String getSerializerQName(TypeInfo tinfo, BBinaryModel pformat) {
if (tinfo == null) return "";
return getSerializerPackage(tinfo) + "." + getSerializerClassName(tinfo, pformat);
}
/** Serializer class name for a remote method: "&lt;serializer&gt;_&lt;methodName&gt;". */
public String getMethodSerializerClassName(TypeInfo remoteInfo, String methodName, BBinaryModel pformat) {
return getSerializerClassName(remoteInfo, pformat) + "_" + methodName;
}
/**
 * Prints a compile-time size expression for the structure: one "+ size" term
 * per member, with explicit "+ pad" terms so each member starts at an aligned
 * offset, and a final pad so the total is a multiple of the alignment (8).
 * The running offset starts after the base class, but the emitted expression
 * adds 0 for the base class itself.
 */
public void printComputeSize(CodePrinter pr, SerialInfo serInfo, BBinaryModel bmodel) throws IOException {
int size = serInfo.baseInfo != null ? Utils.computeStructSize(bmodel, serInfo.baseInfo) : 0;
pr.println("return 0");
String indent = "        ";
pr.print(indent).println("/* size of base class */ + 0");
for (MemberInfo minfo : serInfo.members) {
int memberSize = minfo.type.getMemberSize(bmodel);
int pad = Utils.getPaddingForAlignedPosition(bmodel, size, memberSize);
if (pad != 0) {
size += pad;
pr.print(indent).print("/* padding */ + " + pad);
pr.println();
}
pr.print(indent).print("/* pos=" + size).print(": ").print(minfo.name).print(" */ + " + memberSize);
pr.println();
size += memberSize;
}
// structure size must be a multiple of its alignment
{
int pad = Utils.getPaddingForAlignedPosition(bmodel, size, 8);
size += pad;
pr.print(indent).print("/* padding up to multiple of alignment */ + " + pad + ";");
pr.println();
}
}
/** True when the generator property "changed members" is set to "true". */
public boolean isGenerateChangedMembers() {
String val = props.getProperty(GeneratorProperties.CHANGED_MEMBERS);
return val != null && val.equalsIgnoreCase("true");
}
/** Element-selector class name for a type: "&lt;typeName&gt;C". */
public String getElementSelectorClassName(TypeInfo tinfo) {
String s = tinfo.name + "C";
return s;
}
/** Element-selector member name: "mb" + member name with first char upper-cased. */
public String getElementSelectorName(MemberInfo minfo) {
String s = "mb" + Utils.firstCharToUpper(minfo.name);
return s;
}
/**
 * Escapes a single character for inclusion in a generated source string
 * literal: common escapes for control/quote characters, the character itself
 * for the 0x20..0x7F range, and a \\uXXXX escape otherwise.
 * NOTE(review): 0x7F (DEL) falls inside the "printable" range here and is
 * emitted raw — confirm that is intended.
 */
public String printStringChar(char c) {
StringBuilder sbuf = new StringBuilder();
// if (Character.isLetterOrDigit(c)) sbuf.append(c);
if (c == '\0') sbuf.append("\\0");
else if (c == '\\') sbuf.append("\\\\");
else if (c == '\t') sbuf.append("\\t");
else if (c == '\r') sbuf.append("\\r");
else if (c == '\n') sbuf.append("\\n");
else if (c == '\b') sbuf.append("\\b");
else if (c == '\"') sbuf.append("\\\"");
else if (c == '\'') sbuf.append("\\\'");
else if (c >= 0x20 && c <= 0x7F) sbuf.append(c);
else {
sbuf.append("\\u");
String s = Integer.toHexString((int) c);
for (int i = s.length(); i < 4; i++)
sbuf.append("0");
sbuf.append(s);
}
return sbuf.toString();
}
/** True when the parameter's type equals the remote's configured authentication parameter class. */
public boolean isSessionParam(RemoteInfo rinfo, MemberInfo pinfo) {
return (rinfo != null && rinfo.authParamClassName != null && pinfo.type.qname.equals(rinfo.authParamClassName));
}
/**
 * Recursively collects rinfo and all of its base interfaces into 'remotes',
 * keyed by qualified name (cycle/duplicate safe via the containsKey check).
 * @throws GeneratorException when a base interface is missing from the class database.
 */
public void collectAllRemotesForStubOrSkeleton(RemoteInfo rinfo, HashMap<String, RemoteInfo> remotes) throws GeneratorException {
if (remotes.containsKey(rinfo.qname)) return;
remotes.put(rinfo.qname, rinfo);
for (String r : rinfo.baseQNames) {
RemoteInfo rinfoBase = classDB.getRemoteInfo(r);
if (rinfoBase == null) {
throw new GeneratorException("Missing interface class for " + r);
}
collectAllRemotesForStubOrSkeleton(rinfoBase, remotes);
}
}
/** Qualified base names, substituting the "auth" variant of each base remote when one exists. */
public ArrayList<String> getBaseQNamesForRemoteWithAuth(RemoteInfo rinfo) {
ArrayList<String> ret = new ArrayList<String>();
for (String baseName : rinfo.baseQNames) {
RemoteInfo rbase = classDB.getRemoteInfo(baseName);
RemoteInfo rbaseImpl = rbase.getRemoteAuth();
ret.add(rbaseImpl != null ? rbaseImpl.qname : rbase.qname);
}
return ret;
}
/**
 * Filters to user-defined pointer types and returns them sorted ascending by
 * typeId (insertion via binary search). Duplicate typeIds indicate an
 * inconsistent class database and raise IllegalStateException.
 */
public ArrayList<SerialInfo> getSerializersForRegistrySortedByTypeId(Iterable<SerialInfo> serInfos) {
Comparator<SerialInfo> cmp = new Comparator<SerialInfo>() {
@Override
public int compare(SerialInfo o1, SerialInfo o2) {
return o1.typeId - o2.typeId;
}
};
ArrayList<SerialInfo> serializers = new ArrayList<SerialInfo>();
for (SerialInfo serInfo : serInfos) {
if (serInfo.isBuiltInType()) continue;
if (!serInfo.isPointerType()) continue;
int idx = Collections.binarySearch(serializers, serInfo, cmp);
if (idx < 0) {
idx = -(idx + 1);
serializers.add(idx, serInfo);
}
else {
// This error should have already been caught during the javadoc process.
throw new IllegalStateException("Serializers have same typeId=" + serInfo.typeId + ", " + serInfo + ", " + serializers.get(idx));
}
}
return serializers;
}
// API name from the descriptor (mutable for subclasses).
protected String apiName;
// API base package from the descriptor.
public final String apiPack;
// Binary model used by the generators; fixed to MEDIUM.
public final BBinaryModel bmodel = BBinaryModel.MEDIUM;
public final ClassDB classDB;
protected final BRegistryForClassDB registry;
protected final BApiDescriptor apiDesc;
public final GeneratorProperties props;
}
|
JSY1988/tcpkit | src/tcpkit.c | <filename>src/tcpkit.c
/**
* tcpkit -- toolkit to analyze tcp packet
* Copyright (C) 2018 @git-hulk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
**/
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pcap.h>
#include "log.h"
#include "tcpkit.h"
#include "server.h"
struct server *srv;
const char *VERSION = "1.1.0";
/* Terminates the capture server on SIGINT/SIGTERM; other signals ignored. */
void signal_handler(int sig) {
    switch (sig) {
    case SIGINT:
    case SIGTERM:
        server_terminate(srv);
        break;
    default:
        break;
    }
}
/*
 * Prints the command-line help text (with the current VERSION interpolated)
 * and exits with status 0. Fixes user-facing typos in the previous text
 * ("programable", "lantecy", "which slower", "reqeust").
 */
void usage() {
    const char *usage_literal = ""
    "the tcpkit was designed to make network packets programmable with LUA by @git-hulk\n"
    " version: %s\n"
    " -h, Print the tcpkit version strings, print a usage message, and exit\n"
    " -i interface, Listen on network card interface\n"
    " -A Print each packet (minus its link level header) in ASCII. Handy for capturing web pages.\n"
    " -r file, Read packets from file (which was created with the -w option or by other tools that write pcap)\n"
    " -B buffer_size, Set the operating system capture buffer size to buffer_size, in units of KiB (1024 bytes)\n"
    " -s snaplen, Snarf snaplen bytes of data from each packet rather than the default of 1500 bytes\n"
    " -S file, Push packets to lua state if the script was specified\n"
    " -t threshold, Print the request latency which is slower than the threshold, in units of Millisecond\n"
    " -w file, Write the raw packets to file\n"
    " -p protocol, Parse the packet if the protocol was specified (supports: redis, memcached, http, raw)\n"
    " -P stats port, Listen port to fetch the latency stats, default is 33333\n\n\n"
    ""
    "For example:\n\n"
    " `tcpkit -i eth0 tcp port 6379 -p redis` was used to monitor the redis request latency\n\n"
    " `tcpkit -i eth0 tcp port 6379 -p redis -w 6379.pcap` would also dump the packets to `6379.pcap`\n\n"
    " `tcpkit -i eth0 tcp port 6379 -p redis -t 10` would only print the request latency slower than 10ms\n";
    color_printf(GREEN, usage_literal, VERSION);
    exit(0);
}
/* Fills opts with default values; parse_options() overrides them from argv.
 * Every string member is either strdup()'d here or left NULL so that
 * free_options() can free them unconditionally. */
void init_options(struct options *opts) {
opts->dev = strdup("any");            /* capture on all interfaces by default */
opts->ascii = 0;
opts->filter = NULL;
opts->offline_file = NULL;
opts->save_file = NULL;
opts->script = NULL;
opts->snaplen = 1500;                 /* matches the "-s" default in usage() */
opts->buf_size = 512 * 1024 * 1024;   /* 512 MiB OS capture buffer */
opts->print_usage = 0;
opts->print_version = 0;
opts->protocol = ProtocolRaw;         /* no protocol parsing unless -p given */
opts->threshold = 0;                  /* 0 = print all request latencies */
opts->stats_port = 33333;             /* matches the "-P" default in usage() */
}
/*
 * Releases an options struct and every strdup()'d string member set by
 * init_options()/parse_options(). Previously opts->save_file (allocated for
 * the -w flag) was never freed, leaking it; it is now released as well.
 * The NULL guards are kept for style, although free(NULL) is a no-op.
 */
void free_options(struct options *opts) {
    if (opts->dev) free(opts->dev);
    if (opts->filter) free(opts->filter);
    if (opts->offline_file) free(opts->offline_file);
    if (opts->save_file) free(opts->save_file);
    if (opts->script) free(opts->script);
    free(opts);
}
/*
 * Parse command line arguments into a heap-allocated struct options.
 * Non-dash arguments are concatenated (space separated) into the pcap
 * filter expression, which defaults to "tcp" when none is given.
 * On an unknown option or a missing option value this logs via
 * log_message(FATAL, ...) and returns NULL.
 */
struct options *parse_options(int argc, char **argv) {
    int i, lastarg;
    struct options *opts;
    opts = malloc(sizeof(*opts));
    init_options(opts);
    for (i = 1; i < argc; i++) {
        /* lastarg: no value can follow this option */
        lastarg = (i == (argc-1));
        if (!strcmp(argv[i],"-v")) {
            opts->print_version = 1;
        } else if (!strcmp(argv[i],"-h")) {
            opts->print_usage = 1;
        } else if (!strcmp(argv[i],"-A")) {
            opts->ascii = 1;
        } else if (!strcmp(argv[i],"-i")) {
            /* network card interface to listen on */
            if (lastarg) goto invalid;
            if (opts->dev) free(opts->dev);
            opts->dev = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"-B")) {
            /* capture buffer size, given on the command line in KiB */
            if (lastarg) goto invalid;
            opts->buf_size = atoi(argv[++i]) * 1024;
        } else if (!strcmp(argv[i],"-t")) {
            /* latency threshold, in milliseconds */
            if (lastarg) goto invalid;
            opts->threshold = atoi(argv[++i]);
        } else if (!strcmp(argv[i],"-p")) {
            /* application-protocol parser to apply to captured packets */
            if (lastarg) goto invalid;
            if (!strcmp(argv[i+1],"raw")) {
                opts->protocol = ProtocolRaw;
            } else if (!strcmp(argv[i+1],"redis")) {
                opts->protocol = ProtocolRedis;
            } else if (!strcmp(argv[i+1],"memcached")) {
                opts->protocol = ProtocolMemcached;
            } else if (!strcmp(argv[i+1],"http")) {
                opts->protocol = ProtocolHTTP;
            } else {
                goto invalid;
            }
            i++;
        } else if (!strcmp(argv[i],"-P")) {
            /* listen port for fetching latency stats */
            if (lastarg) goto invalid;
            opts->stats_port = atoi(argv[++i]);
        } else if (!strcmp(argv[i],"-s")) {
            /* bytes of each packet to snarf (snap length) */
            if (lastarg) goto invalid;
            opts->snaplen = atoi(argv[++i]);
        } else if (!strcmp(argv[i],"-S")) {
            /* LUA script that packets are pushed to */
            if (lastarg) goto invalid;
            if (opts->script) free(opts->script);
            opts->script = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"-r")) {
            /* read packets from a pcap file instead of a live interface */
            if (lastarg) goto invalid;
            if (opts->offline_file) free(opts->offline_file);
            opts->offline_file = strdup(argv[++i]);
        } else if (!strcmp(argv[i],"-w")) {
            /* dump raw captured packets to a pcap file */
            if (lastarg) goto invalid;
            if (opts->save_file) free(opts->save_file);
            opts->save_file = strdup(argv[++i]);
        } else {
            if (argv[i][0] == '-') goto invalid;
            // treat other options as filter
            if (!opts->filter) {
                opts->filter = strdup(argv[i]);
            } else {
                /* append " <arg>" to the existing filter expression */
                char *tmp;
                int new_size, old_size = strlen(opts->filter);
                // add 2 for white space and terminal char
                new_size = old_size+strlen(argv[i])+2;
                tmp = realloc(opts->filter, new_size);
                if (tmp) {
                    opts->filter = tmp;
                    opts->filter[old_size++] = ' ';
                    memcpy(opts->filter+old_size, argv[i], strlen(argv[i]));
                    opts->filter[new_size-1] = '\0';
                }
                /* NOTE(review): on realloc failure the argument is silently
                 * dropped from the filter — presumably acceptable
                 * best-effort behaviour; confirm. */
            }
        }
    }
    if (!opts->filter) opts->filter = strdup("tcp");
    return opts;

invalid:
    log_message(FATAL, "Invalid option \"%s\" or option argument missing",argv[i]);
    free_options(opts);
    return NULL;
}
/*
 * Entry point: install signal handlers, parse options, validate the
 * environment (root is required for live capture), clamp the snap length,
 * then create and run the sniffer server until it exits.
 */
int main(int argc, char **argv) {
    char err[MAX_ERR_BUFF_SIZE];
    struct options *opts;
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);
    print_redirect(stdout);
    opts = parse_options(argc, argv);
    /* NOTE(review): err has not been written at this point. parse_options()
     * logs its own FATAL message before returning NULL, but if
     * log_message(FATAL, ...) can return, this would print uninitialized
     * data — confirm log_message's FATAL semantics. */
    if (!opts) log_message(FATAL, "Failed to parse options, %s", err);
    if (opts->print_usage) {
        free_options(opts);
        usage();
    }
    if (opts->print_version) {
        printf("tcpkit %s\n", VERSION);
        exit(0);
    }
    /* live capture needs root privileges; reading a pcap file does not */
    if (getuid() != 0 && !opts->offline_file) {
        free_options(opts);
        log_message(FATAL, "You don't have permission to capture on the network card interface");
    }
    /* clamp snaplen to at least 64 bytes; cap it at 256 when a protocol
     * parser is active (presumably only packet heads are needed for
     * latency analysis — confirm) */
    if (opts->snaplen < 64) opts->snaplen = 64;
    if (opts->protocol != ProtocolRaw && opts->snaplen > 256) {
        opts->snaplen = 256;
    }
    srv = server_create(opts, err);
    if (!srv) {
        free_options(opts);
        log_message(FATAL, "Failed to create the sniffer server, %s", err);
    }
    if (server_run(srv, err) == -1) {
        server_destroy(srv);
        free_options(opts);
        log_message(FATAL, "Failed to run the server, %s", err);
    }
    server_destroy(srv);
    free_options(opts);
    return 0;
}
|
anatawa12/intellij-community | java/testFramework/src/com/intellij/testFramework/fixtures/MavenDependencyUtil.java | <reponame>anatawa12/intellij-community
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.testFramework.fixtures;
import com.intellij.jarRepository.JarRepositoryManager;
import com.intellij.jarRepository.RemoteRepositoryDescription;
import com.intellij.jarRepository.RepositoryLibraryType;
import com.intellij.openapi.roots.DependencyScope;
import com.intellij.openapi.roots.LibraryOrderEntry;
import com.intellij.openapi.roots.ModifiableRootModel;
import com.intellij.openapi.roots.impl.libraries.LibraryEx;
import com.intellij.openapi.roots.libraries.Library;
import com.intellij.openapi.roots.libraries.LibraryTable;
import com.intellij.openapi.roots.libraries.ui.OrderRoot;
import com.intellij.project.IntelliJProjectConfiguration;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.idea.maven.utils.library.RepositoryLibraryProperties;
import java.util.Collection;
import java.util.List;
/**
 * Test-fixture helper for adding Maven-resolved libraries to a module's
 * {@link ModifiableRootModel}. Artifacts are resolved (and possibly
 * downloaded) through {@link JarRepositoryManager} from the remote
 * repositories configured for the IntelliJ project.
 */
public final class MavenDependencyUtil {
  /**
   * Adds a Maven library to given model as {@link DependencyScope#COMPILE} dependency including transitive dependencies.
   *
   * @param model root model to add a Maven library to
   * @param mavenCoordinates maven coordinates like groupID:artifactID:version
   */
  public static void addFromMaven(@NotNull ModifiableRootModel model, String mavenCoordinates) {
    addFromMaven(model, mavenCoordinates, true);
  }

  /**
   * Adds a Maven library to given model as {@link DependencyScope#COMPILE} dependency.
   *
   * @param model root model to add a Maven library to
   * @param mavenCoordinates maven coordinates like groupID:artifactID:version
   * @param includeTransitiveDependencies true for include transitive dependencies, false otherwise
   */
  public static void addFromMaven(@NotNull ModifiableRootModel model, String mavenCoordinates, boolean includeTransitiveDependencies) {
    addFromMaven(model, mavenCoordinates, includeTransitiveDependencies, DependencyScope.COMPILE);
  }

  /**
   * Adds a Maven library to given model.
   *
   * @param model root model to add a Maven library to
   * @param mavenCoordinates maven coordinates like groupID:artifactID:version
   * @param includeTransitiveDependencies true for include transitive dependencies, false otherwise
   * @param dependencyScope scope of the library
   * @throws IllegalStateException if the artifact resolves to no roots or the
   *                               created library cannot be found in the model
   */
  public static void addFromMaven(@NotNull ModifiableRootModel model, String mavenCoordinates,
                                  boolean includeTransitiveDependencies, DependencyScope dependencyScope) {
    List<RemoteRepositoryDescription> remoteRepositoryDescriptions = getRemoteRepositoryDescriptions();
    RepositoryLibraryProperties libraryProperties = new RepositoryLibraryProperties(mavenCoordinates, includeTransitiveDependencies);
    // Resolve the coordinates into concrete library roots (jars, sources, ...).
    Collection<OrderRoot> roots =
      JarRepositoryManager.loadDependenciesModal(model.getProject(), libraryProperties, false, false, null, remoteRepositoryDescriptions);
    // Register a module-level repository library and attach every resolved root.
    LibraryTable.ModifiableModel tableModel = model.getModuleLibraryTable().getModifiableModel();
    Library library = tableModel.createLibrary(mavenCoordinates, RepositoryLibraryType.REPOSITORY_LIBRARY_KIND);
    Library.ModifiableModel libraryModel = library.getModifiableModel();
    if (roots.isEmpty()) {
      throw new IllegalStateException(String.format("No roots for '%s'", mavenCoordinates));
    }
    for (OrderRoot root : roots) {
      libraryModel.addRoot(root.getFile(), root.getType());
    }
    ((LibraryEx.ModifiableModelEx) libraryModel).setProperties(libraryProperties);
    LibraryOrderEntry libraryOrderEntry = model.findLibraryOrderEntry(library);
    if (libraryOrderEntry == null) {
      throw new IllegalStateException("Unable to find registered library " + mavenCoordinates);
    }
    libraryOrderEntry.setScope(dependencyScope);
    // Commit the library model before the table model that owns it.
    libraryModel.commit();
    tableModel.commit();
  }

  /** Maps the project's configured remote repositories to {@link RemoteRepositoryDescription} instances. */
  @NotNull
  private static List<RemoteRepositoryDescription> getRemoteRepositoryDescriptions() {
    return ContainerUtil.map(IntelliJProjectConfiguration.getRemoteRepositoryDescriptions(), repository ->
      new RemoteRepositoryDescription(repository.getId(), repository.getName(), repository.getUrl()));
  }
}
|
simonsoft/cms-indexing-xml | src/main/java/se/simonsoft/cms/indexing/xml/XmlFileFilterExtensionAndTikaContentType.java | <reponame>simonsoft/cms-indexing-xml
/**
* Copyright (C) 2009-2017 Simonsoft Nordic AB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.simonsoft.cms.indexing.xml;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import se.repos.indexing.IndexingDoc;
import se.simonsoft.cms.item.events.change.CmsChangesetItem;
/**
* Identifies XML on file extension and svn:mime-type property.
* An alternative would be to resort to trial-and-error
* (which we need anyway because some XML is not valid)
* or detected content type from fulltext/metadata extraction.
*/
/**
 * Identifies XML on file extension and svn:mime-type property.
 * An alternative would be to resort to trial-and-error
 * (which we need anyway because some XML is not valid)
 * or detected content type from fulltext/metadata extraction.
 */
public class XmlFileFilterExtensionAndTikaContentType implements XmlFileFilter {

	/** Indexing field where the Tika-extracted content type is stored. */
	private static final String CONTENT_TYPE_KEY = "embd_Content-Type";

	/** File extensions that may contain XML worth attempting to parse. */
	private static final Set<String> EXTENSIONS_TO_TRY = new HashSet<String>(Arrays.asList("xml", "dita", "ditamap",
			"xlf",
			"xhtml", "html", "htm",
			"x-svg" // #871 Disabling SVG until we can protect SolR from whatever.
			));

	/** Detected content types that are accepted as XML. */
	private static final Set<String> CONTENT_TYPES_TO_TRY = new HashSet<String>(Arrays.asList(
			"application/xml",
			"application/xhtml+xml",
			"application/dita+xml"
			));

	@Override
	public boolean isXml(CmsChangesetItem c, IndexingDoc fields) {
		// TODO legacy behavior now, add check for svn prop
		if (!EXTENSIONS_TO_TRY.contains(c.getPath().getExtension())) {
			return false;
		}
		// No extracted content type available: fall back to trusting the extension.
		if (!fields.containsKey(CONTENT_TYPE_KEY)) {
			return true;
		}
		return CONTENT_TYPES_TO_TRY.contains(parseContentType((String) fields.getFieldValue(CONTENT_TYPE_KEY)));
	}

	/**
	 * Strips any parameters from a content type value,
	 * e.g. "application/xml; charset=utf-8" -> "application/xml".
	 *
	 * @param contentType raw content type value, may be null
	 * @return trimmed base type, or null if input was null
	 */
	protected String parseContentType(String contentType) {
		if (contentType == null) {
			return null;
		}
		// Using indexOf/substring instead of split(";")[0]: String.split drops
		// trailing empty strings, so a degenerate value such as ";" yielded an
		// empty array and threw ArrayIndexOutOfBoundsException.
		int sep = contentType.indexOf(';');
		String base = (sep >= 0) ? contentType.substring(0, sep) : contentType;
		return base.trim();
	}

}
|
nmellado/Radium-Engine | src/GuiBase/Utils/Keyboard.cpp | #include <GuiBase/Utils/Keyboard.hpp>
#include <Core/CoreMacros.hpp>
#include <Core/Log/Log.hpp>
#include <map>
namespace Ra
{
    namespace Gui
    {
        // Last observed pressed/released state for each key code.
        std::map<int, bool> g_keypresses;

        /// Record that the key with the given code is currently held down.
        void keyPressed(int code)
        {
            g_keypresses[code] = true;
        }

        /// Record that the key with the given code has been released.
        void keyReleased(int code)
        {
            g_keypresses[code] = false;
        }

        /// Return true if the key was last seen in the pressed state.
        /// Keys never seen are reported as not pressed. Uses find() so that a
        /// pure query does not insert entries: the previous operator[] lookup
        /// grew the map on every miss.
        bool isKeyPressed(int code)
        {
            auto it = g_keypresses.find(code);
            return it != g_keypresses.end() && it->second;
        }
    }
}
|
ate362/ubii-node-master | scripts/startBackend.js | <reponame>ate362/ubii-node-master
const { ExternalLibrariesService } = require('@tum-far/ubii-node-nodejs/src/index');
const { MasterNode } = require('../src/index.js');
const fs = require('fs');

// Boot script: register external libraries, then bring up the master node.
(() => {
  // Expose Node's fs module under the name 'fs' (presumably for scripts
  // executed inside the node — confirm against ExternalLibrariesService docs).
  ExternalLibrariesService.instance.addExternalLibrary('fs', fs);
  // Constructing the MasterNode starts the backend as a side effect.
  const master = new MasterNode();
})();
|
romagny13/react-form-validation | docs/examples/Password/Example2.js | import React from 'react';
import { Form, LightGroup, Password, CheckboxGroup, Label, required, pattern, custom, ValidationHelper } from 'romagny13-react-form-validation';
/** Validation (required, match) */
class Example2 extends React.Component {
constructor(props) {
super(props);
this.state = {
model: {
password: '',
confirmPassword: '',
},
errors: {},
touched: {}
};
this.validators = {
password: [
required('Please enter a password.'),
pattern(/^(?=.*[A-Z]).{6}/, '6 characters minimum and one uppercase letter.')
],
confirmPassword: [
required('Please confirm the password.'),
custom((value, model) => {
return model.password === value;
}, 'Password and confirm password do not match.')
]
};
this.onValueChange = this.onValueChange.bind(this);
this.onTouch = this.onTouch.bind(this);
this.onSubmit = this.onSubmit.bind(this);
}
onValueChange(name, value) {
const { model, touched, submitted } = this.state;
model[name] = value;
if (submitted || touched[name]) {
let errors = ValidationHelper.validateAll(model, this.validators);
this.setState({
model,
errors
});
}
else {
this.setState({
model
});
}
}
onTouch(name) {
let touched = this.state.touched;
touched[name] = true;
let errors = ValidationHelper.validateAll(this.state.model, this.validators);
this.setState({
touched,
errors
});
}
onSubmit(event) {
event.preventDefault();
let errors = ValidationHelper.validateAll(this.state.model, this.validators);
this.setState({
submitted: true,
errors
});
}
render() {
const { model, errors } = this.state;
return (
<Form onSubmit={this.onSubmit}>
<LightGroup error={errors["password"]}>
<Label htmlFor="password" asterisk>Password</Label>
<Password width="200px" id="password" name="password" value={model["password"]} onValueChange={this.onValueChange} onTouch={this.onTouch} placeholder="Password" />
</LightGroup>
<LightGroup error={errors["confirmPassword"]}>
<Label htmlFor="confirmPassword" asterisk>Confirm password</Label>
<Password width="200px" id="confirmPassword" name="confirmPassword" value={model["confirmPassword"]} onValueChange={this.onValueChange} onTouch={this.onTouch} placeholder="Confirm password" />
</LightGroup>
<input type="submit" value="Submit" />
<pre>
{JSON.stringify(errors)}
</pre>
</Form>
);
}
}
export default Example2;
|
YaPe4enka/samsung_itschool_tasks | module1/src/com/module1/t19_2.java | <filename>module1/src/com/module1/t19_2.java
package com.module1;
public class t19_2 {

    public static void main(String[] args) {
        System.out.print(SumOfSeven(0, 100));
    }

    /**
     * Sums the digit sums of every two-digit multiple of 7 inside [a, b].
     */
    public static int SumOfSeven(int a, int b) {
        int total = 0;
        for (int value = a; value <= b; value++) {
            // Positive form of the original guard (via De Morgan): keep
            // multiples of 7 strictly between 10 and 100.
            if (value % 7 == 0 && value > 10 && value < 100) {
                total += SumOfDigits(value);
            }
        }
        return total;
    }

    /**
     * Sum of the decimal digits of n.
     */
    public static int SumOfDigits(int n) {
        int sum = 0;
        while (n != 0) {
            sum += n % 10;
            n /= 10;
        }
        return sum;
    }
}
|
confluentinc/ccloud-sdk-go-v2 | kafkarest/v3/model_resource_collection_metadata.go | <reponame>confluentinc/ccloud-sdk-go-v2
// Copyright 2021 Confluent Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
REST Admin API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
API version: 3.0.0
Contact: <EMAIL>
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package v3
import (
"encoding/json"
)
import (
"reflect"
)
// ResourceCollectionMetadata struct for ResourceCollectionMetadata
type ResourceCollectionMetadata struct {
	// Self holds the required "self" link of the collection.
	Self string `json:"self"`
	// Next holds the optional "next" pagination link; it can be explicitly
	// null (see NullableString) and is omitted from JSON when unset.
	Next NullableString `json:"next,omitempty"`
}

// NewResourceCollectionMetadata instantiates a new ResourceCollectionMetadata object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewResourceCollectionMetadata(self string) *ResourceCollectionMetadata {
	this := ResourceCollectionMetadata{}
	this.Self = self
	return &this
}

// NewResourceCollectionMetadataWithDefaults instantiates a new ResourceCollectionMetadata object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewResourceCollectionMetadataWithDefaults() *ResourceCollectionMetadata {
	this := ResourceCollectionMetadata{}
	return &this
}
// GetSelf returns the Self field value.
// The nil-receiver guard makes the generated accessor safe to call on a nil pointer.
func (o *ResourceCollectionMetadata) GetSelf() string {
	if o == nil {
		var ret string
		return ret
	}

	return o.Self
}

// GetSelfOk returns a tuple with the Self field value
// and a boolean to check if the value has been set.
func (o *ResourceCollectionMetadata) GetSelfOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Self, true
}

// SetSelf sets field value
func (o *ResourceCollectionMetadata) SetSelf(v string) {
	o.Self = v
}

// GetNext returns the Next field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *ResourceCollectionMetadata) GetNext() string {
	if o == nil || o.Next.Get() == nil {
		var ret string
		return ret
	}
	return *o.Next.Get()
}

// GetNextOk returns a tuple with the Next field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *ResourceCollectionMetadata) GetNextOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.Next.Get(), o.Next.IsSet()
}

// HasNext returns a boolean if a field has been set.
func (o *ResourceCollectionMetadata) HasNext() bool {
	if o != nil && o.Next.IsSet() {
		return true
	}

	return false
}

// SetNext gets a reference to the given NullableString and assigns it to the Next field.
func (o *ResourceCollectionMetadata) SetNext(v string) {
	o.Next.Set(&v)
}

// SetNextNil sets the value for Next to be an explicit nil
// (serialized as JSON null, as opposed to being omitted entirely).
func (o *ResourceCollectionMetadata) SetNextNil() {
	o.Next.Set(nil)
}

// UnsetNext ensures that no value is present for Next, not even an explicit nil
func (o *ResourceCollectionMetadata) UnsetNext() {
	o.Next.Unset()
}
// Redact resets all sensitive fields to their zero value.
func (o *ResourceCollectionMetadata) Redact() {
	o.recurseRedact(&o.Self)
	o.recurseRedact(o.Next)
}

// recurseRedact walks v: values implementing a Redact() method are redacted
// in place; slices and arrays are traversed element by element via reflection.
func (o *ResourceCollectionMetadata) recurseRedact(v interface{}) {
	type redactor interface {
		Redact()
	}
	if r, ok := v.(redactor); ok {
		r.Redact()
	} else {
		val := reflect.ValueOf(v)
		if val.Kind() == reflect.Ptr {
			val = val.Elem()
		}
		switch val.Kind() {
		case reflect.Slice, reflect.Array:
			for i := 0; i < val.Len(); i++ {
				// support data types declared without pointers
				o.recurseRedact(val.Index(i).Interface())
				// ... and data types that were declared without but need pointers (for Redact)
				if val.Index(i).CanAddr() {
					o.recurseRedact(val.Index(i).Addr().Interface())
				}
			}
		}
	}
}

// zeroField resets the value pointed to by v to its type's zero value.
func (o ResourceCollectionMetadata) zeroField(v interface{}) {
	p := reflect.ValueOf(v).Elem()
	p.Set(reflect.Zero(p.Type()))
}
// MarshalJSON serializes the required "self" link and, when set (possibly to
// an explicit null), the optional "next" link.
func (o ResourceCollectionMetadata) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if true {
		toSerialize["self"] = o.Self
	}
	if o.Next.IsSet() {
		toSerialize["next"] = o.Next.Get()
	}
	return json.Marshal(toSerialize)
}

// NullableResourceCollectionMetadata distinguishes an absent value
// (isSet == false) from an explicit JSON null (isSet == true, value == nil).
type NullableResourceCollectionMetadata struct {
	value *ResourceCollectionMetadata
	isSet bool
}

// Get returns the wrapped value, which may be nil.
func (v NullableResourceCollectionMetadata) Get() *ResourceCollectionMetadata {
	return v.value
}

// Set stores val (including nil, meaning explicit null) and marks the wrapper as set.
func (v *NullableResourceCollectionMetadata) Set(val *ResourceCollectionMetadata) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableResourceCollectionMetadata) IsSet() bool {
	return v.isSet
}

// Unset clears the value and marks the wrapper as not set.
func (v *NullableResourceCollectionMetadata) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableResourceCollectionMetadata wraps val as an already-set nullable value.
func NewNullableResourceCollectionMetadata(val *ResourceCollectionMetadata) *NullableResourceCollectionMetadata {
	return &NullableResourceCollectionMetadata{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (nil marshals to JSON null).
func (v NullableResourceCollectionMetadata) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON unmarshals src into the wrapped value and marks the wrapper as set.
func (v *NullableResourceCollectionMetadata) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
|
jamacanbacn/splits-io | db/migrate/20150120052157_add_claim_token_to_runs.rb | class AddClaimTokenToRuns < ActiveRecord::Migration[4.2]
def change
add_column :runs, :claim_token, :string
end
end
|
akasha/akasha | webtack/jsinterop-generator/src/test/fixtures/exposed/output/j2cl/test/java/com/example/AudioWorkletGlobalScopeTestCompile.java | <filename>webtack/jsinterop-generator/src/test/fixtures/exposed/output/j2cl/test/java/com/example/AudioWorkletGlobalScopeTestCompile.java<gh_stars>10-100
package com.example;
import javax.annotation.Generated;
@Generated("org.realityforge.webtack")
public final class AudioWorkletGlobalScopeTestCompile {
  // Field reference forcing AudioWorkletGlobalScope itself to be compiled.
  static AudioWorkletGlobalScope $typeReference$;

  // Compile-time check that audioWorkletGlobalScopeAttribute() exists and returns String.
  public static String audioWorkletGlobalScopeAttribute(final AudioWorkletGlobalScope type) {
    return type.audioWorkletGlobalScopeAttribute();
  }
}
|
jxjnjjn/chromium | src/content/renderer/p2p/ipc_network_manager.cc | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/p2p/ipc_network_manager.h"
#include "base/bind.h"
#include "base/sys_byteorder.h"
#include "net/base/net_util.h"
namespace content {
// Registers this manager as a network-list observer on the dispatcher for the
// lifetime of the object (unregistered in the destructor).
IpcNetworkManager::IpcNetworkManager(P2PSocketDispatcher* socket_dispatcher)
    : socket_dispatcher_(socket_dispatcher),
      start_count_(0),
      network_list_received_(false),
      weak_factory_(this) {
  socket_dispatcher_->AddNetworkListObserver(this);
}
IpcNetworkManager::~IpcNetworkManager() {
  // Every StartUpdating() must have been balanced by StopUpdating() by now.
  DCHECK(!start_count_);
  socket_dispatcher_->RemoveNetworkListObserver(this);
}
// Begins network update delivery. If a network list was already received,
// re-emits the change signal asynchronously so a new consumer observes the
// current state without reentering its caller.
void IpcNetworkManager::StartUpdating() {
  if (network_list_received_) {
    // Post a task to avoid reentrancy.
    MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&IpcNetworkManager::SendNetworksChangedSignal,
                              weak_factory_.GetWeakPtr()));
  }
  ++start_count_;
}
// Balances a prior StartUpdating() call; the refcount is checked at destruction.
void IpcNetworkManager::StopUpdating() {
  DCHECK_GT(start_count_, 0);
  --start_count_;
}
// Callback with the OS network interface list. Converts IPv4 interfaces to
// talk_base::Network entries (addresses of any other size are skipped) and
// emits SignalNetworksChanged when the merged list differs from the last one.
void IpcNetworkManager::OnNetworkListChanged(
    const net::NetworkInterfaceList& list) {
  // Update flag if network list received for the first time.
  if (!network_list_received_)
    network_list_received_ = true;

  std::vector<talk_base::Network*> networks;
  for (net::NetworkInterfaceList::const_iterator it = list.begin();
       it != list.end(); it++) {
    uint32 address;
    // Only IPv4 addresses (exactly kIPv4AddressSize bytes) are handled here.
    if (it->address.size() != net::kIPv4AddressSize)
      continue;
    memcpy(&address, &it->address[0], sizeof(uint32));
    // Raw bytes are in network order; convert to host order for IPAddress.
    address = talk_base::NetworkToHost32(address);
    talk_base::Network* network = new talk_base::Network(
        it->name, it->name, talk_base::IPAddress(address), 32);
    network->AddIP(talk_base::IPAddress(address));
    networks.push_back(network);
  }

  bool changed = false;
  // NOTE(review): MergeNetworkList() is assumed to take ownership of the
  // heap-allocated Network objects — confirm against the libjingle API.
  MergeNetworkList(networks, &changed);
  if (changed)
    SignalNetworksChanged();
}
// Weak-pointer-safe trampoline used by the task posted in StartUpdating().
void IpcNetworkManager::SendNetworksChangedSignal() {
  SignalNetworksChanged();
}
} // namespace content
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.