text stringlengths 2 99k | meta dict |
|---|---|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<Configuration status="ERROR">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ABSOLUTE} %5p |%X{test-name}| - [%c{1}] %t - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="INFO">
<AppenderRef ref="Console"/>
</Root>
<Logger name="com.hazelcast.scheduledexecutor" level="trace"/>
<Logger name="com.hazelcast.instance" level="debug"/>
<Logger name="com.hazelcast.cluster" level="debug"/>
<Logger name="com.hazelcast.internal.cluster" level="debug"/>
<Logger name="com.hazelcast.internal.partition" level="debug"/>
<Logger name="com.hazelcast.test.mocknetwork" level="debug"/>
</Loggers>
</Configuration>
| {
"pile_set_name": "Github"
} |
// @target: es6
// ES6 Spec - 10.1.1 Static Semantics: UTF16Encoding (cp)
// 2. Let cu1 be floor((cp – 65536) / 1024) + 0xD800.
// Although we should just get back a single code point value of 0xD800,
// this is a useful edge-case test.
var x = "\u{D800}";
| {
"pile_set_name": "Github"
} |
; $Id: optionwidgets.help.ini,v 1.1.2.2 2008/10/28 01:35:17 yched Exp $
[advanced help settings]
hide = TRUE
[overview]
title = Overview
[optionwidgets]
title = Optionwidgets
parent = content%fields
| {
"pile_set_name": "Github"
} |
sun.jdbc.odbc.JdbcOdbcDriver
| {
"pile_set_name": "Github"
} |
/**
*
* WARNING! This file was autogenerated by:
* _ _ _ _ __ __
* | | | | | | |\ \ / /
* | | | | |_| | \ V /
* | | | | _ | / \
* | |_| | | | |/ /^\ \
* \___/\_| |_/\/ \/
*
* This file was autogenerated by UnrealHxGenerator using UHT definitions.
* It only includes UPROPERTYs and UFUNCTIONs. Do not modify it!
* In order to add more definitions, create or edit a type with the same name/package, but with an `_Extra` suffix
**/
package unreal.blueprintgraph;
/**
Action to add a node to the graph
**/
@:umodule("BlueprintGraph")
@:glueCppIncludes("Classes/EdGraphSchema_K2_Actions.h")
@:uextern @:ustruct extern class FEdGraphSchemaAction_K2AssignDelegate extends unreal.blueprintgraph.FEdGraphSchemaAction_K2NewNode {
}
| {
"pile_set_name": "Github"
} |
<?php
/*
* Copyright 2014 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
 * Model class for the Monitoring API `Distribution` type: histogram bucket
 * counts together with summary statistics and an optional range object.
 *
 * Follows the generated Google API client pattern: one property per API
 * field with a matching getter/setter pair.
 */
class Google_Service_Monitoring_Distribution extends Google_Collection
{
// Name of the repeated (collection) field handled by Google_Collection.
protected $collection_key = 'bucketCounts';
public $bucketCounts;
// Class name / data-type metadata for the nested bucketOptions object.
protected $bucketOptionsType = 'Google_Service_Monitoring_BucketOptions';
protected $bucketOptionsDataType = '';
public $count;
public $mean;
// Class name / data-type metadata for the nested range object.
protected $rangeType = 'Google_Service_Monitoring_Range';
protected $rangeDataType = '';
public $sumOfSquaredDeviation;
/**
 * Sets the per-bucket counts (the repeated collection field).
 */
public function setBucketCounts($bucketCounts)
{
$this->bucketCounts = $bucketCounts;
}
public function getBucketCounts()
{
return $this->bucketCounts;
}
/**
 * @param Google_Service_Monitoring_BucketOptions
 */
public function setBucketOptions(Google_Service_Monitoring_BucketOptions $bucketOptions)
{
$this->bucketOptions = $bucketOptions;
}
/**
 * @return Google_Service_Monitoring_BucketOptions
 */
public function getBucketOptions()
{
return $this->bucketOptions;
}
public function setCount($count)
{
$this->count = $count;
}
public function getCount()
{
return $this->count;
}
public function setMean($mean)
{
$this->mean = $mean;
}
public function getMean()
{
return $this->mean;
}
/**
 * @param Google_Service_Monitoring_Range
 */
public function setRange(Google_Service_Monitoring_Range $range)
{
$this->range = $range;
}
/**
 * @return Google_Service_Monitoring_Range
 */
public function getRange()
{
return $this->range;
}
public function setSumOfSquaredDeviation($sumOfSquaredDeviation)
{
$this->sumOfSquaredDeviation = $sumOfSquaredDeviation;
}
public function getSumOfSquaredDeviation()
{
return $this->sumOfSquaredDeviation;
}
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 3958d4905c0d4b94587a89934df8e787
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
<?php
// Thai (th) locale strings for ADOdb error messages, keyed by DB_ERROR_*
// constants (defined elsewhere in ADOdb).
// by Trirat Petchsingh <rosskouk#gmail.com>
//
// NOTE(review): several Thai string values below contain mojibake
// ("�?" sequences), apparently from an incorrect re-encoding of this file
// at some point. The literals are deliberately preserved byte-for-byte
// here; the correct text should be restored from the upstream ADOdb
// lang-th source — TODO confirm.
$ADODB_LANG_ARRAY = array (
'LANG' => 'th',
DB_ERROR => 'error ไม่รู้สาเหตุ',
DB_ERROR_ALREADY_EXISTS => 'มี�?ล้ว',
DB_ERROR_CANNOT_CREATE => 'สร้างไม่ได้',
DB_ERROR_CANNOT_DELETE => 'ลบไม่ได้',
DB_ERROR_CANNOT_DROP => 'drop ไม่ได้',
DB_ERROR_CONSTRAINT => 'constraint violation',
DB_ERROR_DIVZERO => 'หา�?ด้วยสู�?',
DB_ERROR_INVALID => 'ไม่ valid',
DB_ERROR_INVALID_DATE => 'วันที่ เวลา ไม่ valid',
DB_ERROR_INVALID_NUMBER => 'เลขไม่ valid',
DB_ERROR_MISMATCH => 'mismatch',
DB_ERROR_NODBSELECTED => 'ไม่ได้เลือ�?�?านข้อมูล',
DB_ERROR_NOSUCHFIELD => 'ไม่มีฟีลด์นี้',
DB_ERROR_NOSUCHTABLE => 'ไม่มีตารางนี้',
DB_ERROR_NOT_CAPABLE => 'DB backend not capable',
DB_ERROR_NOT_FOUND => 'ไม่พบ',
DB_ERROR_NOT_LOCKED => 'ไม่ได้ล๊อ�?',
DB_ERROR_SYNTAX => 'ผิด syntax',
DB_ERROR_UNSUPPORTED => 'ไม่ support',
DB_ERROR_VALUE_COUNT_ON_ROW => 'value count on row',
DB_ERROR_INVALID_DSN => 'invalid DSN',
DB_ERROR_CONNECT_FAILED => 'ไม่สามารถ connect',
0 => 'no error',
DB_ERROR_NEED_MORE_DATA => 'ข้อมูลไม่เพียงพอ',
DB_ERROR_EXTENSION_NOT_FOUND=> 'ไม่พบ extension',
DB_ERROR_NOSUCHDB => 'ไม่มีข้อมูลนี้',
DB_ERROR_ACCESS_VIOLATION => 'permissions ไม่พอ'
);
| {
"pile_set_name": "Github"
} |
// Python Tools for Visual Studio
// Copyright(c) Microsoft Corporation
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the License); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
//
// See the Apache Version 2.0 License for specific language governing
// permissions and limitations under the License.
using Microsoft.Python.Core.Text;
namespace Microsoft.Python.Parsing.Ast {
// Conversions between SourceLocation (1-based line/column) and other
// position representations.
public static class SourceLocationExtensions {
// Maps a source location to an absolute character index via the converter.
public static int ToIndex(this SourceLocation location, ILocationConverter lc) => lc.LocationToIndex(location);
// Converts a 0-based Position to a 1-based SourceLocation. When a
// converter is supplied, the result is rebuilt so it also carries the
// absolute index for that line/column.
public static SourceLocation ToSourceLocation(this Position position, ILocationConverter lc = null) {
var location = new SourceLocation(position.line + 1, position.character + 1);
if (lc == null) {
return location;
}
return new SourceLocation(lc.LocationToIndex(location), location.Line, location.Column);
}
}
// Conversions from a Range to index-span / source-span representations.
public static class RangeExtensions {
// Converts the range's start/end positions to absolute character indices.
public static IndexSpan ToIndexSpan(this Range range, ILocationConverter lc)
=> IndexSpan.FromBounds(lc.LocationToIndex(range.start), lc.LocationToIndex(range.end));
// Converts to a SourceSpan; lc may be null, in which case the endpoints are
// translated without absolute indices (see Position.ToSourceLocation).
public static SourceSpan ToSourceSpan(this Range range, ILocationConverter lc = null)
=> new SourceSpan(range.start.ToSourceLocation(lc), range.end.ToSourceLocation(lc));
}
// Conversion from a line/column SourceSpan to an absolute IndexSpan.
public static class SourceSpanExtensions {
// Maps the span's start/end locations to absolute character indices.
public static IndexSpan ToIndexSpan(this SourceSpan span, ILocationConverter lc)
=> IndexSpan.FromBounds(lc.LocationToIndex(span.Start), lc.LocationToIndex(span.End));
}
// Conversions and queries on absolute-index spans.
public static class IndexSpanExtensions {
// Maps the span's bounds back to line/column form; returns default(SourceSpan)
// when no converter is available.
public static SourceSpan ToSourceSpan(this IndexSpan span, ILocationConverter lc)
=> lc != null ? new SourceSpan(lc.IndexToLocation(span.Start), lc.IndexToLocation(span.End)) : default;
// True when `other` lies entirely within this span (bounds inclusive).
public static bool Contains(this IndexSpan span, IndexSpan other) {
return span.Start <= other.Start && other.End <= span.End;
}
}
}
| {
"pile_set_name": "Github"
} |
[metadata]
name = corpkit
description-file = README.md
description = A toolkit for working with parsed corpora
url = http://github.com/interrogator/corpkit
author = Daniel McDonald
author-email = mcdonaldd@unimelb.edu.au
[bdist_wheel]
universal = 1 | {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////
// weighted_tail_variate_means.hpp
//
// Copyright 2006 Daniel Egloff, Olivier Gygi. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ACCUMULATORS_STATISTICS_WEIGHTED_TAIL_VARIATE_MEANS_HPP_DE_01_01_2006
#define BOOST_ACCUMULATORS_STATISTICS_WEIGHTED_TAIL_VARIATE_MEANS_HPP_DE_01_01_2006
#include <numeric>
#include <vector>
#include <limits>
#include <functional>
#include <sstream>
#include <stdexcept>
#include <boost/throw_exception.hpp>
#include <boost/parameter/keyword.hpp>
#include <boost/mpl/placeholders.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/accumulators/numeric/functional.hpp>
#include <boost/accumulators/framework/accumulator_base.hpp>
#include <boost/accumulators/framework/extractor.hpp>
#include <boost/accumulators/framework/parameters/sample.hpp>
#include <boost/accumulators/statistics_fwd.hpp>
#include <boost/accumulators/statistics/tail.hpp>
#include <boost/accumulators/statistics/tail_variate.hpp>
#include <boost/accumulators/statistics/tail_variate_means.hpp>
#include <boost/accumulators/statistics/weighted_tail_mean.hpp>
#include <boost/accumulators/statistics/parameters/quantile_probability.hpp>
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable: 4127) // conditional expression is constant
#endif
namespace boost
{
// for _BinaryOperatrion2 in std::inner_product below
// mutliplies two values and promotes the result to double
namespace numeric { namespace functional
{
///////////////////////////////////////////////////////////////////////////////
// numeric::functional::multiply_and_promote_to_double
template<typename T, typename U>
struct multiply_and_promote_to_double
: multiplies<T, double const>
{
};
}}
}
namespace boost { namespace accumulators
{
namespace impl
{
/**
@brief Estimation of the absolute and relative weighted tail variate means (for both left and right tails)
For all \f$j\f$-th variates associated to the
\f[
\lambda = \inf\left\{ l \left| \frac{1}{\bar{w}_n}\sum_{i=1}^{l} w_i \geq \alpha \right. \right\}
\f]
smallest samples (left tail) or the weighted mean of the
\f[
n + 1 - \rho = n + 1 - \sup\left\{ r \left| \frac{1}{\bar{w}_n}\sum_{i=r}^{n} w_i \geq (1 - \alpha) \right. \right\}
\f]
largest samples (right tail), the absolute weighted tail means \f$\widehat{ATM}_{n,\alpha}(X, j)\f$
are computed and returned as an iterator range. Alternatively, the relative weighted tail means
\f$\widehat{RTM}_{n,\alpha}(X, j)\f$ are returned, which are the absolute weighted tail means
normalized with the weighted (non-coherent) sample tail mean \f$\widehat{NCTM}_{n,\alpha}(X)\f$.
\f[
\widehat{ATM}_{n,\alpha}^{\mathrm{right}}(X, j) =
\frac{1}{\sum_{i=\rho}^n w_i}
\sum_{i=\rho}^n w_i \xi_{j,i}
\f]
\f[
\widehat{ATM}_{n,\alpha}^{\mathrm{left}}(X, j) =
\frac{1}{\sum_{i=1}^{\lambda}}
\sum_{i=1}^{\lambda} w_i \xi_{j,i}
\f]
\f[
\widehat{RTM}_{n,\alpha}^{\mathrm{right}}(X, j) =
\frac{\sum_{i=\rho}^n w_i \xi_{j,i}}
{\sum_{i=\rho}^n w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{right}}(X)}
\f]
\f[
\widehat{RTM}_{n,\alpha}^{\mathrm{left}}(X, j) =
\frac{\sum_{i=1}^{\lambda} w_i \xi_{j,i}}
{\sum_{i=1}^{\lambda} w_i \widehat{NCTM}_{n,\alpha}^{\mathrm{left}}(X)}
\f]
*/
///////////////////////////////////////////////////////////////////////////////
// weighted_tail_variate_means_impl
// by default: absolute weighted_tail_variate_means
template<typename Sample, typename Weight, typename Impl, typename LeftRight, typename VariateType>
struct weighted_tail_variate_means_impl
  : accumulator_base
{
    typedef typename numeric::functional::average<Weight, Weight>::result_type float_type;
    typedef typename numeric::functional::average<typename numeric::functional::multiplies<VariateType, Weight>::result_type, Weight>::result_type array_type;
    // for boost::result_of
    typedef iterator_range<typename array_type::iterator> result_type;

    weighted_tail_variate_means_impl(dont_care) {}

    /// Computes the absolute (or, when Impl == relative, the relative)
    /// weighted tail variate means over the tail selected by LeftRight.
    ///
    /// The number of tail samples n is the smallest count whose accumulated
    /// weight reaches quantile_probability (left tail) resp.
    /// 1 - quantile_probability (right tail) of the total weight.
    template<typename Args>
    result_type result(Args const &args) const
    {
        float_type threshold = sum_of_weights(args)
            * ( ( is_same<LeftRight, left>::value ) ? args[quantile_probability] : 1. - args[quantile_probability] );

        std::size_t n = 0;
        Weight sum = Weight(0);

        while (sum < threshold)
        {
            if (n < static_cast<std::size_t>(tail_weights(args).size()))
            {
                sum += *(tail_weights(args).begin() + n);
                n++;
            }
            else
            {
                // The cached tail is exhausted and `sum` can no longer grow,
                // so we must leave the loop here: either report NaN for every
                // variate mean, or throw. (Previously the NaN branch fell
                // back into the while-loop, which then spun forever.)
                if (std::numeric_limits<float_type>::has_quiet_NaN)
                {
                    // NOTE(review): assumes at least one variate vector is
                    // cached so its size can be read -- TODO confirm.
                    std::size_t num_variates = tail_variate(args).begin()->size();
                    this->tail_means_.clear();
                    this->tail_means_.resize(num_variates, std::numeric_limits<float_type>::quiet_NaN());
                    return make_iterator_range(this->tail_means_);
                }
                else
                {
                    std::ostringstream msg;
                    msg << "index n = " << n << " is not in valid range [0, " << tail(args).size() << ")";
                    boost::throw_exception(std::runtime_error(msg.str()));
                }
            }
        }

        std::size_t num_variates = tail_variate(args).begin()->size();

        this->tail_means_.clear();
        this->tail_means_.resize(num_variates, Sample(0));

        // Element-wise weighted sum of the n tail variate vectors, with the
        // products promoted to double ...
        this->tail_means_ = std::inner_product(
            tail_variate(args).begin()
          , tail_variate(args).begin() + n
          , tail_weights(args).begin()
          , this->tail_means_
          , numeric::functional::plus<array_type const, array_type const>()
          , numeric::functional::multiply_and_promote_to_double<VariateType const, Weight const>()
        );

        // ... normalized by the accumulated tail weight and, for the relative
        // variant, additionally by the non-coherent weighted tail mean.
        float_type factor = sum * ( (is_same<Impl, relative>::value) ? non_coherent_weighted_tail_mean(args) : 1. );

        std::transform(
            this->tail_means_.begin()
          , this->tail_means_.end()
          , this->tail_means_.begin()
          , std::bind2nd(numeric::functional::divides<typename array_type::value_type const, float_type const>(), factor)
        );

        return make_iterator_range(this->tail_means_);
    }

private:
    // Result storage; mutable because result() is const but caches here.
    mutable array_type tail_means_;
};
} // namespace impl
///////////////////////////////////////////////////////////////////////////////
// tag::absolute_weighted_tail_variate_means
// tag::relative_weighted_tail_variate_means
//
namespace tag
{
template<typename LeftRight, typename VariateType, typename VariateTag>
struct absolute_weighted_tail_variate_means
: depends_on<non_coherent_weighted_tail_mean<LeftRight>, tail_variate<VariateType, VariateTag, LeftRight>, tail_weights<LeftRight> >
{
typedef accumulators::impl::weighted_tail_variate_means_impl<mpl::_1, mpl::_2, absolute, LeftRight, VariateType> impl;
};
template<typename LeftRight, typename VariateType, typename VariateTag>
struct relative_weighted_tail_variate_means
: depends_on<non_coherent_weighted_tail_mean<LeftRight>, tail_variate<VariateType, VariateTag, LeftRight>, tail_weights<LeftRight> >
{
typedef accumulators::impl::weighted_tail_variate_means_impl<mpl::_1, mpl::_2, relative, LeftRight, VariateType> impl;
};
}
///////////////////////////////////////////////////////////////////////////////
// extract::weighted_tail_variate_means
// extract::relative_weighted_tail_variate_means
//
namespace extract
{
extractor<tag::abstract_absolute_tail_variate_means> const weighted_tail_variate_means = {};
extractor<tag::abstract_relative_tail_variate_means> const relative_weighted_tail_variate_means = {};
BOOST_ACCUMULATORS_IGNORE_GLOBAL(weighted_tail_variate_means)
BOOST_ACCUMULATORS_IGNORE_GLOBAL(relative_weighted_tail_variate_means)
}
using extract::weighted_tail_variate_means;
using extract::relative_weighted_tail_variate_means;
// weighted_tail_variate_means<LeftRight, VariateType, VariateTag>(absolute) -> absolute_weighted_tail_variate_means<LeftRight, VariateType, VariateTag>
template<typename LeftRight, typename VariateType, typename VariateTag>
struct as_feature<tag::weighted_tail_variate_means<LeftRight, VariateType, VariateTag>(absolute)>
{
typedef tag::absolute_weighted_tail_variate_means<LeftRight, VariateType, VariateTag> type;
};
// weighted_tail_variate_means<LeftRight, VariateType, VariateTag>(relative) -> relative_weighted_tail_variate_means<LeftRight, VariateType, VariateTag>
template<typename LeftRight, typename VariateType, typename VariateTag>
struct as_feature<tag::weighted_tail_variate_means<LeftRight, VariateType, VariateTag>(relative)>
{
typedef tag::relative_weighted_tail_variate_means<LeftRight, VariateType, VariateTag> type;
};
}} // namespace boost::accumulators
#ifdef _MSC_VER
# pragma warning(pop)
#endif
#endif
| {
"pile_set_name": "Github"
} |
//
// Copyright 2018 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// FunctionLookup.cpp: Used for storing function calls that have not yet been resolved during
// parsing.
//
#include "compiler/translator/FunctionLookup.h"
#include "compiler/translator/ImmutableStringBuilder.h"
namespace sh
{
namespace
{
const char kFunctionMangledNameSeparator = '(';
constexpr const ImmutableString kEmptyName("");
// Helper function for GetMangledNames
// Gets all ordered combinations of elements in list[currentIndex, end]
// Helper function for GetMangledNames.
// Produces every ordered subset of `list` (2^k subsets for k elements),
// beginning with the empty subset; relative element order is preserved.
std::vector<std::vector<int>> GetImplicitConversionCombinations(const std::vector<int> &list)
{
    std::vector<std::vector<int>> subsets(1);  // seed with the empty subset
    for (int element : list)
    {
        // Extend a snapshot of every subset built so far with `element`.
        const std::size_t snapshotSize = subsets.size();
        for (std::size_t i = 0; i < snapshotSize; ++i)
        {
            std::vector<int> extended = subsets[i];
            extended.push_back(element);
            subsets.push_back(extended);
        }
    }
    return subsets;
}
} // anonymous namespace
TFunctionLookup::TFunctionLookup(const ImmutableString &name,
const TType *constructorType,
const TSymbol *symbol)
: mName(name), mConstructorType(constructorType), mThisNode(nullptr), mSymbol(symbol)
{}
// static
TFunctionLookup *TFunctionLookup::CreateConstructor(const TType *type)
{
ASSERT(type != nullptr);
return new TFunctionLookup(kEmptyName, type, nullptr);
}
// static
TFunctionLookup *TFunctionLookup::CreateFunctionCall(const ImmutableString &name,
const TSymbol *symbol)
{
ASSERT(name != "");
return new TFunctionLookup(name, nullptr, symbol);
}
const ImmutableString &TFunctionLookup::name() const
{
return mName;
}
ImmutableString TFunctionLookup::getMangledName() const
{
return GetMangledName(mName.data(), mArguments);
}
// Builds the mangled name for a call: the function name, the '(' separator,
// then the mangled type name of each argument in order.
ImmutableString TFunctionLookup::GetMangledName(const char *functionName,
const TIntermSequence &arguments)
{
std::string newName(functionName);
newName += kFunctionMangledNameSeparator;
for (TIntermNode *argument : arguments)
{
newName += argument->getAsTyped()->getType().getMangledName();
}
return ImmutableString(newName);
}
// Returns every mangled name the call could match once implicit
// int/uint -> float argument conversions are considered: one candidate per
// ordered combination of convertible argument positions, including the
// empty combination (the exact mangled name).
std::vector<ImmutableString> GetMangledNames(const char *functionName,
                                             const TIntermSequence &arguments)
{
    std::vector<ImmutableString> target;

    // Collect the indices of arguments eligible for implicit conversion.
    std::vector<int> indexes;
    for (int i = 0; i < static_cast<int>(arguments.size()); i++)
    {
        TIntermNode *argument = arguments[i];
        TBasicType argType    = argument->getAsTyped()->getType().getBasicType();
        if (argType == EbtInt || argType == EbtUInt)
        {
            indexes.push_back(i);
        }
    }
    std::vector<std::vector<int>> combinations = GetImplicitConversionCombinations(indexes);
    for (const std::vector<int> &combination : combinations)
    {
        // combination: ordered list of indexes for arguments that should be
        // converted to float.
        std::string newName(functionName);
        newName += kFunctionMangledNameSeparator;
        // combination[currentIndex] is the index of the next argument to be
        // converted.
        int currentIndex = 0;
        // Unified on static_cast here; the original mixed a C-style
        // (int)arguments.size() with static_cast three lines later.
        for (int i = 0; i < static_cast<int>(arguments.size()); i++)
        {
            TIntermNode *argument = arguments[i];
            if (currentIndex != static_cast<int>(combination.size()) &&
                combination[currentIndex] == i)
            {
                // Mangle this argument as if it had been converted to float.
                TType type = argument->getAsTyped()->getType();
                type.setBasicType(EbtFloat);
                newName += type.getMangledName();
                currentIndex++;
            }
            else
            {
                // Keep the argument's declared type.
                newName += argument->getAsTyped()->getType().getMangledName();
            }
        }
        target.push_back(ImmutableString(newName));
    }
    return target;
}
std::vector<ImmutableString> TFunctionLookup::getMangledNamesForImplicitConversions() const
{
return GetMangledNames(mName.data(), mArguments);
}
bool TFunctionLookup::isConstructor() const
{
return mConstructorType != nullptr;
}
const TType &TFunctionLookup::constructorType() const
{
return *mConstructorType;
}
void TFunctionLookup::setThisNode(TIntermTyped *thisNode)
{
mThisNode = thisNode;
}
TIntermTyped *TFunctionLookup::thisNode() const
{
return mThisNode;
}
void TFunctionLookup::addArgument(TIntermTyped *argument)
{
mArguments.push_back(argument);
}
TIntermSequence &TFunctionLookup::arguments()
{
return mArguments;
}
const TSymbol *TFunctionLookup::symbol() const
{
return mSymbol;
}
} // namespace sh
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
#import <Safari/SandboxExtensionPresentationDelegate-Protocol.h>
@class NSMutableDictionary, NSString, NSURL, SandboxFileExtensionController;
@protocol OS_dispatch_queue;
__attribute__((visibility("hidden")))
@interface ResourcePreferencesController : NSObject <SandboxExtensionPresentationDelegate>
{
SandboxFileExtensionController *_fileExtensionController;
NSMutableDictionary *_preferenceTypesToExtensionTokenPairs;
NSObject<OS_dispatch_queue> *_internalQueue;
NSString *_cachedHomePage;
}
+ (id)defaultHomePageURLString;
- (void).cxx_destruct;
- (void)_obtainExtensionTokenIfNecessaryForFileURL:(id)arg1 ofType:(unsigned long long)arg2 permissions:(unsigned long long)arg3;
- (void)_addExtensionForAccessibleFileURL:(id)arg1 ofType:(unsigned long long)arg2;
- (void)_removeExtensionWithType:(unsigned long long)arg1;
- (void)prefetchSandboxExtensionsWithCompletionHandler:(CDUnknownBlockType)arg1;
- (void)sandboxExtensionPresentationController:(id)arg1 configureOpenPanel:(id)arg2 forURL:(id)arg3;
- (id)createSandboxExtensionPresentationController;
@property(readonly, copy, nonatomic) NSURL *styleSheetFileURLForMigration;
- (id)homePageURLObtainingExtensionTokenIfNecessary:(BOOL)arg1;
- (void)setSandboxExtensionWithToken:(id)arg1 forURL:(id)arg2 ofType:(unsigned long long)arg3;
@property(copy, nonatomic) NSString *homePage;
- (id)secureDefaults;
- (id)homePageObtainingExtensionTokenIfNecessary:(BOOL)arg1;
@property(readonly, nonatomic) BOOL homePageIsForced;
@property(copy, nonatomic) NSURL *styleSheetFileURL;
@property(readonly, copy, nonatomic) NSURL *downloadURLForMigration;
@property(readonly, copy, nonatomic) NSString *fullyResolvedDownloadPath;
@property(copy, nonatomic) NSString *downloadPath;
- (id)initWithSandboxFileExtensionController:(id)arg1;
- (id)init;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| {
"pile_set_name": "Github"
} |
This is our page
================
We are two enthusiastic entrepre-coders who love to invent and innovate beyond what's possible.
About your team
===========================
| Josh | Micaiah
|--- |---
|  | 
The reason we joined this hackathon was to have a fun way to put our programming/problem solving skills to the test.
Josh and I share a way of thinking about everything we encounter: whenever something causes problems or isn't efficient, we ask, "How
can we fix this?" That motivation drives the majority of our projects; the rest fall along the lines of,
"How can we turn this microwave into something useful other than cooking hot pockets?" Either way, it's our passion to make
things that benefit people in whatever they are doing — making technology work for us, not the other way around.
About your skills and what you are going to do?
=======
We tend to work best with HTML5 and jQuery, so our project will probably
revolve around web technologies, including PHP for our server-side code. That makes
the educational multiplayer game a likely option for our theme choice.
"pile_set_name": "Github"
} |
*** THIS FILE CONTAINS INFORMATION ABOUT CHANGES DONE TO THE JEMALLOC LIBRARY FILES ***
Removed from archive, as OSX does not use jemalloc:
src/zone.c
include/jemalloc/internal/zone.h
| {
"pile_set_name": "Github"
} |
const React = require('react');
class Conditional extends React.Component {
render() {
const { idyll, hasError, updateProps, ...props } = this.props;
if (!props.if) {
return <div style={{ display: 'none' }}>{props.children}</div>;
}
return <div>{props.children}</div>;
}
}
Conditional._idyll = {
name: 'Conditional',
tagType: 'open',
children: ['Some text'],
props: [
{
name: 'if',
type: 'expression',
example: '`x < 10`',
description:
'An expression; if this evaluates to true, the children will be rendered, otherwise nothing will be drawn to the screen'
}
]
};
module.exports = Conditional;
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"bufio"
"io"
"net/rpc"
"sync"
)
// rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode.
//
// Some codecs like json need to put a space after each encoded value, to serve as a
// delimiter for things like numbers (else json codec will continue reading till EOF).
type rpcEncodeTerminator interface {
rpcEncodeTerminate() []byte
}
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accomodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
BufferedReader() *bufio.Reader
BufferedWriter() *bufio.Writer
}
// -------------------------------------
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
rwc io.ReadWriteCloser
dec *Decoder
enc *Encoder
bw *bufio.Writer
br *bufio.Reader
mu sync.Mutex
h Handle
cls bool
clsmu sync.RWMutex
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
bw := bufio.NewWriter(conn)
br := bufio.NewReader(conn)
return rpcCodec{
rwc: conn,
bw: bw,
br: br,
enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
h: h,
}
}
func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
// write encodes obj1 (and, when writeObj2 is set, obj2) onto the buffered
// writer, appending the handle's rpc terminator after each encoded value
// when the handle provides one, and flushing at the end when doFlush is set.
// It returns io.EOF once the codec has been closed.
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
	if c.isClosed() {
		return io.EOF
	}
	if err = c.enc.Encode(obj1); err != nil {
		return
	}
	t, tOk := c.h.(rpcEncodeTerminator)
	if tOk {
		// Propagate terminator write failures instead of silently dropping
		// them (previously the error return of bw.Write was ignored, which
		// could corrupt the stream undetected).
		if _, err = c.bw.Write(t.rpcEncodeTerminate()); err != nil {
			return
		}
	}
	if writeObj2 {
		if err = c.enc.Encode(obj2); err != nil {
			return
		}
		if tOk {
			if _, err = c.bw.Write(t.rpcEncodeTerminate()); err != nil {
				return
			}
		}
	}
	if doFlush {
		return c.bw.Flush()
	}
	return
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.isClosed() {
return io.EOF
}
//If nil is passed in, we should still attempt to read content to nowhere.
if obj == nil {
var obj2 interface{}
return c.dec.Decode(&obj2)
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) isClosed() bool {
c.clsmu.RLock()
x := c.cls
c.clsmu.RUnlock()
return x
}
// Close marks the codec closed and closes the underlying connection.
// Subsequent calls (and subsequent reads/writes) observe the closed state
// and return io.EOF.
func (c *rpcCodec) Close() error {
	// Check-and-set under the write lock: the previous pattern of calling
	// isClosed() before locking let two concurrent callers both pass the
	// check and close rwc twice.
	c.clsmu.Lock()
	if c.cls {
		c.clsmu.Unlock()
		return io.EOF
	}
	c.cls = true
	c.clsmu.Unlock()
	return c.rwc.Close()
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
type goRpcCodec struct {
rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
| {
"pile_set_name": "Github"
} |
# Stubs for scrapy.commands.shell (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from scrapy.commands import ScrapyCommand
from typing import Any
class Command(ScrapyCommand):
requires_project: bool = ...
default_settings: Any = ...
def syntax(self): ...
def short_desc(self): ...
def long_desc(self): ...
def add_options(self, parser: Any) -> None: ...
def update_vars(self, vars: Any) -> None: ...
def run(self, args: Any, opts: Any) -> None: ...
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2018-present, NebulaChat Studio (https://nebula.chat).
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Author: Benqi (wubenqi@gmail.com)
package messages
import (
"github.com/golang/glog"
"github.com/nebula-chat/chatengine/pkg/grpc_util"
"github.com/nebula-chat/chatengine/pkg/logger"
"github.com/nebula-chat/chatengine/mtproto"
"golang.org/x/net/context"
)
// messages.search#f288a275 flags:# peer:InputPeer q:string from_id:flags.0?InputUser filter:MessagesFilter min_date:int max_date:int offset:int max_id:int limit:int = messages.Messages;
// MessagesSearchLayer68 implements messages.search#f288a275 for API layer 68.
// Currently a stub: it logs the request and returns an empty
// messages.Messages result.
func (s *MessagesServiceImpl) MessagesSearchLayer68(ctx context.Context, request *mtproto.TLMessagesSearchLayer68) (*mtproto.Messages_Messages, error) {
	md := grpc_util.RpcMetadataFromIncoming(ctx)
	// Log tags fixed: both lines previously carried copy-pasted names
	// ("MessagesSearch" / "MessagesGetHistory") from other handlers.
	glog.Infof("MessagesSearchLayer68 - metadata: %s, request: %s", logger.JsonDebugData(md), logger.JsonDebugData(request))
	// TODO(@benqi): Not impl
	messages := mtproto.NewTLMessagesMessages()
	glog.Infof("MessagesSearchLayer68 - reply: %s", messages)
	return messages.To_Messages_Messages(), nil
}
| {
"pile_set_name": "Github"
} |
/* Code automatically generated by Vult https://github.com/modlfo/vult */
#include "aff_f.h"
extern "C" {

// Max/MSP class pointer for the aff_f~ external; registered in ext_main().
static t_class *aff_f_tilde_class;

// Per-instance state for one aff_f~ object.
typedef struct _aff_f_tilde {
    t_pxobject x_obj;           // MSP audio-object header; must be the first member
    float dummy;
    float in0_value;            // last float received for inlet 0 (used when no signal is patched)
    short in0_connected;        // non-zero when a signal is connected to inlet 0
    float in1_value;            // last float received for inlet 1
    short in1_connected;        // non-zero when a signal is connected to inlet 1
    Aff_f_process_type data;    // Vult-generated DSP state
} t_aff_f_tilde;

// 64-bit perform routine: calls Aff_f_process once per sample frame.
// For an inlet without a signal connection, the stored float value is
// substituted for every frame of the vector.
void aff_f_tilde_perform(t_aff_f_tilde *x, t_object *dsp64, double **ins, long numins, double **outs, long numouts, long sampleframes, long flags, void *userparam)
{
    double *in_0 = ins[0];
    double *in_1 = ins[1];
    double *out_0 = outs[0];
    int n = sampleframes;
    while (n--) {
        float in_0_value = x->in0_connected? *(in_0++): x->in0_value;
        float in_1_value = x->in1_connected? *(in_1++): x->in1_value;
        float ret = Aff_f_process(x->data,(float) in_0_value,(float) in_1_value);
        // Output buffer is double; the float cast narrows the Vult result first.
        *(out_0++) = (float) ret;
    }
}

// dsp64 method: records which inlets have live signal connections and adds
// the perform routine to the DSP chain.
void aff_f_tilde_dsp(t_aff_f_tilde *x, t_object *dsp64, short *count, double samplerate, long maxvectorsize, long flags)
{
    x->in0_connected = count[0];
    x->in1_connected = count[1];
    object_method(dsp64, gensym("dsp_add64"), x, aff_f_tilde_perform, 0, NULL);
}

// Constructor: allocates the object, initializes the Vult processor state,
// and sets up two signal inlets plus one signal outlet.
void *aff_f_tilde_new(t_symbol *s, long argc, t_atom *argv)
{
    t_aff_f_tilde *x = (t_aff_f_tilde *)object_alloc(aff_f_tilde_class);
    Aff_f_process_init(x->data);
    Aff_f_default(x->data);
    dsp_setup((t_pxobject *)x, 2);
    outlet_new((t_object *)x, "signal");
    return (void *)x;
}

// Destructor: nothing to release — the Vult state is embedded in the struct.
void aff_f_tilde_delete(t_aff_f_tilde *x){
}

// "noteOn" message: velocity 0 is treated as a note-off, per MIDI convention.
void aff_f_noteOn(t_aff_f_tilde *x, double note, double velocity, double channel){
    if((int)velocity) Aff_f_noteOn(x->data,(int)note,(int)velocity,(int)channel);
    else Aff_f_noteOff(x->data,(int)note,(int)channel);
}

// "noteOff" message handler.
void aff_f_noteOff(t_aff_f_tilde *x, double note, double channel) {
    Aff_f_noteOff(x->data,(int)note,(int)channel);
}

// "controlChange" message handler (MIDI CC forwarded to the Vult code).
void aff_f_controlChange(t_aff_f_tilde *x, double control, double value, double channel) {
    Aff_f_controlChange(x->data,(int)control,(int)value,(int)channel);
}

// Float handler: stores the incoming value for whichever inlet received it so
// the perform routine can use it when that inlet has no signal connection.
void aff_f_float(t_aff_f_tilde *x, double f){
    int in = proxy_getinlet((t_object *)x);
    if(in == 0) x->in0_value = f;
    if(in == 1) x->in1_value = f;
}

// Entry point: registers the aff_f~ class and its message handlers with Max.
void ext_main(void *r) {
    aff_f_tilde_class = class_new("aff_f~",
        (method)aff_f_tilde_new, // constructor function
        (method)aff_f_tilde_delete, // destructor function
        (long)sizeof(t_aff_f_tilde), // size of the object
        0L, A_GIMME, 0); // arguments passed
    class_addmethod(aff_f_tilde_class,(method)aff_f_tilde_dsp, "dsp64", A_CANT, 0);
    class_addmethod(aff_f_tilde_class, (method)aff_f_noteOn, "noteOn", A_DEFFLOAT, A_DEFFLOAT, A_DEFFLOAT, 0);
    class_addmethod(aff_f_tilde_class, (method)aff_f_noteOff, "noteOff", A_DEFFLOAT, A_DEFFLOAT, 0);
    class_addmethod(aff_f_tilde_class, (method)aff_f_controlChange, "controlChange", A_DEFFLOAT, A_DEFFLOAT, A_DEFFLOAT, 0);
    class_addmethod(aff_f_tilde_class, (method)aff_f_float, "float", A_FLOAT, 0);
    class_dspinit(aff_f_tilde_class);
    class_register(CLASS_BOX, aff_f_tilde_class);
}

} // extern "C"
| {
"pile_set_name": "Github"
} |
eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}'
    & eval 'exec perl -S $0 $argv:q'
    if 0;

# OpenDDS publisher/subscriber integration test driver.
# Locates the PerlDDS/PerlACE helpers relative to $DDS_ROOT and $ACE_ROOT.
use Env (DDS_ROOT);
use lib "$DDS_ROOT/bin";
use Env (ACE_ROOT);
use lib "$ACE_ROOT/bin";

use PerlDDS::Run_Test;
use strict;

# Generated IDL type-support libraries live under Idl/.
PerlDDS::add_lib_path("Idl");

my $test = new PerlDDS::TestFramework();
$test->enable_console_logging();

# Pass "callback" through to the subscriber when given on the command line.
my $callback = '';
if($test->flag('callback')) {
    $callback = 'callback';
}

# Start the subscriber first and give it time to come up before publishing.
$test->process('Subscriber', 'Subscriber/subscriber', $callback);
$test->start_process('Subscriber');

sleep 5;

$test->process('Publisher', 'Publisher/publisher');
$test->start_process('Publisher');

# Wait up to 30 seconds; the exit status reflects the test result.
exit $test->finish(30);
| {
"pile_set_name": "Github"
} |
#include <bits/stdc++.h>
using namespace std;
#define MAXN 500009
#define MAXLOGN 20
#define FOR(i, n) for(int i = 0; i < n; i++)
#define REP(i, n) for(int i = n-1; i >= 0; i--)
#define FOR1(i, n) for(int i = 1; i <= n; i++)
#define REP1(i, n) for(int i = n; i > 0; i--)
#define fi first
#define se second
#define pb push_back
#define mset(x, y) memset(&x, y, sizeof x)
typedef long long ll;
typedef pair<int, int> ii;
// vsum[i] = number of vowels in s[0..i] (inclusive prefix sums, filled in main).
int vsum[MAXN];
// Input string, read in main.
char s[MAXN];
// True iff c is one of the lowercase English vowels a, e, i, o, u.
bool isVowel(char c) {
    switch (c) {
        case 'a':
        case 'e':
        case 'i':
        case 'o':
        case 'u':
            return true;
        default:
            return false;
    }
}
// Number of vowels in the inclusive range s[i..j], via the vsum prefix sums.
int sum(int i, int j) {
    const int before = (i > 0) ? vsum[i - 1] : 0;
    return vsum[j] - before;
}
// Recursively counts arrangements over the substring s[l..r].
// d selects which end is required to be a vowel first: d == 0 checks the left
// end, d == 1 checks the right end.  Requires the vsum prefix sums (via sum)
// to be filled for s.
// NOTE(review): exact problem semantics not visible from this file; the
// recursion assumes the answer fits in a long long — confirm against the
// original problem statement.
ll solve(int l, int r, int d) {
    // Base case: a single character, or a vowel-free range, counts as one way.
    if (r == l || sum(l, r) == 0) return 1;
    ll ans = 0;
    if (d == 0) {
        // The left end must be a vowel to continue in this direction.
        if (!isVowel(s[l])) return 0;
        ans = solve(l+1, r, 1);
        // A consonant at the right end also allows shrinking from the right.
        if (!isVowel(s[r])) ans += solve(l, r-1, 0);
    }
    else {
        // Mirror image: the right end must be a vowel.
        if (!isVowel(s[r])) return 0;
        ans = solve(l, r-1, 0);
        if (!isVowel(s[l])) ans += solve(l+1, r, 1);
    }
    return ans;
}
// Reads a single word, builds the vowel prefix sums, and prints the count
// computed by solve over the whole string.
int main() {
    scanf(" %s", s);
    int n = strlen(s);
    // Build prefix sums so sum(i, j) answers vowel-count queries in O(1).
    int acum = 0;
    FOR(i, n) {
        acum += isVowel(s[i]);
        vsum[i] = acum;
    }
    printf("%lld\n", solve(0, n-1, 0));
    return 0;
}
"pile_set_name": "Github"
} |
namespace AngleSharp.Dom.Html
{
    using AngleSharp.Attributes;
    using System;

    /// <summary>
    /// Represents the data HTML element, which links human-readable content
    /// with a machine-readable value.
    /// </summary>
    [DomName("HTMLDataElement")]
    public interface IHtmlDataElement : IHtmlElement
    {
        /// <summary>
        /// Gets or sets the machine readable value.
        /// </summary>
        [DomName("value")]
        String Value { get; set; }
    }
}
| {
"pile_set_name": "Github"
} |
// AngularJS $locale bundle for "zh-cn" (Simplified Chinese, China).
// Generated-style locale data: date/time formats, day/month names as
// \uXXXX-escaped Chinese strings, and number/currency patterns.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": {
      "0": "\u4e0a\u5348",
      "1": "\u4e0b\u5348"
    },
    "DAY": {
      "0": "\u661f\u671f\u65e5",
      "1": "\u661f\u671f\u4e00",
      "2": "\u661f\u671f\u4e8c",
      "3": "\u661f\u671f\u4e09",
      "4": "\u661f\u671f\u56db",
      "5": "\u661f\u671f\u4e94",
      "6": "\u661f\u671f\u516d"
    },
    "MONTH": {
      "0": "1\u6708",
      "1": "2\u6708",
      "2": "3\u6708",
      "3": "4\u6708",
      "4": "5\u6708",
      "5": "6\u6708",
      "6": "7\u6708",
      "7": "8\u6708",
      "8": "9\u6708",
      "9": "10\u6708",
      "10": "11\u6708",
      "11": "12\u6708"
    },
    "SHORTDAY": {
      "0": "\u5468\u65e5",
      "1": "\u5468\u4e00",
      "2": "\u5468\u4e8c",
      "3": "\u5468\u4e09",
      "4": "\u5468\u56db",
      "5": "\u5468\u4e94",
      "6": "\u5468\u516d"
    },
    "SHORTMONTH": {
      "0": "1\u6708",
      "1": "2\u6708",
      "2": "3\u6708",
      "3": "4\u6708",
      "4": "5\u6708",
      "5": "6\u6708",
      "6": "7\u6708",
      "7": "8\u6708",
      "8": "9\u6708",
      "9": "10\u6708",
      "10": "11\u6708",
      "11": "12\u6708"
    },
    "fullDate": "y\u5e74M\u6708d\u65e5EEEE",
    "longDate": "y\u5e74M\u6708d\u65e5",
    "medium": "yyyy-M-d ah:mm:ss",
    "mediumDate": "yyyy-M-d",
    "mediumTime": "ah:mm:ss",
    "short": "yy-M-d ah:mm",
    "shortDate": "yy-M-d",
    "shortTime": "ah:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "\u00a5",
    "DECIMAL_SEP": ".",
    "GROUP_SEP": ",",
    "PATTERNS": {
      "0": {
        "gSize": 3,
        "lgSize": 3,
        "macFrac": 0,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      "1": {
        "gSize": 3,
        "lgSize": 3,
        "macFrac": 0,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "(\u00a4",
        "negSuf": ")",
        "posPre": "\u00a4",
        "posSuf": ""
      }
    }
  },
  "id": "zh-cn",
  // Chinese does not inflect plurals: every cardinal maps to OTHER.
  "pluralCat": function (n) { return PLURAL_CATEGORY.OTHER;}
});
}]);
"pile_set_name": "Github"
} |
import { Component, OnInit } from '@angular/core';
import { NgbActiveModal } from '@ng-bootstrap/ng-bootstrap';
import { ToastrService } from 'ngx-toastr';
import { TranslateService } from '@ngx-translate/core';
import { ApiService } from '@/app/core/api.service';
import { xor } from '@oznu/ngx-bs4-jsonform';
@Component({
  selector: 'app-unpair-accessory-modal',
  templateUrl: './unpair-accessory-modal.component.html',
  styleUrls: ['./unpair-accessory-modal.component.scss'],
})
export class UnpairAccessoryModalComponent implements OnInit {
  /** Pairings returned by the API, sorted so the main bridge (_main) comes last. */
  public pairings: any[];

  /** Id of the pairing currently being removed, or null when idle. */
  public deleting: null | string = null;

  constructor(
    public activeModal: NgbActiveModal,
    public toastr: ToastrService,
    private translate: TranslateService,
    private $api: ApiService,
  ) { }

  ngOnInit(): void {
    this.loadParings();
  }

  /**
   * Fetches the pairing list from /server/pairings. On failure, shows an
   * error toast and closes the modal.
   * (Method name keeps the historical "Parings" spelling because the
   * component template may reference it.)
   */
  async loadParings() {
    try {
      this.pairings = (await this.$api.get('/server/pairings').toPromise())
        .sort((a, b) => {
          return b._main ? 1 : -1;
        });
    } catch (e) {
      // Fixed user-facing typo: "cloud not be loaded" -> "could not be loaded".
      this.toastr.error('Paired accessories could not be loaded.', this.translate.instant('toast.title_error'));
      this.activeModal.close();
    }
  }

  /**
   * Un-pairs a single accessory by pairing id, refreshes the list, and
   * closes the modal when no pairings remain. A success toast reminds the
   * user that a restart is required.
   */
  removeAccessory(id: string) {
    this.deleting = id;
    this.$api.delete(`/server/pairings/${id}`).subscribe(
      async data => {
        await this.loadParings();
        if (!this.pairings.length) {
          this.activeModal.close();
        }
        this.deleting = null;
        this.toastr.success(
          this.translate.instant('plugins.settings.toast_restart_required'),
          this.translate.instant('toast.title_success'),
        );
      },
      err => {
        this.deleting = null;
        this.toastr.error('Failed to un-pair accessory.', this.translate.instant('toast.title_error'));
      },
    );
  }
}
| {
"pile_set_name": "Github"
} |
/**
* tcprstat -- Extract stats about TCP response times
* Copyright (C) 2010 Ignacio Nin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
**/
#include <stdio.h>
#include <pcap.h>
#include "config.h"
#include "functions.h"
#include "tcprstat.h"
/*
 * Help text printed by dump_usage(). The format placeholders are filled with
 * the program name (twice) and the compile-time defaults.
 * Fixed typos in the user-visible text: "stastic" -> "statistics",
 * "dealy" -> "delay", "media" -> "average".
 * NOTE(review): "%%T" is documented twice below (delayed-packet count and
 * Unix timestamp) -- confirm which code the output formatter actually
 * implements and remove the stale entry.
 */
char *usage_msg =
    "Usage: %s [--port <port>] [--format=<format>] [--interval=<sec>]\n"
    "          [--header[=<header>] | --no-header] [--iterations=<it>]\n"
    "          [--read=<file>]\n"
    "       %s --version | --help\n"
    "\n"
    "\t--read <file>, -r  Capture from pcap file <file>, not live.\n"
    "\t--time <ms>, -T  time delay > -T(time) count statistics.\n"
    "\t--log <file>, -o if time delay > -T(time), timestamp record to log file.\n"
    "\t--local <addresses>, -l\n"
    "\t                  <addresses> is a comma-separated list of ip\n"
    "\t                  addresses, which are used as the local list of\n"
    "\t                  addresses instead of pcap getting the list.\n"
    "\t                  This is useful when working with a pcap file got\n"
    "\t                  from another host with different addresses.\n"
    "\t--port <port>, -p  Capture traffic only for tcp/<port>.\n"
    "\t--format <format>, -f\n"
    "\t                  Output format. Argument is a string detailing\n"
    "\t                  how the information is presented. Accepted codes:\n"
    "\t                      %%n - Response time count\n"
    "\t                      %%a - Response time average in microseconds\n"
    "\t                      %%s - Response time sum\n"
    "\t                      %%x - Response time squares sum\n"
    "\t                      %%m - Minimum value\n"
    "\t                      %%M - Maximum value\n"
    "\t                      %%T - counts that package delay time(default 40ms)\n"
    "\t                      %%h - Median value\n"
    "\t                      %%S - Standard deviation\n"
    "\t                      %%v - Variance (square stddev)\n"
    "\t                      %%I - Iteration number\n"
    "\t                      %%t - Timestamp since iteration zero\n"
    "\t                      %%T - Unix timestamp\n"
    "\t                      %%%% - A literal %%\n"
    "\t                  Default is:\n"
    "                      \"%s\".\n"
    "\t                  Statistics may contain a percentile between\n"
    "\t                  the percentage sign and the code: %%99n, %%95a.\n"
    "\t--header[=<header>], --no-header\n"
    "\t                  Whether to output a header. If not supplied, a\n"
    "\t                  header is created out of the format. By default,\n"
    "\t                  the header is shown.\n"
    "\t--interval <seconds>, -t\n"
    "\t                  Output interval. Default is %d.\n"
    "\t--iterations <n>, -n\n"
    "\t                  Output iterations. Default is %d, 0 is infinity\n"
    "\n"
    "\t--help            Shows program information and usage.\n"
    "\t--version         Shows version information.\n"
    "\n"
;
/*
 * Print the usage text to the given stream, substituting the program name
 * and the compile-time defaults into usage_msg.  Always returns 0.
 */
int
dump_usage(FILE *stream) {
    fprintf(stream, usage_msg, program_name, program_name,
        DEFAULT_OUTPUT_FORMAT, DEFAULT_OUTPUT_INTERVAL, DEFAULT_OUTPUT_ITERATIONS);

    return 0;
}
/*
 * Print the full help: version banner followed by the usage text.
 * Always returns 0.
 */
int
dump_help(FILE *stream) {
    dump_version(stream);
    dump_usage(stream);

    return 0;
}
/*
 * Print the package name/version and the linked libpcap version.
 * Always returns 0.
 */
int
dump_version(FILE *stream) {
    fprintf(stream, "%s %s, %s.\n", PACKAGE_NAME, PACKAGE_VERSION,
        pcap_lib_version());

    return 0;
}
| {
"pile_set_name": "Github"
} |
In most cases, your application's entire UI will be created by templates
that are managed by the router.
But what if you have an Ember.js app that you need to embed into an
existing page, or run alongside other JavaScript frameworks, or serve from the
same domain as another app?
### Changing the Root Element
By default, your application will render the [application template](../../routing/defining-your-routes/#toc_the-application-route)
and attach it to the document's `body` element.
You can tell the application to append the application template to a
different element by specifying its `rootElement` property:
```javascript {data-filename=app/app.js}
import Ember from 'ember';
export default Ember.Application.extend({
rootElement: '#app'
});
```
This property can be specified as either an element or a
[jQuery-compatible selector
string](http://api.jquery.com/category/selectors/).
### Disabling URL Management
You can prevent Ember from making changes to the URL by [changing the
router's `location`](../specifying-url-type/) to
`none`:
```javascript {data-filename=config/environment.js}
var ENV = {
locationType: 'none'
};
```
### Specifying a Root URL
If your Ember application is one of multiple web applications served from the same domain, it may be necessary to indicate to the router what the root URL for your Ember application is. By default, Ember will assume it is served from the root of your domain.
For example, if you wanted to serve your blogging application from `http://emberjs.com/blog/`, it would be necessary to specify a root URL of `/blog/`.
This can be achieved by setting the `rootURL` on the router:
```javascript {data-filename=app/router.js}
Ember.Router.extend({
rootURL: '/blog/'
});
```
<!-- eof - needed for pages that end in a code block -->
| {
"pile_set_name": "Github"
} |
@extends('layouts.skeleton')

@section('content')
<div class="settings">

  {{-- Breadcrumb --}}
  <div class="breadcrumb">
    <div class="{{ Auth::user()->getFluidLayout() }}">
      <div class="row">
        <div class="col-12">
          <ul class="horizontal">
            <li>
              <a href="{{ route('dashboard.index') }}">{{ trans('app.breadcrumb_dashboard') }}</a>
            </li>
            <li>
              <a href="{{ route('settings.index') }}">{{ trans('app.breadcrumb_settings') }}</a>
            </li>
            <li>
              {{ trans('app.breadcrumb_settings_tags') }}
            </li>
          </ul>
        </div>
      </div>
    </div>
  </div>

  <div class="{{ Auth::user()->getFluidLayout() }}">
    <div class="row">

      @include('settings._sidebar')

      <div class="col-12 col-sm-9">
        <div class="br3 ba b--gray-monica bg-white mb4">
          <div class="pa3 bb b--gray-monica">

            {{-- Blank state shown when the account has no tags yet --}}
            @if (auth()->user()->account->tags->count() == 0)
            <div class="col-12 col-sm-9 blank-screen">
              <img src="img/settings/tags/tags.png">
              <h2>{{ trans('settings.tags_blank_title') }}</h2>
              <p>{{ trans('settings.tags_blank_description') }}</p>
            </div>
            @else
            <h3 class="with-actions">
              {{ trans('settings.tags_list_title') }}
            </h3>
            <p>{{ trans('settings.tags_list_description') }}</p>

            @if (session('success'))
            <div class="alert alert-success">
              {{ session('success') }}
            </div>
            @endif

            {{-- One row per tag: name, contact count, the contacts using it,
                 and a confirmed delete action --}}
            <ul class="table">
              @foreach (auth()->user()->account->tags as $tag)
              <li class="table-row" data-tag-id="{{ $tag->id }}">
                <div class="table-cell">
                  {{ $tag->name }}
                  <span class="tags-list-contact-number">({{ trans_choice('settings.tags_list_contact_number', $tag->contacts()->count(), ['count' => $tag->contacts()->count()]) }})</span>
                  <ul>
                    @foreach($tag->contacts as $contact)
                    <li class="di mr1"><a href="people/{{ $contact->hashID() }}">{{ $contact->name }}</a></li>
                    @endforeach
                  </ul>
                </div>
                <div class="table-cell actions">
                  <form method="POST" action="{{ route('settings.tags.delete', $tag) }}">
                    @method('DELETE')
                    @csrf
                    <confirm message="{{ trans('settings.tags_list_delete_confirmation') }}">
                      <i class="fa fa-trash-o" aria-hidden="true"></i>
                    </confirm>
                  </form>
                </div>
              </li>
              @endforeach
            </ul>
            @endif
          </div>
        </div>
      </div>
    </div>
  </div>
</div>
@endsection
| {
"pile_set_name": "Github"
} |
<!--- Provide a general summary of your changes in the Title above -->
## Description
<!--- Describe your changes in detail -->
## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
## How Has This Been Tested?
<!--- Please describe in detail how you tested your changes. -->
<!--- Include details of your testing environment, tests ran to see how -->
<!--- your change affects other areas of the code, etc. -->
## Screenshots (if appropriate):
## Types of changes
<!--- What types of changes does your code introduce? Put an `x` in all the boxes that apply: -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
## Checklist:
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [ ] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
| {
"pile_set_name": "Github"
} |
*> \brief \b DLAKF2
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* SUBROUTINE DLAKF2( M, N, A, LDA, B, D, E, Z, LDZ )
*
* .. Scalar Arguments ..
* INTEGER LDA, LDZ, M, N
* ..
* .. Array Arguments ..
* DOUBLE PRECISION A( LDA, * ), B( LDA, * ), D( LDA, * ),
* $ E( LDA, * ), Z( LDZ, * )
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> Form the 2*M*N by 2*M*N matrix
*>
*> Z = [ kron(In, A) -kron(B', Im) ]
*> [ kron(In, D) -kron(E', Im) ],
*>
*> where In is the identity matrix of size n and X' is the transpose
*> of X. kron(X, Y) is the Kronecker product between the matrices X
*> and Y.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] M
*> \verbatim
*> M is INTEGER
*> Size of matrix, must be >= 1.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> Size of matrix, must be >= 1.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*> A is DOUBLE PRECISION, dimension ( LDA, M )
*> The matrix A in the output matrix Z.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*> LDA is INTEGER
*> The leading dimension of A, B, D, and E. ( LDA >= M+N )
*> \endverbatim
*>
*> \param[in] B
*> \verbatim
*> B is DOUBLE PRECISION, dimension ( LDA, N )
*> \endverbatim
*>
*> \param[in] D
*> \verbatim
*> D is DOUBLE PRECISION, dimension ( LDA, M )
*> \endverbatim
*>
*> \param[in] E
*> \verbatim
*> E is DOUBLE PRECISION, dimension ( LDA, N )
*>
*> The matrices used in forming the output matrix Z.
*> \endverbatim
*>
*> \param[out] Z
*> \verbatim
*> Z is DOUBLE PRECISION, dimension ( LDZ, 2*M*N )
*> The resultant Kronecker M*N*2 by M*N*2 matrix (see above.)
*> \endverbatim
*>
*> \param[in] LDZ
*> \verbatim
*> LDZ is INTEGER
*> The leading dimension of Z. ( LDZ >= 2*M*N )
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date December 2016
*
*> \ingroup double_matgen
*
* =====================================================================
      SUBROUTINE DLAKF2( M, N, A, LDA, B, D, E, Z, LDZ )
*
*  -- LAPACK computational routine (version 3.7.0) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
*     December 2016
*
*     .. Scalar Arguments ..
      INTEGER            LDA, LDZ, M, N
*     ..
*     .. Array Arguments ..
      DOUBLE PRECISION   A( LDA, * ), B( LDA, * ), D( LDA, * ),
     $                   E( LDA, * ), Z( LDZ, * )
*     ..
*
*  ====================================================================
*
*     .. Parameters ..
      DOUBLE PRECISION   ZERO
      PARAMETER          ( ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      INTEGER            I, IK, J, JK, L, MN, MN2
*     ..
*     .. External Subroutines ..
      EXTERNAL           DLASET
*     ..
*     .. Executable Statements ..
*
*     Initialize Z
*
*     Z is zeroed up front so that only the nonzero Kronecker blocks need
*     to be filled in below.
*
      MN = M*N
      MN2 = 2*MN
      CALL DLASET( 'Full', MN2, MN2, ZERO, ZERO, Z, LDZ )
*
*     Left half of Z: IK tracks the top-left corner of the current
*     M-by-M diagonal block.
*
      IK = 1
      DO 50 L = 1, N
*
*        form kron(In, A)
*
         DO 20 I = 1, M
            DO 10 J = 1, M
               Z( IK+I-1, IK+J-1 ) = A( I, J )
   10       CONTINUE
   20    CONTINUE
*
*        form kron(In, D)
*
         DO 40 I = 1, M
            DO 30 J = 1, M
               Z( IK+MN+I-1, IK+J-1 ) = D( I, J )
   30       CONTINUE
   40    CONTINUE
*
         IK = IK + M
   50 CONTINUE
*
*     Right half of Z: JK walks the column blocks; only the diagonal of
*     each M-by-M block is written (scaled identity blocks).
*
      IK = 1
      DO 90 L = 1, N
         JK = MN + 1
*
         DO 80 J = 1, N
*
*           form -kron(B', Im)
*
            DO 60 I = 1, M
               Z( IK+I-1, JK+I-1 ) = -B( J, L )
   60       CONTINUE
*
*           form -kron(E', Im)
*
            DO 70 I = 1, M
               Z( IK+MN+I-1, JK+I-1 ) = -E( J, L )
   70       CONTINUE
*
            JK = JK + M
   80    CONTINUE
*
         IK = IK + M
   90 CONTINUE
*
      RETURN
*
*     End of DLAKF2
*
      END
| {
"pile_set_name": "Github"
} |
// Badge colors come from the active material theme: accent by default,
// primary when the .md-primary modifier class is also present.
.md-badge {
  @include md-theme-component() {
    @include md-theme-property(color, text-primary, accent);
    @include md-theme-property(background-color, accent);

    &.md-primary {
      @include md-theme-property(color, text-primary, primary);
      @include md-theme-property(background-color, primary);
    }
  }
}
| {
"pile_set_name": "Github"
} |
form=未知
tags=
功业飘零五丈原,
如今局促傍谁辕? 俯眉北去明妃泪,
啼血南飞望帝魂。
骨肉凋残唯我在,
形容变尽只声存。
江流千古英雄恨,
兰作行舟柳作樊。
| {
"pile_set_name": "Github"
} |
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <unistd.h>
#include "fdleak.h"
/*
 * Conformance test for getrlimit/setrlimit with RLIMIT_NOFILE:
 *   1. soft limit above the hard limit must fail with EINVAL;
 *   2. raising the hard limit (unprivileged) must fail with EPERM;
 *   3. halving the soft limit must round-trip through getrlimit;
 *   4. the lowered limit must actually be enforced by open(2) (EMFILE);
 *   5. a NULL rlimit pointer must fail with EFAULT.
 * Exits non-zero on the first hard failure; some mismatches are only
 * reported on stderr.
 */
int main(int argc, char **argv)
{
    struct rlimit oldrlim;
    struct rlimit newrlim;
    int fd;

    CLOSE_INHERITED_FDS;

    if (getrlimit(RLIMIT_NOFILE, &oldrlim) < 0)
    {
        perror("getrlimit");
        exit(1);
    }

    /* 1: soft limit above hard limit -> -1 / EINVAL. */
    newrlim.rlim_cur = oldrlim.rlim_max+1;
    newrlim.rlim_max = oldrlim.rlim_max;
    if (setrlimit(RLIMIT_NOFILE, &newrlim) == -1)
    {
        if (errno != EINVAL) {
            fprintf(stderr, "setrlimit exceeding hardlimit must set errno=EINVAL\n");
            exit(1);
        }
    }
    else
    {
        fprintf(stderr, "setrlimit exceeding hardlimit must return -1\n");
        exit(1);
    }

    /* 2: raising the hard limit without privilege -> -1 / EPERM. */
    newrlim.rlim_cur = oldrlim.rlim_max;
    newrlim.rlim_max = oldrlim.rlim_max+1;
    if (setrlimit(RLIMIT_NOFILE, &newrlim) == -1)
    {
        if (errno != EPERM) {
            fprintf(stderr, "setrlimit changing hardlimit must set errno=EPERM\n");
            exit(1);
        }
    }
    else
    {
        fprintf(stderr, "setrlimit changing hardlimit must return -1\n");
        exit(1);
    }

    /* 3: halve the soft limit and verify getrlimit reads it back. */
    newrlim.rlim_cur = oldrlim.rlim_cur / 2;
    newrlim.rlim_max = oldrlim.rlim_max;
    if (setrlimit(RLIMIT_NOFILE, &newrlim) < 0)
    {
        perror("setrlimit");
        exit(1);
    }

    if (getrlimit(RLIMIT_NOFILE, &newrlim) < 0)
    {
        perror("getrlimit");
        exit(1);
    }

    if (newrlim.rlim_cur != oldrlim.rlim_cur / 2)
    {
        fprintf(stderr, "rlim_cur is %llu (should be %llu)\n",
            (unsigned long long)newrlim.rlim_cur,
            (unsigned long long)oldrlim.rlim_cur / 2);
    }

    if (newrlim.rlim_max != oldrlim.rlim_max)
    {
        fprintf(stderr, "rlim_max is %llu (should be %llu)\n",
            (unsigned long long)newrlim.rlim_max,
            (unsigned long long)oldrlim.rlim_max);
    }

    /* 4: exhaust the remaining descriptors, then expect EMFILE. */
    newrlim.rlim_cur -= 3; /* allow for stdin, stdout and stderr */
    while (newrlim.rlim_cur-- > 0)
    {
        if (open("/dev/null", O_RDONLY) < 0)
        {
            perror("open");
        }
    }

    if ((fd = open("/dev/null", O_RDONLY)) >= 0)
    {
        fprintf(stderr, "open succeeded with fd %d - it should have failed!\n", fd);
    }
    else if (errno != EMFILE)
    {
        perror("open");
    }

    /* 5: NULL rlimit pointer -> -1 / EFAULT.
     * NOTE(review): relies on the kernel returning EFAULT rather than the
     * libc wrapper faulting on the NULL dereference -- confirm on each
     * target platform. */
    if (setrlimit(RLIMIT_NOFILE, NULL) != -1 || errno != EFAULT)
    {
        fprintf(stderr, "setrlimit non addressable arg2 must set errno=EFAULT\n");
        exit(1);
    }

    exit(0);
}
| {
"pile_set_name": "Github"
} |
# flyd-forwardto
Forward values from one stream into another existing stream.
Create a new stream that passes all values through a function and forwards them
to a target stream.
__Graph__
```
a: {1---2---3---}
forwardTo(a, parseInt): {--2---3---2-}
flyd.map(square, a): {1-4-4-9-9-4-}
```
__Signature__
`Stream b -> (a -> b) -> Stream a`
__Example__
```javascript
const forwardTo = require('flyd/module/forwardto')
const R = require('ramda')
// A stream of numbers
const numbers = flyd.stream()
// Another stream that squares the numbers
const squaredNumbers = flyd.map(R.square, numbers)
// A stream of numbers as strings
// we want to convert them to ints and forward them into the numbers stream above:
const stringNumbers = forwardTo(numbers, parseInt)
stringNumbers('7')
squaredNumbers() // -> 49
numbers(4)
squaredNumbers() // -> 16
stringNumbers('9')
squaredNumbers() // -> 81
```
| {
"pile_set_name": "Github"
} |
---
layout: api
title: "v2.1.1 JavaScript Library: L.mapbox.geocoder(id|url, options)"
categories: api
version: v2.1.1
permalink: /api/v2.1.1/l-mapbox-geocoder/
---
<h2 id="section-l-mapbox-geocoder">L.mapbox.geocoder(id|url, options)</h2>
<p>A low-level interface to geocoding, useful for more complex uses and reverse-geocoding.</p>
<table>
<thead>
<tr>
<th>Options</th>
<th>Value</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>id <em>or</em> url</td>
<td>string</td>
<td>Value must be <ul><li>A <a href="https://docs.mapbox.com/api/search/#geocoding">geocoder index ID</a>, e.g. <code>mapbox.places-v1</code></li><li>A geocoder API URL, like <code>{{site.tileApi}}/v4/geocode/mapbox.places-v1/{query}.json</code></li></ul></td>
</tr>
<tr>
<td>options</td>
<td>Object</td>
<td>The second argument is optional. If provided, it may include: <ul><li><code>accessToken</code>: Mapbox API access token. Overrides <code><a href="/mapbox.js/api/v2.1.1/l-mapbox-accesstoken">L.mapbox.accessToken</a></code> for this geocoder.</li></ul></td>
</tr>
</tbody>
</table>
<p><em>Returns</em> a <code><a href="/mapbox.js/api/v2.1.1/l-mapbox-geocoder">L.mapbox.geocoder</a></code> object.</p>
<h3 id="section-geocoder-query">geocoder.query(queryString, callback)</h3>
<p>Queries the geocoder with a query string, and returns its result, if any.</p>
<table>
<thead>
<tr>
<th>Options</th>
<th>Value</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>queryString (<em>required</em>)</td>
<td>string</td>
<td>a query, expressed as a string, like 'Arkansas'</td>
</tr>
<tr>
<td>callback (<em>required</em>)</td>
<td>function</td>
<td>a callback</td>
</tr>
</tbody>
</table>
<p>The callback is called with arguments</p>
<ol>
<li>An error, if any</li>
<li><p>The result. This is an object with the following members:</p>
<pre><code> {
results: // raw results
latlng: // a map-friendly latlng array
bounds: // geojson-style bounds of the first result
lbounds: // leaflet-style bounds of the first result
}
</code></pre></li>
</ol>
<p><em>Example</em>: <a href="https://www.mapbox.com/mapbox.js/example/v1.0.0/map-center-geocoding/">Live example of geocoder.query centering a map.</a></p>
<p><em>Returns</em>: the geocoder object. The return value of this function is not useful - you must use a callback to get results.</p>
<h3 id="section-geocoder-reversequery">geocoder.reverseQuery(location, callback)</h3>
<p>Queries the geocoder with a location, and returns its result, if any.</p>
<table>
<thead>
<tr>
<th>Options</th>
<th>Value</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>location (<em>required</em>)</td>
<td>object</td>
<td>A query, expressed as an object:<ul><li><pre>[lon, lat] // an array of lon, lat</pre></li><li><pre>{ lat: 0, lon: 0 } // a lon, lat object</pre></li><li><pre>{ lat: 0, lng: 0 } // a lng, lat object</pre></li></ul> The first argument can also be an array of objects in that form to geocode more than one item.</td>
</tr>
<tr>
<td>callback (<em>required</em>)</td>
<td>function</td>
<td>The callback is called with arguments <ul><li>An error, if any</li><li>The result. This is an object of the raw result from Mapbox.</li></ul></td>
</tr>
</tbody>
</table>
<p><em>Returns</em>: the geocoder object. The return value of this function is not useful - you must use a callback to get results.</p>
| {
"pile_set_name": "Github"
} |
#
# This file is part of LiteX.
#
# Copyright (c) 2020 bunnie <bunnie@kosagi.com>
# SPDX-License-Identifier: BSD-2-Clause
from migen.genlib.cdc import MultiReg
from litex.soc.interconnect import wishbone
from litex.soc.interconnect.csr_eventmanager import *
from litex.soc.integration.doc import AutoDoc, ModuleDoc
class S7SPIOPI(Module, AutoCSR, AutoDoc):
def __init__(self, pads,
dq_delay_taps = 31,
sclk_name = "SCLK_ODDR",
iddr_name = "SPI_IDDR",
cipo_name = "CIPO_FDRE",
sim = False,
spiread = False,
prefetch_lines = 1):
self.intro = ModuleDoc("""Intro
SpiOpi implements a dual-mode SPI or OPI interface. OPI is an octal (8-bit) wide variant of
SPI, which is unique to Macronix parts. It is concurrently interoperable with SPI. The chip
supports "DTR mode" (double transfer rate, e.g. DDR) where data is transferred on each edge
of the clock, and there is a source-synchronous DQS associated with the input data.
The chip by default boots into SPI-only mode (unless NV bits are burned otherwise) so to
enable OPI, a config register needs to be written with SPI mode. Note that once the config
register is written, the only way to return to SPI mode is to change it with OPI writes, or
to issue a hardware reset. This has major implications for reconfiguring the FPGA: a simple
JTAG command to reload from SPI will not yank PROG_B low, and so the SPI ROM will be in DOPI,
and SPI loading will fail. Thus, system architects must take into consideration a hard reset
for the ROM whenever a bitstream reload is demanded of the FPGA.
The SpiOpi architecture is split into two levels: a command manager, and a cycle manager. The
command manager is responsible for taking the current wishbone request and CSR state and
unpacking these into cycle-by-cycle requests. The cycle manager is responsible for coordinating
the cycle-by-cycle requests.
In SPI mode, this means marshalling byte-wide requests into a series of 8 serial cyles.
In OPI [DOPI] mode, this means marshalling 16-bit wide requests into a pair of back-to-back
DDR cycles. Note that because the cycles are DDR, this means one 16-bit wide request must be
issued every cycle to keep up with the interface.
For the output of data to ROM, expects a clock called "spinor_delayed" which is a delayed
version of "sys". The delay is necessary to get the correct phase relationship between the
SIO and SCLK in DTR/DDR mode, and it also has to compensate for the special-case difference
in the CCLK pad vs other I/O.
For the input, DQS signal is independently delayed relative to the DQ signals using an IDELAYE2
block. At a REFCLK frequency of 200 MHz, each delay tap adds 78ps, so up to a 2.418ns delay is
possible between DQS and DQ. The goal is to delay DQS relative to DQ, because the SPI chip
launches both with concurrent rising edges (to within 0.6ns), but the IDDR register needs the
rising edge of DQS to be centered inside the DQ eye.
In DOPI mode, there is a prefetch buffer. It will read `prefetch_lines` cache lines of data
into the prefetch buffer. A cache line is 256 bits (or 8x32-bit words). The maximum value is
63 lines (one line is necessary for synchronization margin). The downside of setting
prefetch_lines high is that the prefetcher is running constantly and burning power, while
throwing away most data. In practice, the CPU will typically consume data at only slightly
faster than the rate of read-out from DOPI-mode ROM, and once data is consumed the prefetch
resumes. Thus, prefetch_lines is probably optimally around 1-3 lines read-ahead of the CPU.
Any higher than 3 lines probably just wastes power. In short simulations, 1 line of prefetch
seems to be enough to keep the prefetcher ahead of the CPU even when it's simply running
straight-line code.
Note the "sim" parameter exists because there seems to be a bug in xvlog that doesn't
correctly simulate the IDELAY machines. Setting "sim" to True removes the IDELAY machines
and passes the data through directly, but in real hardware the IDELAY machines are necessary
to meet timing between DQS and DQ.
dq_delay_taps probably doesn't need to be adjusted; it can be tweaked for timing closure. The
delays can also be adjusted at runtime.
""")
if prefetch_lines > 63:
prefetch_lines = 63
self.spi_mode = spi_mode = Signal(reset=1) # When reset is asserted, force into spi mode
cs_n = Signal(reset=1) # Make sure CS is sane on reset, too
self.config = CSRStorage(fields=[
CSRField("dummy", size=5, description="Number of dummy cycles", reset=10),
])
delay_type="VAR_LOAD"
# DQS input conditioning -----------------------------------------------------------------
dqs_iobuf = Signal()
self.clock_domains.cd_dqs = ClockDomain(reset_less=True)
self.comb += self.cd_dqs.clk.eq(dqs_iobuf)
self.specials += [
Instance("BUFR", i_I=pads.dqs, o_O=dqs_iobuf),
]
# DQ connections -------------------------------------------------------------------------
# PHY API
self.do = Signal(16) # OPI data to SPI
self.di = Signal(16) # OPI data from SPI
self.tx = Signal() # When asserted OPI is transmitting data to SPI, otherwise, receiving
self.copi = Signal() # SPI data to SPI
self.cipo = Signal() # SPI data from SPI
# Delay programming API
self.delay_config = CSRStorage(fields=[
CSRField("d", size=5, description="Delay amount; each increment is 78ps", reset=31),
CSRField("load", size=1, description="Force delay taps to delay_d"),
])
self.delay_status = CSRStatus(fields=[
CSRField("q", size=5, description="Readback of current delay amount, useful if inc/ce is used to set"),
])
self.delay_update = Signal()
self.hw_delay_load = Signal()
self.sync += self.delay_update.eq(self.hw_delay_load | self.delay_config.fields.load)
# Break system API into rising/falling edge samples
do_rise = Signal(8) # data output presented on the rising edge
do_fall = Signal(8) # data output presented on the falling edge
self.comb += [do_rise.eq(self.do[8:]), do_fall.eq(self.do[:8])]
di_rise = Signal(8)
di_fall = Signal(8)
self.comb += self.di.eq(Cat(di_fall, di_rise))
# OPI DDR registers
dq = TSTriple(7) # dq[0] is special because it is also copi
dq_delayed = Signal(8)
self.specials += dq.get_tristate(pads.dq[1:])
for i in range(1, 8):
self.specials += Instance("ODDR",
p_DDR_CLK_EDGE = "SAME_EDGE",
i_C = ClockSignal(),
i_R = ResetSignal(),
i_S = 0,
i_CE = 1,
i_D1 = do_rise[i],
i_D2 = do_fall[i],
o_Q = dq.o[i-1],
)
if sim == False:
if i == 1: # Only wire up o_CNTVALUEOUT for one instance
self.specials += Instance("IDELAYE2",
p_DELAY_SRC = "IDATAIN",
p_SIGNAL_PATTERN = "DATA",
p_CINVCTRL_SEL = "FALSE",
p_HIGH_PERFORMANCE_MODE = "FALSE",
p_REFCLK_FREQUENCY = 200.0,
p_PIPE_SEL = "FALSE",
p_IDELAY_VALUE = dq_delay_taps,
p_IDELAY_TYPE = delay_type,
i_C = ClockSignal(),
i_CINVCTRL = 0,
i_REGRST = 0,
i_LDPIPEEN = 0,
i_INC = 0,
i_CE = 0,
i_LD = self.delay_update,
i_CNTVALUEIN = self.delay_config.fields.d,
o_CNTVALUEOUT = self.delay_status.fields.q,
i_IDATAIN = dq.i[i-1],
o_DATAOUT = dq_delayed[i],
),
else: # Don't wire up o_CNTVALUEOUT for others
self.specials += Instance("IDELAYE2",
p_DELAY_SRC = "IDATAIN",
p_SIGNAL_PATTERN = "DATA",
p_CINVCTRL_SEL = "FALSE",
p_HIGH_PERFORMANCE_MODE = "FALSE",
p_REFCLK_FREQUENCY = 200.0,
p_PIPE_SEL = "FALSE",
p_IDELAY_VALUE = dq_delay_taps,
p_IDELAY_TYPE = delay_type,
i_C = ClockSignal(),
i_CINVCTRL = 0,
i_REGRST = 0,
i_LDPIPEEN = 0 ,
i_INC = 0,
i_CE = 0,
i_LD = self.delay_update,
i_CNTVALUEIN = self.delay_config.fields.d,
i_IDATAIN = dq.i[i-1],
o_DATAOUT = dq_delayed[i],
),
else:
self.comb += dq_delayed[i].eq(dq.i[i-1])
self.specials += Instance("IDDR", name="{}{}".format(iddr_name, str(i)),
p_DDR_CLK_EDGE = "SAME_EDGE_PIPELINED",
i_C = dqs_iobuf,
i_R = ResetSignal(),
i_S = 0,
i_CE = 1,
i_D = dq_delayed[i],
o_Q1 = di_rise[i],
o_Q2 = di_fall[i],
)
# SPI SDR register
self.specials += [
Instance("FDRE", name="{}".format(cipo_name),
i_C = ~ClockSignal("spinor"),
i_CE = 1,
i_R = 0,
o_Q = self.cipo,
i_D = dq_delayed[1],
)
]
# bit 0 (copi) is special-cased to handle SPI mode
dq_copi = TSTriple(1) # this has similar structure but an independent "oe" signal
self.specials += dq_copi.get_tristate(pads.dq[0])
do_mux_rise = Signal() # mux signal for copi/dq select of bit 0
do_mux_fall = Signal()
self.specials += [
Instance("ODDR",
p_DDR_CLK_EDGE = "SAME_EDGE",
i_C = ClockSignal(),
i_R = ResetSignal(),
i_S = 0,
i_CE = 1,
i_D1 = do_mux_rise,
i_D2 = do_mux_fall,
o_Q = dq_copi.o,
),
Instance("IDDR",
p_DDR_CLK_EDGE="SAME_EDGE_PIPELINED",
i_C = dqs_iobuf,
i_R = ResetSignal(),
i_S = 0,
i_CE = 1,
o_Q1 = di_rise[0],
o_Q2 = di_fall[0],
i_D = dq_delayed[0],
),
]
if sim == False:
self.specials += Instance("IDELAYE2",
p_DELAY_SRC = "IDATAIN",
p_SIGNAL_PATTERN = "DATA",
p_CINVCTRL_SEL = "FALSE",
p_HIGH_PERFORMANCE_MODE = "FALSE",
p_REFCLK_FREQUENCY = 200.0,
p_PIPE_SEL = "FALSE",
p_IDELAY_VALUE = dq_delay_taps,
p_IDELAY_TYPE = delay_type,
i_C = ClockSignal(),
i_CINVCTRL = 0,
i_REGRST = 0,
i_LDPIPEEN = 0,
i_INC = 0,
i_CE = 0,
i_LD = self.delay_update,
i_CNTVALUEIN = self.delay_config.fields.d,
i_IDATAIN = dq_copi.i,
o_DATAOUT = dq_delayed[0],
),
else:
self.comb += dq_delayed[0].eq(dq_copi.i)
# Wire up SCLK interface
clk_en = Signal()
self.specials += [
# De-activate the CCLK interface, parallel it with a GPIO
Instance("STARTUPE2",
i_CLK = 0,
i_GSR = 0,
i_GTS = 0,
i_KEYCLEARB = 0,
i_PACK = 0,
i_USRDONEO = 1,
i_USRDONETS = 1,
i_USRCCLKO = 0,
i_USRCCLKTS = 1, # Force to tristate
),
Instance("ODDR", name=sclk_name, # Need to name this so we can constrain it properly
p_DDR_CLK_EDGE = "SAME_EDGE",
i_C = ClockSignal("spinor"),
i_R = ResetSignal("spinor"),
i_S = 0,
i_CE = 1,
i_D1 = clk_en,
i_D2 = 0,
o_Q = pads.sclk,
)
]
# wire up CS_N
spi_cs_n = Signal()
opi_cs_n = Signal()
self.comb += cs_n.eq( (spi_mode & spi_cs_n) | (~spi_mode & opi_cs_n) )
self.specials += [
Instance("ODDR",
p_DDR_CLK_EDGE="SAME_EDGE",
i_C=ClockSignal(), i_R=0, i_S=ResetSignal(), i_CE=1,
i_D1=cs_n, i_D2=cs_n, o_Q=pads.cs_n,
),
]
self.architecture = ModuleDoc("""Architecture
The machine is split into two separate pieces, one to handle SPI, and one to handle OPI.
SPI
-----
The SPI machine architecture is split into two levels: MAC and PHY.
The MAC layer is responsible for:
- receiving requests via CSR register to perform config/status/special command sequences,
and dispatching these to the SPI PHY
- translating wishbone bus requests into command sequences, and routing them to either OPI
or SPI PHY.
- managing the chip select to the chip, and ensuring that one dummy cycle is inserted after
chip select is asserted, or before it is de-asserted; and that the chip select "high" times
are adequate (1 cycle between reads, 4 cycles for all other operations)
On boot, the interface runs in SPI; once the wakeup sequence is executed, the chip permanently
switches to OPI mode unless the CR2 registers are written to fall back, or the
reset to the chip is asserted.
The PHY layers are responsible for the following tasks:
- Serializing and deserializing data, standardized on 8 bits for SPI and 16 bits for OPI
- counting dummy cycles
- managing the clock enable
PHY cycles are initiated with a "req" signal, which is only sampled for
one cycle and then ignored until the PHY issues an "ack" that the current cycle is complete.
Thus holding "req" high can allow the PHY to back-to-back issue cycles without pause.
OPI
-----
The OPI machine is split into three parts: a command controller, a Tx PHY, and an Rx PHY.
The Tx PHY is configured with a "dummy cycle" count register, as there is a variable length
delay for dummy cycles in OPI.
In OPI mode, read data is `mesochronous`, that is, they return at precisely the same frequency
as SCLK, but with an unknown phase relationship. The DQS strobe is provided as a "hint" to
the receiving side to help retime the data. The mesochronous nature of the read data is why
the Tx and Rx PHY must be split into two separate machines, as they are operating in
different clock domains.
DQS is implemented on the ROM as an extra data output that is guaranteed to change polarity
with each data byte; the skew mismatch of DQS to data is within +/-0.6ns or so. It turns out
the mere act of routing the DQS into a BUFR buffer before clocking the data into an IDDR
primitive is sufficient to delay the DQS signal and meet setup and hold time on the IDDR.
Once captured by the IDDR, the data is fed into a dual-clock FIFO to make the transition
from the DQS to sysclk domains cleanly.
Because of the latency involved in going from pin->IDDR->FIFO, excess read cycles are
required beyond the end of the requested cache line. However, there is virtually no penalty
in pre-filling the FIFO with data; if a new cache line has to be fetched, the FIFO can simply
be reset and all pointers zeroed. In fact, pre-filling the FIFO can lead to great performance
benefits if sequential cache lines are requested. In simulation, a cache line can be filled
in 10 bus cycles if it happens to be prefetched (as opposed to 49 bus cycles for random reads).
Either way, this compares favorably to 288 cycles for random reads in 100MHz SPI mode (or 576
for the spimemio.v, which runs at 50MHz).
The command controller is repsonsible for sequencing all commands other than fast reads. Most
commands have some special-case structure to them, and as more commands are implemented, the
state machine is expected to grow fairly large. Fast reads are directly handled in "tx_run"
mode, where the TxPhy and RxPhy run a tight loop to watch incoming read bus cycles, check
the current address, fill the prefetch fifo, and respond to bus cycles.
Writes to ROM might lock up the machine; a TODO is to test this and do something more sane,
like ignore writes by sending an ACK immediately while discarding the data.
Thus, an OPI read proceeds as follows:
- When BUS/STB are asserted:
TxPhy:
- capture bus_adr, and compare against the *next read* address pointer
- if they match, allow the PHYs to do the work
- if bus_adr and next read address don't match, save to next read address pointer, and
cycle wr/rd clk for 5 cycle while asserting reset to reset the FIFO
- initiate an 8DTRD with the read address pointer
- wait the specified dummy cycles
- greedily pre-fill the FIFO by continuing to clock DQS until either:
- the FIFO is full
- pre-fetch is aborted because bus_adr and next read address don't match and FIFO is reset
RxPHY:
- while CTI==2, assemble data into 32-bit words as soon as EMPTY is deasserted,
present a bus_ack, and increment the next read address pointer
- when CTI==7, ack the data, and wait until the next bus cycle with CTI==2 to resume
reading
- A FIFO_SYNC_MACRO is used to instantiate the FIFO. This is chosen because:
- we can specify RAMB18's, which seem to be under-utilized by the auto-inferred memories by migen
- the XPM_FIFO_ASYNC macro claims no instantiation support, and also looks like it has weird
requirements for resetting the pointers: you must check the reset outputs, and the time to
reset is reported to be as high as around 200ns (anecdotally -- could be just that the sim I
read on the web is using a really slow clock, but I'm guessing it's around 10 cycles).
- the FIFO_SYNC_MACRO has a well-specified fixed reset latency of 5 cycles.
- The main downside of FIFO_SYNC_MACRO over XPM_FIFO_ASYNC is that XPM_FIFO_ASYNC can automatically
allow for output data to be read at 32-bit widths, with writes at 16-bit widths. However, with a
bit of additional logic and pipelining, we can aggregate data into 32-bit words going into a
32-bit FIFO_SYNC_MACRO, which is what we do in this implementation.
""")
self.bus = bus = wishbone.Interface()
self.command = CSRStorage(description="Write individual bits to issue special commands to SPI; setting multiple bits at once leads to undefined behavior.",
fields=[
CSRField("wakeup", size=1, description="Sequence through init & wakeup routine"),
CSRField("sector_erase", size=1, description="Erase a sector"),
])
self.sector = CSRStorage(description="Sector to erase",
fields=[
CSRField("sector", size=32, description="Sector to erase")
])
self.status = CSRStatus(description="Interface status",
fields=[
CSRField("wip", size=1, description="Operation in progress (write or erease)")
])
# TODO: implement ECC detailed register readback, CRC checking
# PHY machine mux --------------------------------------------------------------------------
# clk_en mux
spi_clk_en = Signal()
opi_clk_en = Signal()
self.sync += clk_en.eq(~spi_mode & opi_clk_en | spi_mode & spi_clk_en)
# Tristate mux
self.sync += [
dq.oe.eq(~spi_mode & self.tx),
dq_copi.oe.eq(spi_mode | self.tx),
]
# Data out mux (no data in mux, as we can just sample data in all the time without harm)
self.comb += do_mux_rise.eq(~spi_mode & do_rise[0] | spi_mode & self.copi)
self.comb += do_mux_fall.eq(~spi_mode & do_fall[0] | spi_mode & self.copi)
# Indicates if the current "req" requires dummy cycles to be appended (used for both OPI/SPI)
has_dummy = Signal()
# Location of the internal ROM address pointer; reset to invalid address to force an address
# request on first read
rom_addr = Signal(32, reset=0xFFFFFFFC)
# MAC/PHY abstraction for OPI
txphy_do = Signal(16) # Two sources of data out for OPI, one from the PHY, one from MAC
txcmd_do = Signal(16)
opi_di = Signal(16)
# Internal machines
opi_addr = Signal(32)
opi_fifo_rd = Signal(32)
opi_fifo_wd = Signal(32)
opi_reset_rx_req = Signal()
opi_reset_rx_ack = Signal()
opi_rx_run = Signal()
rx_almostempty = Signal()
rx_almostfull = Signal()
rx_empty = Signal()
rx_full = Signal()
rx_rdcount = Signal(9)
rx_rderr = Signal()
rx_wrcount = Signal(9)
rx_wrerr = Signal()
rx_rden = Signal()
rx_wren = Signal(reset=1)
rx_fifo_rst = Signal()
wrendiv = Signal()
wrendiv2 = Signal()
rx_fifo_rst_pipe = Signal()
self.specials += [
# This next pair of async-clear flip flops creates a write-enable gate that (a) ignores
# the first two DQS strobes (as they are pipe-filling) and (b) alternates with the correct
# phase so we are sampling 32-bit data into the FIFO.
Instance("FDCE", name="FDCE_WREN",
i_C = dqs_iobuf,
i_D = ~wrendiv,
o_Q = wrendiv,
i_CE = 1,
i_CLR = ~rx_wren,
),
Instance("FDCE", name="FDCE_WREN",
i_C = dqs_iobuf,
i_D = ~wrendiv2,
o_Q = wrendiv2,
i_CE = wrendiv & ~wrendiv2,
i_CLR = ~rx_wren,
),
# Direct FIFO primitive is more resource-efficient and faster than migen primitive.
Instance("FIFO_DUALCLOCK_MACRO",
p_DEVICE = "7SERIES",
p_FIFO_SIZE = "18Kb",
p_DATA_WIDTH = 32,
p_FIRST_WORD_FALL_THROUGH = "TRUE",
p_ALMOST_EMPTY_OFFSET = 6,
p_ALMOST_FULL_OFFSET = (511 - (8*prefetch_lines)),
o_ALMOSTEMPTY = rx_almostempty,
o_ALMOSTFULL = rx_almostfull,
o_DO = opi_fifo_rd,
o_EMPTY = rx_empty,
o_FULL = rx_full,
o_RDCOUNT = rx_rdcount,
o_RDERR = rx_rderr,
o_WRCOUNT = rx_wrcount,
o_WRERR = rx_wrerr,
i_DI = opi_fifo_wd,
i_RDCLK = ClockSignal(),
i_RDEN = rx_rden,
i_WRCLK = dqs_iobuf,
i_WREN = wrendiv & wrendiv2,
i_RST = rx_fifo_rst_pipe, #rx_fifo_rst,
)
]
self.sync.dqs += opi_di.eq(self.di)
self.comb += opi_fifo_wd.eq(Cat(opi_di, self.di))
self.sync += rx_fifo_rst_pipe.eq(rx_fifo_rst) # add one pipe register to help relax this timing path. It is critical so it must be timed, but one extra cycle is OK.
#--------- OPI Rx Phy machine ------------------------------
self.submodules.rxphy = rxphy = FSM(reset_state="IDLE")
cti_pipe = Signal(3)
rxphy_cnt = Signal(3)
rxphy.act("IDLE",
If(spi_mode,
NextState("IDLE"),
).Else(
NextValue(bus.ack, 0),
If(opi_reset_rx_req,
NextState("WAIT_RESET"),
NextValue(rxphy_cnt, 6),
NextValue(rx_wren, 0),
NextValue(rx_fifo_rst, 1)
).Elif(opi_rx_run,
NextValue(rx_wren, 1),
If((bus.cyc & bus.stb & ~bus.we) & ((bus.cti == 2) |
((bus.cti == 7) & ~bus.ack) ), # handle case of non-pipelined read, ack is late
If(~rx_empty,
NextValue(bus.dat_r, opi_fifo_rd),
rx_rden.eq(1),
NextValue(opi_addr, opi_addr + 4),
NextValue(bus.ack, 1)
)
)
)
)
)
rxphy.act("WAIT_RESET",
NextValue(opi_addr, Cat(Signal(2), bus.adr)),
NextValue(rxphy_cnt, rxphy_cnt - 1),
If(rxphy_cnt == 0,
NextValue(rx_fifo_rst, 0),
opi_reset_rx_ack.eq(1),
NextState("IDLE")
)
)
# TxPHY machine: OPI -------------------------------------------------------------------------
txphy_cnt = Signal(4)
tx_run = Signal()
txphy_cs_n = Signal(reset=1)
txcmd_cs_n = Signal(reset=1)
txphy_clken = Signal()
txcmd_clken = Signal()
txphy_oe = Signal()
txcmd_oe = Signal()
self.sync += opi_cs_n.eq( (tx_run & txphy_cs_n) | (~tx_run & txcmd_cs_n) )
self.comb += If( tx_run, self.do.eq(txphy_do) ).Else( self.do.eq(txcmd_do) )
self.comb += opi_clk_en.eq( (tx_run & txphy_clken) | (~tx_run & txcmd_clken) )
self.comb += self.tx.eq( (tx_run & txphy_oe) | (~tx_run & txcmd_oe) )
tx_almostfull = Signal()
self.sync += tx_almostfull.eq(rx_almostfull) # sync the rx_almostfull signal into the local clock domain
txphy_bus = Signal()
self.sync += txphy_bus.eq(bus.cyc & bus.stb & ~bus.we & (bus.cti == 2))
tx_resetcycle = Signal()
self.submodules.txphy = txphy = FSM(reset_state="RESET")
txphy.act("RESET",
NextValue(opi_rx_run, 0),
NextValue(txphy_oe, 0),
NextValue(txphy_cs_n, 1),
NextValue(txphy_clken, 0),
# guarantee that the first state we go to out of reset is a four-cycle burst
NextValue(txphy_cnt, 4),
If(tx_run & ~spi_mode,
NextState("TX_SETUP")
)
)
txphy.act("TX_SETUP",
NextValue(opi_rx_run, 0),
NextValue(txphy_cnt, txphy_cnt - 1),
If( txphy_cnt > 0,
NextValue(txphy_cs_n, 1)
).Else(
NextValue(txphy_cs_n, 0),
NextValue(txphy_oe, 1),
NextState("TX_CMD_CS_DELAY")
)
)
txphy.act("TX_CMD_CS_DELAY", # meet setup timing for CS-to-clock
NextState("TX_CMD")
)
txphy.act("TX_CMD",
NextValue(txphy_do, 0xEE11),
NextValue(txphy_clken, 1),
NextState("TX_ADRHI")
)
txphy.act("TX_ADRHI",
NextValue(txphy_do, opi_addr[16:] & 0x07FF), # mask off unused bits
NextState("TX_ADRLO")
)
txphy.act("TX_ADRLO",
NextValue(txphy_do, opi_addr[:16]),
NextValue(txphy_cnt, self.config.fields.dummy - 1),
NextState("TX_DUMMY")
)
txphy.act("TX_DUMMY",
NextValue(txphy_oe, 0),
NextValue(txphy_do, 0),
NextValue(txphy_cnt, txphy_cnt - 1),
If(txphy_cnt == 0,
NextValue(opi_rx_run, 1),
If(tx_resetcycle,
NextValue(txphy_clken, 1),
NextValue(opi_reset_rx_req, 1),
NextState("TX_RESET_RX"),
).Else(
NextState("TX_FILL"),
)
)
)
txphy.act("TX_FILL",
If(tx_run,
If(((~txphy_bus & (bus.cyc & bus.stb & ~bus.we & (bus.cti == 2))) &
(opi_addr[2:] != bus.adr)) | tx_resetcycle,
# Tt's a new bus cycle, and the requested address is not equal to the current
# read buffer address
NextValue(txphy_clken, 1),
NextValue(opi_reset_rx_req, 1),
NextState("TX_RESET_RX"),
).Else(
If(tx_almostfull & ~bus.ack,
NextValue(txphy_clken, 0)
).Else(
NextValue(txphy_clken, 1)
)
),
If(~(bus.cyc & bus.stb),
NextValue(opi_rx_run, 0),
).Else(
NextValue(opi_rx_run, 1),
)
).Else(
NextValue(txphy_clken, 0),
NextState("RESET")
)
)
txphy.act("TX_RESET_RX", # Keep clocking the RX until it acknowledges a reset
NextValue(opi_rx_run, 0),
NextValue(opi_reset_rx_req, 0),
If(opi_reset_rx_ack,
NextValue(txphy_clken, 0),
NextValue(txphy_cnt, 0), # 1 cycle CS on back-to-back reads
NextValue(txphy_cs_n, 1),
NextState("TX_SETUP"),
).Else(
NextValue(txphy_clken, 1),
)
)
#--------- OPI CMD machine ------------------------------
self.submodules.opicmd = opicmd = FSM(reset_state="RESET")
opicmd.act("RESET",
NextValue(txcmd_do, 0),
NextValue(txcmd_oe, 0),
NextValue(tx_run, 0),
NextValue(txcmd_cs_n, 1),
If(~spi_mode,
NextState("IDLE")
).Else(
NextState("RESET_CYCLE")
),
)
opicmd.act("RESET_CYCLE",
NextValue(txcmd_cs_n, 0),
If(opi_reset_rx_ack,
NextValue(tx_run, 1),
NextState("IDLE"),
).Else(
NextValue(tx_run, 1),
tx_resetcycle.eq(1)
)
)
opicmd.act("IDLE",
NextValue(txcmd_cs_n, 1),
If(~spi_mode, # This machine stays in idle once spi_mode is dropped
## The full form of this machine is as follows:
# - First check if there is a CSR special command pending
# - if so, wait until the current bus cycle is done, then de-assert tx_run
# - then run the command
# - Else wait until a bus cycle, and once it happens, put the system into run mode
If(bus.cyc & bus.stb,
If(~bus.we & (bus.cti ==2),
NextState("TX_RUN")
).Else(
# Handle other cases here, e.g. what do we do if we get a write? probably
# should just ACK it without doing anything so the CPU doesn't freeze...
)
).Elif(self.command.re,
NextState("DISPATCH_CMD"),
)
)
)
opicmd.act("TX_RUN",
NextValue(tx_run, 1),
If(self.command.re, # Respond to commands
NextState("WAIT_DISPATCH")
)
)
# Wait until the current cycle is done, then stop TX and dispatch command
opicmd.act("WAIT_DISPATCH",
If( ~(bus.cyc & bus.stb),
NextValue(tx_run, 0),
NextState("DISPATCH_CMD")
)
)
opicmd.act("DISPATCH_CMD",
If(self.command.fields.sector_erase,
NextState("DO_SECTOR_ERASE")
).Else(
NextState("IDLE")
)
)
opicmd.act("DO_SECTOR_ERASE",
# Placeholder
)
# MAC/PHY abstraction for the SPI machine
spi_req = Signal()
spi_ack = Signal()
spi_do = Signal(8) # this is the API to the machine
spi_di = Signal(8)
# PHY machine: SPI -------------------------------------------------------------------------
# internal signals are:
# selection - spi_mode
# OPI - self.do(16), self.di(16), self.tx
# SPI - self.copi, self.cipo
# cs_n - both
# ecs_n - OPI
# clk_en - both
spicount = Signal(5)
spi_so = Signal(8) # this internal to the machine
spi_si = Signal(8)
spi_dummy = Signal()
spi_di_load = Signal() # spi_do load is pipelined back one cycle using this mechanism
spi_di_load2 = Signal()
spi_ack_pipe = Signal()
# Pipelining is required the cipo path is very slow (IOB->fabric FD), and a falling-edge
# retiming reg is used to meet timing
self.sync += [
spi_di_load2.eq(spi_di_load),
If(spi_di_load2, spi_di.eq(Cat(self.cipo, spi_si[:-1]))).Else(spi_di.eq(spi_di)),
spi_ack.eq(spi_ack_pipe),
]
self.comb += self.copi.eq(spi_so[7])
self.sync += spi_si.eq(Cat(self.cipo, spi_si[:-1]))
self.submodules.spiphy = spiphy = FSM(reset_state="RESET")
spiphy.act("RESET",
If(spi_req,
NextState("REQ"),
NextValue(spicount, 7),
NextValue(spi_clk_en, 1),
NextValue(spi_so, spi_do),
NextValue(spi_dummy, has_dummy),
).Else(
NextValue(spi_clk_en, 0),
NextValue(spi_ack_pipe, 0),
NextValue(spicount, 0),
NextValue(spi_dummy, 0),
)
)
spiphy.act("REQ",
If(spicount > 0,
NextValue(spicount, spicount-1),
NextValue(spi_clk_en, 1),
NextValue(spi_so, Cat(0, spi_so[:-1])),
NextValue(spi_ack_pipe, 0)
).Elif( (spicount == 0) & spi_req & ~spi_dummy, # Back-to-back transaction
NextValue(spi_clk_en, 1),
NextValue(spicount, 7),
NextValue(spi_clk_en, 1),
NextValue(spi_so, spi_do), # Reload the so register
spi_di_load.eq(1), # "naked" .eq() create single-cycle pulses that default back to 0
NextValue(spi_ack_pipe, 1),
NextValue(spi_dummy, has_dummy)
).Elif( (spicount == 0) & ~spi_req & ~spi_dummy, # Go back to idle
spi_di_load.eq(1),
NextValue(spi_ack_pipe, 1),
NextValue(spi_clk_en, 0),
NextState("RESET")
).Elif( (spicount == 0) & spi_dummy,
spi_di_load.eq(1),
NextValue(spicount, self.config.fields.dummy),
NextValue(spi_clk_en, 1),
NextValue(spi_ack_pipe, 0),
NextValue(spi_so, 0), # Do a dummy with '0' as the output
NextState("DUMMY")
) # This actually should be a fully defined situation, no "Else" applicable
)
spiphy.act("DUMMY",
If(spicount > 1, # Instead of doing dummy-1, we stop at count == 1
NextValue(spicount, spicount - 1),
NextValue(spi_clk_en, 1)
).Elif(spicount <= 1 & spi_req,
NextValue(spi_clk_en, 1),
NextValue(spicount, 7),
NextValue(spi_so, spi_do), # Reload the so register
NextValue(spi_ack_pipe, 1), # Finally ack the cycle
NextValue(spi_dummy, has_dummy)
).Else(
NextValue(spi_clk_en, 0),
NextValue(spi_ack_pipe, 1), # Finally ack the cycle
NextState("RESET")
)
)
# SPI MAC machine --------------------------------------------------------------------------
# default active on boot
addr_updated = Signal()
d_to_wb = Signal(32) # data going back to wishbone
mac_count = Signal(5)
new_cycle = Signal(1)
self.submodules.mac = mac = FSM(reset_state="RESET")
mac.act("RESET",
NextValue(spi_mode, 1),
NextValue(addr_updated, 0),
NextValue(d_to_wb, 0),
NextValue(spi_cs_n, 1),
NextValue(has_dummy, 0),
NextValue(spi_do, 0),
NextValue(spi_req, 0),
NextValue(mac_count, 0),
NextState("WAKEUP_PRE"),
NextValue(new_cycle, 1),
If(spi_mode, NextValue(bus.ack, 0)),
)
if spiread:
mac.act("IDLE",
If(spi_mode, # This machine stays in idle once spi_mode is dropped
NextValue(bus.ack, 0),
If((bus.cyc == 1) & (bus.stb == 1) & (bus.we == 0) & (bus.cti != 7), # read cycle requested, not end-of-burst
If( (rom_addr[2:] != bus.adr) & new_cycle,
NextValue(rom_addr, Cat(Signal(2, reset=0), bus.adr)),
NextValue(addr_updated, 1),
NextValue(spi_cs_n, 1), # raise CS in anticipation of a new address cycle
NextState("SPI_READ_32_CS"),
).Elif( (rom_addr[2:] == bus.adr) | (~new_cycle & bus.cti == 2),
NextValue(mac_count, 3), # get another beat of 4 bytes at the next address
NextState("SPI_READ_32")
).Else(
NextValue(addr_updated, 0),
NextValue(spi_cs_n, 0),
NextState("SPI_READ_32"),
NextValue(mac_count, 3), # prep the MAC state counter to count out 4 bytes
)
).Elif(self.command.fields.wakeup,
NextValue(spi_cs_n, 1),
NextValue(self.command.storage, 0), # clear all pending commands
NextState("WAKEUP_PRE"),
)
)
)
else:
mac.act("IDLE",
If(spi_mode, # This machine stays in idle once spi_mode is dropped
If(self.command.fields.wakeup,
NextValue(spi_cs_n, 1),
NextValue(self.command.storage, 0), # Clear all pending commands
NextState("WAKEUP_PRE"),
)
)
)
#--------- wakup chip ------------------------------
mac.act("WAKEUP_PRE",
NextValue(spi_cs_n, 1), # Why isn't this sticking? i shouldn't have to put this here
NextValue(mac_count, 4),
NextState("WAKEUP_PRE_CS_WAIT")
)
mac.act("WAKEUP_PRE_CS_WAIT",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextState("WAKEUP_WUP"),
NextValue(spi_cs_n, 0)
)
)
mac.act("WAKEUP_WUP",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextValue(spi_do, 0xab), # wakeup from deep sleep
NextValue(spi_req, 1),
NextState("WAKEUP_WUP_WAIT")
)
)
mac.act("WAKEUP_WUP_WAIT",
NextValue(spi_req, 0),
If(spi_ack,
NextValue(spi_cs_n, 1), # raise CS
NextValue(mac_count, 4), # for >4 cycles per specsheet
NextState("WAKEUP_CR2_WREN_1")
)
)
#--------- WREN+CR2 - dummy cycles ------------------------------
mac.act("WAKEUP_CR2_WREN_1",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextValue(spi_do, 0x06), # WREN to unlock CR2 writing
NextValue(spi_req, 1),
NextState("WAKEUP_CR2_WREN_1_WAIT")
)
)
mac.act("WAKEUP_CR2_WREN_1_WAIT",
NextValue(spi_req, 0),
If(spi_ack,
NextValue(spi_cs_n, 1),
NextValue(mac_count, 4),
NextState("WAKEUP_CR2_DUMMY_CMD")
)
)
mac.act("WAKEUP_CR2_DUMMY_CMD",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextValue(spi_do, 0x72), # CR2 command
NextValue(spi_req, 1),
NextValue(mac_count, 2),
NextState("WAKEUP_CR2_DUMMY_ADRHI")
)
)
mac.act("WAKEUP_CR2_DUMMY_ADRHI",
NextValue(spi_do, 0x00), # We want to send 00_00_03_00
If(spi_ack,
NextValue(mac_count, mac_count -1)
),
If(mac_count == 0,
NextState("WAKEUP_CR2_DUMMY_ADRMID")
)
)
mac.act("WAKEUP_CR2_DUMMY_ADRMID",
NextValue(spi_do, 0x03),
If(spi_ack,
NextState("WAKEUP_CR2_DUMMY_ADRLO")
)
)
mac.act("WAKEUP_CR2_DUMMY_ADRLO",
NextValue(spi_do, 0x00),
If(spi_ack,
NextState("WAKEUP_CR2_DUMMY_DATA")
)
)
mac.act("WAKEUP_CR2_DUMMY_DATA",
NextValue(spi_do, 0x05), # 10 dummy cycles as required for 84MHz-104MHz operation
If(spi_ack,
NextState("WAKEUP_CR2_DUMMY_WAIT")
),
)
mac.act("WAKEUP_CR2_DUMMY_WAIT",
NextValue(spi_req, 0),
If(spi_ack,
NextValue(spi_cs_n, 1),
NextValue(mac_count, 4),
NextState("WAKEUP_CR2_WREN_2")
)
)
#--------- WREN+CR2 to DOPI mode ------------------------------
mac.act("WAKEUP_CR2_WREN_2",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextValue(spi_do, 0x06), # WREN to unlock CR2 writing
NextValue(spi_req, 1),
NextState("WAKEUP_CR2_WREN_2_WAIT")
)
)
mac.act("WAKEUP_CR2_WREN_2_WAIT",
NextValue(spi_req, 0),
If(spi_ack,
NextValue(spi_cs_n, 1),
NextValue(mac_count, 4),
NextState("WAKEUP_CR2_DOPI_CMD")
)
)
mac.act("WAKEUP_CR2_DOPI_CMD",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextValue(spi_do, 0x72), # CR2 command
NextValue(spi_req, 1),
NextValue(mac_count, 4),
NextState("WAKEUP_CR2_DOPI_ADR")
)
)
mac.act("WAKEUP_CR2_DOPI_ADR", # send 0x00_00_00_00 as address
NextValue(spi_do, 0x00), # no need to raise CS or lower spi_req, this is back-to-back
If(spi_ack,
NextValue(mac_count, mac_count - 1)
),
If(mac_count == 0,
NextState("WAKEUP_CR2_DOPI_DATA")
)
),
mac.act("WAKEUP_CR2_DOPI_DATA",
NextValue(spi_do, 2), # enable DOPI mode
If(spi_ack,
NextState("WAKEUP_CR2_DOPI_WAIT")
)
)
mac.act("WAKEUP_CR2_DOPI_WAIT", # trailing CS wait
NextValue(spi_req, 0),
If(spi_ack,
NextValue(spi_cs_n, 1),
NextValue(mac_count, 4),
NextState("WAKEUP_CS_EXIT")
)
)
mac.act("WAKEUP_CS_EXIT",
NextValue(spi_mode, 0), # now enter DOPI mode
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextState("IDLE")
)
)
if spiread:
#--------- SPI read machine ------------------------------
mac.act("SPI_READ_32",
If(addr_updated,
NextState("SPI_READ_32_CS"),
NextValue(has_dummy, 0),
NextValue(mac_count, 3),
NextValue(spi_cs_n, 1),
NextValue(spi_req, 0),
).Else(
If(mac_count > 0,
NextValue(has_dummy, 0),
NextValue(spi_req, 1),
NextState("SPI_READ_32_D")
).Else(
NextValue(spi_req, 0),
If(spi_ack,
# Protect these in a spi_mode mux to prevent excess inference of logic to
# handle otherwise implicit dual-controller situation
If(spi_mode,
NextValue(bus.dat_r, Cat(d_to_wb[8:],spi_di)),
NextValue(bus.ack, 1),
),
NextValue(rom_addr, rom_addr + 1),
NextState("IDLE")
)
)
)
)
mac.act("SPI_READ_32_D",
If(spi_ack,
# Shift in one byte at a time to d_to_wb(32)
NextValue(d_to_wb, Cat(d_to_wb[8:],spi_di,)),
NextValue(mac_count, mac_count - 1),
NextState("SPI_READ_32"),
NextValue(rom_addr, rom_addr + 1)
)
)
mac.act("SPI_READ_32_CS",
NextValue(mac_count, mac_count-1),
If(mac_count == 0,
NextValue(spi_cs_n, 0),
NextState("SPI_READ_32_A0")
)
)
mac.act("SPI_READ_32_A0",
NextValue(spi_do, 0x0c), # 32-bit address write for "fast read" command
NextValue(spi_req, 1),
NextState("SPI_READ_32_A1")
)
mac.act("SPI_READ_32_A1",
NextValue(spi_do, rom_addr[24:] & 0x7), # queue up MSB to send, leave req high; mask off unused high bits
If(spi_ack,
NextState("SPI_READ_32_A2")
)
)
mac.act("SPI_READ_32_A2",
NextValue(spi_do, rom_addr[16:24]),
If(spi_ack,
NextState("SPI_READ_32_A3")
)
)
mac.act("SPI_READ_32_A3",
NextValue(spi_do, rom_addr[8:16]),
If(spi_ack,
NextState("SPI_READ_32_A4")
)
)
mac.act("SPI_READ_32_A4",
NextValue(spi_do, rom_addr[:8]),
If(spi_ack,
NextState("SPI_READ_32_A5")
)
)
mac.act("SPI_READ_32_A5",
NextValue(spi_do, 0),
If(spi_ack,
NextState("SPI_READ_32_DUMMY")
)
)
mac.act("SPI_READ_32_DUMMY",
NextValue(spi_req, 0),
NextValue(addr_updated, 0),
If(spi_ack,
NextState("SPI_READ_32"),
NextValue(mac_count, 3), # Prep the MAC state counter to count out 4 bytes
).Else(
NextState("SPI_READ_32_DUMMY")
)
)
# Handle ECS_n -----------------------------------------------------------------------------
# treat ECS_N as an async signal -- just a "rough guide" of problems
ecs_n = Signal()
self.specials += MultiReg(pads.ecs_n, ecs_n)
self.submodules.ev = EventManager()
self.ev.ecc_error = EventSourceProcess(description="An ECC event has happened on the current block; triggered by falling edge of ECC_N")
self.ev.finalize()
self.comb += self.ev.ecc_error.trigger.eq(ecs_n)
ecc_reported = Signal()
ecs_n_delay = Signal()
ecs_pulse = Signal()
self.ecc_address = CSRStatus(fields=[
CSRField("ecc_address", size=32, description="Address of the most recent ECC event")
])
self.ecc_status = CSRStatus(fields=[
CSRField("ecc_error", size=1, description="Live status of the ECS_N bit (ECC error on current packet when low)"),
CSRField("ecc_overflow", size=1, description="More than one ECS_N event has happened since th last time ecc_address was checked")
])
self.comb += self.ecc_status.fields.ecc_error.eq(ecs_n)
self.comb += [
ecs_pulse.eq(ecs_n_delay & ~ecs_n), # falling edge -> positive pulse
If(ecs_pulse,
self.ecc_address.fields.ecc_address.eq(rom_addr),
If(ecc_reported,
self.ecc_status.fields.ecc_overflow.eq(1)
).Else(
self.ecc_status.fields.ecc_overflow.eq(self.ecc_status.fields.ecc_overflow),
)
).Else(
self.ecc_address.fields.ecc_address.eq(self.ecc_address.fields.ecc_address),
If(self.ecc_status.we,
self.ecc_status.fields.ecc_overflow.eq(0),
).Else(
self.ecc_status.fields.ecc_overflow.eq(self.ecc_status.fields.ecc_overflow),
)
)
]
self.sync += [
ecs_n_delay.eq(ecs_n),
If(ecs_pulse,
ecc_reported.eq(1)
).Elif(self.ecc_address.we,
ecc_reported.eq(0)
)
]
| {
"pile_set_name": "Github"
} |
/*
* libevent compatibility layer
*
* Copyright (c) 2007,2008,2009,2010,2012 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
* CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
* ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Alternatively, the contents of this file may be used under the terms of
* the GNU General Public License ("GPL") version 2 or any later version,
* in which case the provisions of the GPL are applicable instead of
* the above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the BSD license, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file under
* either the BSD or the GPL.
*/
#include <stddef.h>
#include <stdlib.h>
#include <assert.h>
#ifdef EV_EVENT_H
# include EV_EVENT_H
#else
# include "event.h"
#endif
#if EV_MULTIPLICITY
# define dLOOPev struct ev_loop *loop = (struct ev_loop *)ev->ev_base
# define dLOOPbase struct ev_loop *loop = (struct ev_loop *)base
#else
# define dLOOPev
# define dLOOPbase
#endif
/* never accessed, will always be cast from/to ev_loop */
/* Opaque stand-in for libevent's event_base: pointers to it are really
   struct ev_loop pointers in disguise.  The dummy member exists only
   because an empty struct is not valid ISO C. */
struct event_base
{
  int dummy;
};

/* The "current" base used by the global (non-_base) libevent entry points.
   Shared mutable state; not thread-safe, matching classic libevent. */
static struct event_base *ev_x_cur;
/* Convert a libevent timeval into a libev timestamp (seconds, as a double).
   A NULL timeval means "no timeout" and maps to -1.; a zero interval is
   clamped up to one microsecond so it remains a valid non-zero timeout. */
static ev_tstamp
ev_tv_get (struct timeval *tv)
{
  ev_tstamp interval;

  if (!tv)
    return -1.;

  interval = tv->tv_sec + tv->tv_usec * 1e-6;

  return interval ? interval : 1e-6;
}
#define EVENT_STRINGIFY(s) # s
#define EVENT_VERSION(a,b) EVENT_STRINGIFY (a) "." EVENT_STRINGIFY (b)
const char *
event_get_version (void)
{
/* returns ABI, not API or library, version */
return EVENT_VERSION (EV_VERSION_MAJOR, EV_VERSION_MINOR);
}
const char *
event_get_method (void)
{
return "libev";
}
/* libevent's event_init(): create (or re-create) the current event base.
   Not thread-safe: the result is stored in the global ev_x_cur, which is
   also what event_set() later records into each event.
   With EV_MULTIPLICITY, the first call hands out the default loop and every
   later call creates a fresh loop; without it, only a single base may ever
   be initialised (enforced by the comma-operator assert below). */
void *event_init (void)
{
#if EV_MULTIPLICITY
  if (ev_x_cur)
    ev_x_cur = (struct event_base *)ev_loop_new (EVFLAG_AUTO);
  else
    ev_x_cur = (struct event_base *)ev_default_loop (EVFLAG_AUTO);
#else
  assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY", !ev_x_cur));
  ev_x_cur = (struct event_base *)(long)ev_default_loop (EVFLAG_AUTO);
#endif
  return ev_x_cur;
}
const char *
event_base_get_method (const struct event_base *base)
{
return "libev";
}
/* libevent's event_base_new(): create a new, independent event base.
   Only possible when libev was compiled with EV_MULTIPLICITY; otherwise the
   call is a programming error (debug builds abort, NDEBUG builds get NULL). */
struct event_base *
event_base_new (void)
{
#if EV_MULTIPLICITY
  return (struct event_base *)ev_loop_new (EVFLAG_AUTO);
#else
  /* BUG FIX: the assert expression was a bare string literal, which is
     always true, so the assertion could never fire and the unsupported call
     silently returned NULL.  Pair the message with a 0 condition using the
     same comma-operator idiom event_init() uses, so debug builds abort. */
  assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY", 0));
  return NULL;
#endif
}
/* Destroy an event base.  The default loop is deliberately kept alive (only
   dynamically created loops are destroyed), so freeing the base obtained
   from event_init() is a harmless no-op. */
void event_base_free (struct event_base *base)
{
  dLOOPbase;
#if EV_MULTIPLICITY
  if (!ev_is_default_loop (loop))
    ev_loop_destroy (loop);
#endif
}

/* Run the global (current) event base with default flags. */
int event_dispatch (void)
{
  return event_base_dispatch (ev_x_cur);
}
#ifdef EV_STANDALONE
/* libev has no pluggable logging, so the libevent hook is accepted and
   silently ignored. */
void event_set_log_callback (event_log_cb cb)
{
  /* nop */
}
#endif

/* Run the global event base with the given EVLOOP_* flags. */
int event_loop (int flags)
{
  return event_base_loop (ev_x_cur, flags);
}

/* Ask the global event base to stop looping, optionally after a delay. */
int event_loopexit (struct timeval *tv)
{
  return event_base_loopexit (ev_x_cur, tv);
}

/* Accessor for the user callback stored in an event. */
event_callback_fn event_get_callback
(const struct event *ev)
{
  return ev->ev_callback;
}
/* Common tail of all watcher thunks: mask the libev revents down to the
   flags libevent exposes, record the result on the event, then invoke the
   user callback with the classic (fd, events, arg) signature. */
static void
ev_x_cb (struct event *ev, int revents)
{
  revents &= EV_READ | EV_WRITE | EV_TIMER | EV_SIGNAL;
  ev->ev_res = revents;
  ev->ev_callback (ev->ev_fd, (short)revents, ev->ev_arg);
}
/* Signal watcher thunk: recover the containing struct event from the
   embedded ev_signal member, deregister on error, then dispatch. */
static void
ev_x_cb_sig (EV_P_ struct ev_signal *w, int revents)
{
  struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, iosig.sig));
  if (revents & EV_ERROR)
    event_del (ev);
  ev_x_cb (ev, revents);
}

/* I/O watcher thunk: events without EV_PERSIST are one-shot in libevent,
   so they are removed before the callback runs. */
static void
ev_x_cb_io (EV_P_ struct ev_io *w, int revents)
{
  struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, iosig.io));
  if ((revents & EV_ERROR) || !(ev->ev_events & EV_PERSIST))
    event_del (ev);
  ev_x_cb (ev, revents);
}

/* Timer thunk: libevent timeouts are always one-shot, so the event is
   removed unconditionally before dispatch. */
static void
ev_x_cb_to (EV_P_ struct ev_timer *w, int revents)
{
  struct event *ev = (struct event *)(((char *)w) - offsetof (struct event, to));
  event_del (ev);
  ev_x_cb (ev, revents);
}
/* libevent's event_set(): (re)initialise an event structure.
   Exactly one member of the iosig union is initialised, depending on
   whether this is a signal or an io event; the timer member is always
   initialised because any event may carry a timeout. */
void event_set (struct event *ev, int fd, short events, void (*cb)(int, short, void *), void *arg)
{
  if (events & EV_SIGNAL)
    ev_init (&ev->iosig.sig, ev_x_cb_sig);
  else
    ev_init (&ev->iosig.io, ev_x_cb_io);
  ev_init (&ev->to, ev_x_cb_to);
  ev->ev_base = ev_x_cur; /* not threadsafe, but it's how libevent works */
  ev->ev_fd = fd;
  ev->ev_events = events;
  ev->ev_pri = 0;
  ev->ev_callback = cb;
  ev->ev_arg = arg;
  ev->ev_res = 0;
  ev->ev_flags = EVLIST_INIT;
}

/* One-shot convenience wrapper on the global event base. */
int event_once (int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv)
{
  return event_base_once (ev_x_cur, fd, events, cb, arg, tv);
}
/* libevent's event_add(): arm the event and, optionally, its timeout.
   The io/signal watcher is only started when not already active, so
   re-adding a pending event merely refreshes its timeout.  The EVLIST_*
   bits mirror the bookkeeping libevent exposes through ev_flags. */
int event_add (struct event *ev, struct timeval *tv)
{
  dLOOPev;
  if (ev->ev_events & EV_SIGNAL)
    {
      /* signal event: (re)target the watcher at the signal number in ev_fd */
      if (!ev_is_active (&ev->iosig.sig))
        {
          ev_signal_set (&ev->iosig.sig, ev->ev_fd);
          ev_signal_start (EV_A_ &ev->iosig.sig);
          ev->ev_flags |= EVLIST_SIGNAL;
        }
    }
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    {
      if (!ev_is_active (&ev->iosig.io))
        {
          ev_io_set (&ev->iosig.io, ev->ev_fd, ev->ev_events & (EV_READ | EV_WRITE));
          ev_io_start (EV_A_ &ev->iosig.io);
          ev->ev_flags |= EVLIST_INSERTED;
        }
    }
  if (tv)
    {
      /* ev_timer_again both starts and restarts, matching event_add's
         "replace any previous timeout" semantics */
      ev->to.repeat = ev_tv_get (tv);
      ev_timer_again (EV_A_ &ev->to);
      ev->ev_flags |= EVLIST_TIMEOUT;
    }
  else
    {
      /* no timeout requested: cancel any previously attached one */
      ev_timer_stop (EV_A_ &ev->to);
      ev->ev_flags &= ~EVLIST_TIMEOUT;
    }
  ev->ev_flags |= EVLIST_ACTIVE;
  return 0;
}
/* libevent's event_del(): stop every watcher backing the event and reset
   its flag word to the freshly-initialised state.  Always succeeds. */
int event_del (struct event *ev)
{
  dLOOPev;
  if (ev->ev_events & EV_SIGNAL)
    ev_signal_stop (EV_A_ &ev->iosig.sig);
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    ev_io_stop (EV_A_ &ev->iosig.io);
  if (ev_is_active (&ev->to))
    ev_timer_stop (EV_A_ &ev->to);
  ev->ev_flags = EVLIST_INIT;
  return 0;
}
/* libevent's event_active(): artificially feed events into the watchers.
   ncalls is accepted for API compatibility but not used here. */
void event_active (struct event *ev, int res, short ncalls)
{
  dLOOPev;
  if (res & EV_TIMEOUT)
    ev_feed_event (EV_A_ &ev->to, res & EV_TIMEOUT);
  if (res & EV_SIGNAL)
    ev_feed_event (EV_A_ &ev->iosig.sig, res & EV_SIGNAL);
  if (res & (EV_READ | EV_WRITE))
    ev_feed_event (EV_A_ &ev->iosig.io, res & (EV_READ | EV_WRITE));
}
/* libevent's event_pending(): report which of the requested events are
   currently armed or pending, and optionally write the expiry reference
   time into *tv when a timeout is involved. */
int event_pending (struct event *ev, short events, struct timeval *tv)
{
  short armed = 0;
  dLOOPev;

  /* io/signal part: only one member of the iosig union can be in use */
  if (ev->ev_events & EV_SIGNAL)
    armed |= (ev_is_active (&ev->iosig.sig) || ev_is_pending (&ev->iosig.sig)) ? EV_SIGNAL : 0;
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    armed |= (ev_is_active (&ev->iosig.io) || ev_is_pending (&ev->iosig.io)) ? ev->ev_events & (EV_READ | EV_WRITE) : 0;

  /* timeout part */
  if ((ev->ev_events & EV_TIMEOUT) || ev_is_active (&ev->to) || ev_is_pending (&ev->to))
    {
      armed |= EV_TIMEOUT;

      if (tv)
        {
          ev_tstamp at = ev_now (EV_A);

          tv->tv_sec = (long)at;
          tv->tv_usec = (long)((at - (ev_tstamp)tv->tv_sec) * 1e6);
        }
    }

  return events & armed;
}
/* Priority handling: libev priorities are fixed at compile time, so these
   calls only need to satisfy the libevent API surface. */
int event_priority_init (int npri)
{
  return event_base_priority_init (ev_x_cur, npri);
}

/* Record the requested priority on the event; informational only here. */
int event_priority_set (struct event *ev, int pri)
{
  ev->ev_pri = pri;
  return 0;
}

/* Re-home an event onto a different base (takes effect on the next add). */
int event_base_set (struct event_base *base, struct event *ev)
{
  ev->ev_base = base;
  return 0;
}
/* Run the given base.  The result of ev_run is inverted to map onto
   libevent's return convention.
   NOTE(review): the exact meaning of the mapping depends on what ev_run
   returns for the libev version in use -- confirm against its docs. */
int event_base_loop (struct event_base *base, int flags)
{
  dLOOPbase;
  return !ev_run (EV_A_ flags);
}

/* Run the base with default flags. */
int event_base_dispatch (struct event_base *base)
{
  return event_base_loop (base, 0);
}
/* Timer callback used by event_base_loopexit(): break out of one loop
   iteration of the base smuggled through the void* argument. */
static void
ev_x_loopexit_cb (int revents, void *base)
{
  dLOOPbase;
  ev_break (EV_A_ EVBREAK_ONE);
}

/* Schedule loop termination after the given delay; a NULL or negative
   timeval maps to "as soon as possible" (delay 0). */
int event_base_loopexit (struct event_base *base, struct timeval *tv)
{
  ev_tstamp after = ev_tv_get (tv);
  dLOOPbase;
  ev_once (EV_A_ -1, 0, after >= 0. ? after : 0., ev_x_loopexit_cb, (void *)base);
  return 0;
}
/* Heap-allocated bundle carrying a libevent-style callback through libev's
   ev_once mechanism. */
struct ev_x_once
{
  int fd;                         /* fd reported back to the callback */
  void (*cb)(int, short, void *); /* user callback */
  void *arg;                      /* user argument */
};

/* Trampoline for ev_once: unpack the bundle, run the user callback and
   release the allocation (each bundle fires exactly once). */
static void
ev_x_once_cb (int revents, void *arg)
{
  struct ev_x_once *once = (struct ev_x_once *)arg;
  once->cb (once->fd, (short)revents, once->arg);
  free (once);
}

/* libevent's event_base_once(): one-shot io/timeout callback without a
   persistent struct event.  Returns -1 only on allocation failure. */
int event_base_once (struct event_base *base, int fd, short events, void (*cb)(int, short, void *), void *arg, struct timeval *tv)
{
  struct ev_x_once *once = (struct ev_x_once *)malloc (sizeof (struct ev_x_once));
  dLOOPbase;
  if (!once)
    return -1;
  once->fd = fd;
  once->cb = cb;
  once->arg = arg;
  ev_once (EV_A_ fd, events & (EV_READ | EV_WRITE), ev_tv_get (tv), ev_x_once_cb, (void *)once);
  return 0;
}
/* Priorities are configured at libev compile time; accept and ignore. */
int event_base_priority_init (struct event_base *base, int npri)
{
  /*dLOOPbase;*/
  return 0;
}
| {
"pile_set_name": "Github"
} |
"use strict";
// Create a single list item carrying a large text payload (9999 "x"
// characters), so that each leaf retains a noticeable chunk of memory.
function generateBigLeaf() {
  var leaf = document.createElement("li");
  var payload = "x".repeat(9999); // same length as new Array(10000).join("x")
  leaf.className = "leaf";
  leaf.textContent = payload;
  return leaf;
}
// Build a hidden <ul> holding 100 big leaves; display:none keeps the page
// visually empty while the nodes still occupy heap memory.
function generateTree() {
  var list = document.createElement("ul");
  list.style.display = "none";
  var remaining = 100;
  while (remaining-- > 0) {
    list.appendChild(generateBigLeaf());
  }
  return list;
}
// Attach the (hidden) tree to the live document.
document.body.appendChild(generateTree());
// leak
// Deliberately hold global references to DOM nodes; these keep the tree and
// its last leaf strongly reachable so they show up as retained objects in
// heap-profiling tools.
var treeRef = document.querySelector("ul");
var leafRef = document.querySelector("li:last-child");
| {
"pile_set_name": "Github"
} |
/*
 * Flow layout demo (FlowLayout).
 * Steps:
 *  1. Extend JFrame.
 *  2. Declare the components that are needed.
 *  3. Initialise the components in the constructor.
 *  4. Add the components.
 *  5. Configure the top-level container (the frame) itself.
 *  6. Show the frame with setVisible(true).
 * ------- Notes -------
 * FlowLayout centres components by default; FlowLayout(int align) changes this.
 * It does not constrain the size of the components it manages; they keep their preferred size.
 * When the container is resized, component positions may change, but their sizes do not.
 *
 * */
import java.awt.*;
import javax.swing.*;
public class Demo extends JFrame
{
    // The six buttons arranged by the flow layout.
    JButton jb1, jb2, jb3, jb4, jb5, jb6;

    public static void main(String[] args)
    {
        Demo d = new Demo();
    }

    /**
     * Builds and shows the demo window.
     *
     * FlowLayout notes: components are centred by default (changed to
     * left-aligned here); managed components keep their preferred size, and
     * resizing the container may move components but never resizes them.
     */
    Demo()
    {
        // Create the components.
        jb1 = new JButton("Kevin");
        jb2 = new JButton("Tony");
        jb3 = new JButton("Litch");
        jb4 = new JButton("Lenka");
        jb5 = new JButton("Rocco");
        jb6 = new JButton("Celedy");

        // BUG FIX: the layout manager used to be installed *after*
        // setVisible(true), so the frame could first appear laid out with the
        // default BorderLayout. Configure the layout before adding the
        // components and show the window last.
        this.setLayout(new FlowLayout(FlowLayout.LEFT));

        // Add the components.
        this.add(jb1);
        this.add(jb2);
        this.add(jb3);
        this.add(jb4);
        this.add(jb5);
        this.add(jb6);

        // Configure the frame.
        this.setTitle("流式布局演示");
        this.setBounds(400, 300, 450, 300); // position (400,300), size 450x300
        this.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); // exit the JVM when the window closes
        this.setResizable(false); // keep the user from resizing the window

        // Show the fully configured frame.
        this.setVisible(true);
    }
}
| {
"pile_set_name": "Github"
} |
'use strict';
var mocha = require('mocha');
var chai = require('chai');
var expect = chai.expect;
var Store = require('../src/Store');
var testStore = 8386;
//TODO this test needs fleshing out
// Integration test: fetch the full menu for a known store and sanity-check
// its structure. Network-dependent by design, hence the generous timeout.
describe('Menu', function() {
    this.timeout(15000);
    describe('ParseMenu', function() {
        it('should parse menu', function(done) {
            // NOTE(review): assumes store #8386 (testStore) exists and is
            // reachable from the test environment -- verify before relying
            // on this in CI.
            var store = new Store({ID: testStore});
            store.getMenu(function(menu) {
                // The three top-level menu categories must all be present.
                expect(menu).not.to.be.null;
                expect(menu.getFoodCategory()).not.to.be.null;
                expect(menu.getCouponCategory()).not.to.be.null;
                expect(menu.getPreconfiguredCategory()).not.to.be.null;
                // Item lookup by product code should find the basic pizza.
                var pizzaItem = menu.getItemByCode("S_PIZZA");
                expect(pizzaItem).to.not.be.null;
                expect(pizzaItem.getName()).to.be.equal("Pizza");
                done();
            });
        });
    });
});
| {
"pile_set_name": "Github"
} |
#!/bin/bash
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Executes the samples and tests for the Google Test Framework.
# Help the dynamic linker find the path to the libraries.
export DYLD_FRAMEWORK_PATH=$BUILT_PRODUCTS_DIR
export DYLD_LIBRARY_PATH=$BUILT_PRODUCTS_DIR
# Create some executables.
test_executables=$@
# Now execute each one in turn keeping track of how many succeeded and failed.
succeeded=0
failed=0
failed_list=()
for test in ${test_executables[*]}; do
"$test"
result=$?
if [ $result -eq 0 ]; then
succeeded=$(( $succeeded + 1 ))
else
failed=$(( failed + 1 ))
failed_list="$failed_list $test"
fi
done
# Report the successes and failures to the console.
echo "Tests complete with $succeeded successes and $failed failures."
if [ $failed -ne 0 ]; then
echo "The following tests failed:"
echo $failed_list
fi
exit $failed
| {
"pile_set_name": "Github"
} |
import { EventLike } from "./frombinder";
import { EventStream } from "./observable";
/**
* A polled function used by [fromPoll](../globals.html#frompoll)
*/
export declare type PollFunction<V> = () => EventLike<V>;
/**
Polls given function with given interval.
Function should return Events: either [`Bacon.Next`](classes/next.html) or [`Bacon.End`](classes/end.html). Polling occurs only
when there are subscribers to the stream. Polling ends permanently when
`f` returns [`Bacon.End`](classes/end.html).
* @param delay poll interval in milliseconds
* @param poll function to be polled
* @typeparam V Type of stream elements
*/
export default function fromPoll<V>(delay: number, poll: PollFunction<V>): EventStream<V>;
| {
"pile_set_name": "Github"
} |
# Getting Signpost
Get the latest Signpost build from the [download page](https://github.com/mttkay/signpost/releases)
OR
[checkout the source code](https://github.com/mttkay/signpost) using Git:
`git clone https://github.com/mttkay/signpost.git`
Now `cd signpost/` and run:
`mvn install`
This will download all dependencies and create a JAR in the target/ folder. Note that this step requires that you have the [Apache Maven](https://maven.apache.org/) build system and [Git](https://www.git-scm.com/) installed on your system.
OR
If you use [Apache Maven](https://maven.apache.org/) for project management yourself, you can simply declare Signpost as a dependency in your pom.xml:
```xml
<dependencies>
<dependency>
<groupId>oauth.signpost</groupId>
<artifactId>signpost-core</artifactId>
<version>1.2</version>
<scope>compile</scope>
</dependency>
</dependencies>
```
Depending on your requirements, you may need to add dependencies to other Signpost modules (e.g. signpost-jetty6).
If you need to depend on an unreleased version, you have to add the Signpost snapshots repository to your POM:
```xml
<repositories>
<repository>
<id>signpost-snapshots</id>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
</repositories>
```
The snapshots are kindly hosted by [Sonatype](https://oss.sonatype.org/).
# Setting up Signpost
By default, Signpost supports signing HTTP requests of type java.net.HttpURLConnection. If you only need that, then you're good to go and you can skip to the next section. If you want to use a different HTTP messaging system, you must download an adapter module that supports adapting request objects of that library for Signpost being able to sign them. The adapter module must be added to your project's build path.
For a list of available adapter modules, refer to [SupportedHttpLibraries](SupportedHttpLibraries.md).
# Using Signpost
_All examples below assume that you have already obtained a consumer key and secret from the OAuth service provider you are communicating with._
**Android users: Do NOT use the DefaultOAuth`*` implementations on Android, since there's a bug in Android's java.net.HttpURLConnection that keeps it from working with some service providers. Instead, use the CommonsHttpOAuth`*` classes, since they are meant to be used with Apache Commons HTTP (that's what Android uses for HTTP anyway).**
## Signing an HTTP message using OAuthConsumer
**This section shows how to sign HTTP requests of type java.net.HttpURLConnection, which is the default. If you need to sign requests for other HTTP request types, please have a look at the examples in [SupportedHttpLibraries](SupportedHttpLibraries.md).**
If you have already obtained an access token from your service provider that allows you to access a protected resource, you can sign a request to that resource using Signpost as follows:
```java
// create a consumer object and configure it with the access
// token and token secret obtained from the service provider
OAuthConsumer consumer = new DefaultOAuthConsumer(CONSUMER_KEY,
CONSUMER_SECRET);
consumer.setTokenWithSecret(ACCESS_TOKEN, TOKEN_SECRET);
// create an HTTP request to a protected resource
URL url = new URL("http://example.com/protected");
HttpURLConnection request = (HttpURLConnection) url.openConnection();
// sign the request
consumer.sign(request);
// send the request
request.connect();
```
**NOTE:** When using HttpURLConnection, you cannot sign POST requests that carry query parameters in the message payload (i.e. requests of type application/x-www-form-urlencoded). This is not a limitation of Signpost per se, but with the way URLConnection works. Server communication with URLConnection is based on data streams, which means that whenever you write something to the connection, it will be sent to the server immediately. This data is not buffered, and there is simply no way for Signpost to inspect that data and include it in a signature. Hence, when you have to sign requests which contain parameters in their body, you have to use an HTTP library like Apache Commons HttpComponents and the respective Signpost module. (This restriction does not apply to requests which send binary data such as documents or files, because that data won't become part of the signature anyway.)
## Obtaining a request token using OAuthProvider
Obtaining a request token from the OAuth service provider is the first step in the 3-way handshake defined by OAuth. In a second step (which is beyond the scope of Signpost or any OAuth library) the user must then authorize this request token by granting your application access to protected resources on a special website defined by the OAuth service provider.
```java
// create a new service provider object and configure it with
// the URLs which provide request tokens, access tokens, and
// the URL to which users are sent in order to grant permission
// to your application to access protected resources
OAuthProvider provider = new DefaultOAuthProvider(
REQUEST_TOKEN_ENDPOINT_URL, ACCESS_TOKEN_ENDPOINT_URL,
AUTHORIZE_WEBSITE_URL);
// fetches a request token from the service provider and builds
// a url based on AUTHORIZE_WEBSITE_URL and CALLBACK_URL to
// which your app must now send the user
String url = provider.retrieveRequestToken(consumer, CALLBACK_URL);
```
If your application cannot receive callbacks (e.g. because it's a desktop app), then you must replace CALLBACK\_URL with one of these values:
* If the service provider you're communicating with implements version 1.0a of the protocol, then you must pass "oob" or `OAuth.OUT_OF_BAND` to indicate that you cannot receive callbacks.
* If the service provider is still using the older 1.0 protocol, then you must pass `null` to indicate that you cannot receive callbacks.
**If you get a 401 during these steps:** Please make sure that when passing a callback URL, your applications is registered as being able to receive callbacks from your service provider. If you do NOT do that, then the service provider may decide to reject your request, because it thinks it's illegitimate. Twitter, for instance, will do this.
## Obtaining an access token using OAuthProvider
The third and last step in the "OAuth dance" is to exchange the blessed request token for an access token, which the client can then use to access protected resources on behalf of the user. Again, this is very simple to do with Signpost:
```java
provider.retrieveAccessToken(consumer, verificationCode);
```
The `verificationCode` is only meaningful for service providers implementing OAuth 1.0a. Depending on whether you provided a callback URL or out-of-band before, this value is either being passed to your application during callback as the `oauth_verifier` request parameter, or you must obtain this value manually from the user of your application.
On success, the OAuthConsumer connected to this OAuthProvider has now a valid access token and token secret set, and can start signing messages!
| {
"pile_set_name": "Github"
} |
h1,
h2,
h3,
h4,
h5,
h6,
p {
margin: 0;
padding: 0;
}
a:active,
a:hover {
outline: 0;
}
html {
width: 100%;
height: 100%;
min-height: 100%;
font-family: sans-serif;
-webkit-text-size-adjust: 100%;
}
body {
overflow-x: hidden;
width: 100%;
min-width: 300px;
height: 100%;
min-height: 100%;
margin: 0;
padding: 0;
color: #fff;
background: #21272f;
font-family: 'Helvetica Neue', Helvetica, sans-serif;
font-size: 100%;
font-weight: 400;
-webkit-font-smoothing: antialiased;
}
a:link,
a:visited {
text-decoration: none;
color: #fff;
}
img {
width: auto;
max-width: 100%;
height: auto;
}
p {
margin-bottom: 10px;
font-size: 18px;
line-height: 1.6;
}
.responsive {
width: 100%;
}
@media only screen and (min-width: 767px) {
.responsive {
width: 50%;
}
}
.sep {
display: block;
clear: both;
width: 100%;
height: 2px;
margin: 15px 0;
background: rgba(255, 255, 255, 0.1);
}
@media only screen and (min-width: 767px) {
.sep {
margin-top: 0;
}
}
.primary-btn {
padding: 15px;
text-transform: uppercase;
color: rgba(255, 255, 255, 0.5);
border: 2px solid rgba(255, 255, 255, 0.6);
border-radius: 3px;
font-size: 15px;
font-weight: 500;
}
.primary-btn:hover {
transition: all 0.1s ease-in-out;
color: #21272f;
background: #fff;
box-shadow: 0 4px 20px 2px rgba(0, 0, 0, 0.2);
}
.container {
display: block;
overflow: hidden;
box-sizing: border-box;
width: 100%;
max-width: 900px;
margin: 0 auto;
padding: 15px;
}
@media only screen and (min-width: 767px) {
.container {
padding: 35px;
}
}
.intro-container {
position: relative;
z-index: 10;
width: 100%;
margin-top: 30px;
margin-bottom: 70px;
text-align: center;
}
@media only screen and (min-width: 767px) {
.intro-container {
float: left;
width: 42%;
margin-bottom: 0;
text-align: left;
}
}
.app-store-btn::before {
padding-right: 8px;
content: '';
font-size: 16px;
}
.app-icon {
width: 90px;
margin: 0 auto;
filter: drop-shadow(0 0 2px rgba(0, 0, 0, 0.1));
-webkit-filter: drop-shadow(0 0 2px rgba(0, 0, 0, 0.1));
}
.app-icon p {
font-size: 35px;
font-weight: 200;
white-space: nowrap;
}
@media only screen and (min-width: 767px) {
.app-icon {
margin: 0;
}
.app-icon p {
margin-left: 2px;
}
}
.intro {
max-width: 300px;
margin-top: 13%;
margin-right: auto;
margin-bottom: 40px;
margin-left: auto;
}
@media only screen and (min-width: 767px) {
.intro {
max-width: 100%;
margin-top: 28%;
margin-bottom: 40px;
}
}
.intro h2 {
margin-bottom: 20px;
font-size: 28px;
font-weight: 200;
line-height: 1.2;
}
@media only screen and (min-width: 580px) {
.intro h2 {
font-size: 30px;
}
}
.iphone {
position: relative;
max-width: 300px;
height: 300px;
margin: 0 auto;
padding-bottom: 50px;
}
@media only screen and (min-width: 767px) {
.iphone {
float: right;
width: 100%;
max-width: 42%;
height: 100%;
text-align: right;
}
}
.iphone-screenshot {
position: relative;
z-index: 100;
overflow: hidden;
width: 86.5%;
height: 0;
margin: 0 auto;
padding-bottom: 182%;
}
@media only screen and (min-width: 767px) {
.iphone-screenshot {
z-index: 1;
width: 87%;
}
}
.iphone-screenshot img {
position: absolute;
top: 13.88%;
left: 0;
}
.iphone-mask {
position: absolute;
z-index: 10;
top: 0;
left: 0;
display: block;
}
.footer {
margin-bottom: 10px;
}
.footer p {
opacity: 0.7;
}
.credit {
float: left;
}
.credit a {
opacity: 1;
}
.credit a:hover {
text-decoration: underline;
}
.contact {
float: left;
}
.contact a {
padding-right: 20px;
}
.contact a,
.contact a:visited {
transition: opacity 0.5s ease-out;
text-decoration: none;
opacity: 0.7;
color: #fff;
}
.contact a:hover,
.contact a:focus,
.contact a:visited:hover,
.contact a:visited:focus {
opacity: 1;
}
@media only screen and (min-width: 767px) {
.contact {
text-align: right;
}
.contact a {
padding-right: 0;
padding-left: 12px;
}
}
| {
"pile_set_name": "Github"
} |
from os import path
import click
import requests
from webtest.app import TestApp, AppError
@click.command()
@click.option(
    '--message',
    '-m',
    default=u'Hello there'
)
@click.option(
    'attachments',
    '-a',
    multiple=True
)
@click.argument(
    'url',
    default=u'http://localhost:6543',
)
def submit_attachment(url, message, attachments):
    """Submit a message to a running briefkasten instance via its web form.

    url -- base URL of the briefkasten application
    message -- text placed into the submission form's message field
    attachments -- file paths to attach (collected but not yet wired up,
        see the TODO below)
    """
    click.echo('running against %s!' % url)
    # TODO: attach the files once multipart upload support is added, e.g.:
    # files = [('attachments', (a, open(path.abspath(a)))) for a in attachments]
    browser = TestApp(url)
    submit_page = browser.get('/briefkasten/')
    submit_form = submit_page.forms['briefkasten-form']
    # BUG FIX: the form was filled with a hard-coded u'Hello there', silently
    # ignoring the --message option; use the option value instead.
    submit_form['message'] = message
    try:
        response = submit_form.submit()
        # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` that
        # dropped into the debugger on every successful submission.
        click.echo(u'Got %s' % response.url)
    except AppError as exc:
        click.echo(u'Oops %s' % exc)
if __name__ == '__main__':
submit_attachment()
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\DomCrawler\Field;
/**
* FileFormField represents a file form field (an HTML file input tag).
*
* @author Fabien Potencier <fabien@symfony.com>
*/
class FileFormField extends FormField
{
    /**
     * Sets the PHP error code associated with the field.
     *
     * @param int $error The error code (one of UPLOAD_ERR_INI_SIZE, UPLOAD_ERR_FORM_SIZE, UPLOAD_ERR_PARTIAL, UPLOAD_ERR_NO_FILE, UPLOAD_ERR_NO_TMP_DIR, UPLOAD_ERR_CANT_WRITE, or UPLOAD_ERR_EXTENSION)
     *
     * @throws \InvalidArgumentException When error code doesn't exist
     */
    public function setErrorCode($error)
    {
        $codes = array(UPLOAD_ERR_INI_SIZE, UPLOAD_ERR_FORM_SIZE, UPLOAD_ERR_PARTIAL, UPLOAD_ERR_NO_FILE, UPLOAD_ERR_NO_TMP_DIR, UPLOAD_ERR_CANT_WRITE, UPLOAD_ERR_EXTENSION);
        // BUG FIX: use a strict in_array() check. The documented contract is
        // an integer UPLOAD_ERR_* constant; with loose comparison a numeric
        // string such as '4' would silently be accepted as valid.
        if (!in_array($error, $codes, true)) {
            throw new \InvalidArgumentException(sprintf('The error code %s is not valid.', $error));
        }

        // Mimic the $_FILES entry PHP produces for a failed upload.
        $this->value = array('name' => '', 'type' => '', 'tmp_name' => '', 'error' => $error, 'size' => 0);
    }

    /**
     * Sets the value of the field (alias of setValue()).
     *
     * @param string $value The path of the file to upload
     */
    public function upload($value)
    {
        $this->setValue($value);
    }

    /**
     * Sets the value of the field.
     *
     * Readable paths are copied to a unique temporary file first, mirroring
     * how PHP stages real uploads in tmp_name; unreadable or null values are
     * recorded as an UPLOAD_ERR_NO_FILE entry.
     *
     * @param string|null $value The path of the file to upload
     */
    public function setValue($value)
    {
        if (null !== $value && is_readable($value)) {
            $error = UPLOAD_ERR_OK;
            $size = filesize($value);
            $info = pathinfo($value);
            $name = $info['basename'];

            // Copy to a tmp location so later handling cannot mutate the
            // caller's original file.
            // NOTE(review): the sha1(uniqid(mt_rand(), true)) name is unique
            // in practice but not created atomically; tempnam() would avoid
            // the (tiny) race window -- left as-is to preserve behaviour.
            $tmp = sys_get_temp_dir().'/'.sha1(uniqid(mt_rand(), true));
            if (array_key_exists('extension', $info)) {
                // Keep the extension so server-side type sniffing still works.
                $tmp .= '.'.$info['extension'];
            }
            if (is_file($tmp)) {
                unlink($tmp);
            }
            copy($value, $tmp);
            $value = $tmp;
        } else {
            $error = UPLOAD_ERR_NO_FILE;
            $size = 0;
            $name = '';
            $value = '';
        }

        $this->value = array('name' => $name, 'type' => '', 'tmp_name' => $value, 'error' => $error, 'size' => $size);
    }

    /**
     * Sets path to the file as string for simulating HTTP request.
     *
     * Unlike setValue(), this stores the raw path without copying the file.
     *
     * @param string $path The path to the file
     */
    public function setFilePath($path)
    {
        parent::setValue($path);
    }

    /**
     * Initializes the form field.
     *
     * @throws \LogicException When the node is not an <input type="file"> tag
     */
    protected function initialize()
    {
        if ('input' !== $this->node->nodeName) {
            throw new \LogicException(sprintf('A FileFormField can only be created from an input tag (%s given).', $this->node->nodeName));
        }
        if ('file' !== strtolower($this->node->getAttribute('type'))) {
            throw new \LogicException(sprintf('A FileFormField can only be created from an input tag with a type of file (given type is %s).', $this->node->getAttribute('type')));
        }

        $this->setValue(null);
    }
}
| {
"pile_set_name": "Github"
} |
/*! normalize.css v3.0.2 | MIT License | git.io/normalize */
/**
* 1. Set default font family to sans-serif.
* 2. Prevent iOS text size adjust after orientation change, without disabling
* user zoom.
*/
html {
font-family: sans-serif; /* 1 */
-ms-text-size-adjust: 100%; /* 2 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/**
* Remove default margin.
*/
body {
margin: 0;
}
/* HTML5 display definitions
========================================================================== */
/**
* Correct `block` display not defined for any HTML5 element in IE 8/9.
* Correct `block` display not defined for `details` or `summary` in IE 10/11
* and Firefox.
* Correct `block` display not defined for `main` in IE 11.
*/
article,
aside,
details,
figcaption,
figure,
footer,
header,
hgroup,
main,
menu,
nav,
section,
summary {
display: block;
}
/**
* 1. Correct `inline-block` display not defined in IE 8/9.
* 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.
*/
audio,
canvas,
progress,
video {
display: inline-block; /* 1 */
vertical-align: baseline; /* 2 */
}
/**
* Prevent modern browsers from displaying `audio` without controls.
* Remove excess height in iOS 5 devices.
*/
audio:not([controls]) {
display: none;
height: 0;
}
/**
* Address `[hidden]` styling not present in IE 8/9/10.
* Hide the `template` element in IE 8/9/11, Safari, and Firefox < 22.
*/
[hidden],
template {
display: none;
}
/* Links
========================================================================== */
/**
* Remove the gray background color from active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* Improve readability when focused and also mouse hovered in all browsers.
*/
a:active,
a:hover {
outline: 0;
}
/* Text-level semantics
========================================================================== */
/**
* Address styling not present in IE 8/9/10/11, Safari, and Chrome.
*/
abbr[title] {
border-bottom: 1px dotted;
}
/**
* Address style set to `bolder` in Firefox 4+, Safari, and Chrome.
*/
b,
strong {
font-weight: bold;
}
/**
* Address styling not present in Safari and Chrome.
*/
dfn {
font-style: italic;
}
/**
* Address variable `h1` font-size and margin within `section` and `article`
* contexts in Firefox 4+, Safari, and Chrome.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/**
* Address styling not present in IE 8/9.
*/
mark {
background: #ff0;
color: #000;
}
/**
* Address inconsistent and variable font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` affecting `line-height` in all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sup {
top: -0.5em;
}
sub {
bottom: -0.25em;
}
/* Embedded content
========================================================================== */
/**
* Remove border when inside `a` element in IE 8/9/10.
*/
img {
border: 0;
}
/**
* Correct overflow not hidden in IE 9/10/11.
*/
svg:not(:root) {
overflow: hidden;
}
/* Grouping content
========================================================================== */
/**
* Address margin not present in IE 8/9 and Safari.
*/
figure {
margin: 1em 40px;
}
/**
* Address differences between Firefox and other browsers.
*/
hr {
-moz-box-sizing: content-box;
box-sizing: content-box;
height: 0;
}
/**
* Contain overflow in all browsers.
*/
pre {
overflow: auto;
}
/**
* Address odd `em`-unit font size rendering in all browsers.
*/
code,
kbd,
pre,
samp {
font-family: monospace, monospace;
font-size: 1em;
}
/* Forms
========================================================================== */
/**
* Known limitation: by default, Chrome and Safari on OS X allow very limited
* styling of `select`, unless a `border` property is set.
*/
/**
* 1. Correct color not being inherited.
* Known issue: affects color of disabled elements.
* 2. Correct font properties not being inherited.
* 3. Address margins set differently in Firefox 4+, Safari, and Chrome.
*/
button,
input,
optgroup,
select,
textarea {
color: inherit; /* 1 */
font: inherit; /* 2 */
margin: 0; /* 3 */
}
/**
* Address `overflow` set to `hidden` in IE 8/9/10/11.
*/
button {
overflow: visible;
}
/**
* Address inconsistent `text-transform` inheritance for `button` and `select`.
* All other form control elements do not inherit `text-transform` values.
* Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.
* Correct `select` style inheritance in Firefox.
*/
button,
select {
text-transform: none;
}
/**
* 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
* and `video` controls.
* 2. Correct inability to style clickable `input` types in iOS.
* 3. Improve usability and consistency of cursor style between image-type
* `input` and others.
*/
button,
html input[type="button"], /* 1 */
input[type="reset"],
input[type="submit"] {
-webkit-appearance: button; /* 2 */
cursor: pointer; /* 3 */
}
/**
* Re-set default cursor for disabled elements.
*/
button[disabled],
html input[disabled] {
cursor: default;
}
/**
* Remove inner padding and border in Firefox 4+.
*/
button::-moz-focus-inner,
input::-moz-focus-inner {
border: 0;
padding: 0;
}
/**
* Address Firefox 4+ setting `line-height` on `input` using `!important` in
* the UA stylesheet.
*/
input {
line-height: normal;
}
/**
* It's recommended that you don't attempt to style these elements.
* Firefox's implementation doesn't respect box-sizing, padding, or width.
*
* 1. Address box sizing set to `content-box` in IE 8/9/10.
* 2. Remove excess padding in IE 8/9/10.
*/
input[type="checkbox"],
input[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Fix the cursor style for Chrome's increment/decrement buttons. For certain
* `font-size` values of the `input`, it causes the cursor style of the
* decrement button to change from `default` to `text`.
*/
input[type="number"]::-webkit-inner-spin-button,
input[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Address `appearance` set to `searchfield` in Safari and Chrome.
* 2. Address `box-sizing` set to `border-box` in Safari and Chrome
* (include `-moz` to future-proof).
*/
input[type="search"] {
-webkit-appearance: textfield; /* 1 */
-moz-box-sizing: content-box;
-webkit-box-sizing: content-box; /* 2 */
box-sizing: content-box;
}
/**
* Remove inner padding and search cancel button in Safari and Chrome on OS X.
* Safari (but not Chrome) clips the cancel button when the search input has
* padding (and `textfield` appearance).
*/
input[type="search"]::-webkit-search-cancel-button,
input[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* Define consistent border, margin, and padding.
*/
fieldset {
border: 1px solid #c0c0c0;
margin: 0 2px;
padding: 0.35em 0.625em 0.75em;
}
/**
* 1. Correct `color` not being inherited in IE 8/9/10/11.
* 2. Remove padding so people aren't caught out if they zero out fieldsets.
*/
legend {
border: 0; /* 1 */
padding: 0; /* 2 */
}
/**
* Remove default vertical scrollbar in IE 8/9/10/11.
*/
textarea {
overflow: auto;
}
/**
* Don't inherit the `font-weight` (applied by a rule above).
* NOTE: the default cannot safely be changed in Chrome and Safari on OS X.
*/
optgroup {
font-weight: bold;
}
/* Tables
========================================================================== */
/**
* Remove most spacing between table cells.
*/
table {
border-collapse: collapse;
border-spacing: 0;
}
td,
th {
padding: 0;
}
| {
"pile_set_name": "Github"
} |
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Threading.Tasks;
using Pulumi.Serialization;
namespace Pulumi.Aws.WafV2.Inputs
{
public sealed class WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchArgs : Pulumi.ResourceArgs
{
/// <summary>
/// Inspect all query arguments.
/// </summary>
[Input("allQueryArguments")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchAllQueryArgumentsArgs>? AllQueryArguments { get; set; }
/// <summary>
/// Inspect the request body, which immediately follows the request headers.
/// </summary>
[Input("body")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchBodyArgs>? Body { get; set; }
/// <summary>
/// Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
/// </summary>
[Input("method")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchMethodArgs>? Method { get; set; }
/// <summary>
/// Inspect the query string. This is the part of a URL that appears after a `?` character, if any.
/// </summary>
[Input("queryString")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchQueryStringArgs>? QueryString { get; set; }
/// <summary>
/// Inspect a single header. See Single Header below for details.
/// </summary>
[Input("singleHeader")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchSingleHeaderArgs>? SingleHeader { get; set; }
/// <summary>
/// Inspect a single query argument. See Single Query Argument below for details.
/// </summary>
[Input("singleQueryArgument")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchSingleQueryArgumentArgs>? SingleQueryArgument { get; set; }
/// <summary>
/// Inspect the request URI path. This is the part of a web request that identifies a resource, for example, `/images/daily-ad.jpg`.
/// </summary>
[Input("uriPath")]
public Input<Inputs.WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchUriPathArgs>? UriPath { get; set; }
public WebAclRuleStatementNotStatementStatementSizeConstraintStatementFieldToMatchArgs()
{
}
}
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2020 ZUP IT SERVICOS EM TECNOLOGIA E INOVACAO SA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React, { ReactElement } from "react";
import { render, act, fireEvent, cleanup, wait } from "@testing-library/react";
import FormModule from "../";
import { Component as ComponentInterface } from "modules/Modules/interfaces/Component";
import { AllTheProviders } from "unit-test/testUtils";
import { Module, Author } from "modules/Modules/interfaces/Module";
import { Actions, Subjects } from "core/utils/abilities";
import MutationObserver from 'mutation-observer'
(global as any).MutationObserver = MutationObserver
interface fakeCanProps {
I?: Actions;
a?: Subjects;
passThrough?: boolean;
isDisabled?: boolean;
allowedRoutes?: boolean;
children: ReactElement;
}
const fakeAuthor: Author = {
createdAt: "fake-data",
email: "pseudonym@gmail.com",
id: "1",
name: "pseudonym",
photoUrl: "url-photo"
}
const fakeComponent: ComponentInterface = {
id: "fake-id",
name: "fake-name",
latencyThreshold: "30",
errorThreshold: "30",
hostValue: "fakeHost",
gatewayName: "fakeGateway"
};
const fakeModule: Module = {
gitRepositoryAddress: "fake-github",
helmRepository: "fake-api",
id: "1",
name: "fake-module",
author: fakeAuthor,
components: [fakeComponent]
}
const mockOnChange = jest.fn()
jest.mock('containers/Can', () => {
return {
__esModule: true,
default: ({children}: fakeCanProps) => {
return <div>{children}</div>;
}
};
});
test("Test component for edit mode render", async () => {
const { container } = render(
<AllTheProviders>
<FormModule
onChange={mockOnChange}
module={fakeModule}
key={"fake-key"}
/>
</AllTheProviders>
);
await wait()
expect(container.innerHTML).toMatch("Edit module");
});
// When no module is passed, the form should render in "create" mode.
// NOTE: the name previously duplicated the edit-mode test above
// ("Test component for edit mode render"), which made test reports
// ambiguous; it now matches the behaviour actually asserted.
test("Test component for create mode render", async () => {
  const { container } = render(
    <AllTheProviders>
      <FormModule
        onChange={mockOnChange}
        module={null}
        key={"fake-key"}
      />
    </AllTheProviders>
  );

  await wait()

  expect(container.innerHTML).toMatch("Create module");
});
| {
"pile_set_name": "Github"
} |
FROM alpine:edge AS build

# LABEL is the supported replacement for the deprecated MAINTAINER instruction.
LABEL maintainer="Ivan <ivan@zderadicka.eu>"

# NOTE(review): FEATURES is declared but never referenced below — the CMD
# hard-codes `--features static`. Confirm whether CMD should use $FEATURES.
ARG FEATURES

# Install the build tool-chain (C + Rust + ffmpeg dev headers) and create an
# unprivileged user that owns the build.
RUN apk update &&\
    apk add git bash curl yasm build-base \
    wget zlib zlib-dev libbz2 bzip2-dev ffmpeg-dev rust cargo &&\
    adduser -D -u 1000 ivan &&\
    mkdir /src

USER ivan
WORKDIR /src

# Link the C runtime statically so the resulting binary is self-contained.
ENV RUSTFLAGS="-C target-feature=+crt-static"

CMD cargo build --target x86_64-alpine-linux-musl --release --example media_info --features static
| {
"pile_set_name": "Github"
} |
import java.io.IOException;
public class BuilderMultipleConstructorException {
private int first;
private float second;
public BuilderMultipleConstructorException(int i) throws IOException {
throw new IOException("Some Exception");
}
public BuilderMultipleConstructorException(int i, String someString) throws IOException {
throw new IOException("Some other Exception");
}
public BuilderMultipleConstructorException(int first, float second) {
this.first = first;
this.second = second;
}
public static void main(String[] args) {
System.out.println(builder().first(2).second(2.0f).build());
}
public static BuilderMultipleConstructorExceptionBuilder builder() {
return new BuilderMultipleConstructorExceptionBuilder();
}
public static class BuilderMultipleConstructorExceptionBuilder {
private int first;
private float second;
BuilderMultipleConstructorExceptionBuilder() {
}
public BuilderMultipleConstructorExceptionBuilder first(int first) {
this.first = first;
return this;
}
public BuilderMultipleConstructorExceptionBuilder second(float second) {
this.second = second;
return this;
}
public BuilderMultipleConstructorException build() {
return new BuilderMultipleConstructorException(first, second);
}
public String toString() {
return "BuilderMultipleConstructorException.BuilderMultipleConstructorExceptionBuilder(first=" + this.first + ", second=" + this.second + ")";
}
}
}
| {
"pile_set_name": "Github"
} |
// rollup.config.js
// Delegates the Rollup configuration to the shared factory, parameterised by
// the `wcfactory` section of this package's package.json.
import configFactory from "../../scripts/rollup.config.factory.js";
import packageJson from "./package.json";

export default configFactory(packageJson.wcfactory);
| {
"pile_set_name": "Github"
} |
{{ .Files.Get "LICENSES/LICENSE-LI" }}
On first launch, Kibana will take several minutes to complete an optimization phase. During that time you will likely have connection errors.
{{- if eq .Values.kibana.serviceType "NodePort"}}
Kibana is exposed as a NodePort service. To get the URL to access Kibana, run the following commands:
export NODE_IP=$(kubectl cluster-info | grep "catalog" | awk 'match($0, /([0-9]{1,3}\.){3}[0-9]{1,3}/) { print substr( $0, RSTART, RLENGTH )}')
export KIBANA_NODE_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} "{{ .Release.Name }}-ibm-dba-ek-kibana" -o 'jsonpath={.spec.ports[?(@.targetPort=="kibana-ui")].nodePort}')
echo https://$NODE_IP:$KIBANA_NODE_PORT
{{- end }}
{{- if eq .Values.kibana.serviceType "ClusterIP"}}
Kibana is exposed as a ClusterIP service. You cannot access it outside of Kubernetes and you should create an Ingress Controller to expose it externally. The URL to access Kibana is the following:
https://{{ .Release.Name }}-ibm-dba-ek-kibana:5602
{{- end }}
{{- if eq .Values.elasticsearch.client.serviceType "NodePort"}}
Elasticsearch REST API is exposed as a NodePort service. To get the URL to access Elasticsearch REST API, run the following commands:
export NODE_IP=$(kubectl cluster-info | grep "catalog" | awk 'match($0, /([0-9]{1,3}\.){3}[0-9]{1,3}/) { print substr( $0, RSTART, RLENGTH )}')
export ES_NODE_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} "{{ .Release.Name }}-ibm-dba-ek-client" -o 'jsonpath={.spec.ports[?(@.targetPort=="es-rest")].nodePort}')
echo https://$NODE_IP:$ES_NODE_PORT
{{- end }}
{{- if eq .Values.elasticsearch.client.serviceType "ClusterIP"}}
Elasticsearch REST API is exposed as a ClusterIP service. You cannot access it outside of Kubernetes and you should create an Ingress Controller to expose it externally. The URL to access the Elasticsearch REST API is the following:
https://{{ .Release.Name }}-ibm-dba-ek-client:9201
{{- end }}
{{- if eq .Values.security.initOpenDistroConfig true }}
{{- if .Values.security.openDistroConfigSecret }}
You can access Kibana and the Elasticsearch REST API with the credentials that you provided as input through the `openDistroConfigSecret` value.
{{- else }}
You can use the following login/password to authenticate against Kibana and the Elasticsearch REST API:
- admin/passw0rd (admin role)
- demo/demo (readall and kibanauser roles)
{{- end }}
{{- else }}
The Open distro for Elasticsearch security config is unchanged.
{{- end }}
| {
"pile_set_name": "Github"
} |
#ifndef SMARTNET_PARSE_H
#define SMARTNET_PARSE_H
#include "parser.h"
#include "system.h"
#include <iostream>
#include <vector>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/tokenizer.hpp>
#include <stdio.h>
#include <boost/log/trivial.hpp>
// OSW commands range from $000 to $3ff
// Within this range are 4 ranges for channel indicators
// $000-$2f7 (760 channels)
// $32f-$33f ( 17 channels)
// $3be ( 1 channel )
// $3c1-$3fe ( 62 channels)
// Channels are pre-defined for 8/9 trunks. If an OBT trunk, the first range gets
// used to to indicate both inbound and outbound channels, with radio programming
// determining frequencies.
// from $000-$2f7 (760 channels)
// $000-$17b (380 channels) OBT inbound channels
// $17c-$2f7 (380 channels) OBT outbound channels
#define OSW_MIN 0x000
#define OSW_CHAN_BAND_1_MIN 0x000 // Bandplan Range 1, also used for OBT channelization
#define OSW_CHAN_BAND_1_MAX 0x2f7 // Bandplan Range 1, also used for OBT channelization
#define OSW_BACKGROUND_IDLE 0x2f8
#define OSW_FIRST_CODED_PC 0x304
#define OSW_FIRST_NORMAL 0x308
#define OSW_FIRST_TY2AS1 0x309 // Unused - we don't support Type I trunks (or in this case, Type IIi)
#define OSW_EXTENDED_FCN 0x30b
#define OSW_AFFIL_FCN 0x30d
#define OSW_TY2_AFFILIATION 0x310
#define OSW_TY1_STATUS_MIN 0x310 // Unused - we don't support Type I trunks
#define OSW_TY2_MESSAGE 0x311
#define OSW_TY1_STATUS_MAX 0x317 // Unused - we don't support Type I trunks
#define OSW_TY1_ALERT 0x318 // Unused - we don't support Type I trunks
#define OSW_TY1_EMERGENCY 0x319 // Unused - we don't support Type I trunks
#define OSW_TY2_CALL_ALERT 0x319
#define OSW_SECOND_NORMAL 0x320
#define OSW_FIRST_ASTRO 0x321
#define OSW_SYSTEM_CLOCK 0x322
#define OSW_SCAN_MARKER 0x32b
#define OSW_EMERG_ANNC 0x32e
#define OSW_CHAN_BAND_2_MIN 0x32f // Bandplan Range 2
#define OSW_CHAN_BAND_2_MAX 0x33f // Bandplan Range 2
// The following command range ($340-$3bd) wastes 64 commands for AMSS site #
#define OSW_AMSS_ID_MIN 0x360
#define OSW_AMSS_ID_MAX 0x39f
#define OSW_CW_ID 0x3a0
#define OSW_CHAN_BAND_3 0x3be // Bandplan "Range" 3
#define OSW_SYS_NETSTAT 0x3bf
#define OSW_SYS_STATUS 0x3c0
#define OSW_CHAN_BAND_4_MIN 0x3c1 // Bandplan Range 4
#define OSW_CHAN_BAND_4_MAX 0x3fe // Bandplan Range 4
#define OSW_MAX 0x3ff
// Decoded fields of a single Outbound Signaling Word (OSW) taken from the
// SmartNet control channel.
struct osw_stru {
unsigned short cmd; // command word (see OSW_* constants above)
unsigned short id; // ID/channel field accompanying the command
int status;
int full_address;
long address;
int grp; // NOTE(review): presumably group vs. individual addressing flag — confirm in the implementation
};
// Parser for Motorola SmartNet trunking control-channel messages.
// NOTE(review): the stack[5]/numStacked/numConsumed members suggest that
// messages spanning multiple consecutive OSWs are buffered here — confirm
// against the .cc implementation.
class SmartnetParser : public TrunkParser {
int lastcmd; // previous command word seen
long lastaddress; // previous address seen
public:
struct osw_stru stack[5]; // small window of recently received OSWs
short numStacked; // OSWs currently buffered in stack
short numConsumed; // OSWs of the buffered set already processed
SmartnetParser();
void print_osw(std::string s);
// Translate a channel command into an RF frequency for the given system.
double getfreq(int cmd, System *system);
bool is_chan_outbound(int cmd, System *system);
bool is_chan_inbound_obt(int cmd, System *system);
bool is_first_normal(int cmd, System *system);
// Parse one raw control-channel string into zero or more trunk messages.
std::vector<TrunkMessage> parse_message(std::string s, System *system);
};
#endif
| {
"pile_set_name": "Github"
} |
#
# Copyright 1995-2007 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Sun designates this
# particular file as subject to the "Classpath" exception as provided
# by Sun in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
# Generic makefile for building shared libraries.
#
include $(TOPDIR)/make/common/Classes.gmk
#
# It is important to define these *after* including Classes.gmk
# in order to override the values defined inthat makefile.
#
ACTUAL_LIBRARY_NAME = $(LIB_PREFIX)$(LIBRARY).$(LIBRARY_SUFFIX)
ACTUAL_LIBRARY_DIR = $(LIB_LOCATION)
ACTUAL_LIBRARY = $(ACTUAL_LIBRARY_DIR)/$(ACTUAL_LIBRARY_NAME)
library:: $(ACTUAL_LIBRARY)
FILES_o = $(patsubst %.c, %.$(OBJECT_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_c))))
FILES_o += $(patsubst %.s, %.$(OBJECT_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_s))))
FILES_o += $(patsubst %.cpp, %.$(OBJECT_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_cpp))))
ifeq ($(INCREMENTAL_BUILD),true)
FILES_d = $(patsubst %.c, %.$(DEPEND_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_c))))
FILES_d += $(patsubst %.cpp, %.$(DEPEND_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_cpp))))
endif # INCREMENTAL_BUILD
ifeq ($(PLATFORM),solaris)
# List of all lint files, one for each .c file (only for C)
FILES_ln = $(patsubst %.c, %.$(LINT_SUFFIX), $(addprefix $(OBJDIR)/, $(notdir $(FILES_c))))
endif
#
# C++ libraries must be linked with CC.
#
ifdef CPLUSPLUSLIBRARY
LINKER=$(LINK.cc)
else
LINKER=$(LINK.c)
endif
# We either need to import (copy) libraries in, or build them
$(ACTUAL_LIBRARY):: $(INIT) $(TEMPDIR) $(LIBDIR) $(BINDIR) $(EXTDIR) classheaders
#
# COMPILE_APPROACH: Different approaches to compile up the native object
# files as quickly as possible.
# The setting of parallel works best on Unix, batch on Windows.
#
COMPILE_FILES_o = $(OBJDIR)/.files_compiled
$(COMPILE_FILES_o): $(FILES_d) $(FILES_o)
@$(ECHO) "$<" >> $@
clean::
$(RM) $(COMPILE_FILES_o)
#
# COMPILE_APPROACH=parallel: Will trigger compilations (just compilations) to
# happen in parallel. Greatly decreases Unix build time, even on single CPU
# machines, more so on multiple CPU machines. Default is 2 compiles
# at a time, but can be adjusted with ALT_PARALLEL_COMPILE_JOBS.
# Note that each .d file will also be dependent on it's .o file, see
# Rules.gmk.
# Note this does not depend on Rules.gmk to work like batch (below)
# and this technique doesn't seem to help Windows build time nor does
# it work very well, it's possible the Windows Visual Studio compilers
# don't work well in a parallel situation, this needs investigation.
#
ifeq ($(COMPILE_APPROACH),parallel)
.PHONY: library_parallel_compile
library_parallel_compile:
@$(ECHO) "Begin parallel compiles: $(shell $(PWD))"
@$(MAKE) -j $(PARALLEL_COMPILE_JOBS) $(COMPILE_FILES_o)
@$(ECHO) "Done with parallel compiles: $(shell $(PWD))"
$(ACTUAL_LIBRARY):: library_parallel_compile
endif
#
# COMPILE_APPROACH=batch: Will trigger compilations (just compilations) to
# happen in batch mode. Greatly decreases Windows build time.
# See logic in Rules.gmk for how compiles happen, the $(MAKE) in
# library_batch_compile below triggers the actions in Rules.gmk.
# Note that each .d file will also be dependent on it's .o file, see
# Rules.gmk.
#
ifeq ($(COMPILE_APPROACH),batch)
.PHONY: library_batch_compile
library_batch_compile:
@$(ECHO) "Begin BATCH compiles: $(shell $(PWD))"
$(MAKE) $(COMPILE_FILES_o)
$(MAKE) batch_compile
@$(ECHO) "Done with BATCH compiles: $(shell $(PWD))"
$(MAKE) COMPILE_APPROACH=normal $(COMPILE_FILES_o)
$(ACTUAL_LIBRARY):: library_batch_compile
endif
ifeq ($(PLATFORM), windows)
#
# Library building rules.
#
$(LIBRARY).lib:: $(OBJDIR)
# build it into $(OBJDIR) so that the other generated files get put
# there, then copy just the DLL (and MAP file) to the requested directory.
#
$(ACTUAL_LIBRARY):: $(OBJDIR)/$(LIBRARY).lcf
@$(prep-target)
@$(MKDIR) -p $(OBJDIR)
$(LINK) -dll -out:$(OBJDIR)/$(@F) \
-map:$(OBJDIR)/$(LIBRARY).map \
$(LFLAGS) @$(OBJDIR)/$(LIBRARY).lcf \
$(OTHER_LCF) $(JAVALIB) $(LDLIBS)
$(CP) $(OBJDIR)/$(@F) $@
$(CP) $(OBJDIR)/$(LIBRARY).map $(@D)
$(CP) $(OBJDIR)/$(LIBRARY).pdb $(@D)
$(OBJDIR)/$(LIBRARY).lcf: $(OBJDIR)/$(LIBRARY).res $(COMPILE_FILES_o) $(FILES_m)
@$(prep-target)
@$(MKDIR) -p $(TEMPDIR)
@$(ECHO) $(FILES_o) > $@
ifndef LOCAL_RESOURCE_FILE
@$(ECHO) $(OBJDIR)/$(LIBRARY).res >> $@
endif
@$(ECHO) Created $@
RC_FLAGS += /D "J2SE_FNAME=$(LIBRARY).dll" \
/D "J2SE_INTERNAL_NAME=$(LIBRARY)" \
/D "J2SE_FTYPE=0x2L"
$(OBJDIR)/$(LIBRARY).res: $(VERSIONINFO_RESOURCE)
ifndef LOCAL_RESOURCE_FILE
@$(prep-target)
$(RC) $(RC_FLAGS) $(CC_OBJECT_OUTPUT_FLAG)$(@) $(VERSIONINFO_RESOURCE)
endif
#
# Install a .lib file if required.
#
ifeq ($(INSTALL_DOT_LIB), true)
$(ACTUAL_LIBRARY):: $(LIBDIR)/$(LIBRARY).lib
clean::
-$(RM) $(LIBDIR)/$(LIBRARY).lib
$(LIBDIR)/$(LIBRARY).lib:: $(OBJDIR)/$(LIBRARY).lib
$(install-file)
$(LIBDIR)/$(LIBRARY).dll:: $(OBJDIR)/$(LIBRARY).dll
$(install-file)
endif # INSTALL_DOT_LIB
else # PLATFORM
#
# On Solaris, use mcs to write the version into the comment section of
# the shared library. On other platforms set this to false at the
# make command line.
#
$(ACTUAL_LIBRARY):: $(COMPILE_FILES_o) $(FILES_m) $(FILES_reorder)
@$(prep-target)
@$(ECHO) "STATS: LIBRARY=$(LIBRARY), PRODUCT=$(PRODUCT), _OPT=$(_OPT)"
@$(ECHO) "Rebuilding $@ because of $?"
$(LINKER) $(SHARED_LIBRARY_FLAG) -o $@ $(FILES_o) $(LDLIBS)
ifeq ($(WRITE_LIBVERSION),true)
$(MCS) -d -a "$(FULL_VERSION)" $@
endif # WRITE_LIBVERSION
endif # PLATFORM
#
# Cross check all linted files against each other
#
ifeq ($(PLATFORM),solaris)
lint.errors : $(FILES_ln)
$(LINT.c) $(FILES_ln) $(LDLIBS)
endif
#
# Class libraries with JNI native methods get a include to the package.
#
ifdef PACKAGE
vpath %.c $(PLATFORM_SRC)/native/$(PKGDIR)
vpath %.c $(SHARE_SRC)/native/$(PKGDIR)
OTHER_INCLUDES += -I$(SHARE_SRC)/native/common -I$(PLATFORM_SRC)/native/common
OTHER_INCLUDES += -I$(SHARE_SRC)/native/$(PKGDIR) \
-I$(PLATFORM_SRC)/native/$(PKGDIR)
endif
#
# Clean/clobber rules
#
clean::
$(RM) -r $(ACTUAL_LIBRARY)
clobber:: clean
#
# INCREMENTAL_BUILD means that this workspace will be built over and over
# possibly incrementally. This means tracking the object file dependencies
# on include files so that sources get re-compiled when the include files
# change. When building from scratch and doing a one time build (like
# release engineering or nightly builds) set INCREMENTAL_BUILD=false.
#
ifeq ($(INCREMENTAL_BUILD),true)
#
# Workaround: gnumake sometimes says files is empty when it shouldn't
# was: files := $(foreach file, $(wildcard $(OBJDIR)/*.$(DEPEND_SUFFIX)), $(file))
#
files := $(shell $(LS) $(OBJDIR)/*.$(DEPEND_SUFFIX) 2>/dev/null)
#
# Only include these files if we have any.
#
ifneq ($(strip $(files)),)
include $(files)
endif # files
endif # INCREMENTAL_BUILD
#
# Default dependencies
#
all: build
build: library
debug:
$(MAKE) VARIANT=DBG build
fastdebug:
$(MAKE) VARIANT=DBG FASTDEBUG=true build
.PHONY: all build debug fastdebug
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{093EA927-5701-484C-BD7A-4B5EE2AC9D7E}</ProjectGuid>
<OutputType>Exe</OutputType>
<AppDesignerFolder>Properties</AppDesignerFolder>
<RootNamespace>Amerger</RootNamespace>
<AssemblyName>Amerger</AssemblyName>
<TargetFrameworkVersion>v4.6.1</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<AutoGenerateBindingRedirects>true</AutoGenerateBindingRedirects>
<TargetFrameworkProfile />
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="Microsoft.Msagl">
<HintPath>..\..\support\Microsoft.Msagl.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Msagl.Drawing">
<HintPath>..\..\support\Microsoft.Msagl.Drawing.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Msagl.GraphmapsWpfControl">
<HintPath>..\..\support\Microsoft.Msagl.GraphmapsWpfControl.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Msagl.GraphViewerGdi">
<HintPath>..\..\support\Microsoft.Msagl.GraphViewerGdi.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Xml.Linq" />
<Reference Include="System.Data.DataSetExtensions" />
<Reference Include="Microsoft.CSharp" />
<Reference Include="System.Data" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="Program.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
</ItemGroup>
<ItemGroup>
<None Include="App.config" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\prep\Agasm\Agasm.vcxproj">
<Project>{42b676b2-f551-49e7-9b3b-bb85b02f15ac}</Project>
<Name>Agasm</Name>
</ProjectReference>
<ProjectReference Include="..\Dia2Sharp\Dia2Sharp.csproj">
<Project>{fb72e2ee-7b32-4879-9b2b-35fb3cdf0fcc}</Project>
<Name>Dia2Sharp</Name>
</ProjectReference>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
</Project> | {
"pile_set_name": "Github"
} |
/******************************************************************************
* Tis function converts HSV values to RGB values, scaled from 0 to maxBrightness
*
* The ranges for the input variables are:
* hue: 0-360
* sat: 0-255
* lig: 0-255
*
* The ranges for the output variables are:
* r: 0-maxBrightness
* g: 0-maxBrightness
* b: 0-maxBrightness
*
* r,g, and b are passed as pointers, because a function cannot have 3 return variables
* Use it like this:
* int hue, sat, val;
* unsigned char red, green, blue;
* // set hue, sat and val
* hsv2rgb(hue, sat, val, &red, &green, &blue, maxBrightness); //pass r, g, and b as the location where the result should be stored
* // use r, b and g.
*
* (c) Elco Jacobs, E-atelier Industrial Design TU/e, July 2011.
*
*****************************************************************************/
void hsv2rgb(unsigned int hue, unsigned int sat, unsigned int val,
             unsigned char * r, unsigned char * g, unsigned char * b, unsigned char maxBrightness ) {
    /* The documented hue range is 0-360 inclusive, but hue == 360 would give
     * H_accent == 6, which matches no switch case below and leaves *r, *g, *b
     * unwritten (the caller would read uninitialized bytes). Wrap hue into
     * [0, 360) so 360 maps to 0 (pure red), and any larger value wraps too. */
    hue %= 360;
    unsigned int H_accent = hue/60;                 /* sextant (0..5) of the colour wheel */
    unsigned int bottom = ((255 - sat) * val)>>8;   /* lowest channel value at this saturation */
    unsigned int top = val;                         /* highest channel value */
    unsigned char rising  = ((top-bottom) *(hue%60   ) )  /  60  +  bottom;  /* channel ramping up within the sextant   */
    unsigned char falling = ((top-bottom) *(60-hue%60) )  /  60  +  bottom;  /* channel ramping down within the sextant */

    /* Assign top/rising/falling/bottom to R, G, B depending on the sextant. */
    switch(H_accent) {
    case 0:
        *r = top;
        *g = rising;
        *b = bottom;
        break;

    case 1:
        *r = falling;
        *g = top;
        *b = bottom;
        break;

    case 2:
        *r = bottom;
        *g = top;
        *b = rising;
        break;

    case 3:
        *r = bottom;
        *g = falling;
        *b = top;
        break;

    case 4:
        *r = rising;
        *g = bottom;
        *b = top;
        break;

    case 5:
        *r = top;
        *g = bottom;
        *b = falling;
        break;
    }
    /* Scale values from 0..255 down to 0..maxBrightness. */
    *r = *r * maxBrightness/255;
    *g = *g * maxBrightness/255;
    *b = *b * maxBrightness/255;
}
| {
"pile_set_name": "Github"
} |
//
// Prefix header for all source files of the 'HelloWorld' target in the 'HelloWorld' project
//
#ifdef __OBJC__
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#endif
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++98, c++03
// <unordered_set>
// void swap(unordered_set& c)
// noexcept(
// (!allocator_type::propagate_on_container_swap::value ||
// __is_nothrow_swappable<allocator_type>::value) &&
// __is_nothrow_swappable<hasher>::value &&
// __is_nothrow_swappable<key_equal>::value);
//
// In C++17, the standard says that swap shall have:
// noexcept(allocator_traits<Allocator>::is_always_equal::value &&
// noexcept(swap(declval<Hash&>(), declval<Hash&>())) &&
// noexcept(swap(declval<Pred&>(), declval<Pred&>())));
// This tests a conforming extension
#include <unordered_set>
#include <utility>
#include <cassert>
#include "test_macros.h"
#include "MoveOnly.h"
#include "test_allocator.h"
// Key-equality predicate that is NOT nothrow-swappable: it has a
// user-provided (non-noexcept) copy constructor and no noexcept swap
// overload, so swapping it may throw — container swap must not be noexcept.
template <class T>
struct some_comp
{
    typedef T value_type;
    some_comp() {}
    some_comp(const some_comp&) {}
    bool operator()(const T&, const T&) const { return false; }
};
// Key-equality predicate that IS nothrow-swappable in C++14 and later,
// thanks to the dedicated noexcept ADL swap overload declared below.
template <class T>
struct some_comp2
{
    typedef T value_type;
    some_comp2() {}
    some_comp2(const some_comp2&) {}
    bool operator()(const T&, const T&) const { return false; }
};

#if TEST_STD_VER >= 14
// Nothrow swap for some_comp2, found by argument-dependent lookup.
template <typename T>
void swap(some_comp2<T>&, some_comp2<T>&) noexcept {}
#endif
// Hash functor that is NOT nothrow-swappable: its copy constructor is only
// declared (never defined, and not noexcept) and no noexcept swap overload
// exists, so swapping it is considered potentially throwing.
template <class T>
struct some_hash
{
    typedef T value_type;
    some_hash() {}
    some_hash(const some_hash&);
    std::size_t operator()(const T&) const { return 0; }
};
// Hash functor that IS nothrow-swappable in C++14 and later via the
// dedicated noexcept ADL swap overload declared below.
template <class T>
struct some_hash2
{
    typedef T value_type;
    some_hash2() {}
    some_hash2(const some_hash2&);
    std::size_t operator()(const T&) const { return 0; }
};

#if TEST_STD_VER >= 14
// Nothrow swap for some_hash2, found by argument-dependent lookup.
template <typename T>
void swap(some_hash2<T>&, some_hash2<T>&) noexcept {}
#endif
// Allocator that propagates on container swap (POCS) and whose own swap may
// throw (user-provided, undefined copy constructor; no nothrow swap), so a
// container using it must swap allocators and cannot declare swap noexcept.
template <class T>
struct some_alloc
{
    typedef T value_type;

    some_alloc() {}
    some_alloc(const some_alloc&);
    void deallocate(void*, unsigned) {}

    typedef std::true_type propagate_on_container_swap;
};
// Allocator that does NOT propagate on container swap and is always equal,
// so container swap never needs to swap allocator objects; the allocator
// therefore cannot make swap potentially throwing.
template <class T>
struct some_alloc2
{
    typedef T value_type;

    some_alloc2() {}
    some_alloc2(const some_alloc2&);
    void deallocate(void*, unsigned) {}

    typedef std::false_type propagate_on_container_swap;
    typedef std::true_type is_always_equal;
};
// Allocator that neither propagates on container swap nor is always equal.
// Used only in the _LIBCPP_VERSION-guarded case in main(): libc++ (as a
// conforming extension) still treats swap as noexcept for this combination.
template <class T>
struct some_alloc3
{
    typedef T value_type;

    some_alloc3() {}
    some_alloc3(const some_alloc3&);
    void deallocate(void*, unsigned) {}

    typedef std::false_type propagate_on_container_swap;
    typedef std::false_type is_always_equal;
};
// Each block below instantiates std::unordered_set with one combination of
// hash / key-equality / allocator types and static_asserts the resulting
// noexcept specification of the non-member swap. Blocks guarded by
// _LIBCPP_VERSION check libc++'s conforming extension beyond the standard's
// minimum requirements.
int main(int, char**)
{
    {
        // Default hash, predicate and allocator: swap must be noexcept.
        typedef std::unordered_set<MoveOnly> C;
        static_assert(noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
#if defined(_LIBCPP_VERSION)
    {
        typedef std::unordered_set<MoveOnly, std::hash<MoveOnly>,
                           std::equal_to<MoveOnly>, test_allocator<MoveOnly>> C;
        static_assert(noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    {
        typedef std::unordered_set<MoveOnly, std::hash<MoveOnly>,
                          std::equal_to<MoveOnly>, other_allocator<MoveOnly>> C;
        static_assert(noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
#endif // _LIBCPP_VERSION
    {
        // Throwing-swap hash makes container swap potentially throwing.
        typedef std::unordered_set<MoveOnly, some_hash<MoveOnly>> C;
        static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    {
        // Throwing-swap predicate makes container swap potentially throwing.
        typedef std::unordered_set<MoveOnly, std::hash<MoveOnly>,
                                         some_comp<MoveOnly>> C;
        static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }

#if TEST_STD_VER >= 14
    { // POCS allocator, throwable swap for hash, throwable swap for comp
    typedef std::unordered_set<MoveOnly, some_hash<MoveOnly>, some_comp <MoveOnly>, some_alloc <MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // always equal allocator, throwable swap for hash, throwable swap for comp
    typedef std::unordered_set<MoveOnly, some_hash<MoveOnly>, some_comp <MoveOnly>, some_alloc2<MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // POCS allocator, throwable swap for hash, nothrow swap for comp
    typedef std::unordered_set<MoveOnly, some_hash<MoveOnly>, some_comp2<MoveOnly>, some_alloc <MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // always equal allocator, throwable swap for hash, nothrow swap for comp
    typedef std::unordered_set<MoveOnly, some_hash<MoveOnly>, some_comp2<MoveOnly>, some_alloc2<MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // POCS allocator, nothrow swap for hash, throwable swap for comp
    typedef std::unordered_set<MoveOnly, some_hash2<MoveOnly>, some_comp <MoveOnly>, some_alloc <MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // always equal allocator, nothrow swap for hash, throwable swap for comp
    typedef std::unordered_set<MoveOnly, some_hash2<MoveOnly>, some_comp <MoveOnly>, some_alloc2<MoveOnly>> C;
    static_assert(!noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // POCS allocator, nothrow swap for hash, nothrow swap for comp
    typedef std::unordered_set<MoveOnly, some_hash2<MoveOnly>, some_comp2<MoveOnly>, some_alloc <MoveOnly>> C;
    static_assert( noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
    { // always equal allocator, nothrow swap for hash, nothrow swap for comp
    typedef std::unordered_set<MoveOnly, some_hash2<MoveOnly>, some_comp2<MoveOnly>, some_alloc2<MoveOnly>> C;
    static_assert( noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
#if defined(_LIBCPP_VERSION)
    { // NOT always equal allocator, nothrow swap for hash, nothrow swap for comp
    typedef std::unordered_set<MoveOnly, some_hash2<MoveOnly>, some_comp2<MoveOnly>, some_alloc3<MoveOnly>> C;
    static_assert( noexcept(swap(std::declval<C&>(), std::declval<C&>())), "");
    }
#endif // _LIBCPP_VERSION
#endif

  return 0;
}
| {
"pile_set_name": "Github"
} |
#import <Foundation/Foundation.h>
@class Configuration;
/**
Subclass QuickConfiguration and override the +[QuickConfiguration configure:]
method in order to configure how Quick behaves when running specs, or to define
shared examples that are used across spec files.
*/
@interface QuickConfiguration : NSObject

/**
 This method is executed on each subclass of this class before Quick runs
 any examples. You may override this method on as many subclasses as you like, but
 there is no guarantee as to the order in which these methods are executed.

 You can override this method in order to:

 1. Configure how Quick behaves, by modifying properties on the Configuration object.
    Setting the same properties in several methods has undefined behavior.

 2. Define shared examples using `sharedExamples`.

 @param configuration A mutable object that is used to configure how Quick behaves on
                      a framework level. For details on all the options, see the
                      documentation in Configuration.swift.
 */
// Note: declared as a class (+) method — subclasses override it at the class
// level rather than on an instance.
+ (void)configure:(Configuration *)configuration;

@end
| {
"pile_set_name": "Github"
} |
# muchtrans
[](https://app.netlify.com/sites/muchtrans/deploys)
Muchtrans is yet another translation platform, currently under heavy development.
<br>Muchtrans는 한창 개발하고 있는 또 다른 번역 플랫폼입니다.
## Why muchtrans? / 왜 muchtrans 인가요?
I've translated English articles into Korean for years. During the translation process, I realized the importance of showing the original content alongside the translated paragraphs. It helps readers spot wrong translations and lets them work together toward better translations. By reading the original content side-by-side, you can improve your language skills at the same time as well.
<br>지난 몇 년간 영어 문서를 한국어로 번역해왔습니다. 번역하는 과정에서 원본 콘텐츠를 번역된 것과 함께 보여주는 것의 중요성을 파악했습니다. 이를 통해 독자가 잘못된 번역을 쉽게 찾을 수 있도록 하고 함께 더 나은 번역을 가능하게 합니다. 원본 콘텐츠를 함께 읽으므로 외국어 실력도 동시에 향상시킬 수 있습니다.
So, I made it like this.
<br>그래서 이렇게 만들었습니다.
The second reason is recording translators and their contributions in a clear way. Text files can be tracked well by Git, so I chose it as a tracker. By using Git, we get free benefits from its toolchain and ecosystems like GitHub. We can trace the translation history just by reading the Git commit history.
<br>두번째 이유는 번역자와 그들의 기여를 명확한 방법으로 기록하기 위해서입니다. 텍스트 파일은 Git으로 잘 관리할 수 있으므로 Git을 선택했습니다. Git을 쓰면 그것의 툴체인과 에코시스템을 공짜로 사용할 수도 있습니다. Git 커밋 기록만 읽어도 번역 이력을 따라갈 수 있습니다.
## To add an article / 문서를 추가하려면
If you're familiar with Git and GitHub, then create a pull request with a new article(e.g. `new_article.md`) and the translation file(e.g. `new_article.ko.md`) within `articles` directory. [Netlify](https://netlify.com) will build it automatically and generate intermediate site for verification.
<br>Git과 GitHub에 익숙한 분이라면, 새로운 문서(예: `new_article.md`)와 번역 파일(예: `new_article.ko.md`)를 함께 `articles` 디렉토리에 안에 생성한 후 풀 리퀘스트로 작성해주시면 됩니다. [Netlify](https://netlify.com)가 자동으로 빌드해서 확인용 임시 사이트를 만들어줍니다.
Or, just leave a URL with some explanation why it should be translated on [GitHub Issue](https://github.com/zerobased-co/muchtrans/issues/9).
<br>아니면, URL과 함께 해당 문서가 번역되어야 하는 이유를 [GitHub Issue](https://github.com/zerobased-co/muchtrans/issues/9)에 남겨주세요.
## To build muchtrans / muchtrans를 빌드하려면
On your Python environments,
<br>당신의 파이썬 환경에서,
```shell
$ pip install -r requirements.txt
$ python build.py
$ python dev.py
```
Then connect to `http://localhost:4000` in your favourite web browser.
<br>그리고 좋아하는 웹 브라우저에서 `http://localhost:4000`으로 접속하시면 됩니다.
## License
Original articles may have their own license. Translated articles can be published under `MIT license` as same as muchtrans codes.
<br>원본 문서는 각자의 라이센스를 가지고 있을 수 있습니다. 번역된 문서는 muchtrans 코드와 마찬가지로 `MIT License`를 통해 전달됩니다.
| {
"pile_set_name": "Github"
} |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
/*
* DlgPenyakit.java
*
* Created on May 23, 2010, 12:57:16 AM
*/
package khanzaantrianpoli;
import fungsi.WarnaTable;
import fungsi.batasInput;
import fungsi.koneksiDB;
import fungsi.sekuel;
import fungsi.validasi;
import fungsi.var;
import java.awt.Cursor;
import java.awt.Dimension;
import java.awt.event.KeyEvent;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import javax.swing.JTable;
import javax.swing.event.DocumentEvent;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableColumn;
/**
 * Modal search dialog listing polyclinics/units (kode unit, nama unit,
 * biaya registrasi) from the {@code poliklinik} table, excluding 'IGDK'.
 * The list filters live as the user types; callers read the selection
 * from the table returned by {@link #getTable()} after the dialog closes.
 *
 * @author dosen
 */
public final class DlgCariPoli extends javax.swing.JDialog {
    // Non-editable model backing the result table (see isCellEditable below).
    private final DefaultTableModel tabMode;
    private sekuel Sequel=new sekuel();
    private validasi Valid=new validasi();
    private Connection koneksi=koneksiDB.condb();
    // Search statement: prepared once in the constructor, reused by tampil().
    private PreparedStatement ps;
    private ResultSet rs;

    /** Creates new form DlgPenyakit
     * @param parent owner frame of this modal dialog
     * @param modal whether the dialog blocks input to other windows */
    public DlgCariPoli(java.awt.Frame parent, boolean modal) {
        super(parent, modal);
        initComponents();
        this.setLocation(10,2);
        setSize(656,250);
        // Three fixed, read-only columns for the search results.
        Object[] row={"Kode Unit","Nama Unit","Biaya Registrasi"};
        tabMode=new DefaultTableModel(null,row){
            @Override public boolean isCellEditable(int rowIndex, int colIndex){return false;}
        };
        tbKamar.setModel(tabMode);
        //tbPenyakit.setDefaultRenderer(Object.class, new WarnaTable(panelJudul.getBackground(),tbPenyakit.getBackground()));
        tbKamar.setPreferredScrollableViewportSize(new Dimension(500,500));
        tbKamar.setAutoResizeMode(JTable.AUTO_RESIZE_OFF);
        // Fixed pixel widths: 100 (code), 300 (name), 250 (registration fee).
        for (int i = 0; i < 3; i++) {
            TableColumn column = tbKamar.getColumnModel().getColumn(i);
            if(i==0){
                column.setPreferredWidth(100);
            }else if(i==1){
                column.setPreferredWidth(300);
            }else if(i==2){
                column.setPreferredWidth(250);
            }
        }
        tbKamar.setDefaultRenderer(Object.class, new WarnaTable());
        // Limit the keyword box to 100 characters and re-run the search on
        // every document change so filtering happens live while typing.
        TCari.setDocument(new batasInput((byte)100).getKata(TCari));
        TCari.getDocument().addDocumentListener(new javax.swing.event.DocumentListener(){
            @Override
            public void insertUpdate(DocumentEvent e) {
                tampil();
            }
            @Override
            public void removeUpdate(DocumentEvent e) {
                tampil();
            }
            @Override
            public void changedUpdate(DocumentEvent e) {
                tampil();
            }
        });
        // The keyword is matched against code, name or registration fee;
        // 'IGDK' is excluded by repeating the condition in every OR branch.
        try {
            ps=koneksi.prepareStatement("select kd_poli, nm_poli, registrasi "+
                    " from poliklinik where kd_poli<>'IGDK' and kd_poli like ? or "+
                    " kd_poli<>'IGDK' and nm_poli like ? or "+
                    " kd_poli<>'IGDK' and registrasi like ? order by nm_poli");
        } catch (Exception e) {
            // NOTE(review): a prepare failure is silently swallowed here,
            // leaving ps null; tampil() would then throw an uncaught
            // NullPointerException (its catch only handles SQLException).
        }
    }

    /** This method is called from within the constructor to
     * initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is
     * always regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {

        internalFrame1 = new widget.InternalFrame();
        Scroll = new widget.ScrollPane();
        tbKamar = new widget.Table();
        panelisi3 = new widget.panelisi();
        label9 = new widget.Label();
        TCari = new widget.TextBox();
        BtnCari = new widget.Button();
        BtnAll = new widget.Button();
        label10 = new widget.Label();
        LCount = new widget.Label();
        BtnKeluar = new widget.Button();

        setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);
        setUndecorated(true);
        setResizable(false);
        addWindowListener(new java.awt.event.WindowAdapter() {
            public void windowActivated(java.awt.event.WindowEvent evt) {
                formWindowActivated(evt);
            }
            public void windowOpened(java.awt.event.WindowEvent evt) {
                formWindowOpened(evt);
            }
        });

        internalFrame1.setBorder(javax.swing.BorderFactory.createTitledBorder(javax.swing.BorderFactory.createLineBorder(new java.awt.Color(240, 245, 235)), "::[ Unit/Poliklinik ]::", javax.swing.border.TitledBorder.DEFAULT_JUSTIFICATION, javax.swing.border.TitledBorder.DEFAULT_POSITION, new java.awt.Font("Tahoma", 0, 11), new java.awt.Color(50, 70, 40))); // NOI18N
        internalFrame1.setName("internalFrame1"); // NOI18N
        internalFrame1.setLayout(new java.awt.BorderLayout(1, 1));

        Scroll.setName("Scroll"); // NOI18N
        Scroll.setOpaque(true);

        tbKamar.setAutoCreateRowSorter(true);
        tbKamar.setToolTipText("Silahkan klik untuk memilih data yang mau diedit ataupun dihapus");
        tbKamar.setName("tbKamar"); // NOI18N
        tbKamar.addMouseListener(new java.awt.event.MouseAdapter() {
            public void mouseClicked(java.awt.event.MouseEvent evt) {
                tbKamarMouseClicked(evt);
            }
        });
        tbKamar.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                tbKamarKeyPressed(evt);
            }
        });
        Scroll.setViewportView(tbKamar);

        internalFrame1.add(Scroll, java.awt.BorderLayout.CENTER);

        panelisi3.setName("panelisi3"); // NOI18N
        panelisi3.setPreferredSize(new java.awt.Dimension(100, 43));
        panelisi3.setLayout(new java.awt.FlowLayout(java.awt.FlowLayout.LEFT, 4, 9));

        label9.setText("Key Word :");
        label9.setName("label9"); // NOI18N
        label9.setPreferredSize(new java.awt.Dimension(68, 23));
        panelisi3.add(label9);

        TCari.setName("TCari"); // NOI18N
        TCari.setPreferredSize(new java.awt.Dimension(312, 23));
        TCari.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                TCariKeyPressed(evt);
            }
        });
        panelisi3.add(TCari);

        BtnCari.setIcon(new javax.swing.ImageIcon(getClass().getResource("/picture/accept.png"))); // NOI18N
        BtnCari.setMnemonic('1');
        BtnCari.setToolTipText("Alt+1");
        BtnCari.setName("BtnCari"); // NOI18N
        BtnCari.setPreferredSize(new java.awt.Dimension(28, 23));
        BtnCari.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                BtnCariActionPerformed(evt);
            }
        });
        BtnCari.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                BtnCariKeyPressed(evt);
            }
        });
        panelisi3.add(BtnCari);

        BtnAll.setIcon(new javax.swing.ImageIcon(getClass().getResource("/picture/Search-16x16.png"))); // NOI18N
        BtnAll.setMnemonic('2');
        BtnAll.setToolTipText("2Alt+2");
        BtnAll.setName("BtnAll"); // NOI18N
        BtnAll.setPreferredSize(new java.awt.Dimension(28, 23));
        BtnAll.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                BtnAllActionPerformed(evt);
            }
        });
        BtnAll.addKeyListener(new java.awt.event.KeyAdapter() {
            public void keyPressed(java.awt.event.KeyEvent evt) {
                BtnAllKeyPressed(evt);
            }
        });
        panelisi3.add(BtnAll);

        label10.setText("Record :");
        label10.setName("label10"); // NOI18N
        label10.setPreferredSize(new java.awt.Dimension(60, 23));
        panelisi3.add(label10);

        LCount.setHorizontalAlignment(javax.swing.SwingConstants.LEFT);
        LCount.setText("0");
        LCount.setName("LCount"); // NOI18N
        LCount.setPreferredSize(new java.awt.Dimension(50, 23));
        panelisi3.add(LCount);

        BtnKeluar.setIcon(new javax.swing.ImageIcon(getClass().getResource("/picture/exit.png"))); // NOI18N
        BtnKeluar.setMnemonic('4');
        BtnKeluar.setToolTipText("Alt+4");
        BtnKeluar.setName("BtnKeluar"); // NOI18N
        BtnKeluar.setPreferredSize(new java.awt.Dimension(28, 23));
        BtnKeluar.addActionListener(new java.awt.event.ActionListener() {
            public void actionPerformed(java.awt.event.ActionEvent evt) {
                BtnKeluarActionPerformed(evt);
            }
        });
        panelisi3.add(BtnKeluar);

        internalFrame1.add(panelisi3, java.awt.BorderLayout.PAGE_END);

        getContentPane().add(internalFrame1, java.awt.BorderLayout.CENTER);

        pack();
    }// </editor-fold>//GEN-END:initComponents

    // ENTER runs the search; PageDown/PageUp/Up move keyboard focus around.
    private void TCariKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_TCariKeyPressed
        if(evt.getKeyCode()==KeyEvent.VK_ENTER){
            BtnCariActionPerformed(null);
        }else if(evt.getKeyCode()==KeyEvent.VK_PAGE_DOWN){
            BtnCari.requestFocus();
        }else if(evt.getKeyCode()==KeyEvent.VK_PAGE_UP){
            BtnKeluar.requestFocus();
        }else if(evt.getKeyCode()==KeyEvent.VK_UP){
            tbKamar.requestFocus();
        }
    }//GEN-LAST:event_TCariKeyPressed

    // Search button: re-run the query with the current keyword.
    private void BtnCariActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_BtnCariActionPerformed
        tampil();
    }//GEN-LAST:event_BtnCariActionPerformed

    private void BtnCariKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_BtnCariKeyPressed
        if(evt.getKeyCode()==KeyEvent.VK_SPACE){
            BtnCariActionPerformed(null);
        }else{
            Valid.pindah(evt, TCari, BtnAll);
        }
    }//GEN-LAST:event_BtnCariKeyPressed

    // "All" button: clear the keyword and show every row.
    private void BtnAllActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_BtnAllActionPerformed
        TCari.setText("");
        tampil();
    }//GEN-LAST:event_BtnAllActionPerformed

    private void BtnAllKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_BtnAllKeyPressed
        if(evt.getKeyCode()==KeyEvent.VK_SPACE){
            BtnAllActionPerformed(null);
        }else{
            Valid.pindah(evt, BtnCari, TCari);
        }
    }//GEN-LAST:event_BtnAllKeyPressed

    // Double-click on a row confirms the selection by closing the dialog.
    private void tbKamarMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_tbKamarMouseClicked
        if(tabMode.getRowCount()!=0){
            if(evt.getClickCount()==2){
                dispose();
            }
        }
    }//GEN-LAST:event_tbKamarMouseClicked

    private void BtnKeluarActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_BtnKeluarActionPerformed
        dispose();
    }//GEN-LAST:event_BtnKeluarActionPerformed

    // SPACE confirms the selected row; SHIFT jumps back to an empty search box.
    private void tbKamarKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_tbKamarKeyPressed
        if(tabMode.getRowCount()!=0){
            if(evt.getKeyCode()==KeyEvent.VK_SPACE){
                dispose();
            }else if(evt.getKeyCode()==KeyEvent.VK_SHIFT){
                TCari.setText("");
                TCari.requestFocus();
            }
        }
    }//GEN-LAST:event_tbKamarKeyPressed

    private void formWindowActivated(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowActivated
        emptTeks();
    }//GEN-LAST:event_formWindowActivated

    // Populate the table as soon as the dialog is first shown.
    private void formWindowOpened(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_formWindowOpened
        tampil();
    }//GEN-LAST:event_formWindowOpened

    /**
     * Standalone launcher for manually testing the dialog.
     * @param args the command line arguments
     */
    public static void main(String args[]) {
        java.awt.EventQueue.invokeLater(() -> {
            DlgCariPoli dialog = new DlgCariPoli(new javax.swing.JFrame(), true);
            dialog.addWindowListener(new java.awt.event.WindowAdapter() {
                @Override
                public void windowClosing(java.awt.event.WindowEvent e) {
                    System.exit(0);
                }
            });
            dialog.setVisible(true);
        });
    }

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private widget.Button BtnAll;
    private widget.Button BtnCari;
    private widget.Button BtnKeluar;
    private widget.Label LCount;
    private widget.ScrollPane Scroll;
    private widget.TextBox TCari;
    private widget.InternalFrame internalFrame1;
    private widget.Label label10;
    private widget.Label label9;
    private widget.panelisi panelisi3;
    private widget.Table tbKamar;
    // End of variables declaration//GEN-END:variables

    /**
     * Clears the table, re-runs the prepared search with the current keyword
     * bound (wrapped in SQL LIKE wildcards) to all three placeholders, and
     * refreshes the record counter label.
     */
    private void tampil() {
        Valid.tabelKosong(tabMode);
        try{
            ps.setString(1,"%"+TCari.getText().trim()+"%");
            ps.setString(2,"%"+TCari.getText().trim()+"%");
            ps.setString(3,"%"+TCari.getText().trim()+"%");
            rs=ps.executeQuery();
            while(rs.next()){
                tabMode.addRow(new Object[]{rs.getString(1),rs.getString(2),Valid.SetAngka(rs.getDouble(3))});
            }
        }catch(SQLException e){
            System.out.println("Error : "+e);
        }
        // NOTE(review): rs is never closed explicitly; re-executing ps closes
        // the previous ResultSet, but the last one lives until the dialog dies.
        LCount.setText(""+tabMode.getRowCount());
    }

    /** Moves keyboard focus to the search field. */
    public void emptTeks() {
        TCari.requestFocus();
    }

    /** @return the result table, so callers can read the selected row. */
    public JTable getTable(){
        return tbKamar;
    }
}
| {
"pile_set_name": "Github"
} |
{
"type": "inspec_report",
"node_uuid": "34cbbb4c-c502-4971-b193-00e987b4678c",
"report_uuid": "44024b50-2e0d-42fa-a57c-dddddddddddd",
"job_uuid": "12345678-1234-123e-b12e-222222222222",
"node_name": "debian(2)-zeta-linux(f)-apache(p)-failed",
"environment": "DevSec Prod Zeta",
"roles": ["base_deb", "apache_deb", "debian-hardening-prod", "dot.role"],
"recipes": [],
"end_time": "2018-02-09T09:18:41Z",
"version": "2.1.10",
"platform": {
"name": "debian",
"release": "8.7"
},
"statistics": {
"duration": 0.636833
},
"other_checks": [],
"policy_name": "",
"policy_group": "",
"organization_name": "",
"source_fqdn": "localhost",
"chef_tags": [],
"ipaddress": "192.168.56.33",
"fqdn": "lb-deb.example.com",
"profiles": [
{
"name": "linux-baseline",
"title": "DevSec Linux Security Baseline",
"version": "2.0.1",
"sha256": "b53ca05fbfe17a36363a40f3ad5bd70aa20057eaf15a9a9a8124a84d4ef08015",
"summary": "Test-suite for best-preactice os hardening",
"maintainer": "",
"license": "",
"copyright": "Hardening Framework Team",
"copyright_email": "hello@hardening.io",
"status": "loaded",
"attributes": [
{
"name": "role_name",
"options": {
"default": "base",
"description": "Chef Role"
}
},
{
"name": "profile_id",
"options": {
"default": 1,
"description": "An int id"
}
},
{
"name": "do.this?",
"options": {
"default": true,
"description": "A bool flag"
}
},
{
"name": "take_this",
"options": {
"default": [
"oh",
"hear"
],
"description": "A bloody array"
}
},
{
"name": "bloody_hash",
"options": {
"default": {
"oh": "god"
}
}
},
{
"name": "no_default",
"options": {
"description": "Default is for lazies!"
}
}
],
"controls": [
{
"id": "os-01",
"code": "control 'os-01' do\n impact 1.0\n title 'Trusted hosts login'\n desc \"Rhosts/hosts.equiv files are a weak implemenation of authentication. Disabling the .rhosts and hosts.equiv support helps to prevent users from subverting the system's normal access control mechanisms of the system.\"\n describe command('find / -name \\'.rhosts\\'') do\n its('stdout') { should be_empty }\n end\n describe command('find / -name \\'hosts.equiv\\' ') do\n its('stdout') { should be_empty }\n end\n tag 'web'\n tag 'scope': 'OS'\n tag 'gtitle': 'TitleVal'\n tag 'satisfies': ['SRG-00006', 'SRG-00007']\n tag 'stig_id': 'RHEL-07-010050'\n tag 'cci': ['CCI-000048']\n tag 'hashhash': { \"bad.one\": [6] }\n tag 'documentable': false\n tag 'our_criticality': 8\nend\n ",
"desc": "Rhosts/hosts.equiv files are a weak implemenation of authentication. Disabling the .rhosts and hosts.equiv support helps to prevent users from subverting the system's normal access control mechanisms of the system.",
"descriptions": [
{
"label": "default",
"data": "Rhosts/hosts.equiv files are a weak implemenation of authentication. Disabling the .rhosts and hosts.equiv support helps to prevent users from subverting the system's normal access control mechanisms of the system."
},
{
"label": "en",
"data": "Some english"
},
{
"label": "de",
"data": "Some german"
}
],
"impact": 1,
"title": "Trusted hosts login",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 21
},
"refs": [],
"tags": {
"web": null,
"scope": "apalache",
"gtitle": "TitleVal",
"satisfies": [
"SRG-00006",
"SRG-00007"
],
"stig_id": "RHEL-07-010050",
"cci": ["CCI-000048"],
"hashhash": {
"bad.one": [6]
},
"documentable": false,
"our_criticality": 8
},
"results": [
{
"status": "passed",
"code_desc": "Command find / -name '.rhosts' stdout should be empty",
"run_time": 0.062734,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Command find / -name 'hosts.equiv' stdout should be empty",
"run_time": 0.06203,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "os-02",
"code": "control 'os-02' do\n impact 1.0\n title 'Check owner and permissions for /etc/shadow'\n desc 'Check periodically the owner and permissions for /etc/shadow'\n describe file('/etc/shadow') do\n it { should exist }\n it { should be_file }\n it { should be_owned_by 'root' }\n its('group') { should eq 'root' }\n it { should_not be_executable }\n it { should be_writable.by('owner') }\n it { should be_readable.by('owner') }\n it { should_not be_readable.by('group') }\n it { should_not be_readable.by('other') }\n end\n tag 'tag1': 'value1'\nend\n",
"desc": "Check periodically the owner and permissions for /etc/shadow",
"impact": 1,
"title": "Check owner and permissions for /etc/shadow",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 33
},
"refs": [],
"tags": { "tag1": "value1" },
"waiver_data": {
"justification": "Sound reasoning",
"run": true,
"skipped_due_to_waiver": false,
"message": ""
},
"results": [
{
"status": "passed",
"code_desc": "File /etc/shadow should exist",
"run_time": 0.004684,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should be file",
"run_time": 0.004503,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should be owned by \"root\"",
"run_time": 0.000176,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should not be executable",
"run_time": 0.000138,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should be writable by owner",
"run_time": 7.6e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should be readable by owner",
"run_time": 7.2e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "failed",
"code_desc": "File /etc/shadow should not be readable by group",
"run_time": 0.000171,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "expected File /etc/shadow not to be readable by group"
},
{
"status": "passed",
"code_desc": "File /etc/shadow should not be readable by other",
"run_time": 9.4e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "failed",
"code_desc": "File /etc/shadow group should eq \"root\"",
"run_time": 0.000138,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"root\"\n got: \"shadow\"\n\n(compared using ==)\n"
}
],
"removed_results_counts": {
"failed": 150,
"skipped": 100,
"passed": 50
}
},
{
"id": "os-03",
"code": "control 'os-03' do\n impact 1.0\n title 'Check owner and permissions for /etc/passwd'\n desc 'Check periodically the owner and permissions for /etc/passwd'\n describe file('/etc/passwd') do\n it { should exist }\n it { should be_file }\n it { should be_owned_by 'root' }\n its('group') { should eq 'root' }\n it { should_not be_executable }\n it { should be_writable.by('owner') }\n it { should_not be_writable.by('group') }\n it { should_not be_writable.by('other') }\n it { should be_readable.by('owner') }\n it { should be_readable.by('group') }\n it { should be_readable.by('other') }\n end\nend\n",
"desc": "Check periodically the owner and permissions for /etc/passwd",
"impact": 1,
"title": "Check owner and permissions for /etc/passwd",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 50
},
"refs": [],
"tags": {},
"waiver_data": {
"justification": "Sheer cleverness",
"run": true,
"skipped_due_to_waiver": false,
"message": ""
},
"results": [
{
"status": "passed",
"code_desc": "File /etc/passwd should exist",
"run_time": 0.003601,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be file",
"run_time": 0.005196,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be owned by \"root\"",
"run_time": 0.000162,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should not be executable",
"run_time": 0.000142,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be writable by owner",
"run_time": 9.1e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should not be writable by group",
"run_time": 0.000111,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should not be writable by other",
"run_time": 9e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be readable by owner",
"run_time": 9.1e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be readable by group",
"run_time": 8.8e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd should be readable by other",
"run_time": 8.8e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/passwd group should eq \"root\"",
"run_time": 9.4e-05,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "os-04",
"code": "control 'os-04' do\n impact 1.0\n title 'Dot in PATH variable'\n desc 'Do not include the current working directory in PATH variable. This makes it easier for an attacker to gain extensive rigths by executing a Trojan program'\n describe os_env('PATH') do\n its('split') { should_not include('') }\n its('split') { should_not include('.') }\n end\nend\n",
"desc": "Do not include the current working directory in PATH variable. This makes it easier for an attacker to gain extensive rigths by executing a Trojan program",
"impact": 1,
"title": "Dot in PATH variable",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 69
},
"refs": [],
"tags": {},
"waiver_data": {
"expiration_date": "1977-06-01",
"justification": "Necessity",
"run": false,
"skipped_due_to_waiver": false,
"message": "Waiver expired on 1977-06-01, evaluating control normally"
},
"results": [
{
"status": "passed",
"code_desc": "Environment variable PATH split should not include \"\"",
"run_time": 0.000134,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Environment variable PATH split should not include \".\"",
"run_time": 9.8e-05,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "os-05",
"code": "control 'os-05' do\n impact 1.0\n title 'Check login.defs'\n desc 'Check owner and permissions for login.defs. Also check the configured PATH variable and umask in login.defs'\n describe file('/etc/login.defs') do\n it { should exist }\n it { should be_file }\n it { should be_owned_by 'root' }\n its('group') { should eq 'root' }\n it { should_not be_executable }\n it { should_not be_writable }\n it { should be_readable.by('owner') }\n it { should be_readable.by('group') }\n it { should be_readable.by('other') }\n end\n describe login_defs do\n its('ENV_SUPATH') { should include('/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin') }\n its('ENV_PATH') { should include('/usr/local/bin:/usr/bin:/bin') }\n its('UMASK') { should include('027') }\n its('PASS_MAX_DAYS') { should eq '60' }\n its('PASS_MIN_DAYS') { should eq '7' }\n its('PASS_WARN_AGE') { should eq '7' }\n its('LOGIN_RETRIES') { should eq '5' }\n its('LOGIN_TIMEOUT') { should eq '60' }\n its('UID_MIN') { should eq '1000' }\n its('GID_MIN') { should eq '1000' }\n its('SYS_UID_MIN') { should eq '100' }\n its('SYS_UID_MAX') { should eq '999' }\n its('SYS_GID_MIN') { should eq '100' }\n its('SYS_GID_MAX') { should eq '999' }\n its('ENCRYPT_METHOD') { should eq 'SHA512' }\n end\nend\n",
"desc": "Check owner and permissions for login.defs. Also check the configured PATH variable and umask in login.defs",
"impact": 1,
"title": "Check login.defs",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 79
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "File /etc/login.defs should exist",
"run_time": 0.003574,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should be file",
"run_time": 0.004564,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should be owned by \"root\"",
"run_time": 0.000154,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should not be executable",
"run_time": 0.000138,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "failed",
"code_desc": "File /etc/login.defs should not be writable",
"run_time": 0.000166,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "expected File /etc/login.defs not to be writable"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should be readable by owner",
"run_time": 9.3e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should be readable by group",
"run_time": 8.2e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs should be readable by other",
"run_time": 8.2e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "File /etc/login.defs group should eq \"root\"",
"run_time": 9.7e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs ENV_SUPATH should include \"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"",
"run_time": 0.01768,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs ENV_PATH should include \"/usr/local/bin:/usr/bin:/bin\"",
"run_time": 0.000103,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "failed",
"code_desc": "login.defs UMASK should include \"027\"",
"run_time": 0.000189,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "expected \"022\" to include \"027\""
},
{
"status": "failed",
"code_desc": "login.defs PASS_MAX_DAYS should eq \"60\"",
"run_time": 0.0001,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"60\"\n got: \"99999\"\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "login.defs PASS_MIN_DAYS should eq \"7\"",
"run_time": 9.5e-05,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"7\"\n got: \"0\"\n\n(compared using ==)\n"
},
{
"status": "passed",
"code_desc": "login.defs PASS_WARN_AGE should eq \"7\"",
"run_time": 6.2e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs LOGIN_RETRIES should eq \"5\"",
"run_time": 6.1e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs LOGIN_TIMEOUT should eq \"60\"",
"run_time": 6.3e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs UID_MIN should eq \"1000\"",
"run_time": 5.6e-05,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "login.defs GID_MIN should eq \"1000\"",
"run_time": 0.000103,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "failed",
"code_desc": "login.defs SYS_UID_MIN should eq \"100\"",
"run_time": 8.3e-05,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"100\"\n got: nil\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "login.defs SYS_UID_MAX should eq \"999\"",
"run_time": 8.3e-05,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"999\"\n got: nil\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "login.defs SYS_GID_MIN should eq \"100\"",
"run_time": 0.000441,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"100\"\n got: nil\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "login.defs SYS_GID_MAX should eq \"999\"",
"run_time": 0.000118,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: \"999\"\n got: nil\n\n(compared using ==)\n"
},
{
"status": "passed",
"code_desc": "login.defs ENCRYPT_METHOD should eq \"SHA512\"",
"run_time": 6.6e-05,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "os-06",
"code": "control 'os-06' do\n impact 1.0\n title 'Check for SUID/ SGID blacklist'\n desc 'Find blacklisted SUID and SGID files to ensure that no rogue SUID and SGID files have been introduced into the system'\n\n blacklist = [\n # blacklist as provided by NSA\n '/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',\n # sshd must not use host-based authentication (see ssh cookbook)\n '/usr/libexec/openssh/ssh-keysign',\n '/usr/lib/openssh/ssh-keysign',\n # misc others\n '/sbin/netreport', # not normally required for user\n '/usr/sbin/usernetctl', # modify interfaces via functional accounts\n # connecting to ...\n '/usr/sbin/userisdnctl', # no isdn...\n '/usr/sbin/pppd', # no ppp / dsl ...\n # lockfile\n '/usr/bin/lockfile',\n '/usr/bin/mail-lock',\n '/usr/bin/mail-unlock',\n '/usr/bin/mail-touchlock',\n '/usr/bin/dotlockfile',\n # need more investigation, blacklist for now\n '/usr/bin/arping',\n '/usr/sbin/arping',\n '/usr/sbin/uuidd',\n '/usr/bin/mtr', # investigate current state...\n '/usr/lib/evolution/camel-lock-helper-1.2', # investigate current state...\n '/usr/lib/pt_chown', # pseudo-tty, needed?\n '/usr/lib/eject/dmcrypt-get-device',\n '/usr/lib/mc/cons.saver' # midnight commander screensaver\n ]\n\n output = command('find / -perm -4000 -o -perm -2000 -type f ! -path \\'/proc/*\\' -print 2>/dev/null | grep -v \\'^find:\\'')\n diff = output.stdout.split(/\\r?\\n/) & blacklist\n describe diff do\n it { should be_empty }\n end\nend\n",
"desc": "Find blacklisted SUID and SGID files to ensure that no rogue SUID and SGID files have been introduced into the system",
"impact": 1,
"title": "Check for SUID/ SGID blacklist",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 113
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "[] should be empty",
"run_time": 7.5e-05,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "os-07",
"code": "control 'os-07' do\n impact 1.0\n title 'Unique uid and gid'\n desc 'Check for unique uids gids'\n describe passwd do\n its('uids') { should_not contain_duplicates }\n end\n describe etc_group do\n its('gids') { should_not contain_duplicates }\n end\nend\n",
"desc": "Check for unique uids gids",
"impact": 1,
"title": "Unique uid and gid",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/os_spec.rb",
"line": 154
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "/etc/passwd uids should not contain duplicates",
"run_time": 0.000168,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "/etc/group gids should not contain duplicates",
"run_time": 0.000187,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "package-01",
"code": "control 'package-01' do\n impact 1.0\n title 'Do not run deprecated inetd or xinetd'\n desc 'http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.1'\n describe package('inetd') do\n it { should_not be_installed }\n end\n describe package('xinetd') do\n it { should_not be_installed }\n end\nend\n",
"desc": "http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.1",
"impact": 1,
"title": "Do not run deprecated inetd or xinetd",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/package_spec.rb",
"line": 21
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "System Package inetd should not be installed",
"run_time": 0.008312,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "System Package xinetd should not be installed",
"run_time": 0.007024,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "package-02",
"code": "control 'package-02' do\n impact 1.0\n title 'Do not install Telnet server'\n desc 'Telnet protocol uses unencrypted communication, that means the passowrd and other sensitive data are unencrypted. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.2'\n describe package('telnetd') do\n it { should_not be_installed }\n end\nend\n",
"desc": "Telnet protocol uses unencrypted communication, that means the passowrd and other sensitive data are unencrypted. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.2",
"impact": 1,
"title": "Do not install Telnet server",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/package_spec.rb",
"line": 33
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "System Package telnetd should not be installed",
"run_time": 0.2623,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "package-03",
"code": "control 'package-03' do\n impact 1.0\n title 'Do not install rsh server'\n desc 'The r-commands suffers same problem as telnet. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.3'\n describe package('telnetd') do\n it { should_not be_installed }\n end\nend\n",
"desc": "The r-commands suffers same problem as telnet. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.3",
"impact": 1,
"title": "Do not install rsh server",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/package_spec.rb",
"line": 42
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "System Package telnetd should not be installed",
"run_time": 0.006853,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "package-05",
"code": "control 'package-05' do\n impact 1.0\n title 'Do not install ypserv server (NIS)'\n desc 'Network Information Service (NIS) has some security design weaknesses like inadequate protection of important authentication information. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.4'\n describe package('ypserv') do\n it { should_not be_installed }\n end\nend\n",
"desc": "Network Information Service (NIS) has some security design weaknesses like inadequate protection of important authentication information. http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.4",
"impact": 1,
"title": "Do not install ypserv server (NIS)",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/package_spec.rb",
"line": 51
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "System Package ypserv should not be installed",
"run_time": 0.006921,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "package-06",
"code": "control 'package-06' do\n impact 1.0\n title 'Do not install tftp server'\n desc 'tftp-server provides little security http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.5'\n describe package('tftp-server') do\n it { should_not be_installed }\n end\nend\n",
"desc": "tftp-server provides little security http://www.nsa.gov/ia/_files/os/redhat/rhel5-guide-i731.pdf, Chapter 3.2.5",
"impact": 1,
"title": "Do not install tftp server",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/package_spec.rb",
"line": 60
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "System Package tftp-server should not be installed",
"run_time": 0.006591,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-01",
"code": "control 'sysctl-01' do\n impact 1.0\n title 'IPv4 Forwarding'\n desc \"If you're not intending for your system to forward traffic between interfaces, or if you only have a single interface, the forwarding function must be disable.\"\n describe kernel_parameter('net.ipv4.ip_forward') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv4.conf.all.forwarding') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "If you're not intending for your system to forward traffic between interfaces, or if you only have a single interface, the forwarding function must be disable.",
"impact": 1,
"title": "IPv4 Forwarding",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 21
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.ip_forward value should eq 0",
"run_time": 0.004171,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.forwarding value should eq 0",
"run_time": 0.004013,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-02",
"code": "control 'sysctl-02' do\n impact 1.0\n title 'Reverse path filtering'\n desc \"The rp_filter can reject incoming packets if their source address doesn't match the network interface that they're arriving on, which helps to prevent IP spoofing.\"\n describe kernel_parameter('net.ipv4.conf.all.rp_filter') do\n its(:value) { should eq 1 }\n end\n describe kernel_parameter('net.ipv4.conf.default.rp_filter') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "The rp_filter can reject incoming packets if their source address doesn't match the network interface that they're arriving on, which helps to prevent IP spoofing.",
"impact": 1,
"title": "Reverse path filtering",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 33
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.rp_filter value should eq 1",
"run_time": 0.003864,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.default.rp_filter value should eq 1",
"run_time": 0.003689,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-03",
"code": "control 'sysctl-03' do\n impact 1.0\n title 'ICMP ignore bogus error responses'\n desc 'Sometimes routers send out invalid responses to broadcast frames. This is a violation of RFC 1122 and the kernel will logged this. To avoid filling up your logfile with unnecessary stuff, you can tell the kernel not to issue these warnings'\n describe kernel_parameter('net.ipv4.icmp_ignore_bogus_error_responses') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "Sometimes routers send out invalid responses to broadcast frames. This is a violation of RFC 1122 and the kernel will logged this. To avoid filling up your logfile with unnecessary stuff, you can tell the kernel not to issue these warnings",
"impact": 1,
"title": "ICMP ignore bogus error responses",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 45
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.icmp_ignore_bogus_error_responses value should eq 1",
"run_time": 0.003764,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-04",
"code": "control 'sysctl-04' do\n impact 1.0\n title 'ICMP echo ignore broadcasts'\n desc 'Blocking ICMP ECHO requests to broadcast addresses'\n describe kernel_parameter('net.ipv4.icmp_echo_ignore_broadcasts') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "Blocking ICMP ECHO requests to broadcast addresses",
"impact": 1,
"title": "ICMP echo ignore broadcasts",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 54
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.icmp_echo_ignore_broadcasts value should eq 1",
"run_time": 0.003746,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-05",
"code": "control 'sysctl-05' do\n impact 1.0\n title 'ICMP ratelimit'\n desc 'icmp_ratelimit defines how many packets that match the icmp_ratemask per second'\n describe kernel_parameter('net.ipv4.icmp_ratelimit') do\n its(:value) { should eq 100 }\n end\nend\n",
"desc": "icmp_ratelimit defines how many packets that match the icmp_ratemask per second",
"impact": 1,
"title": "ICMP ratelimit",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 63
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.icmp_ratelimit value should eq 100",
"run_time": 0.004023,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 100\n got: 1000\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-06",
"code": "control 'sysctl-06' do\n impact 1.0\n title 'ICMP ratemask'\n desc 'Ratemask is a logical OR of all ICMP codes to rate limit'\n describe kernel_parameter('net.ipv4.icmp_ratemask') do\n its(:value) { should eq 88089 }\n end\nend\n",
"desc": "Ratemask is a logical OR of all ICMP codes to rate limit",
"impact": 1,
"title": "ICMP ratemask",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 72
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.icmp_ratemask value should eq 88089",
"run_time": 0.003728,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 88089\n got: 6168\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-07",
"code": "control 'sysctl-07' do\n impact 1.0\n title 'TCP timestamps'\n desc \"It is possible to estimate the current uptime of a Linux system. It's preferable to disable TCP timestamps on your systems.\"\n describe kernel_parameter('net.ipv4.tcp_timestamps') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "It is possible to estimate the current uptime of a Linux system. It's preferable to disable TCP timestamps on your systems.",
"impact": 1,
"title": "TCP timestamps",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 81
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.tcp_timestamps value should eq 0",
"run_time": 0.003771,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: nil\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-08",
"code": "control 'sysctl-08' do\n impact 1.0\n title 'ARP ignore'\n desc 'Reply only if the target IP address is local address configured on the incoming interface.'\n describe kernel_parameter('net.ipv4.conf.all.arp_ignore') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "Reply only if the target IP address is local address configured on the incoming interface.",
"impact": 1,
"title": "ARP ignore",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 90
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.arp_ignore value should eq 1",
"run_time": 0.003666,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 1\n got: 0\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-09",
"code": "control 'sysctl-09' do\n impact 1.0\n title 'ARP announce'\n desc 'Always use the best local address for this target. In this mode we ignore the source address in the IP packet and try to select local address that we prefer for talks with\tthe target host.'\n describe kernel_parameter('net.ipv4.conf.all.arp_announce') do\n its(:value) { should eq 2 }\n end\nend\n",
"desc": "Always use the best local address for this target. In this mode we ignore the source address in the IP packet and try to select local address that we prefer for talks with\tthe target host.",
"impact": 1,
"title": "ARP announce",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 99
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.arp_announce value should eq 2",
"run_time": 0.003711,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 2\n got: 0\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-10",
"code": "control 'sysctl-10' do\n impact 1.0\n title 'TCP RFC1337 Protect Against TCP Time-Wait'\n desc 'This enables a fix for time-wait assassination hazards in tcp, described in RFC 1337. If enabled, this causes the kernel to drop RST packets for sockets in the time-wait state.'\n describe kernel_parameter('net.ipv4.tcp_rfc1337') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "This enables a fix for time-wait assassination hazards in tcp, described in RFC 1337. If enabled, this causes the kernel to drop RST packets for sockets in the time-wait state.",
"impact": 1,
"title": "TCP RFC1337 Protect Against TCP Time-Wait",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 108
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.tcp_rfc1337 value should eq 1",
"run_time": 0.003682,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 1\n got: nil\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-11",
"code": "control 'sysctl-11' do\n impact 1.0\n title 'Protection against SYN flood attacks'\n desc 'A SYN-Attack is a denial of service (DoS) attack that consumes resources on your system forcing you to reboot.'\n describe kernel_parameter('net.ipv4.tcp_syncookies') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "A SYN-Attack is a denial of service (DoS) attack that consumes resources on your system forcing you to reboot.",
"impact": 1,
"title": "Protection against SYN flood attacks",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 117
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.tcp_syncookies value should eq 1",
"run_time": 0.003911,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-12",
"code": "control 'sysctl-12' do\n impact 1.0\n title 'Shared Media IP Architecture'\n desc 'Send(router) or accept(host) RFC1620 shared media redirects. If it is not set the kernel does not assume that different subnets on this device can communicate directly.'\n describe kernel_parameter('net.ipv4.conf.all.shared_media') do\n its(:value) { should eq 1 }\n end\n describe kernel_parameter('net.ipv4.conf.default.shared_media') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "Send(router) or accept(host) RFC1620 shared media redirects. If it is not set the kernel does not assume that different subnets on this device can communicate directly.",
"impact": 1,
"title": "Shared Media IP Architecture",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 126
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.shared_media value should eq 1",
"run_time": 0.003485,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.default.shared_media value should eq 1",
"run_time": 0.003418,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-13",
"code": "control 'sysctl-13' do\n impact 1.0\n title 'Disable Source Routing'\n desc 'The accept_source_route option causes network interfaces to accept packets with the Strict Source Route (SSR) or Loose Source Routing (LSR) option set. An attacker is able to send a source routed packet into the network, then he could intercept the replies and your server might not know that it is not communicating with a trusted server'\n describe kernel_parameter('net.ipv4.conf.all.accept_source_route') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv4.conf.default.accept_source_route') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The accept_source_route option causes network interfaces to accept packets with the Strict Source Route (SSR) or Loose Source Routing (LSR) option set. An attacker is able to send a source routed packet into the network, then he could intercept the replies and your server might not know that it is not communicating with a trusted server",
"impact": 1,
"title": "Disable Source Routing",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 138
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.accept_source_route value should eq 0",
"run_time": 0.003562,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.default.accept_source_route value should eq 0",
"run_time": 0.003687,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-14",
"code": "control 'sysctl-14' do\n impact 1.0\n title 'Disable acceptance of all IPv4 redirected packets'\n desc 'Disable acceptance of all redirected packets these prevents Man-in-the-Middle attacks.'\n describe kernel_parameter('net.ipv4.conf.default.accept_redirects') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv4.conf.all.accept_redirects') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Disable acceptance of all redirected packets these prevents Man-in-the-Middle attacks.",
"impact": 1,
"title": "Disable acceptance of all IPv4 redirected packets",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 150
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.default.accept_redirects value should eq 0",
"run_time": 0.003682,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.accept_redirects value should eq 0",
"run_time": 0.003555,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-15",
"code": "control 'sysctl-15' do\n impact 1.0\n title 'Disable acceptance of all secure redirected packets'\n desc 'Disable acceptance of all secure redirected packets these prevents Man-in-the-Middle attacks.'\n describe kernel_parameter('net.ipv4.conf.all.secure_redirects') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv4.conf.default.secure_redirects') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Disable acceptance of all secure redirected packets these prevents Man-in-the-Middle attacks.",
"impact": 1,
"title": "Disable acceptance of all secure redirected packets",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 162
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.secure_redirects value should eq 0",
"run_time": 0.003499,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.default.secure_redirects value should eq 0",
"run_time": 0.003595,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-16",
"code": "control 'sysctl-16' do\n impact 1.0\n title 'Disable sending of redirects packets'\n desc 'Disable sending of redirects packets'\n describe kernel_parameter('net.ipv4.conf.all.send_redirects') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv4.conf.all.send_redirects') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Disable sending of redirects packets",
"impact": 1,
"title": "Disable sending of redirects packets",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 174
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.send_redirects value should eq 0",
"run_time": 0.00397,
"start_time": "2018-02-09T10:17:23+01:00"
},
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.send_redirects value should eq 0",
"run_time": 0.003905,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-17",
"code": "control 'sysctl-17' do\n impact 1.0\n title 'Disable log martians'\n desc 'log_martians can cause a denial of service attack to the host'\n describe kernel_parameter('net.ipv4.conf.all.log_martians') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "log_martians can cause a denial of service attack to the host",
"impact": 1,
"title": "Disable log martians",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 186
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv4.conf.all.log_martians value should eq 1",
"run_time": 0.003698,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 1\n got: 0\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-18",
"code": "control 'sysctl-18' do\n impact 1.0\n title 'Disable IPv6 if it is not needed'\n desc 'Disable IPv6 if it is not needed'\n describe kernel_parameter('net.ipv6.conf.all.disable_ipv6') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "Disable IPv6 if it is not needed",
"impact": 1,
"title": "Disable IPv6 if it is not needed",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 195
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.all.disable_ipv6 value should eq 1",
"run_time": 0.003765,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 1\n got: 0\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-19",
"code": "control 'sysctl-19' do\n impact 1.0\n title 'IPv6 Forwarding'\n desc \"If you're not intending for your system to forward traffic between interfaces, or if you only have a single interface, the forwarding function must be disable.\"\n describe kernel_parameter('net.ipv6.conf.all.forwarding') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "If you're not intending for your system to forward traffic between interfaces, or if you only have a single interface, the forwarding function must be disable.",
"impact": 1,
"title": "IPv6 Forwarding",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 204
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter net.ipv6.conf.all.forwarding value should eq 0",
"run_time": 0.00366,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-20",
"code": "control 'sysctl-20' do\n impact 1.0\n title 'Disable acceptance of all IPv6 redirected packets'\n desc 'Disable acceptance of all redirected packets these prevents Man-in-the-Middle attacks.'\n describe kernel_parameter('net.ipv6.conf.default.accept_redirects') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv6.conf.all.accept_redirects') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Disable acceptance of all redirected packets these prevents Man-in-the-Middle attacks.",
"impact": 1,
"title": "Disable acceptance of all IPv6 redirected packets",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 213
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.accept_redirects value should eq 0",
"run_time": 0.003587,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.all.accept_redirects value should eq 0",
"run_time": 0.003961,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-21",
"code": "control 'sysctl-21' do\n impact 1.0\n title 'Disable acceptance of IPv6 router solicitations messages'\n desc 'The router solicitations setting determines how many router solicitations are sent when bringing up the interface. If addresses are statically assigned, there is no need to send any solicitations.'\n describe kernel_parameter('net.ipv6.conf.default.router_solicitations') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The router solicitations setting determines how many router solicitations are sent when bringing up the interface. If addresses are statically assigned, there is no need to send any solicitations.",
"impact": 1,
"title": "Disable acceptance of IPv6 router solicitations messages",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 225
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.router_solicitations value should eq 0",
"run_time": 0.003601,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: \"-1\"\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-22",
"code": "control 'sysctl-22' do\n impact 1.0\n title 'Disable Accept Router Preference from router advertisement'\n desc 'Disable Accept Router Preference from router advertisement'\n describe kernel_parameter('net.ipv6.conf.default.accept_ra_rtr_pref') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Disable Accept Router Preference from router advertisement",
"impact": 1,
"title": "Disable Accept Router Preference from router advertisement",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 234
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.accept_ra_rtr_pref value should eq 0",
"run_time": 0.003782,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-23",
"code": "control 'sysctl-23' do\n impact 1.0\n title 'Disable learning Prefix Information from router advertisement'\n desc 'The accept_ra_pinfo setting controls whether the system will accept prefix info from the router.'\n describe kernel_parameter('net.ipv6.conf.default.accept_ra_pinfo') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The accept_ra_pinfo setting controls whether the system will accept prefix info from the router.",
"impact": 1,
"title": "Disable learning Prefix Information from router advertisement",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 243
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.accept_ra_pinfo value should eq 0",
"run_time": 0.003568,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-24",
"code": "control 'sysctl-24' do\n impact 1.0\n title 'Disable learning Hop limit from router advertisement'\n desc 'The accept_ra_defrtr setting controls whether the system will accept Hop Limit settings from a router advertisement. Setting it to 0 prevents a router from changing your default IPv6 Hop Limit for outgoing packets.'\n describe kernel_parameter('net.ipv6.conf.default.accept_ra_defrtr') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The accept_ra_defrtr setting controls whether the system will accept Hop Limit settings from a router advertisement. Setting it to 0 prevents a router from changing your default IPv6 Hop Limit for outgoing packets.",
"impact": 1,
"title": "Disable learning Hop limit from router advertisement",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 252
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.accept_ra_defrtr value should eq 0",
"run_time": 0.003662,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-25",
"code": "control 'sysctl-25' do\n impact 1.0\n title 'Disable the system`s acceptance of router advertisement'\n desc 'Setting controls whether the system will accept router advertisement'\n describe kernel_parameter('net.ipv6.conf.all.accept_ra') do\n its(:value) { should eq 0 }\n end\n describe kernel_parameter('net.ipv6.conf.default.accept_ra') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Setting controls whether the system will accept router advertisement",
"impact": 1,
"title": "Disable the system`s acceptance of router advertisement",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 261
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.all.accept_ra value should eq 0",
"run_time": 0.00385,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
},
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.accept_ra value should eq 0",
"run_time": 0.003774,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-26",
"code": "control 'sysctl-26' do\n impact 1.0\n title 'Disable IPv6 autoconfiguration'\n desc 'The autoconf setting controls whether router advertisements can cause the system to assign a global unicast address to an interface.'\n describe kernel_parameter('net.ipv6.conf.default.autoconf') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The autoconf setting controls whether router advertisements can cause the system to assign a global unicast address to an interface.",
"impact": 1,
"title": "Disable IPv6 autoconfiguration",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 273
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.autoconf value should eq 0",
"run_time": 0.003631,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-27",
"code": "control 'sysctl-27' do\n impact 1.0\n title 'Disable neighbor solicitations to send out per address'\n desc 'The dad_transmits setting determines how many neighbor solicitations to send out per address (global and link-local) when bringing up an interface to ensure the desired address is unique on the network.'\n describe kernel_parameter('net.ipv6.conf.default.dad_transmits') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The dad_transmits setting determines how many neighbor solicitations to send out per address (global and link-local) when bringing up an interface to ensure the desired address is unique on the network.",
"impact": 1,
"title": "Disable neighbor solicitations to send out per address",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 282
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.dad_transmits value should eq 0",
"run_time": 0.003698,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-28",
"code": "control 'sysctl-28' do\n impact 1.0\n title 'Assign one global unicast IPv6 addresses to each interface'\n desc 'The max_addresses setting determines how many global unicast IPv6 addresses can be assigned to each interface. The default is 16, but it should be set to exactly the number of statically configured global addresses required.'\n describe kernel_parameter('net.ipv6.conf.default.max_addresses') do\n its(:value) { should eq 1 }\n end\nend\n",
"desc": "The max_addresses setting determines how many global unicast IPv6 addresses can be assigned to each interface. The default is 16, but it should be set to exactly the number of statically configured global addresses required.",
"impact": 1,
"title": "Assign one global unicast IPv6 addresses to each interface",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 291
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter net.ipv6.conf.default.max_addresses value should eq 1",
"run_time": 0.003665,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 1\n got: 16\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-29",
"code": "control 'sysctl-29' do\n impact 1.0\n title 'Disable loading kernel modules'\n desc 'The sysctl key kernel.modules_disabled is very straightforward. If it contains a \"1\" it will disable loading new modules, where a \"0\" will still allow loading them. Using this option will be a great protection against loading malicious kernel modules.'\n describe kernel_parameter('kernel.modules_disabled') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "The sysctl key kernel.modules_disabled is very straightforward. If it contains a \"1\" it will disable loading new modules, where a \"0\" will still allow loading them. Using this option will be a great protection against loading malicious kernel modules.",
"impact": 1,
"title": "Disable loading kernel modules",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 300
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter kernel.modules_disabled value should eq 0",
"run_time": 0.004364,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-30",
"code": "control 'sysctl-30' do\n impact 1.0\n title 'Magic SysRq'\n desc \"Kernel.sysreg is a 'magical' key combo you can hit which the kernel will respond to regardless of whatever else it is doing, unless it is completely locked up.\"\n describe kernel_parameter('kernel.sysrq') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Kernel.sysreg is a 'magical' key combo you can hit which the kernel will respond to regardless of whatever else it is doing, unless it is completely locked up.",
"impact": 1,
"title": "Magic SysRq",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 309
},
"refs": [],
"tags": {},
"results": [
{
"status": "failed",
"code_desc": "Kernel Parameter kernel.sysrq value should eq 0",
"run_time": 0.004179,
"start_time": "2018-02-09T10:17:23+01:00",
"message": "\nexpected: 0\n got: 1\n\n(compared using ==)\n"
}
]
},
{
"id": "sysctl-31",
"code": "control 'sysctl-31' do\n impact 1.0\n title 'Disable Core Dumps'\n desc 'Ensure that core dumps can never be made by setuid programs'\n describe kernel_parameter('fs.suid_dumpable') do\n its(:value) { should eq 0 }\n end\nend\n",
"desc": "Ensure that core dumps can never be made by setuid programs",
"impact": 1,
"title": "Disable Core Dumps",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 318
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter fs.suid_dumpable value should eq 0",
"run_time": 0.003693,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-32",
"code": "control 'sysctl-32' do\n impact 1.0\n title 'kernel.randomize_va_space'\n desc 'kernel.randomize_va_space'\n describe kernel_parameter('kernel.randomize_va_space') do\n its(:value) { should eq 2 }\n end\nend\n",
"desc": "kernel.randomize_va_space",
"impact": 1,
"title": "kernel.randomize_va_space",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 327
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Kernel Parameter kernel.randomize_va_space value should eq 2",
"run_time": 0.003624,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "sysctl-33",
"code": "control 'sysctl-33' do\n impact 1.0\n title 'CPU No execution Flag or Kernel ExecShield'\n desc 'Kernel features and CPU flags provide a protection against buffer overflows. The CPU NX Flag and the kernel parameter exec-shield prevents code execution on a per memory page basis. If the CPU supports the NX-Flag then this should be used instead of the kernel parameter exec-shield.'\n\n # parse for cpu flags\n flags = parse_config_file('/proc/cpuinfo', assignment_re: /^([^:]*?)\\s+:\\s+(.*?)$/).flags\n flags ||= ''\n flags = flags.split(' ')\n\n describe '/proc/cpuinfo' do\n it 'Flags should include NX' do\n expect(flags).to include('nx')\n end\n end\n\n unless flags.include?('nx')\n # if no nx flag is present, we require exec-shield\n describe kernel_parameter('kernel.exec-shield') do\n its(:value) { should eq 1 }\n end\n end\nend\n",
"desc": "Kernel features and CPU flags provide a protection against buffer overflows. The CPU NX Flag and the kernel parameter exec-shield prevents code execution on a per memory page basis. If the CPU supports the NX-Flag then this should be used instead of the kernel parameter exec-shield.",
"impact": 1,
"title": "CPU No execution Flag or Kernel ExecShield",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/linux-baseline-2.0.1.tar.gz/linux-baseline-2.0.1/controls/sysctl_spec.rb",
"line": 336
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "/proc/cpuinfo Flags should include NX",
"run_time": 7.1e-05,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
}
],
"status": "loaded",
"supports": null,
"groups": null
},
{
"name": "apache-baseline",
"title": "DevSec Apache Baseline",
"version": "2.0.0",
"sha256": "41a02784bfea15592ba2748d55927d8d1f9da205816ef18d3bb2ebe4c5ce18a8",
"summary": "Test-suite for best-practice apache hardening",
"maintainer": "",
"license": "",
"copyright": "Hardening Framework Team",
"copyright_email": "hello@dev-sec.io",
"status": "loaded",
"controls": [
{
"id": "apache-03",
"code": "control 'apache-03' do\n title 'Apache should start max. 1 root-task different'\n desc 'The Apache service in its own non-privileged account. If the web server process runs with administrative privileges, an attack who obtains control over the apache process may control the entire system.'\n total_tasks = command(\"ps aux | grep #{apache.service} | grep -v grep | grep root | wc -l | tr -d [:space:]\").stdout.to_i\n describe total_tasks do\n it { should eq 1 }\n end\nend\n",
"desc": "The Apache service in its own non-privileged account. If the web server process runs with administrative privileges, an attack who obtains control over the apache process may control the entire system.",
"impact": 0.5,
"title": "Apache should start max. 1 root-task different",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 49
},
"refs": [],
"tags": {},
"results": [
{
"status": "passed",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00"
}
]
},
{
"id": "apache-01",
"code": "control 'apache-01' do\n impact 1.0\n title 'Apache should be running'\n desc 'Apache should be running.'\n describe service(apache.service) do\n it { should be_installed }\n it { should be_running }\n end\nend\n",
"desc": "Apache should be running.",
"impact": 1,
"title": "Apache should be running",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 29
},
"refs": [],
"tags": {},
"waiver_data": {
"expiration_date": "2025-06-01",
"justification": "Whimsy",
"run": false,
"skipped_due_to_waiver": true,
"message": ""
},
"results": [
{
"status": "skipped",
"code_desc": "No-op",
"run_time": 7e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"resource": "No-op",
"skip_message": "Skipped control due to waiver condition: Whimsy"
}
]
},
{
"id": "apache-02",
"code": "control 'apache-02' do\n impact 1.0\n title 'Apache should be enabled'\n desc 'Configure apache service to be automatically started at boot time'\n only_if { os[:family] != 'ubuntu' && os[:release] != '16.04' } || only_if { os[:family] != 'debian' && os[:release] != '8' }\n describe service(apache.service) do\n it { should be_enabled }\n end\nend\n",
"desc": "Configure apache service to be automatically started at boot time",
"impact": 1,
"title": "Apache should be enabled",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 39
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-04",
"code": "control 'apache-04' do\n impact 1.0\n title 'Check Apache config folder owner, group and permissions.'\n desc 'The Apache config folder should owned and grouped by root, be writable, readable and executable by owner. It should be readable, executable by group and not readable, not writeable by others.'\n describe file(apache.conf_dir) do\n it { should be_owned_by 'root' }\n it { should be_grouped_into 'root' }\n it { should be_readable.by('owner') }\n it { should be_writable.by('owner') }\n it { should be_executable.by('owner') }\n it { should be_readable.by('group') }\n it { should_not be_writable.by('group') }\n it { should be_executable.by('group') }\n it { should_not be_readable.by('others') }\n it { should_not be_writable.by('others') }\n it { should be_executable.by('others') }\n end\nend\n",
"desc": "The Apache config folder should owned and grouped by root, be writable, readable and executable by owner. It should be readable, executable by group and not readable, not writeable by others.",
"impact": 1,
"title": "Check Apache config folder owner, group and permissions.",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 58
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-05",
"code": "control 'apache-05' do\n impact 1.0\n title 'Check Apache config file owner, group and permissions.'\n desc 'The Apache config file should owned and grouped by root, only be writable and readable by owner and not write- and readable by others.'\n describe file(apache.conf_path) do\n it { should be_owned_by 'root' }\n it { should be_grouped_into 'root' }\n it { should be_readable.by('owner') }\n it { should be_writable.by('owner') }\n it { should_not be_executable.by('owner') }\n it { should be_readable.by('group') }\n it { should_not be_writable.by('group') }\n it { should_not be_executable.by('group') }\n it { should_not be_readable.by('others') }\n it { should_not be_writable.by('others') }\n it { should_not be_executable.by('others') }\n end\n describe file(File.join(apache.conf_dir, '/conf-enabled/hardening.conf')) do\n it { should be_owned_by 'root' }\n it { should be_grouped_into 'root' }\n it { should be_readable.by('owner') }\n it { should be_writable.by('owner') }\n it { should_not be_executable.by('owner') }\n it { should be_readable.by('group') }\n it { should_not be_writable.by('group') }\n it { should_not be_executable.by('group') }\n it { should_not be_readable.by('others') }\n it { should_not be_writable.by('others') }\n it { should_not be_executable.by('others') }\n end\nend\n",
"desc": "The Apache config file should owned and grouped by root, only be writable and readable by owner and not write- and readable by others.",
"impact": 1,
"title": "Check Apache config file owner, group and permissions.",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 77
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-06",
"code": "control 'apache-06' do\n impact 1.0\n title 'User and group should be set properly'\n desc 'For security reasons it is recommended to run Apache in its own non-privileged account.'\n describe apache_conf do\n its('User') { should eq [apache.user] }\n its('Group') { should eq [apache.user] }\n end\nend\n",
"desc": "For security reasons it is recommended to run Apache in its own non-privileged account.",
"impact": 1,
"title": "User and group should be set properly",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 109
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-07",
"code": "control 'apache-07' do\n impact 1.0\n title 'Set the apache server token'\n desc '\\'ServerTokens Prod\\' tells Apache to return only Apache as product in the server response header on the every page request'\n\n describe file(File.join(apache.conf_dir, '/conf-enabled/security.conf')) do\n its('content') { should match(/^ServerTokens Prod/) }\n end\n\n # open bug https://github.com/chef/inspec/issues/786, if the bug solved use this test\n # describe apache_conf do\n # its('ServerTokens') { should eq 'Prod' }\n # end\nend\n",
"desc": "'ServerTokens Prod' tells Apache to return only Apache as product in the server response header on the every page request",
"impact": 1,
"title": "Set the apache server token",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 119
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-08",
"code": "control 'apache-08' do\n impact 1.0\n title 'Should not load certain modules'\n desc 'Apache HTTP should not load legacy modules'\n\n module_path = File.join(apache.conf_dir, '/mods-enabled/')\n loaded_modules = command('ls ' << module_path).stdout.split.keep_if { |file_name| /.load/.match(file_name) }\n\n loaded_modules.each do |id|\n describe file(File.join(module_path, id)) do\n its('content') { should_not match(/^\\s*?LoadModule\\s+?dav_module/) }\n its('content') { should_not match(/^\\s*?LoadModule\\s+?cgid_module/) }\n its('content') { should_not match(/^\\s*?LoadModule\\s+?cgi_module/) }\n its('content') { should_not match(/^\\s*?LoadModule\\s+?include_module/) }\n end\n end\n\n # open bug https://github.com/chef/inspec/issues/786, if the bug solved use this test\n # describe apache_conf do\n # its('LoadModule') { should_not eq 'dav_module' }\n # its('LoadModule') { should_not eq 'cgid_module' }\n # its('LoadModule') { should_not eq 'cgi_module' }\n # its('LoadModule') { should_not eq 'include_module' }\n # its('content') { should_not match(/^\\s*?LoadModule\\s+?dav_module/) }\n # its('content') { should_not match(/^\\s*?LoadModule\\s+?cgid_module/) }\n # its('content') { should_not match(/^\\s*?LoadModule\\s+?cgi_module/) }\n # its('content') { should_not match(/^\\s*?LoadModule\\s+?include_module/) }\n # end\nend\n",
"desc": "Apache HTTP should not load legacy modules",
"impact": 1,
"title": "Should not load certain modules",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 134
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-09",
"code": "control 'apache-09' do\n impact 1.0\n title 'Disable TRACE-methods'\n desc 'The web server doesn’t allow TRACE request and help in blocking Cross Site Tracing attack.'\n\n describe file(File.join(apache.conf_dir, '/conf-enabled/security.conf')) do\n its('content') { should match(/^\\s*?TraceEnable\\s+?Off/) }\n end\n\n # open bug https://github.com/chef/inspec/issues/786, if the bug solved use this test\n # describe apache_conf do\n # its('TraceEnable') { should eq 'Off' }\n # end\nend\n",
"desc": "The web server doesn’t allow TRACE request and help in blocking Cross Site Tracing attack.",
"impact": 1,
"title": "Disable TRACE-methods",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 164
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-10",
"code": "control 'apache-10' do\n impact 1.0\n title 'Disable insecure HTTP-methods'\n desc 'Disable insecure HTTP-methods and allow only necessary methods.'\n\n describe file(File.join(apache.conf_dir, '/conf-enabled/hardening.conf')) do\n its('content') { should match(/^\\s*?<LimitExcept\\s+?GET\\s+?POST>/) }\n end\n\n # open bug https://github.com/chef/inspec/issues/786, if the bug solved use this test\n # describe apache_conf do\n # its('LimitExcept') { should eq ['GET','POST'] }\n # end\nend\n",
"desc": "Disable insecure HTTP-methods and allow only necessary methods.",
"impact": 1,
"title": "Disable insecure HTTP-methods",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 179
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-11",
"code": "control 'apache-11' do\n impact 1.0\n title 'Disable Apache’s follows Symbolic Links for directories in alias.conf'\n desc 'Should include -FollowSymLinks or +SymLinksIfOwnerMatch for directories in alias.conf'\n\n describe file(File.join(apache.conf_dir, '/mods-enabled/alias.conf')) do\n its('content') { should match(/-FollowSymLinks/).or match(/\\+SymLinksIfOwnerMatch/) }\n end\nend\n",
"desc": "Should include -FollowSymLinks or +SymLinksIfOwnerMatch for directories in alias.conf",
"impact": 1,
"title": "Disable Apache’s follows Symbolic Links for directories in alias.conf",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 194
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-12",
"code": "control 'apache-12' do\n impact 1.0\n title 'Disable Directory Listing for directories in alias.conf'\n desc 'Should include -Indexes for directories in alias.conf'\n\n describe file(File.join(apache.conf_dir, '/mods-enabled/alias.conf')) do\n its('content') { should match(/-Indexes/) }\n end\nend\n",
"desc": "Should include -Indexes for directories in alias.conf",
"impact": 1,
"title": "Disable Directory Listing for directories in alias.conf",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 204
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-13",
"code": "control 'apache-13' do\n impact 1.0\n title 'SSL honor cipher order'\n desc 'When choosing a cipher during an SSLv3 or TLSv1 handshake, normally the client\\'s preference is used. If this directive is enabled, the server\\'s preference will be used instead.'\n\n describe file(File.join(apache.conf_dir, '/mods-enabled/ssl.conf')) do\n its('content') { should match(/^\\s*?SSLHonorCipherOrder\\s+?On/i) }\n end\n\n sites_enabled_path = File.join(apache.conf_dir, '/sites-enabled/')\n loaded_sites = command('ls ' << sites_enabled_path).stdout.split.keep_if { |file_name| /.conf/.match(file_name) }\n\n loaded_sites.each do |id|\n virtual_host = file(File.join(sites_enabled_path, id)).content.gsub(/#.*$/, '').scan(%r{<virtualhost.*443(.*?)<\\/virtualhost>}im).flatten\n next if virtual_host.empty?\n describe virtual_host do\n it { should include(/^\\s*?SSLHonorCipherOrder\\s+?On/i) }\n end\n end\nend\n",
"desc": "When choosing a cipher during an SSLv3 or TLSv1 handshake, normally the client's preference is used. If this directive is enabled, the server's preference will be used instead.",
"impact": 1,
"title": "SSL honor cipher order",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 214
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 2e-06,
"start_time": "2018-02-09T10:17:23+01:00",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "apache-14",
"code": "control 'apache-14' do\n impact 1.0\n title 'Enable Apache Logging'\n desc 'Apache allows you to logging independently of your OS logging. It is wise to enable Apache logging, because it provides more information, such as the commands entered by users that have interacted with your Web server.'\n\n sites_enabled_path = File.join(apache.conf_dir, '/sites-enabled/')\n loaded_sites = command('ls ' << sites_enabled_path).stdout.split.keep_if { |file_name| /.conf/.match(file_name) }\n\n loaded_sites.each do |id|\n describe file(File.join(sites_enabled_path, id)).content.gsub(/#.*$/, '').scan(%r{<virtualhost(.*?)<\\/virtualhost>}im).flatten do\n it { should include(/CustomLog.*$/i) }\n end\n end\nend\n",
"desc": "Apache allows you to logging independently of your OS logging. It is wise to enable Apache logging, because it provides more information, such as the commands entered by users that have interacted with your Web server.",
"impact": 1,
"title": "Enable Apache Logging",
"source_location": {
"ref": "./.tmp/profiles/dist/unpacked/apache-baseline-2.0.1.tar.gz/apache-baseline-2.0.1/controls/apache_spec.rb",
"line": 235
},
"refs": [],
"tags": {},
"results": [
{
"status": "skipped",
"code_desc": "Operating System Detection",
"run_time": 3e-06,
"skip_message": "Skipped control due to only_if condition."
}
]
}
],
"status": "loaded",
"supports": null,
"attributes": null,
"groups": null
}
]
}
| {
"pile_set_name": "Github"
} |
// Detects when `range` is being invoked as an iteratee (e.g. `_.map(arr, _.range)`)
// so the extra callback arguments (index, collection) can be discarded.
var isIterateeCall = require('../internal/isIterateeCall');
/* Native method references for those with the same name as other `lodash` methods. */
// Cached locally so the library keeps working even if other code later
// shadows or patches the corresponding `lodash` method names.
var nativeCeil = Math.ceil,
nativeMax = Math.max;
/**
 * Builds an array of numbers stepping from `start` toward, but never
 * reaching, `end`. With a single argument the value is treated as `end`
 * and `start` defaults to `0`. When `end` is below `start` the result is
 * empty unless a negative `step` is supplied.
 *
 * @static
 * @memberOf _
 * @category Utility
 * @param {number} [start=0] The start of the range.
 * @param {number} end The end of the range.
 * @param {number} [step=1] The value to increment or decrement by.
 * @returns {Array} Returns the new array of numbers.
 * @example
 *
 * _.range(4);
 * // => [0, 1, 2, 3]
 *
 * _.range(1, 5);
 * // => [1, 2, 3, 4]
 *
 * _.range(0, 20, 5);
 * // => [0, 5, 10, 15]
 *
 * _.range(0, -4, -1);
 * // => [0, -1, -2, -3]
 *
 * _.range(1, 4, 0);
 * // => [1, 1, 1]
 *
 * _.range(0);
 * // => []
 */
function range(start, end, step) {
  // Invoked as an iteratee (e.g. by `_.map`)? Then the trailing
  // arguments are index/collection, not `end`/`step` — drop them.
  if (step && isIterateeCall(start, end, step)) {
    end = step = undefined;
  }
  // Coerce everything to numbers; NaN and `undefined` fall back to defaults.
  start = +start || 0;
  step = step == null ? 1 : (+step || 0);
  if (end == null) {
    // Single-argument form: `range(n)` counts from 0 up to `n`.
    end = start;
    start = 0;
  } else {
    end = +end || 0;
  }
  // A zero `step` still produces `ceil(end - start)` entries, each equal
  // to `start` (matching `_.range(1, 4, 0)` => [1, 1, 1]).
  var length = Math.max(Math.ceil((end - start) / (step || 1)), 0);
  // Preallocate with `Array(length)` so engines like Chakra and V8 stay in
  // their fast array modes. See https://youtu.be/XAqIpGU8ZZk#t=17m25s.
  var result = Array(length);
  for (var index = 0; index < length; index++) {
    result[index] = start;
    start += step;
  }
  return result;
}
// Expose `range` as the module's sole CommonJS export.
module.exports = range;
| {
"pile_set_name": "Github"
} |
Scanning .: .........................................................................................................................
Scanning . (Phase 2): ................................
res/drawable-xhdpi/account_photo_normal.png: Warning: The resource R.drawable.account_photo_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/add_handler.png: Warning: The resource R.drawable.add_handler appears to be unused [UnusedResources]
res/drawable-xhdpi/arrow_left.png: Warning: The resource R.drawable.arrow_left appears to be unused [UnusedResources]
res/drawable-xhdpi/arrow_right.png: Warning: The resource R.drawable.arrow_right appears to be unused [UnusedResources]
res/drawable-xhdpi/back_arrow.png: Warning: The resource R.drawable.back_arrow appears to be unused [UnusedResources]
res/drawable-xhdpi/blue_card.png: Warning: The resource R.drawable.blue_card appears to be unused [UnusedResources]
res/drawable-xhdpi/button_normal.png: Warning: The resource R.drawable.button_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/button_pressed.png: Warning: The resource R.drawable.button_pressed appears to be unused [UnusedResources]
res/drawable-xhdpi/buy_handler.png: Warning: The resource R.drawable.buy_handler appears to be unused [UnusedResources]
res/values/colors.xml:3: Warning: The resource R.drawable.black_bg appears to be unused [UnusedResources]
<drawable name="black_bg">#00000000</drawable>
~~~~~~~~~~~~~~~
res/values/colors.xml:4: Warning: The resource R.drawable.color_hight_light appears to be unused [UnusedResources]
<drawable name="color_hight_light">#00A2ff</drawable>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:5: Warning: The resource R.drawable.transparent appears to be unused [UnusedResources]
<drawable name="transparent">#00000000</drawable>
~~~~~~~~~~~~~~~~~~
res/values/colors.xml:6: Warning: The resource R.drawable.half_transparent appears to be unused [UnusedResources]
<drawable name="half_transparent">#cc000000</drawable>
~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:14: Warning: The resource R.color.available_space appears to be unused [UnusedResources]
<color name="available_space">#ff93de68</color>
~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:15: Warning: The resource R.color.no_space appears to be unused [UnusedResources]
<color name="no_space">#ffeb3d3c</color>
~~~~~~~~~~~~~~~
res/values/colors.xml:16: Warning: The resource R.color.game_detail_color appears to be unused [UnusedResources]
<color name="game_detail_color">#ffe0e0e0</color>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:17: Warning: The resource R.color.space_shortage_bg appears to be unused [UnusedResources]
<color name="space_shortage_bg">#FF22242A</color>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:18: Warning: The resource R.color.uninstall_mask_bg appears to be unused [UnusedResources]
<color name="uninstall_mask_bg">#CC000000 </color>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:22: Warning: The resource R.color.common_text_color appears to be unused [UnusedResources]
<color name="common_text_color">#FFFFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:23: Warning: The resource R.color.common_text_shadow_color appears to be unused [UnusedResources]
<color name="common_text_shadow_color">#7f000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:25: Warning: The resource R.color.text_color_white_10 appears to be unused [UnusedResources]
<color name="text_color_white_10">#19FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:26: Warning: The resource R.color.text_color_white_15 appears to be unused [UnusedResources]
<color name="text_color_white_15">#26FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:27: Warning: The resource R.color.text_color_white_20 appears to be unused [UnusedResources]
<color name="text_color_white_20">#33FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:28: Warning: The resource R.color.text_color_white_30 appears to be unused [UnusedResources]
<color name="text_color_white_30">#4CFFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:29: Warning: The resource R.color.text_color_white_40 appears to be unused [UnusedResources]
<color name="text_color_white_40">#66FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:30: Warning: The resource R.color.text_color_white_50 appears to be unused [UnusedResources]
<color name="text_color_white_50">#80FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:31: Warning: The resource R.color.text_color_white_60 appears to be unused [UnusedResources]
<color name="text_color_white_60">#99FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:32: Warning: The resource R.color.text_color_white_70 appears to be unused [UnusedResources]
<color name="text_color_white_70">#B3FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:33: Warning: The resource R.color.text_color_white_75 appears to be unused [UnusedResources]
<color name="text_color_white_75">#C0FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:34: Warning: The resource R.color.text_color_white_80 appears to be unused [UnusedResources]
<color name="text_color_white_80">#CCFFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:35: Warning: The resource R.color.text_color_white_90 appears to be unused [UnusedResources]
<color name="text_color_white_90">#E6FFFFFF</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:37: Warning: The resource R.color.text_color_black_10 appears to be unused [UnusedResources]
<color name="text_color_black_10">#19000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:38: Warning: The resource R.color.text_color_black_20 appears to be unused [UnusedResources]
<color name="text_color_black_20">#33000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:39: Warning: The resource R.color.text_color_black_30 appears to be unused [UnusedResources]
<color name="text_color_black_30">#4C000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:40: Warning: The resource R.color.text_color_black_40 appears to be unused [UnusedResources]
<color name="text_color_black_40">#66000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:41: Warning: The resource R.color.text_color_black_50 appears to be unused [UnusedResources]
<color name="text_color_black_50">#80000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:42: Warning: The resource R.color.text_color_black_60 appears to be unused [UnusedResources]
<color name="text_color_black_60">#99000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:43: Warning: The resource R.color.text_color_black_70 appears to be unused [UnusedResources]
<color name="text_color_black_70">#B3000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:44: Warning: The resource R.color.text_color_black_80 appears to be unused [UnusedResources]
<color name="text_color_black_80">#CC000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:45: Warning: The resource R.color.text_color_black_90 appears to be unused [UnusedResources]
<color name="text_color_black_90">#E6000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/colors.xml:46: Warning: The resource R.color.text_color_black_100 appears to be unused [UnusedResources]
<color name="text_color_black_100">#FE000000</color>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/drawable-xhdpi/dialog_bg.9.png: Warning: The resource R.drawable.dialog_bg appears to be unused [UnusedResources]
res/drawable-xhdpi/dialog_bg_mask.png: Warning: The resource R.drawable.dialog_bg_mask appears to be unused [UnusedResources]
res/values/dimens.xml:3: Warning: The resource R.dimen.text_size_16 appears to be unused [UnusedResources]
<dimen name="text_size_16">8dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:4: Warning: The resource R.dimen.text_size_18 appears to be unused [UnusedResources]
<dimen name="text_size_18">9dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:5: Warning: The resource R.dimen.text_size_20 appears to be unused [UnusedResources]
<dimen name="text_size_20">10dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:6: Warning: The resource R.dimen.text_size_22 appears to be unused [UnusedResources]
<dimen name="text_size_22">11dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:7: Warning: The resource R.dimen.text_size_24 appears to be unused [UnusedResources]
<dimen name="text_size_24">12dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:8: Warning: The resource R.dimen.text_size_25 appears to be unused [UnusedResources]
<dimen name="text_size_25">12.5dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:9: Warning: The resource R.dimen.text_size_26 appears to be unused [UnusedResources]
<dimen name="text_size_26">13dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:10: Warning: The resource R.dimen.text_size_28 appears to be unused [UnusedResources]
<dimen name="text_size_28">14dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:11: Warning: The resource R.dimen.text_size_30 appears to be unused [UnusedResources]
<dimen name="text_size_30">15dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:12: Warning: The resource R.dimen.text_size_32 appears to be unused [UnusedResources]
<dimen name="text_size_32">16dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:13: Warning: The resource R.dimen.text_size_33 appears to be unused [UnusedResources]
<dimen name="text_size_33">16.5dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:14: Warning: The resource R.dimen.text_size_34 appears to be unused [UnusedResources]
<dimen name="text_size_34">17dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:15: Warning: The resource R.dimen.text_size_35 appears to be unused [UnusedResources]
<dimen name="text_size_35">17.5dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:16: Warning: The resource R.dimen.text_size_36 appears to be unused [UnusedResources]
<dimen name="text_size_36">18dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:18: Warning: The resource R.dimen.text_size_40 appears to be unused [UnusedResources]
<dimen name="text_size_40">20dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:19: Warning: The resource R.dimen.text_size_42 appears to be unused [UnusedResources]
<dimen name="text_size_42">21dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:20: Warning: The resource R.dimen.text_size_44 appears to be unused [UnusedResources]
<dimen name="text_size_44">22dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:21: Warning: The resource R.dimen.text_size_46 appears to be unused [UnusedResources]
<dimen name="text_size_46">23dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:22: Warning: The resource R.dimen.text_size_48 appears to be unused [UnusedResources]
<dimen name="text_size_48">24dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:24: Warning: The resource R.dimen.text_size_52 appears to be unused [UnusedResources]
<dimen name="text_size_52">26dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:25: Warning: The resource R.dimen.text_size_54 appears to be unused [UnusedResources]
<dimen name="text_size_54">27dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:26: Warning: The resource R.dimen.text_size_56 appears to be unused [UnusedResources]
<dimen name="text_size_56">28dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:27: Warning: The resource R.dimen.text_size_66 appears to be unused [UnusedResources]
<dimen name="text_size_66">33dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:29: Warning: The resource R.dimen.progress_height appears to be unused [UnusedResources]
<dimen name="progress_height">12.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:30: Warning: The resource R.dimen.progress_game_item_width appears to be unused [UnusedResources]
<dimen name="progress_game_item_width">102.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:31: Warning: The resource R.dimen.progress_game_item_height appears to be unused [UnusedResources]
<dimen name="progress_game_item_height">8dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:32: Warning: The resource R.dimen.progress_horizontal_margin appears to be unused [UnusedResources]
<dimen name="progress_horizontal_margin">8.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:33: Warning: The resource R.dimen.v_progress_horizontal_margin appears to be unused [UnusedResources]
<dimen name="v_progress_horizontal_margin">11.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:35: Warning: The resource R.dimen.icon_width appears to be unused [UnusedResources]
<dimen name="icon_width">142dp</dimen>
~~~~~~~~~~~~~~~~~
res/values/dimens.xml:36: Warning: The resource R.dimen.icon_name_text_size appears to be unused [UnusedResources]
<dimen name="icon_name_text_size">20dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:38: Warning: The resource R.dimen.detail_name_text_size appears to be unused [UnusedResources]
<dimen name="detail_name_text_size">14sp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:40: Warning: The resource R.dimen.title_margin_top appears to be unused [UnusedResources]
<dimen name="title_margin_top">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:42: Warning: The resource R.dimen.detail_middle_margin_top appears to be unused [UnusedResources]
<dimen name="detail_middle_margin_top">9dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:43: Warning: The resource R.dimen.detail_button_margin_top appears to be unused [UnusedResources]
<dimen name="detail_button_margin_top">24dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:45: Warning: The resource R.dimen.detail_game_icon_name_top_margin appears to be unused [UnusedResources]
<dimen name="detail_game_icon_name_top_margin">54dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:47: Warning: The resource R.dimen.screen_shot_grid_horizontal_spacing appears to be unused [UnusedResources]
<dimen name="screen_shot_grid_horizontal_spacing">19.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:48: Warning: The resource R.dimen.screen_shot_grid_vertical_spacing appears to be unused [UnusedResources]
<dimen name="screen_shot_grid_vertical_spacing">21dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:77: Warning: The resource R.dimen.screen_width appears to be unused [UnusedResources]
<dimen name="screen_width">870dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:78: Warning: The resource R.dimen.screen_width_negative appears to be unused [UnusedResources]
<dimen name="screen_width_negative">-870dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:84: Warning: The resource R.dimen.tab_category_margin_left appears to be unused [UnusedResources]
<dimen name="tab_category_margin_left">175dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:85: Warning: The resource R.dimen.tab_rank_margin_left appears to be unused [UnusedResources]
<dimen name="tab_rank_margin_left">280dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:86: Warning: The resource R.dimen.tab_user_margin_left appears to be unused [UnusedResources]
<dimen name="tab_user_margin_left">385dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:89: Warning: The resource R.dimen.tab_highlight_width appears to be unused [UnusedResources]
<dimen name="tab_highlight_width">75dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:98: Warning: The resource R.dimen.title_bar_left_margin appears to be unused [UnusedResources]
<dimen name="title_bar_left_margin">22.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:99: Warning: The resource R.dimen.title_bar_top_margin appears to be unused [UnusedResources]
<dimen name="title_bar_top_margin">24.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:100: Warning: The resource R.dimen.title_divider_left_margin appears to be unused [UnusedResources]
<dimen name="title_divider_left_margin">6dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:101: Warning: The resource R.dimen.title_divider_top_margin appears to be unused [UnusedResources]
<dimen name="title_divider_top_margin">-1dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:103: Warning: The resource R.dimen.category_scroll_margin_top appears to be unused [UnusedResources]
<dimen name="category_scroll_margin_top">78dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:104: Warning: The resource R.dimen.subject_scroll_margin_top appears to be unused [UnusedResources]
<dimen name="subject_scroll_margin_top">94dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:105: Warning: The resource R.dimen.category_item_focus_width appears to be unused [UnusedResources]
<dimen name="category_item_focus_width">172dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:106: Warning: The resource R.dimen.category_item_focus_height appears to be unused [UnusedResources]
<dimen name="category_item_focus_height">110dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:108: Warning: The resource R.dimen.game_info_margin_left appears to be unused [UnusedResources]
<dimen name="game_info_margin_left">70dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:109: Warning: The resource R.dimen.game_info_margin_top appears to be unused [UnusedResources]
<dimen name="game_info_margin_top">71.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:110: Warning: The resource R.dimen.game_info_divider_margin_top appears to be unused [UnusedResources]
<dimen name="game_info_divider_margin_top">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:111: Warning: The resource R.dimen.game_thumb_height appears to be unused [UnusedResources]
<dimen name="game_thumb_height">180dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:112: Warning: The resource R.dimen.game_thumb_margin_top appears to be unused [UnusedResources]
<dimen name="game_thumb_margin_top">27.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:113: Warning: The resource R.dimen.game_divider_margin_top_55px appears to be unused [UnusedResources]
<dimen name="game_divider_margin_top_55px">27.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:114: Warning: The resource R.dimen.game_margin_top_50px appears to be unused [UnusedResources]
<dimen name="game_margin_top_50px">25dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:115: Warning: The resource R.dimen.game_margin_top_20px appears to be unused [UnusedResources]
<dimen name="game_margin_top_20px">10dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:116: Warning: The resource R.dimen.game_divider_margin_top_10px appears to be unused [UnusedResources]
<dimen name="game_divider_margin_top_10px">5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:117: Warning: The resource R.dimen.game_divider_margin_top_20px appears to be unused [UnusedResources]
<dimen name="game_divider_margin_top_20px">10dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:118: Warning: The resource R.dimen.game_divider_margin_bottom_50px appears to be unused [UnusedResources]
<dimen name="game_divider_margin_bottom_50px">25dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:119: Warning: The resource R.dimen.game_margin_top_372px appears to be unused [UnusedResources]
<dimen name="game_margin_top_372px">186dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:120: Warning: The resource R.dimen.screen_shot_item_focus_width appears to be unused [UnusedResources]
<dimen name="screen_shot_item_focus_width">366dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:121: Warning: The resource R.dimen.screen_shot_item_focus_height appears to be unused [UnusedResources]
<dimen name="screen_shot_item_focus_height">226dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:122: Warning: The resource R.dimen.relative_item_focus_width appears to be unused [UnusedResources]
<dimen name="relative_item_focus_width">172dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:123: Warning: The resource R.dimen.relative_item_focus_height appears to be unused [UnusedResources]
<dimen name="relative_item_focus_height">110dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:124: Warning: The resource R.dimen.base_grid_item_width appears to be unused [UnusedResources]
<dimen name="base_grid_item_width">172dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:125: Warning: The resource R.dimen.base_grid_item_height appears to be unused [UnusedResources]
<dimen name="base_grid_item_height">110dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:127: Warning: The resource R.dimen.game_icon_item_width appears to be unused [UnusedResources]
<dimen name="game_icon_item_width">142dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:128: Warning: The resource R.dimen.game_icon_item_height appears to be unused [UnusedResources]
<dimen name="game_icon_item_height">80dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:129: Warning: The resource R.dimen.container_margin_left appears to be unused [UnusedResources]
<dimen name="container_margin_left">77.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:130: Warning: The resource R.dimen.item_default_height appears to be unused [UnusedResources]
<dimen name="item_default_height">130dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:131: Warning: The resource R.dimen.item_horizontal_space appears to be unused [UnusedResources]
<dimen name="item_horizontal_space">39dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:132: Warning: The resource R.dimen.container_place_holder_height appears to be unused [UnusedResources]
<dimen name="container_place_holder_height">130dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:134: Warning: The resource R.dimen.screen_shot_thumb_width appears to be unused [UnusedResources]
<dimen name="screen_shot_thumb_width">320dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:135: Warning: The resource R.dimen.screen_shot_thumb_height appears to be unused [UnusedResources]
<dimen name="screen_shot_thumb_height">180dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:137: Warning: The resource R.dimen.focus_screen_shot_thumb_x_diff appears to be unused [UnusedResources]
<dimen name="focus_screen_shot_thumb_x_diff">23dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:138: Warning: The resource R.dimen.focus_screen_shot_thumb_y_diff appears to be unused [UnusedResources]
<dimen name="focus_screen_shot_thumb_y_diff">23dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:139: Warning: The resource R.dimen.left_margin appears to be unused [UnusedResources]
<dimen name="left_margin">70dp</dimen>
~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:140: Warning: The resource R.dimen.game_info_right_margin appears to be unused [UnusedResources]
<dimen name="game_info_right_margin">55dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:141: Warning: The resource R.dimen.horizontal_margin appears to be unused [UnusedResources]
<dimen name="horizontal_margin">38dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:142: Warning: The resource R.dimen.item_border_width appears to be unused [UnusedResources]
<dimen name="item_border_width">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:143: Warning: The resource R.dimen.item_border_height appears to be unused [UnusedResources]
<dimen name="item_border_height">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:144: Warning: The resource R.dimen.item_margin_h appears to be unused [UnusedResources]
<dimen name="item_margin_h">30dp</dimen>
~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:145: Warning: The resource R.dimen.FOCUS_DIFF_X appears to be unused [UnusedResources]
<dimen name="FOCUS_DIFF_X">15dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:146: Warning: The resource R.dimen.FOCUS_DIFF_Y appears to be unused [UnusedResources]
<dimen name="FOCUS_DIFF_Y">15dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:147: Warning: The resource R.dimen.progress_top_mafgin appears to be unused [UnusedResources]
<dimen name="progress_top_mafgin">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:148: Warning: The resource R.dimen.summary_margin_top appears to be unused [UnusedResources]
<dimen name="summary_margin_top">72dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:149: Warning: The resource R.dimen.seekbar_left_margin appears to be unused [UnusedResources]
<dimen name="seekbar_left_margin">15dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:151: Warning: The resource R.dimen.DIALOG_WIDTH appears to be unused [UnusedResources]
<dimen name="DIALOG_WIDTH">637.5dp</dimen>
~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:152: Warning: The resource R.dimen.DIALOG_HEIGHT appears to be unused [UnusedResources]
<dimen name="DIALOG_HEIGHT">372.5dp</dimen>
~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:154: Warning: The resource R.dimen.ACCOUNT_ITEM_SIZE appears to be unused [UnusedResources]
<dimen name="ACCOUNT_ITEM_SIZE">93dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:155: Warning: The resource R.dimen.account_top_margin appears to be unused [UnusedResources]
<dimen name="account_top_margin">65dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:156: Warning: The resource R.dimen.mine_view_left_margin appears to be unused [UnusedResources]
<dimen name="mine_view_left_margin">3dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:157: Warning: The resource R.dimen.mine_mirror_view_width appears to be unused [UnusedResources]
<dimen name="mine_mirror_view_width">201dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:158: Warning: The resource R.dimen.mine_mirror_view_height appears to be unused [UnusedResources]
<dimen name="mine_mirror_view_height">100dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:159: Warning: The resource R.dimen.MIRROR_HEIGHTER appears to be unused [UnusedResources]
<dimen name="MIRROR_HEIGHTER">93dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:160: Warning: The resource R.dimen.right_space_width appears to be unused [UnusedResources]
<dimen name="right_space_width">80dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:161: Warning: The resource R.dimen.TAB_HORIZONTAL_MARGIN appears to be unused [UnusedResources]
<dimen name="TAB_HORIZONTAL_MARGIN">120dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:162: Warning: The resource R.dimen.ITEM_FOCUS_DIFF_X appears to be unused [UnusedResources]
<dimen name="ITEM_FOCUS_DIFF_X">10dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/dimens.xml:174: Warning: The resource R.dimen.detail_y_distance appears to be unused [UnusedResources]
<dimen name="detail_y_distance">423dp</dimen>
~~~~~~~~~~~~~~~~~~~~~~~~
res/drawable-xhdpi/divider.png: Warning: The resource R.drawable.divider appears to be unused [UnusedResources]
res/drawable-xhdpi/focus.9.png: Warning: The resource R.drawable.focus appears to be unused [UnusedResources]
res/drawable-xhdpi/game_detail_content_bottom_mask.9.png: Warning: The resource R.drawable.game_detail_content_bottom_mask appears to be unused [UnusedResources]
res/drawable-xhdpi/game_detail_mask.png: Warning: The resource R.drawable.game_detail_mask appears to be unused [UnusedResources]
res/drawable-xhdpi/guide_bg.jpg: Warning: The resource R.drawable.guide_bg appears to be unused [UnusedResources]
res/drawable-xhdpi/handler_connect_success.png: Warning: The resource R.drawable.handler_connect_success appears to be unused [UnusedResources]
res/drawable-xhdpi/handler_loading.png: Warning: The resource R.drawable.handler_loading appears to be unused [UnusedResources]
res/drawable-xhdpi/handler_mode.png: Warning: The resource R.drawable.handler_mode appears to be unused [UnusedResources]
res/drawable-xhdpi/handler_start_searching.png: Warning: The resource R.drawable.handler_start_searching appears to be unused [UnusedResources]
res/drawable-xhdpi/handler_url.png: Warning: The resource R.drawable.handler_url appears to be unused [UnusedResources]
res/drawable-xhdpi/icon_default_app.png: Warning: The resource R.drawable.icon_default_app appears to be unused [UnusedResources]
res/drawable-xhdpi/icon_screenshot.9.png: Warning: The resource R.drawable.icon_screenshot appears to be unused [UnusedResources]
res/drawable-xhdpi/install_icon_select.png: Warning: The resource R.drawable.install_icon_select appears to be unused [UnusedResources]
res/drawable-xhdpi/list_bottom_normal.png: Warning: The resource R.drawable.list_bottom_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/list_focus.png: Warning: The resource R.drawable.list_focus appears to be unused [UnusedResources]
res/drawable-xhdpi/list_middle_normal.png: Warning: The resource R.drawable.list_middle_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/list_top_normal.png: Warning: The resource R.drawable.list_top_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/loading_bar1.png: Warning: The resource R.drawable.loading_bar1 appears to be unused [UnusedResources]
res/drawable-xhdpi/loading_bar2.png: Warning: The resource R.drawable.loading_bar2 appears to be unused [UnusedResources]
res/drawable-xhdpi/loading_bar3.png: Warning: The resource R.drawable.loading_bar3 appears to be unused [UnusedResources]
res/drawable-xhdpi/loading_bar4.png: Warning: The resource R.drawable.loading_bar4 appears to be unused [UnusedResources]
res/drawable-xhdpi/loading_bar5.png: Warning: The resource R.drawable.loading_bar5 appears to be unused [UnusedResources]
res/drawable-xhdpi/main_bg.png: Warning: The resource R.drawable.main_bg appears to be unused [UnusedResources]
res/drawable-xhdpi/mask_left.png: Warning: The resource R.drawable.mask_left appears to be unused [UnusedResources]
res/drawable-xhdpi/mask_right.png: Warning: The resource R.drawable.mask_right appears to be unused [UnusedResources]
res/drawable-xhdpi/mine_account.png: Warning: The resource R.drawable.mine_account appears to be unused [UnusedResources]
res/drawable-xhdpi/mine_handler.png: Warning: The resource R.drawable.mine_handler appears to be unused [UnusedResources]
res/drawable-xhdpi/nav_highlight.png: Warning: The resource R.drawable.nav_highlight appears to be unused [UnusedResources]
res/drawable-xhdpi/operator_controller.png: Warning: The resource R.drawable.operator_controller appears to be unused [UnusedResources]
res/drawable-xhdpi/operator_controller_big.png: Warning: The resource R.drawable.operator_controller_big appears to be unused [UnusedResources]
res/drawable-xhdpi/operator_handler_big.png: Warning: The resource R.drawable.operator_handler_big appears to be unused [UnusedResources]
res/drawable-xhdpi/operator_mouse.png: Warning: The resource R.drawable.operator_mouse appears to be unused [UnusedResources]
res/drawable-xhdpi/operator_mouse_big.png: Warning: The resource R.drawable.operator_mouse_big appears to be unused [UnusedResources]
res/drawable-xhdpi/progress_bar_bg.9.png: Warning: The resource R.drawable.progress_bar_bg appears to be unused [UnusedResources]
res/drawable-xhdpi/progress_bar_fg.9.png: Warning: The resource R.drawable.progress_bar_fg appears to be unused [UnusedResources]
res/anim/recommend_translate_in.xml: Warning: The resource R.anim.recommend_translate_in appears to be unused [UnusedResources]
res/anim/recommend_translate_out.xml: Warning: The resource R.anim.recommend_translate_out appears to be unused [UnusedResources]
res/drawable-xhdpi/screen_view_seekpoint_highlight.png: Warning: The resource R.drawable.screen_view_seekpoint_highlight appears to be unused [UnusedResources]
res/drawable-xhdpi/screen_view_seekpoint_normal.png: Warning: The resource R.drawable.screen_view_seekpoint_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/screenshot_browse_foucs.9.png: Warning: The resource R.drawable.screenshot_browse_foucs appears to be unused [UnusedResources]
res/drawable-xhdpi/screenshot_mask_left.png: Warning: The resource R.drawable.screenshot_mask_left appears to be unused [UnusedResources]
res/drawable-xhdpi/screenshot_mask_right.png: Warning: The resource R.drawable.screenshot_mask_right appears to be unused [UnusedResources]
res/drawable-xhdpi/small_button_normal.png: Warning: The resource R.drawable.small_button_normal appears to be unused [UnusedResources]
res/drawable-xhdpi/small_button_pressed.png: Warning: The resource R.drawable.small_button_pressed appears to be unused [UnusedResources]
res/values/strings.xml:8: Warning: The resource R.string.fzlt_hei appears to be unused [UnusedResources]
<string name="fzlt_hei">FZLTH_GBK.ttf</string>
~~~~~~~~~~~~~~~
res/values/strings.xml:11: Warning: The resource R.string.nav_tab_focus_ttf appears to be unused [UnusedResources]
<string name="nav_tab_focus_ttf">@string/fzlt_cu_hei</string>
~~~~~~~~~~~~~~~~~~~~~~~~
res/values/strings.xml:13: Warning: The resource R.string.tab_recommend appears to be unused [UnusedResources]
<string name="tab_recommend">推荐</string>
~~~~~~~~~~~~~~~~~~~~
res/values/strings.xml:14: Warning: The resource R.string.tab_category appears to be unused [UnusedResources]
<string name="tab_category">分类</string>
~~~~~~~~~~~~~~~~~~~
res/values/strings.xml:15: Warning: The resource R.string.tab_rank appears to be unused [UnusedResources]
<string name="tab_rank">排行</string>
~~~~~~~~~~~~~~~
res/values/strings.xml:16: Warning: The resource R.string.tab_user appears to be unused [UnusedResources]
<string name="tab_user">用户</string>
~~~~~~~~~~~~~~~
res/values/strings.xml:19: Warning: The resource R.string.nav_tab_category appears to be unused [UnusedResources]
<string name="nav_tab_category">分类</string>
~~~~~~~~~~~~~~~~~~~~~~~
res/values/strings.xml:20: Warning: The resource R.string.nav_tab_rank appears to be unused [UnusedResources]
<string name="nav_tab_rank">排行</string>
~~~~~~~~~~~~~~~~~~~
res/values/strings.xml:21: Warning: The resource R.string.nav_tab_user appears to be unused [UnusedResources]
<string name="nav_tab_user">用户</string>
~~~~~~~~~~~~~~~~~~~
res/drawable-xhdpi/subject_mask.png: Warning: The resource R.drawable.subject_mask appears to be unused [UnusedResources]
res/color/tab_text.xml: Warning: The resource R.color.tab_text appears to be unused [UnusedResources]
res/drawable-xhdpi/tag_new.png: Warning: The resource R.drawable.tag_new appears to be unused [UnusedResources]
res/drawable-xhdpi/title_divider.png: Warning: The resource R.drawable.title_divider appears to be unused [UnusedResources]
res/drawable-xhdpi/title_icon_arrow.png: Warning: The resource R.drawable.title_icon_arrow appears to be unused [UnusedResources]
res/drawable-xhdpi/video_screen_shot.png: Warning: The resource R.drawable.video_screen_shot appears to be unused [UnusedResources]
res/drawable-xhdpi/video_thumb.png: Warning: The resource R.drawable.video_thumb appears to be unused [UnusedResources]
0 errors, 205 warnings
| {
"pile_set_name": "Github"
} |
package com.vipulasri.ticketview;
import android.content.Context;
import android.content.res.Resources;
import android.util.TypedValue;
public class Utils {
public static int dpToPx(float dp, Context context) {
return dpToPx(dp, context.getResources());
}
public static int dpToPx(float dp, Resources resources) {
float px = TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, dp, resources.getDisplayMetrics());
return (int) px;
}
public static boolean isJellyBeanAndAbove() {
return android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.JELLY_BEAN_MR1;
}
} | {
"pile_set_name": "Github"
} |
require 'spec_helper'
# Verifies that the custom renderer wraps the tab list in its own container
# div and renders the four stubbed list items.
describe 'custom rendering' do
  before(:each) { visit '/' }

  it 'should work' do
    [
      '<div class="tabs" class="from_custom_renderer">',
      '<li>stub</li><li>stub</li><li>stub</li><li>stub</li>'
    ].each do |fragment|
      page.html.should include(fragment)
    end
  end
end
| {
"pile_set_name": "Github"
} |
{stdenv, darwin}:

/*
 * This is needed to build GCC on Darwin.
 *
 * These are the collection of headers that would normally be available under
 * /usr/include in macOS machines with command line tools installed. They need
 * to be in one folder for gcc to use them correctly.
 */
stdenv.mkDerivation {
  name = "darwin-usr-include";
  # stdenv.libc supplies the libc headers; darwin.CF supplies the
  # CoreFoundation framework headers symlinked below.
  buildInputs = [ darwin.CF stdenv.libc ];
  # Builds a flat include tree in $out: every libc header is symlinked at the
  # top level, and the CoreFoundation framework headers are exposed under a
  # CoreFoundation/ subdirectory (the layout gcc expects for <CoreFoundation/...>).
  buildCommand = ''
    mkdir -p $out
    cd $out
    ln -sf ${stdenv.libc}/include/* .
    mkdir CoreFoundation
    ln -sf ${darwin.CF}/Library/Frameworks/CoreFoundation.framework/Headers/* CoreFoundation
  '';
  # Only meaningful on Darwin hosts.
  meta.platforms = stdenv.lib.platforms.darwin;
}
| {
"pile_set_name": "Github"
} |
/**
*
* WARNING! This file was autogenerated by:
* _ _ _ _ __ __
* | | | | | | |\ \ / /
* | | | | |_| | \ V /
* | | | | _ | / \
* | |_| | | | |/ /^\ \
* \___/\_| |_/\/ \/
*
* This file was autogenerated by UnrealHxGenerator using UHT definitions.
* It only includes UPROPERTYs and UFUNCTIONs. Do not modify it!
* In order to add more definitions, create or edit a type with the same name/package, but with an `_Extra` suffix
**/
package unreal.umg;
/**
The button is a click-able primitive widget to enable basic interaction, you
can place any other widget inside a button to make a more complex and
interesting click-able element in your UI.
* Single Child
* Clickable
**/
@:umodule("UMG")
@:glueCppIncludes("UMG.h")
@:uextern @:uclass extern class UButton extends unreal.umg.UContentWidget {
  /**
    NOTE(review): undocumented in the generated UHT dump -- presumably fired
    when the pointer stops hovering the button (counterpart of OnHovered);
    confirm against the UButton UHT definition before relying on this.
  **/
  @:uproperty public var OnUnhovered : unreal.umg.FOnButtonHoverEvent;
  /**
    NOTE(review): undocumented in the generated UHT dump -- presumably fired
    when the pointer starts hovering the button; confirm against the UButton
    UHT definition before relying on this.
  **/
  @:uproperty public var OnHovered : unreal.umg.FOnButtonHoverEvent;

  /**
    Called when the button is released
  **/
  @:uproperty public var OnReleased : unreal.umg.FOnButtonReleasedEvent;

  /**
    Called when the button is pressed
  **/
  @:uproperty public var OnPressed : unreal.umg.FOnButtonPressedEvent;

  /**
    Called when the button is clicked
  **/
  @:uproperty public var OnClicked : unreal.umg.FOnButtonClickedEvent;

  /**
    Sometimes a button should only be mouse-clickable and never keyboard focusable.
  **/
  @:uproperty public var IsFocusable : Bool;

  /**
    The type of keyboard/gamepad button press action required by the user to trigger the buttons 'Click'
  **/
  @:uproperty public var PressMethod : unreal.slatecore.EButtonPressMethod;

  /**
    The type of touch action required by the user to trigger the buttons 'Click'
  **/
  @:uproperty public var TouchMethod : unreal.slatecore.EButtonTouchMethod;

  /**
    The type of mouse action required by the user to trigger the buttons 'Click'
  **/
  @:uproperty public var ClickMethod : unreal.slatecore.EButtonClickMethod;

  /**
    The color multiplier for the button background
  **/
  @:uproperty public var BackgroundColor : unreal.FLinearColor;

  /**
    The color multiplier for the button content
  **/
  @:uproperty public var ColorAndOpacity : unreal.FLinearColor;

  /**
    The button style used at runtime
  **/
  @:uproperty public var WidgetStyle : unreal.slatecore.FButtonStyle;

  /**
    The template style asset, used to seed the mutable instance of the style.
  **/
  @:deprecated @:uproperty public var Style_DEPRECATED : unreal.slatecore.USlateWidgetStyleAsset;

  /**
    Sets the color multiplier for the button background
  **/
  @:ufunction(BlueprintCallable) @:final public function SetStyle(InStyle : unreal.Const<unreal.PRef<unreal.slatecore.FButtonStyle>>) : Void;

  /**
    Sets the color multiplier for the button content
  **/
  @:ufunction(BlueprintCallable) @:final public function SetColorAndOpacity(InColorAndOpacity : unreal.FLinearColor) : Void;

  /**
    Sets the color multiplier for the button background
  **/
  @:ufunction(BlueprintCallable) @:final public function SetBackgroundColor(InBackgroundColor : unreal.FLinearColor) : Void;

  /**
    Returns true if the user is actively pressing the button. Do not use this for detecting 'Clicks', use the OnClicked event instead.
    @return true if the user is actively pressing the button otherwise false.
  **/
  @:ufunction(BlueprintCallable) @:thisConst @:final public function IsPressed() : Bool;

  @:ufunction(BlueprintCallable) @:final public function SetClickMethod(InClickMethod : unreal.slatecore.EButtonClickMethod) : Void;
  @:ufunction(BlueprintCallable) @:final public function SetTouchMethod(InTouchMethod : unreal.slatecore.EButtonTouchMethod) : Void;
  @:ufunction(BlueprintCallable) @:final public function SetPressMethod(InPressMethod : unreal.slatecore.EButtonPressMethod) : Void;
}
| {
"pile_set_name": "Github"
} |
<snippet>
<content><![CDATA[export {
${1:$TM_SELECTED_TEXT}
}
]]></content>
<tabTrigger>export</tabTrigger>
<scope>source.flow</scope>
<description>export</description>
</snippet>
| {
"pile_set_name": "Github"
} |
/* ocsp_ht.c */
/*
* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
* 2006.
*/
/* ====================================================================
* Copyright (c) 2006 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
#include "e_os.h"
#include <openssl/asn1.h>
#include <openssl/ocsp.h>
#include <openssl/err.h>
#include <openssl/buffer.h>
#ifdef OPENSSL_SYS_SUNOS
# define strtoul (unsigned long)strtol
#endif /* OPENSSL_SYS_SUNOS */
/* Stateful OCSP request code, supporting non-blocking I/O */
/* Opaque OCSP request status structure */
struct ocsp_req_ctx_st {
    int state;                  /* Current I/O state (OHS_* value) */
    unsigned char *iobuf;       /* Line buffer */
    int iobuflen;               /* Line buffer length */
    BIO *io;                    /* BIO to perform I/O with (not owned) */
    BIO *mem;                   /* Memory BIO response is built into (owned) */
    unsigned long asn1_len;     /* ASN1 length of response */
    unsigned long max_resp_len; /* Maximum length of response */
};
#define OCSP_MAX_RESP_LENGTH    (100 * 1024)
/*
 * Fixed: the original definition ended in a stray ';', which silently
 * injected an empty statement at every use site (and would be a syntax
 * error in expression context).
 */
#define OCSP_MAX_LINE_LEN       4096

/* OCSP states */

/* If set no reading should be performed */
#define OHS_NOREAD              0x1000
/* Error condition */
#define OHS_ERROR               (0 | OHS_NOREAD)
/* First line being read */
#define OHS_FIRSTLINE           1
/* MIME headers being read */
#define OHS_HEADERS             2
/* OCSP initial header (tag + length) being read */
#define OHS_ASN1_HEADER         3
/* OCSP content octets being read */
#define OHS_ASN1_CONTENT        4
/* First call: ready to start I/O */
#define OHS_ASN1_WRITE_INIT     (5 | OHS_NOREAD)
/* Request being sent */
#define OHS_ASN1_WRITE          (6 | OHS_NOREAD)
/* Request being flushed */
#define OHS_ASN1_FLUSH          (7 | OHS_NOREAD)
/* Completed */
#define OHS_DONE                (8 | OHS_NOREAD)
/* Headers set, no final \r\n included */
#define OHS_HTTP_HEADER         (9 | OHS_NOREAD)

static int parse_http_line1(char *line);
/*
 * Allocate and initialise an OCSP_REQ_CTX operating over the BIO 'io'.
 * 'maxline' bounds the HTTP line buffer; any value <= 0 selects the
 * default OCSP_MAX_LINE_LEN.  Returns NULL on allocation failure.
 */
OCSP_REQ_CTX *OCSP_REQ_CTX_new(BIO *io, int maxline)
{
    OCSP_REQ_CTX *rctx = OPENSSL_malloc(sizeof(OCSP_REQ_CTX));

    if (rctx == NULL)
        return NULL;

    /* Start in the error state; callers must set up a request first. */
    rctx->state = OHS_ERROR;
    rctx->max_resp_len = OCSP_MAX_RESP_LENGTH;
    rctx->mem = BIO_new(BIO_s_mem());
    rctx->io = io;
    rctx->asn1_len = 0;

    if (maxline > 0)
        rctx->iobuflen = maxline;
    else
        rctx->iobuflen = OCSP_MAX_LINE_LEN;
    rctx->iobuf = OPENSSL_malloc(rctx->iobuflen);

    if (rctx->iobuf == NULL || rctx->mem == NULL) {
        /* OCSP_REQ_CTX_free() copes with a partially built context. */
        OCSP_REQ_CTX_free(rctx);
        return NULL;
    }
    return rctx;
}
/*
 * Free an OCSP_REQ_CTX and the resources it owns (memory BIO and line
 * buffer).  The I/O BIO (rctx->io) is not owned by the context and is
 * left untouched.  A NULL argument is a no-op, matching the usual
 * OpenSSL *_free convention (the original dereferenced NULL).
 */
void OCSP_REQ_CTX_free(OCSP_REQ_CTX *rctx)
{
    if (!rctx)
        return;
    if (rctx->mem)
        BIO_free(rctx->mem);
    if (rctx->iobuf)
        OPENSSL_free(rctx->iobuf);
    OPENSSL_free(rctx);
}
/*
 * Return the internal memory BIO the response is accumulated into.
 * The "get0" name signals no ownership transfer: the caller must not
 * free the returned BIO.
 */
BIO *OCSP_REQ_CTX_get0_mem_bio(OCSP_REQ_CTX *rctx)
{
    return rctx->mem;
}
/*
 * Set the maximum acceptable response length in bytes.  Passing 0
 * restores the default OCSP_MAX_RESP_LENGTH (100 KiB).
 */
void OCSP_set_max_response_length(OCSP_REQ_CTX *rctx, unsigned long len)
{
    rctx->max_resp_len = (len == 0) ? OCSP_MAX_RESP_LENGTH : len;
}
/*
 * Append the final HTTP headers (Content-Type / Content-Length, plus the
 * blank line terminating the header section) followed by the DER encoding
 * of 'val' to the request being built in the memory BIO, then arm the
 * state machine to start writing.  Returns 1 on success, 0 on failure.
 */
int OCSP_REQ_CTX_i2d(OCSP_REQ_CTX *rctx, const ASN1_ITEM *it, ASN1_VALUE *val)
{
    static const char req_hdr[] =
        "Content-Type: application/ocsp-request\r\n"
        "Content-Length: %d\r\n\r\n";
    /* First pass with a NULL buffer just computes the DER length. */
    int reqlen = ASN1_item_i2d(val, NULL, it);
    if (BIO_printf(rctx->mem, req_hdr, reqlen) <= 0)
        return 0;
    if (ASN1_item_i2d_bio(it, rctx->mem, val) <= 0)
        return 0;
    rctx->state = OHS_ASN1_WRITE_INIT;
    return 1;
}
/*
 * Drive the non-blocking exchange to completion and, once the full
 * response has been collected in the memory BIO, decode it as the ASN.1
 * item 'it' into *pval.
 *
 * Returns 1 on success, 0 on error, and -1 (propagated from
 * OCSP_REQ_CTX_nbio) when I/O should be retried later.
 */
int OCSP_REQ_CTX_nbio_d2i(OCSP_REQ_CTX *rctx,
                          ASN1_VALUE **pval, const ASN1_ITEM *it)
{
    int rv, len;
    const unsigned char *p;
    rv = OCSP_REQ_CTX_nbio(rctx);
    if (rv != 1)
        return rv;
    len = BIO_get_mem_data(rctx->mem, &p);
    *pval = ASN1_item_d2i(NULL, &p, len, it);
    if (*pval == NULL) {
        /* Decoding failed: poison the context so further calls fail fast. */
        rctx->state = OHS_ERROR;
        return 0;
    }
    return 1;
}
/*
 * Write the HTTP/1.0 request line ("<op> <path> HTTP/1.0\r\n") into the
 * memory BIO.  A NULL path defaults to "/".  Returns 1 on success,
 * 0 on failure.
 */
int OCSP_REQ_CTX_http(OCSP_REQ_CTX *rctx, const char *op, const char *path)
{
    static const char http_hdr[] = "%s %s HTTP/1.0\r\n";

    if (path == NULL)
        path = "/";

    if (BIO_printf(rctx->mem, http_hdr, op, path) <= 0)
        return 0;

    /* Headers may still be appended; the terminating blank line is
     * emitted later by the state machine. */
    rctx->state = OHS_HTTP_HEADER;
    return 1;
}
/*
 * Convenience wrapper: serialise an OCSP_REQUEST into the context via
 * the generic OCSP_REQ_CTX_i2d.  Returns 1 on success, 0 on failure.
 */
int OCSP_REQ_CTX_set1_req(OCSP_REQ_CTX *rctx, OCSP_REQUEST *req)
{
    return OCSP_REQ_CTX_i2d(rctx, ASN1_ITEM_rptr(OCSP_REQUEST),
                            (ASN1_VALUE *)req);
}
/*
 * Append one HTTP header line "name: value\r\n" (or just "name\r\n" when
 * value is NULL) to the request under construction.  'name' is required.
 * Returns 1 on success, 0 on failure.
 */
int OCSP_REQ_CTX_add1_header(OCSP_REQ_CTX *rctx,
                             const char *name, const char *value)
{
    if (!name)
        return 0;
    if (BIO_puts(rctx->mem, name) <= 0)
        return 0;
    if (value) {
        if (BIO_write(rctx->mem, ": ", 2) != 2)
            return 0;
        if (BIO_puts(rctx->mem, value) <= 0)
            return 0;
    }
    if (BIO_write(rctx->mem, "\r\n", 2) != 2)
        return 0;
    /* Header section still open: the blank line is added when I/O starts. */
    rctx->state = OHS_HTTP_HEADER;
    return 1;
}
/*
 * Build a ready-to-send OCSP POST request context for BIO 'io'.
 * When 'req' is NULL the caller may add headers and attach the request
 * body later.  Returns the new context, or NULL on failure (in which
 * case any partial context is freed).
 */
OCSP_REQ_CTX *OCSP_sendreq_new(BIO *io, const char *path, OCSP_REQUEST *req,
                               int maxline)
{
    OCSP_REQ_CTX *rctx = NULL;
    rctx = OCSP_REQ_CTX_new(io, maxline);
    if (!rctx)
        return NULL;

    if (!OCSP_REQ_CTX_http(rctx, "POST", path))
        goto err;

    if (req && !OCSP_REQ_CTX_set1_req(rctx, req))
        goto err;

    return rctx;

 err:
    OCSP_REQ_CTX_free(rctx);
    return NULL;
}
/*
 * Parse the HTTP response status line.  This will look like this:
 * "HTTP/1.0 200 OK".  We need to obtain the numeric code and (optional)
 * informational message.
 *
 * Mutates 'line' in place (NUL-terminates the code and trims the
 * message).  Returns 1 iff the line parses and the status code is 200;
 * otherwise raises an OCSP error (with code/reason data) and returns 0.
 */
static int parse_http_line1(char *line)
{
    int retcode;
    char *p, *q, *r;
    /* Skip to first white space (passed protocol info) */

    for (p = line; *p && !isspace((unsigned char)*p); p++)
        continue;
    if (!*p) {
        OCSPerr(OCSP_F_PARSE_HTTP_LINE1, OCSP_R_SERVER_RESPONSE_PARSE_ERROR);
        return 0;
    }

    /* Skip past white space to start of response code */
    while (*p && isspace((unsigned char)*p))
        p++;

    if (!*p) {
        OCSPerr(OCSP_F_PARSE_HTTP_LINE1, OCSP_R_SERVER_RESPONSE_PARSE_ERROR);
        return 0;
    }

    /* Find end of response code: first whitespace after start of code */
    for (q = p; *q && !isspace((unsigned char)*q); q++)
        continue;

    if (!*q) {
        OCSPerr(OCSP_F_PARSE_HTTP_LINE1, OCSP_R_SERVER_RESPONSE_PARSE_ERROR);
        return 0;
    }

    /* Set end of response code and start of message */
    *q++ = 0;

    /* Attempt to parse numeric code */
    retcode = strtoul(p, &r, 10);

    /* Reject if the code contains any non-digit characters. */
    if (*r)
        return 0;

    /* Skip over any leading white space in message */
    while (*q && isspace((unsigned char)*q))
        q++;

    if (*q) {
        /*
         * Finally zap any trailing white space in message (include CRLF)
         */

        /* We know q has a non white space character so this is OK */
        for (r = q + strlen(q) - 1; isspace((unsigned char)*r); r--)
            *r = 0;
    }
    if (retcode != 200) {
        OCSPerr(OCSP_F_PARSE_HTTP_LINE1, OCSP_R_SERVER_RESPONSE_ERROR);
        if (!*q)
            ERR_add_error_data(2, "Code=", p);
        else
            ERR_add_error_data(4, "Code=", p, ",Reason=", q);
        return 0;
    }

    return 1;
}
/*
 * Non-blocking state machine driving one OCSP-over-HTTP exchange.
 *
 * Each call performs as much work as the underlying BIO allows and
 * returns:
 *   1  - exchange complete, response body available in rctx->mem
 *   0  - fatal error (context moves to OHS_ERROR)
 *  -1  - I/O would block; call again later (BIO retry)
 *
 * States progress: HTTP_HEADER -> ASN1_WRITE_INIT -> ASN1_WRITE ->
 * ASN1_FLUSH -> FIRSTLINE -> HEADERS -> ASN1_HEADER -> ASN1_CONTENT ->
 * DONE.  Several switch cases deliberately fall through to the next
 * state.
 */
int OCSP_REQ_CTX_nbio(OCSP_REQ_CTX *rctx)
{
    int i, n;
    const unsigned char *p;
 next_io:
    /* Read phase: pull more bytes unless the current state only writes. */
    if (!(rctx->state & OHS_NOREAD)) {
        n = BIO_read(rctx->io, rctx->iobuf, rctx->iobuflen);

        if (n <= 0) {
            if (BIO_should_retry(rctx->io))
                return -1;
            return 0;
        }

        /* Write data to memory BIO */

        if (BIO_write(rctx->mem, rctx->iobuf, n) != n)
            return 0;
    }

    switch (rctx->state) {
    case OHS_HTTP_HEADER:
        /* Last operation was adding headers: need a final \r\n */
        if (BIO_write(rctx->mem, "\r\n", 2) != 2) {
            rctx->state = OHS_ERROR;
            return 0;
        }
        rctx->state = OHS_ASN1_WRITE_INIT;
        /* fall through */

    case OHS_ASN1_WRITE_INIT:
        /* Snapshot the total request length, then start writing. */
        rctx->asn1_len = BIO_get_mem_data(rctx->mem, NULL);
        rctx->state = OHS_ASN1_WRITE;
        /* fall through */

    case OHS_ASN1_WRITE:
        n = BIO_get_mem_data(rctx->mem, &p);

        /* asn1_len counts down; write from the current offset. */
        i = BIO_write(rctx->io, p + (n - rctx->asn1_len), rctx->asn1_len);

        if (i <= 0) {
            if (BIO_should_retry(rctx->io))
                return -1;
            rctx->state = OHS_ERROR;
            return 0;
        }

        rctx->asn1_len -= i;

        if (rctx->asn1_len > 0)
            goto next_io;

        rctx->state = OHS_ASN1_FLUSH;

        /* Request fully sent: reuse the memory BIO for the response. */
        (void)BIO_reset(rctx->mem);
        /* fall through */

    case OHS_ASN1_FLUSH:

        i = BIO_flush(rctx->io);

        if (i > 0) {
            rctx->state = OHS_FIRSTLINE;
            goto next_io;
        }

        if (BIO_should_retry(rctx->io))
            return -1;

        rctx->state = OHS_ERROR;
        return 0;

    case OHS_ERROR:
        return 0;

    case OHS_FIRSTLINE:
    case OHS_HEADERS:

        /* Attempt to read a line in */

 next_line:
        /*
         * Due to &%^*$" memory BIO behaviour with BIO_gets we have to check
         * there's a complete line in there before calling BIO_gets or we'll
         * just get a partial read.
         */
        n = BIO_get_mem_data(rctx->mem, &p);
        if ((n <= 0) || !memchr(p, '\n', n)) {
            /* A line longer than the buffer is an error, not a retry. */
            if (n >= rctx->iobuflen) {
                rctx->state = OHS_ERROR;
                return 0;
            }
            goto next_io;
        }
        n = BIO_gets(rctx->mem, (char *)rctx->iobuf, rctx->iobuflen);

        if (n <= 0) {
            if (BIO_should_retry(rctx->mem))
                goto next_io;
            rctx->state = OHS_ERROR;
            return 0;
        }

        /* Don't allow excessive lines */
        if (n == rctx->iobuflen) {
            rctx->state = OHS_ERROR;
            return 0;
        }

        /* First line */
        if (rctx->state == OHS_FIRSTLINE) {
            if (parse_http_line1((char *)rctx->iobuf)) {
                rctx->state = OHS_HEADERS;
                goto next_line;
            } else {
                rctx->state = OHS_ERROR;
                return 0;
            }
        } else {
            /* Look for blank line: end of headers */
            for (p = rctx->iobuf; *p; p++) {
                if ((*p != '\r') && (*p != '\n'))
                    break;
            }
            if (*p)
                goto next_line;

            rctx->state = OHS_ASN1_HEADER;

        }

        /* Fall thru */

    case OHS_ASN1_HEADER:
        /*
         * Now reading ASN1 header: can read at least 2 bytes which is enough
         * for ASN1 SEQUENCE header and either length field or at least the
         * length of the length field.
         */
        n = BIO_get_mem_data(rctx->mem, &p);
        if (n < 2)
            goto next_io;

        /* Check it is an ASN1 SEQUENCE */
        if (*p++ != (V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED)) {
            rctx->state = OHS_ERROR;
            return 0;
        }

        /* Check out length field */
        if (*p & 0x80) {
            /*
             * If MSB set on initial length octet we can now always read 6
             * octets: make sure we have them.
             */
            if (n < 6)
                goto next_io;
            n = *p & 0x7F;
            /* Not NDEF or excessive length */
            if (!n || (n > 4)) {
                rctx->state = OHS_ERROR;
                return 0;
            }
            p++;
            rctx->asn1_len = 0;
            for (i = 0; i < n; i++) {
                rctx->asn1_len <<= 8;
                rctx->asn1_len |= *p++;
            }

            /* Refuse responses above the configured ceiling. */
            if (rctx->asn1_len > rctx->max_resp_len) {
                rctx->state = OHS_ERROR;
                return 0;
            }

            /* Total = content + tag byte + length-of-length + length octets. */
            rctx->asn1_len += n + 2;
        } else
            rctx->asn1_len = *p + 2;

        rctx->state = OHS_ASN1_CONTENT;

        /* Fall thru */

    case OHS_ASN1_CONTENT:
        n = BIO_get_mem_data(rctx->mem, NULL);
        if (n < (int)rctx->asn1_len)
            goto next_io;

        rctx->state = OHS_DONE;
        return 1;

        break;

    case OHS_DONE:
        return 1;

    }

    return 0;

}
/*
 * Non-blocking send/receive of an OCSP request, decoding the result as
 * an OCSP_RESPONSE.  Returns 1 on success (with *presp set), 0 on
 * error, -1 when I/O should be retried.
 */
int OCSP_sendreq_nbio(OCSP_RESPONSE **presp, OCSP_REQ_CTX *rctx)
{
    return OCSP_REQ_CTX_nbio_d2i(rctx,
                                 (ASN1_VALUE **)presp,
                                 ASN1_ITEM_rptr(OCSP_RESPONSE));
}
/* Blocking OCSP request handler: now a special case of non-blocking I/O */

/*
 * Send 'req' to 'path' over BIO 'b' and wait for the response.  The
 * retry loop assumes a blocking BIO: a -1 result plus BIO_should_retry
 * causes an immediate re-attempt.  Returns the decoded response, or
 * NULL on error.
 */
OCSP_RESPONSE *OCSP_sendreq_bio(BIO *b, const char *path, OCSP_REQUEST *req)
{
    OCSP_RESPONSE *resp = NULL;
    OCSP_REQ_CTX *ctx;
    int rv;

    ctx = OCSP_sendreq_new(b, path, req, -1);

    if (!ctx)
        return NULL;

    do {
        rv = OCSP_sendreq_nbio(&resp, ctx);
    } while ((rv == -1) && BIO_should_retry(b));

    OCSP_REQ_CTX_free(ctx);

    if (rv)
        return resp;

    return NULL;
}
| {
"pile_set_name": "Github"
} |
' Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Imports System.Collections.Immutable
Imports System.Diagnostics
Imports System.Runtime.InteropServices
Imports Microsoft.CodeAnalysis.Text
Imports Microsoft.CodeAnalysis.VisualBasic.Symbols
Imports Microsoft.CodeAnalysis.VisualBasic.Syntax
Imports TypeKind = Microsoft.CodeAnalysis.TypeKind
Namespace Microsoft.CodeAnalysis.VisualBasic
Partial Friend NotInheritable Class LocalRewriter

    ''' <summary>
    ''' A using statement of the form:
    '''       using Expression
    '''          list_of_statements
    '''       end using
    '''
    ''' will be rewritten into:
    '''
    '''       temp = Expression
    '''       Try
    '''           list_of_statements
    '''       Finally
    '''           If Temp IsNot Nothing Then
    '''               CType(temp, IDisposable).Dispose()
    '''           End If
    '''       End Try
    '''
    ''' when the resource is a using locally declared variable no temporary is generated but the variable is read-only
    ''' A using statement of the form:
    '''       Using v As New MyDispose()
    '''           list_of_statements
    '''       End Using
    '''
    ''' is rewritten to:
    '''
    '''       Dim v As New MyDispose()
    '''       Try
    '''           list_of_statements
    '''       Finally
    '''           If v IsNot Nothing Then
    '''               CType(v, IDisposable).Dispose()
    '''           End If
    '''       End Try
    '''
    ''' A using with multiple variable resources are equivalent to a nested using statement.
    ''' So a using statement of the form:
    '''       Using v1 As New MyDispose(), v2 As myDispose = New MyDispose()
    '''           list_of_statements
    '''       end using
    '''
    ''' is rewritten to:
    '''       Dim v1 As New MyDispose
    '''       Try
    '''           Dim v2 As MyDispose = new MyDispose()
    '''           Try
    '''               list_of_statements
    '''           Finally
    '''               If v2 IsNot Nothing Then
    '''                   CType(v2, IDisposable).Dispose()
    '''               End If
    '''           End Try
    '''       Finally
    '''           If v1 IsNot Nothing Then
    '''               CType(v1, IDisposable).Dispose()
    '''           End If
    '''       end try
    '''</summary>
    Public Overrides Function VisitUsingStatement(node As BoundUsingStatement) As BoundNode
        Dim saveState As UnstructuredExceptionHandlingContext = LeaveUnstructuredExceptionHandlingContext(node)

        Dim blockSyntax = DirectCast(node.Syntax, UsingBlockSyntax)

        ' rewrite the original using body only once here.
        Dim currentBody = DirectCast(Visit(node.Body), BoundBlock)

        Dim locals As ImmutableArray(Of LocalSymbol)
        Dim placeholderInfo As ValueTuple(Of BoundRValuePlaceholder, BoundExpression, BoundExpression)

        ' the initialization expressions (variable declaration & expression case) will be rewritten in
        ' "RewriteToInnerTryConstructForVariableDeclaration" to avoid code duplication
        If Not node.ResourceList.IsDefault Then
            ' Case "Using <variable declarations>"
            Dim localsBuilder = ArrayBuilder(Of LocalSymbol).GetInstance
            Dim hasMultipleResources = node.ResourceList.Length > 1

            ' the try statements will be nested. To avoid re-rewriting we're iterating through the resource list in reverse
            For declarationIndex = node.ResourceList.Length - 1 To 0 Step -1
                Dim localDeclaration = node.ResourceList(declarationIndex)

                ' with multiple resources each declaration gets its own sequence point syntax
                Dim syntaxForSequencePoint = If(hasMultipleResources, localDeclaration.Syntax.Parent, Nothing)

                If localDeclaration.Kind = BoundKind.LocalDeclaration Then
                    Dim localVariableDeclaration = DirectCast(localDeclaration, BoundLocalDeclaration)

                    placeholderInfo = node.UsingInfo.PlaceholderInfo(localVariableDeclaration.LocalSymbol.Type)

                    currentBody = RewriteSingleUsingToTryFinally(blockSyntax,
                                                                 syntaxForSequencePoint,
                                                                 localVariableDeclaration.LocalSymbol,
                                                                 localVariableDeclaration.InitializerOpt,
                                                                 placeholderInfo,
                                                                 currentBody)

                    localsBuilder.Add(localVariableDeclaration.LocalSymbol)
                Else
                    ' "Dim a, b, c As New MyDispose()" declares several locals sharing one initializer;
                    ' each variable still gets its own try/finally wrapper (innermost = last declared).
                    Dim localAsNewDeclaration = DirectCast(localDeclaration, BoundAsNewLocalDeclarations)

                    syntaxForSequencePoint = If(hasMultipleResources, localAsNewDeclaration.Syntax, Nothing)

                    ' NOTE(review): variableCount is never read below - candidate for removal.
                    Dim variableCount = localAsNewDeclaration.LocalDeclarations.Length
                    placeholderInfo = node.UsingInfo.PlaceholderInfo(localAsNewDeclaration.LocalDeclarations.First.LocalSymbol.Type)

                    For initializedVariableIndex = localAsNewDeclaration.LocalDeclarations.Length - 1 To 0 Step -1
                        currentBody = RewriteSingleUsingToTryFinally(blockSyntax,
                                                                     syntaxForSequencePoint,
                                                                     localAsNewDeclaration.LocalDeclarations(initializedVariableIndex).LocalSymbol,
                                                                     localAsNewDeclaration.Initializer,
                                                                     placeholderInfo,
                                                                     currentBody)

                        localsBuilder.Add(localAsNewDeclaration.LocalDeclarations(initializedVariableIndex).LocalSymbol)
                    Next
                End If
            Next

            ' we are adding the locals to the builder in reverse order. Therefore we need to reverse the array to have
            ' the same forward declaration order in IL as Dev10 did.
            localsBuilder.ReverseContents()
            locals = localsBuilder.ToImmutableAndFree()
        Else
            ' Case "Using <expression>"
            Debug.Assert(node.ResourceExpressionOpt IsNot Nothing)
            Dim initializationExpression = node.ResourceExpressionOpt

            placeholderInfo = node.UsingInfo.PlaceholderInfo(initializationExpression.Type)

            ' the resource expression is captured in a compiler-generated temporary
            Dim tempResourceSymbol As LocalSymbol = New SynthesizedLocal(Me.currentMethodOrLambda,
                                                                         initializationExpression.Type,
                                                                         SynthesizedLocalKind.Using,
                                                                         blockSyntax.UsingStatement)

            currentBody = RewriteSingleUsingToTryFinally(blockSyntax,
                                                         Nothing,
                                                         tempResourceSymbol,
                                                         initializationExpression,
                                                         placeholderInfo,
                                                         currentBody)

            locals = ImmutableArray.Create(tempResourceSymbol)
        End If

        RestoreUnstructuredExceptionHandlingContext(node, saveState)

        Dim statements As ImmutableArray(Of BoundStatement) = currentBody.Statements

        If ShouldGenerateUnstructuredExceptionHandlingResumeCode(node) Then
            statements = RegisterUnstructuredExceptionHandlingResumeTarget(node.Syntax, canThrow:=True).Concat(statements)
        End If

        currentBody = New BoundBlock(node.Syntax,
                                     currentBody.StatementListSyntax,
                                     locals,
                                     statements)

        If GenerateDebugInfo Then
            ' create a sequence point that contains the whole using statement as the first reachable sequence point
            ' of the using statement. The resource variables are not yet in scope.
            Return New BoundStatementList(node.UsingInfo.UsingStatementSyntax, ImmutableArray.Create(Of BoundStatement)(
                                          New BoundSequencePoint(node.UsingInfo.UsingStatementSyntax.UsingStatement, Nothing),
                                          currentBody))
        Else
            Return New BoundStatementList(node.UsingInfo.UsingStatementSyntax, ImmutableArray.Create(Of BoundStatement)(currentBody))
        End If
    End Function

    ''' <summary>
    ''' Creates a TryFinally Statement for the given resource.
    '''
    ''' This method creates the following for the arguments:
    ''' &lt;localSymbol&gt; = &lt;initializationExpression&gt;
    ''' Try
    '''     &lt;currentBody&gt;
    ''' Finally
    '''     If &lt;disposeCondition&gt; Then
    '''         &lt;disposeConversion&gt;.Dispose()
    '''     End If
    ''' End Try
    '''
    ''' Note: this is used for both kinds of using statements (resource locals and resource expressions).
    '''
    ''' </summary>
    ''' <returns>The new bound block containing the assignment of the initialization and the try/finally statement with
    ''' the passed body.</returns>
    Private Function RewriteSingleUsingToTryFinally(
        syntaxNode As UsingBlockSyntax,
        syntaxForSequencePoint As VisualBasicSyntaxNode,
        localSymbol As LocalSymbol,
        initializationExpression As BoundExpression,
        ByRef placeholderInfo As ValueTuple(Of BoundRValuePlaceholder, BoundExpression, BoundExpression),
        currentBody As BoundBlock
    ) As BoundBlock
        Dim resourceType = localSymbol.Type
        Dim boundResourceLocal As BoundLocal = New BoundLocal(syntaxNode, localSymbol, isLValue:=True, type:=resourceType)

        ' placeholderInfo carries (placeholder, conversion-to-IDisposable, "IsNot Nothing" condition)
        ' bound during initial binding; the placeholder is replaced by the concrete resource local here.
        Dim resourcePlaceholder As BoundRValuePlaceholder = placeholderInfo.Item1
        Dim disposeConversion As BoundExpression = placeholderInfo.Item2
        Dim disposeCondition As BoundExpression = placeholderInfo.Item3

        AddPlaceholderReplacement(resourcePlaceholder, boundResourceLocal.MakeRValue())

        ' add a sequence point to stop on the "End Using" statement
        ' because there are a lot of hidden sequence points between the dispose call and the "end using" in Roslyn
        ' (caused by emitting try catch), we need to add a sequence point after each call with the syntax of the end using
        ' to match the Dev10 debugging experience.
        Dim newBody = DirectCast(InsertEndBlockSequencePoint(currentBody, Nothing), BoundBlock)

        ' assign initialization to variable
        Dim boundResourceInitializationAssignment As BoundStatement = New BoundAssignmentOperator(syntaxNode,
                                                                                                  boundResourceLocal,
                                                                                                  VisitAndGenerateObjectCloneIfNeeded(initializationExpression, suppressObjectClone:=True),
                                                                                                  suppressObjectClone:=True,
                                                                                                  type:=resourceType).ToStatement

        If GenerateDebugInfo AndAlso syntaxForSequencePoint IsNot Nothing Then
            boundResourceInitializationAssignment = New BoundSequencePoint(syntaxForSequencePoint, boundResourceInitializationAssignment)
        End If

        ' create if statement with dispose call
        Dim disposeCall = GenerateDisposeCallForForeachAndUsing(syntaxNode, boundResourceLocal,
                                                                VisitExpressionNode(disposeCondition), True,
                                                                VisitExpressionNode(disposeConversion))

        Dim finallyStatements As ImmutableArray(Of BoundStatement)
        If GenerateDebugInfo Then
            ' The block should start with a sequence point that points to the "End Using" statement. This is required in order to
            ' highlight the end using when someone step next after the last statement of the original body and in case an exception
            ' was thrown.
            finallyStatements = ImmutableArray.Create(Of BoundStatement)(New BoundSequencePoint(
                                                                             syntaxNode.EndUsingStatement,
                                                                             Nothing),
                                                                         disposeCall)
        Else
            finallyStatements = ImmutableArray.Create(Of BoundStatement)(disposeCall)
        End If

        ' create finally block from the dispose call
        Dim finallyBlock = New BoundBlock(syntaxNode,
                                          Nothing, ImmutableArray(Of LocalSymbol).Empty,
                                          finallyStatements)

        ' rewrite try/finally block
        Dim tryFinally = RewriteTryStatement(syntaxNode, newBody, ImmutableArray(Of BoundCatchBlock).Empty, finallyBlock, Nothing)

        newBody = New BoundBlock(syntaxNode,
                                 Nothing,
                                 ImmutableArray(Of LocalSymbol).Empty,
                                 ImmutableArray.Create(Of BoundStatement)(boundResourceInitializationAssignment,
                                                                          tryFinally))

        RemovePlaceholderReplacement(resourcePlaceholder)

        Return newBody
    End Function
End Class
End Namespace
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __MACH_QDSP6_V2_SNDDEV_H
#define __MACH_QDSP6_V2_SNDDEV_H
#include <mach/qdsp5v2/audio_def.h>
#include <sound/q6afe.h>
#define AUDIO_DEV_CTL_MAX_DEV 64
#define DIR_TX 2
#define DIR_RX 1
#define DEVICE_IGNORE 0xffff
#define COPP_IGNORE 0xffffffff
#define SESSION_IGNORE 0x0UL
/* 8 concurrent sessions with Q6 possible, session:0
reserved in DSP */
#define MAX_SESSIONS 0x09
/* This represents Maximum bit needed for representing sessions
per clients, MAX_BIT_PER_CLIENT >= MAX_SESSIONS */
#define MAX_BIT_PER_CLIENT 16
#define VOICE_STATE_INVALID 0x0
#define VOICE_STATE_INCALL 0x1
#define VOICE_STATE_OFFCALL 0x2
#define ONE_TO_MANY 1
#define MANY_TO_ONE 2
/*
 * Descriptor for one audio end-point (e.g. handset rx, speaker tx)
 * registered with the audio device control layer.
 */
struct msm_snddev_info {
	const char *name;	/* human-readable device name */
	u32 capability;		/* capability bits (rx/tx/voice) - TODO confirm exact flag set */
	u32 copp_id;		/* DSP audio path (COPP) id; COPP_IGNORE when unused */
	u32 acdb_id;		/* audio calibration database id */
	u32 dev_volume;		/* current device volume (percent scale, see msm_volume) */
	/* callbacks supplied by the low-level board/codec driver */
	struct msm_snddev_ops {
		int (*open)(struct msm_snddev_info *);
		int (*close)(struct msm_snddev_info *);
		int (*set_freq)(struct msm_snddev_info *, u32);
		int (*enable_sidetone)(struct msm_snddev_info *, u32, uint16_t);
		int (*set_device_volume)(struct msm_snddev_info *, u32);
		int (*enable_anc)(struct msm_snddev_info *, u32);
	} dev_ops;
	u8 opened;		/* presumably set once dev_ops.open succeeded - verify in users */
	void *private_data;	/* opaque data owned by the low-level driver */
	bool state;		/* device enabled/disabled state */
	u32 sample_rate;	/* current sample rate */
	u32 channel_mode;	/* current channel count/mode */
	u32 set_sample_rate;	/* sample rate requested by clients */
	u64 sessions;		/* session bitmask routed to this device (see MAX_BIT_PER_CLIENT) */
	int usage_count;	/* number of active users of this device */
	s32 max_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* [0] is for NB,[1] for WB */
	s32 min_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* same units/indexing as max_voc_rx_vol */
};
/* Stream volume/pan control block, exported globally as msm_vol_ctl. */
struct msm_volume {
	int volume; /* Volume parameter, in % Scale */
	int pan;    /* Pan parameter; range not documented here - TODO confirm */
};
extern struct msm_volume msm_vol_ctl;
void msm_snddev_register(struct msm_snddev_info *);
void msm_snddev_unregister(struct msm_snddev_info *);
int msm_snddev_devcount(void);
int msm_snddev_query(int dev_id);
unsigned short msm_snddev_route_dec(int popp_id);
unsigned short msm_snddev_route_enc(int enc_id);
int msm_snddev_set_dec(int popp_id, int copp_id, int set,
int rate, int channel_mode);
int msm_snddev_set_enc(int popp_id, int copp_id, int set,
int rate, int channel_mode);
int msm_snddev_is_set(int popp_id, int copp_id);
int msm_get_voc_route(u32 *rx_id, u32 *tx_id);
int msm_set_voc_route(struct msm_snddev_info *dev_info, int stream_type,
int dev_id);
int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain);
int msm_set_copp_id(int session_id, int copp_id);
int msm_clear_copp_id(int session_id, int copp_id);
int msm_clear_session_id(int session_id);
int msm_reset_all_device(void);
int reset_device(void);
int msm_clear_all_session(void);
struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id);
void msm_release_voc_thread(void);
int snddev_voice_set_volume(int vol, int path);
/* Payload describing a voice device, delivered with device change /
 * device ready events (AUDDEV_EVT_*) - TODO confirm exact event mapping. */
struct auddev_evt_voc_devinfo {
	u32 dev_type; /* Rx or Tx */
	u32 acdb_dev_id; /* acdb id of device */
	u32 dev_sample; /* Sample rate of device */
	s32 max_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb (milibel),
	[0] is for NB, other for WB */
	s32 min_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb */
	u32 dev_id; /* registered device id */
	u32 dev_port_id; /* AFE port id of the device - TODO confirm */
};
/* Payload for the audio-calibration client (AUDDEV_CLNT_AUDIOCAL). */
struct auddev_evt_audcal_info {
	u32 dev_id;	/* registered device id */
	u32 acdb_id;	/* acdb id of device */
	u32 sample_rate;
	u32 dev_type;	/* Rx or Tx */
	u32 sessions;	/* session bitmask attached to the device */
};
/* A volume or a mute flag, depending on the event being delivered. */
union msm_vol_mute {
	int vol;
	bool mute;
};
/* Payload for volume/mute change events on a voice session. */
struct auddev_evt_voc_mute_info {
	u32 dev_type;		/* Rx or Tx */
	u32 acdb_dev_id;
	u32 voice_session_id;
	union msm_vol_mute dev_vm_val; /* new volume or mute value */
};
/* Payload for AUDDEV_EVT_FREQ_CHG (sample-rate change). */
struct auddev_evt_freq_info {
	u32 dev_type;
	u32 acdb_dev_id;
	u32 sample_rate;
};
/* Union of every event payload; which member is valid depends on evt_id. */
union auddev_evt_data {
	struct auddev_evt_voc_devinfo voc_devinfo;
	struct auddev_evt_voc_mute_info voc_vm_info;
	struct auddev_evt_freq_info freq_info;
	u32 routing_id;		/* routing/copp id for routing events */
	s32 session_vol;	/* stream volume for AUDDEV_EVT_STREAM_VOL_CHG */
	s32 voice_state;	/* VOICE_STATE_* for AUDDEV_EVT_VOICE_STATE_CHG */
	struct auddev_evt_audcal_info audcal_info;
	u32 voice_session_id;
};
/* Generic message header: id followed by data_len bytes of payload. */
struct message_header {
	uint32_t id;
	uint32_t data_len; /* payload length in bytes - TODO confirm units */
};
#define AUDDEV_EVT_DEV_CHG_VOICE 0x01 /* device change event */
#define AUDDEV_EVT_DEV_RDY 0x02 /* device ready event */
#define AUDDEV_EVT_DEV_RLS 0x04 /* device released event */
#define AUDDEV_EVT_REL_PENDING 0x08 /* device release pending */
#define AUDDEV_EVT_DEVICE_VOL_MUTE_CHG 0x10 /* device volume changed */
#define AUDDEV_EVT_START_VOICE 0x20 /* voice call start */
#define AUDDEV_EVT_END_VOICE 0x40 /* voice call end */
#define AUDDEV_EVT_STREAM_VOL_CHG 0x80 /* device volume changed */
#define AUDDEV_EVT_FREQ_CHG 0x100 /* Change in freq */
#define AUDDEV_EVT_VOICE_STATE_CHG 0x200 /* Change in voice state */
#define AUDDEV_CLNT_VOC 0x1 /*Vocoder clients*/
#define AUDDEV_CLNT_DEC 0x2 /*Decoder clients*/
#define AUDDEV_CLNT_ENC 0x3 /* Encoder clients */
#define AUDDEV_CLNT_AUDIOCAL 0x4 /* AudioCalibration client */
#define AUDIO_DEV_CTL_MAX_LISTNER 20 /* Max Listeners Supported */
/*
 * One registered event listener. Listeners form a doubly-linked list
 * (cb_next/cb_prev) anchored in struct event_listner below.
 */
struct msm_snd_evt_listner {
	uint32_t evt_id;	/* bitmask of AUDDEV_EVT_* this listener wants */
	uint32_t clnt_type;	/* AUDDEV_CLNT_* category */
	uint32_t clnt_id;	/* client id within the category */
	void *private_data;	/* passed back verbatim to the callback */
	/* callback invoked on each matching broadcast_event() */
	void (*auddev_evt_listener)(u32 evt_id,
		union auddev_evt_data *evt_payload,
		void *private_data);
	struct msm_snd_evt_listner *cb_next;
	struct msm_snd_evt_listner *cb_prev;
};
/* Head of the listener list plus global call state. */
struct event_listner {
	struct msm_snd_evt_listner *cb;	/* first listener, or NULL */
	u32 num_listner;		/* number of registered listeners */
	int state; /* Call state */ /* TODO remove this if not req*/
};
extern struct event_listner event;
int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id,
void (*listner)(u32 evt_id,
union auddev_evt_data *evt_payload,
void *private_data),
void *private_data);
int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id);
void mixer_post_event(u32 evt_id, u32 dev_id);
void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id);
int auddev_cfg_tx_copp_topology(int session_id, int cfg);
int msm_snddev_request_freq(int *freq, u32 session_id,
u32 capability, u32 clnt_type);
int msm_snddev_withdraw_freq(u32 session_id,
u32 capability, u32 clnt_type);
int msm_device_is_voice(int dev_id);
int msm_get_voc_freq(int *tx_freq, int *rx_freq);
int msm_snddev_get_enc_freq(int session_id);
int msm_set_voice_vol(int dir, s32 volume, u32 session_id);
int msm_set_voice_mute(int dir, int mute, u32 session_id);
int msm_get_voice_state(void);
int msm_enable_incall_recording(int popp_id, int rec_mode, int rate,
int channel_mode);
int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode);
#endif
| {
"pile_set_name": "Github"
} |
package com.wrbug.developerhelper.ui.activity.databaseedit
import android.content.Context
import android.view.View
import android.view.ViewGroup
import com.evrencoskun.tableview.adapter.AbstractTableAdapter
import com.evrencoskun.tableview.adapter.recyclerview.holder.AbstractViewHolder
import android.widget.TextView
import com.wrbug.developerhelper.R
import android.view.LayoutInflater
import android.widget.LinearLayout
class DatabaseTableAdapter(val context: Context) : AbstractTableAdapter<String, Int, String?>(context) {
override fun onCreateColumnHeaderViewHolder(parent: ViewGroup?, viewType: Int): AbstractViewHolder {
val layout = LayoutInflater.from(context).inflate(
R.layout
.table_view_column_header_layout, parent, false
)
return ColumnHeaderViewHolder(layout)
}
override fun onBindColumnHeaderViewHolder(
holder: AbstractViewHolder?,
columnHeaderItemModel: Any?,
columnPosition: Int
) {
val viewHolder = holder as ColumnHeaderViewHolder?
viewHolder?.run {
cellText?.text = columnHeaderItemModel?.toString()
}
}
override fun onCreateRowHeaderViewHolder(parent: ViewGroup?, viewType: Int): AbstractViewHolder {
val layout = LayoutInflater.from(context).inflate(
R.layout.table_view_row_header_layout, parent, false
)
return RowHeaderViewHolder(layout)
}
override fun onBindRowHeaderViewHolder(holder: AbstractViewHolder?, rowHeaderItemModel: Any?, rowPosition: Int) {
val viewHolder = holder as RowHeaderViewHolder?
viewHolder?.run {
cellText?.text = rowHeaderItemModel?.toString()
}
}
override fun onCreateCellViewHolder(parent: ViewGroup?, viewType: Int): AbstractViewHolder {
val layout = LayoutInflater.from(context).inflate(
R.layout.table_view_cell_layout,
parent, false
)
return CellViewHolder(layout)
}
override fun onCreateCornerView(): View {
return LayoutInflater.from(context).inflate(R.layout.table_view_corner_layout, null)
}
override fun onBindCellViewHolder(
holder: AbstractViewHolder?,
cellItemModel: Any?,
columnPosition: Int,
rowPosition: Int
) {
val viewHolder = holder as CellViewHolder?
viewHolder?.run {
cellText?.text = cellItemModel?.toString() ?: "NULL"
itemView.layoutParams.width = LinearLayout.LayoutParams.WRAP_CONTENT
cellText?.requestLayout()
}
}
override fun getColumnHeaderItemViewType(position: Int): Int {
return 0
}
override fun getRowHeaderItemViewType(position: Int): Int {
return 0
}
override fun getCellItemViewType(position: Int): Int {
return 0
}
    /* View holder for a column-header cell. */
    internal inner class ColumnHeaderViewHolder(itemView: View) : AbstractViewHolder(itemView) {
        val cellText: TextView? = itemView.findViewById(R.id.column_header_textView)
    }

    /* View holder for a row-header cell. */
    internal inner class RowHeaderViewHolder(itemView: View) : AbstractViewHolder(itemView) {
        val cellText: TextView? = itemView.findViewById(R.id.row_header_textview)
    }

    /* View holder for a data cell. */
    internal inner class CellViewHolder(itemView: View) : AbstractViewHolder(itemView) {
        val cellText: TextView? = itemView.findViewById(R.id.cell_data)
    }
} | {
"pile_set_name": "Github"
} |
// Package filesystem is a storage backend base on filesystems
package filesystem
import (
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-billy.v4"
)
// Storage is an implementation of git.Storer that stores data on disk in the
// standard git format (this is, the .git directory). Zero values of this type
// are not safe to use, see the NewStorage function below.
type Storage struct {
	fs  billy.Filesystem // underlying filesystem the repository lives on
	dir *dotgit.DotGit   // .git directory helper built on top of fs

	// Embedded per-concern storages; each one wraps the same dir.
	ObjectStorage
	ReferenceStorage
	IndexStorage
	ShallowStorage
	ConfigStorage
	ModuleStorage
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem`
func NewStorage(fs billy.Filesystem) (*Storage, error) {
	dir := dotgit.New(fs)

	objects, err := NewObjectStorage(dir)
	if err != nil {
		return nil, err
	}

	storage := &Storage{
		fs:  fs,
		dir: dir,

		ObjectStorage:    objects,
		ReferenceStorage: ReferenceStorage{dir: dir},
		IndexStorage:     IndexStorage{dir: dir},
		ShallowStorage:   ShallowStorage{dir: dir},
		ConfigStorage:    ConfigStorage{dir: dir},
		ModuleStorage:    ModuleStorage{dir: dir},
	}
	return storage, nil
}
// Filesystem returns the underlying filesystem
func (s *Storage) Filesystem() billy.Filesystem {
	return s.fs
}
// Init initializes the .git directory layout on disk by delegating to the
// underlying dotgit helper.
func (s *Storage) Init() error {
	return s.dir.Initialize()
}
| {
"pile_set_name": "Github"
} |
onedev.server.groupChoiceFormatter = {
formatSelection: function(group) {
return group.name;
},
formatResult: function(group) {
return group.name;
},
escapeMarkup: function(m) {
return m;
}
}; | {
"pile_set_name": "Github"
} |
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file
* csr.cuh
*
* @brief CSR (Compressed Sparse Row) Graph Data Structure
*/
#pragma once
#include <gunrock/util/array_utils.cuh>
#include <gunrock/graph/graph_base.cuh>
#include <gunrock/graph/coo.cuh>
#include <gunrock/util/binary_search.cuh>
#include <gunrock/util/device_intrinsics.cuh>
namespace gunrock {
namespace graph {
/**
* @brief CSR data structure which uses Compressed Sparse Row
* format to store a graph. It is a compressed way to present
* the graph as a sparse matrix.
*
* @tparam VertexT Vertex identifier type.
* @tparam SizeT Graph size type.
* @tparam ValueT Associated value type.
*/
template<
typename _VertexT = int,
typename _SizeT = _VertexT,
typename _ValueT = _VertexT,
GraphFlag _FLAG = GRAPH_NONE,
unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault,
bool VALID = true>
struct Csr :
public GraphBase<_VertexT, _SizeT, _ValueT,
_FLAG | HAS_CSR, cudaHostRegisterFlag>
{
typedef _VertexT VertexT;
typedef _SizeT SizeT;
typedef _ValueT ValueT;
static const GraphFlag FLAG = _FLAG | HAS_CSR;
static const util::ArrayFlag ARRAY_FLAG =
util::If_Val<(FLAG & GRAPH_PINNED) != 0, (FLAG & ARRAY_RESERVE) | util::PINNED,
FLAG & ARRAY_RESERVE>::Value;
typedef GraphBase<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> BaseGraph;
typedef Csr<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag> CsrT;
// Column indices corresponding to all the
// non-zero values in the sparse matrix
util::Array1D<SizeT, VertexT, ARRAY_FLAG,
cudaHostRegisterFlag> column_indices;
// List of indices where each row of the
// sparse matrix starts
util::Array1D<SizeT, SizeT, ARRAY_FLAG,
cudaHostRegisterFlag> row_offsets;
typedef util::Array1D<SizeT, ValueT, ARRAY_FLAG,
cudaHostRegisterFlag> Array_ValueT;
typedef util::NullArray<SizeT, ValueT, ARRAY_FLAG,
cudaHostRegisterFlag> Array_NValueT;
// List of values attached to edges in the graph
typename util::If<(FLAG & HAS_EDGE_VALUES) != 0,
Array_ValueT, Array_NValueT>::Type edge_values;
// List of values attached to nodes in the graph
typename util::If<(FLAG & HAS_NODE_VALUES) != 0,
Array_ValueT, Array_NValueT>::Type node_values;
/**
* @brief CSR Constructor
*
* @param[in] pinned Use pinned memory for CSR data structure
* (default: do not use pinned memory)
*/
Csr() : BaseGraph()
{
column_indices.SetName("column_indices");
row_offsets .SetName("row_offsets");
edge_values .SetName("edge_values");
node_values .SetName("node_values");
}
/**
* @brief CSR destructor
*/
    __host__ __device__
    ~Csr()
    {
        // Intentionally empty: the arrays must be freed explicitly via
        // Release(); the destructor does not release them.
        //Release();
    }
/**
* @brief Deallocates CSR graph
*/
    cudaError_t Release(util::Location target = util::LOCATION_ALL)
    {
        cudaError_t retval = cudaSuccess;
        // GUARD_CU presumably records the error in retval and returns early
        // on failure - a failed release aborts the remaining releases.
        GUARD_CU(row_offsets   .Release(target));
        GUARD_CU(column_indices.Release(target));
        GUARD_CU(node_values   .Release(target));
        GUARD_CU(edge_values   .Release(target));
        GUARD_CU(BaseGraph    ::Release(target));
        return retval;
    }
/**
* @brief Allocate memory for CSR graph.
*
* @param[in] nodes Number of nodes in COO-format graph
* @param[in] edges Number of edges in COO-format graph
*/
    cudaError_t Allocate(SizeT nodes, SizeT edges,
        util::Location target = GRAPH_DEFAULT_TARGET)
    {
        cudaError_t retval = cudaSuccess;
        GUARD_CU(BaseGraph    ::Allocate(nodes, edges, target));
        // CSR needs nodes+1 row offsets; the last entry holds the edge count
        // (see the row_offsets assignment in FromCoo).
        GUARD_CU(row_offsets   .Allocate(nodes + 1 , target));
        GUARD_CU(column_indices.Allocate(edges     , target));
        GUARD_CU(node_values   .Allocate(nodes     , target));
        GUARD_CU(edge_values   .Allocate(edges     , target));
        return retval;
    }
    /**
     * @brief Moves all CSR arrays from one memory location to another.
     *
     * @param[in] source  Location the data currently lives in
     * @param[in] target  Location to move the data to
     * @param[in] stream  CUDA stream to issue the copies on
     */
    cudaError_t Move(
        util::Location source,
        util::Location target,
        cudaStream_t stream = 0)
    {
        cudaError_t retval = cudaSuccess;
        // An InvalidValue length presumably means "move the whole array" -
        // confirm against Array1D::Move.
        SizeT invalid_size = util::PreDefinedValues<SizeT>::InvalidValue;
        GUARD_CU(BaseGraph    ::Move(source, target, stream));
        GUARD_CU(row_offsets   .Move(source, target, invalid_size, 0, stream));
        GUARD_CU(column_indices.Move(source, target, invalid_size, 0, stream));
        GUARD_CU(edge_values   .Move(source, target, invalid_size, 0, stream));
        GUARD_CU(node_values   .Move(source, target, invalid_size, 0, stream));
        return retval;
    }
    /**
     * @brief Deep-copies another CSR graph (possibly with different
     *        vertex/size/value template types) into this one.
     *
     * @param[in] source  CSR graph to copy from
     * @param[in] target  Destination memory location; defaults to wherever
     *                    the source's row_offsets currently live
     * @param[in] stream  CUDA stream for the copies
     * @param[in] quiet   Unused here; kept for a uniform From* signature
     */
    template <
        typename VertexT_in, typename SizeT_in,
        typename ValueT_in, GraphFlag FLAG_in,
        unsigned int cudaHostRegisterFlag_in>
    cudaError_t FromCsr(
        Csr<VertexT_in, SizeT_in, ValueT_in, FLAG_in,
            cudaHostRegisterFlag_in> &source,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        bool quiet = false)
    {
        cudaError_t retval = cudaSuccess;
        // Default target: the location(s) where the source data resides.
        if (target == util::LOCATION_DEFAULT)
            target = source.row_offsets.GetSetted() |
                source.row_offsets.GetAllocated();

        GUARD_CU(BaseGraph::Set(source));
        GUARD_CU(Allocate(source.nodes, source.edges, target));
        GUARD_CU(row_offsets   .Set(source.row_offsets,
            this -> nodes + 1, target, stream));
        GUARD_CU(column_indices.Set(source.column_indices,
            this -> edges, target, stream));
        GUARD_CU(edge_values   .Set(source.edge_values,
            this -> edges, target, stream));
        GUARD_CU(node_values   .Set(source.node_values,
            this -> nodes, target, stream));
        return retval;
    }
/**
* @brief Build CSR graph from COO graph, sorted or unsorted
*
* @param[in] output_file Output file to dump the graph topology info
* @param[in] coo Pointer to COO-format graph
* @param[in] coo_nodes Number of nodes in COO-format graph
* @param[in] coo_edges Number of edges in COO-format graph
* @param[in] ordered_rows Are the rows sorted? If not, sort them.
* @param[in] undirected Is the graph directed or not?
* @param[in] reversed Is the graph reversed or not?
* @param[in] quiet Don't print out anything.
*
* Default: Assume rows are not sorted.
*/
    template <typename GraphT>
    cudaError_t FromCoo(
        GraphT &source,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        //bool ordered_rows = false,
        //bool undirected = false,
        //bool reversed = false,
        bool quiet = false)
    {
        typedef typename GraphT::CooT CooT;

        util::PrintMsg("Converting " +
            std::to_string(source.CooT::nodes) +
            " vertices, " + std::to_string(source.CooT::edges) +
            (source.CooT::directed ? " directed" : " undirected") +
            " edges (" + (source.CooT::edge_order == BY_ROW_ASCENDING ? " ordered" : "unordered") +
            " tuples) to CSR format...", !quiet, false);

        time_t mark1 = time(NULL);
        cudaError_t retval = cudaSuccess;
        // Default target: the location(s) where the COO edge pairs reside.
        if (target == util::LOCATION_DEFAULT)
            target = source.CooT::edge_pairs.GetSetted() |
                source.CooT::edge_pairs.GetAllocated();

        // Copy the scalar graph properties directly.
        this -> nodes = source.CooT::nodes;
        this -> edges = source.CooT::edges;
        this -> directed = source.CooT::directed;

        GUARD_CU(Allocate(source.CooT::nodes, source.CooT::edges, target));

        // Sort COO by row so each vertex's edges become contiguous.
        GUARD_CU(source.CooT::Order(BY_ROW_ASCENDING, target, stream));
        //source.CooT::Display();

        // column_indices[e] = destination vertex of the e-th (row-sorted) edge.
        GUARD_CU(column_indices.ForEach(source.CooT::edge_pairs,
            []__host__ __device__ (VertexT &column_index,
            const typename CooT::EdgePairT &edge_pair){
            column_index = edge_pair.y;},
            this -> edges, target, stream));

        // Copy edge values, if this graph carries them.
        if (FLAG & HAS_EDGE_VALUES)
        {
            GUARD_CU(edge_values.ForEach(source.CooT::edge_values,
                []__host__ __device__ (ValueT &edge_value,
                const typename CooT::ValueT &edge_value_in){
                edge_value = edge_value_in;},
                this -> edges, target, stream));
        }

        // row_offsets[v] = index of v's first edge in the sorted edge list;
        // row_offsets[nodes] = total edge count. Each entry is found
        // independently by binary search over the sorted edge pairs.
        SizeT edges = this -> edges;
        SizeT nodes = this -> nodes;
        auto row_edge_compare = [] __host__ __device__ (
            const typename CooT::EdgePairT &edge_pair,
            const VertexT &row){
            return edge_pair.x < row;
        };
        GUARD_CU(row_offsets.ForAll(source.CooT::edge_pairs,
            [nodes, edges, row_edge_compare] __host__ __device__ (
                SizeT *row_offsets,
                const typename CooT::EdgePairT *edge_pairs,
                const VertexT &row){
                if (row <= edge_pairs[0].x)
                    row_offsets[row] = 0;
                else if (row < nodes)
                {
                    auto pos = util::BinarySearch_LeftMost(row,
                        edge_pairs, (SizeT)0, edges-1,
                        row_edge_compare,
                        [] (const typename CooT::EdgePairT &pair, const VertexT &row)
                        {
                            return (pair.x == row);
                        });
                    // Linear advance past rows absent from the edge list,
                    // so empty vertices point at the next row's first edge.
                    while (pos < edges && row > edge_pairs[pos].x)
                        pos ++;
                    row_offsets[row] = pos;
                } else row_offsets[row] = edges;
            }, this -> nodes + 1, target, stream));

        time_t mark2 = time(NULL);
        util::PrintMsg("Done (" +
            std::to_string(mark2 - mark1) + "s).", !quiet);

        // (A commented-out host-side verification loop over row_offsets and
        // edge_pairs used to live here; see version history if needed.)
        return retval;
    }
    /**
     * @brief Builds this CSR graph from a CSC graph by converting through a
     *        temporary COO representation (CSC -> COO -> CSR).
     */
    template <typename GraphT>
    cudaError_t FromCsc(
        GraphT &source,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        bool quiet = false)
    {
        typedef typename GraphT::CscT CscT;
        typedef Coo<VertexT, SizeT, ValueT, FLAG | HAS_COO, cudaHostRegisterFlag> CooT;
        cudaError_t retval = cudaSuccess;

        // The intermediate COO is released before returning.
        CooT coo;
        GUARD_CU(coo.FromCsc(source, target, stream, quiet));
        GUARD_CU(    FromCoo(   coo, target, stream, quiet));
        GUARD_CU(coo.Release());
        return retval;
    }
/**
* @brief Display CSR graph to console
*
* @param[in] with_edge_value Whether display graph with edge values.
*/
    cudaError_t Display(
        std::string graph_prefix = "",
        SizeT nodes_to_show = 40,
        bool with_edge_values = true)
    {
        cudaError_t retval = cudaSuccess;
        // Clamp to the actual vertex count.
        if (nodes_to_show > this -> nodes)
            nodes_to_show = this -> nodes;
        util::PrintMsg(graph_prefix + "Graph containing " +
            std::to_string(this -> nodes) + " vertices, " +
            std::to_string(this -> edges) + " edges, in CSR format."
            + " Neighbor list of first " + std::to_string(nodes_to_show) +
            " nodes :");
        for (SizeT node = 0; node < nodes_to_show; node++)
        {
            std::string str = "v " + std::to_string(node) +
                " " + std::to_string(row_offsets[node]) + " : ";
            for (SizeT edge = row_offsets[node];
                    edge < row_offsets[node + 1];
                    edge++)
            {
                // At most 41 neighbors are printed per vertex (hard cap).
                if (edge - row_offsets[node] > 40) break;
                str = str + "[" + std::to_string(column_indices[edge]);
                if (with_edge_values && (FLAG & HAS_EDGE_VALUES))
                {
                    str = str + "," + std::to_string(edge_values[edge]);
                }
                if (edge - row_offsets[node] != 40 &&
                    edge != row_offsets[node+1] -1)
                    str = str + "], ";
                else str = str + "]";
            }
            // Mark truncated neighbor lists.
            if (row_offsets[node + 1] - row_offsets[node] > 40)
                str = str + "...";
            util::PrintMsg(str);
        }
        return retval;
    }
/**
* @brief Sort CSR graph edges per vertex in ascending order
*
*/
cudaError_t Sort()
{
cudaError_t retval = cudaSuccess;
SizeT num_nodes = this -> nodes;
SizeT num_edges = this -> edges;
typedef std::pair<VertexT, ValueT> EdgeValPairT;
util::Array1D<SizeT, EdgeValPairT> sorted_neighbors;
GUARD_CU(sorted_neighbors.Allocate(num_edges, util::HOST));
#pragma omp parallel
do {
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
SizeT node_start = (SizeT)(num_nodes) * thread_num / num_threads;
SizeT node_end = (SizeT)(num_nodes) * (thread_num + 1) / num_threads;
node_end = (thread_num == (num_threads - 1)) ? num_nodes : node_end;
for (SizeT node = node_start; node < node_end; node++) {
SizeT start_offset = row_offsets[node];
SizeT end_offset = row_offsets[node + 1];
for (SizeT off = start_offset; off < end_offset; off++) {
sorted_neighbors[off] = std::make_pair(column_indices[off], edge_values[off]);
}
std::sort(sorted_neighbors + start_offset,
sorted_neighbors + end_offset,
[](const EdgeValPairT & a, const EdgeValPairT & b) -> bool
{
return a.first < b.first;
}
);
for (SizeT off = start_offset; off < end_offset; off++) {
column_indices[off] = sorted_neighbors[off].first;
edge_values [off] = sorted_neighbors[off].second;
}
}
}while (false);
GUARD_CU(sorted_neighbors.Release(util::HOST));
return cudaSuccess;
}
    /** @brief Degree (out-edge count) of vertex v; 0 for out-of-range v. */
    __device__ __host__ __forceinline__
    SizeT GetNeighborListLength(const VertexT &v) const
    {
        if (util::lessThanZero(v) || v >= this -> nodes)
            return 0;
        return _ldg(row_offsets + (v+1)) - _ldg(row_offsets + v);
    }
    /** @brief Index of v's first edge in column_indices (no range check). */
    __device__ __host__ __forceinline__
    SizeT GetNeighborListOffset(const VertexT &v) const
    {
        return _ldg(row_offsets + v);
    }
    /** @brief Source vertex of edge e, via binary search on row_offsets. */
    __device__ __host__ __forceinline__
    VertexT GetEdgeSrc(const SizeT &e) const
    {
        return util::BinarySearch_RightMost(e, row_offsets + 0, (SizeT)0, this -> nodes);
    }
    /** @brief Destination vertex of edge e (direct lookup, no range check). */
    __device__ __host__ __forceinline__
    VertexT GetEdgeDest(const SizeT &e) const
    {
        //return _ldg(column_indices + e);
        return column_indices[e];
    }
    /** @brief Retrieves both endpoints of edge e; src costs a binary search. */
    __device__ __host__ __forceinline__
    void GetEdgeSrcDest(const SizeT &e, VertexT &src, VertexT &dest) const
    {
        src = util::BinarySearch_RightMost(e, row_offsets + 0, (SizeT)0, this -> nodes);
        dest = column_indices[e];
    }
    /**
     * @brief Edge id connecting src to dest, via binary search over src's
     *        neighbor list. Assumes that list is sorted ascending (see
     *        Sort()) - TODO confirm callers guarantee this.
     */
    __device__ __host__ __forceinline__
    SizeT GetSrcDestEdge(const VertexT &src, const VertexT &dest)
    {
        return util::BinarySearch(dest, column_indices + 0, row_offsets[src], row_offsets[src + 1] - 1);
    }
/*template <typename Tuple>
void CsrToCsc(Csr<VertexId, SizeT, Value> &target,
Csr<VertexId, SizeT, Value> &source)
{
target.nodes = source.nodes;
target.edges = source.edges;
target.average_degree = source.average_degree;
target.average_edge_value = source.average_edge_value;
target.average_node_value = source.average_node_value;
target.out_nodes = source.out_nodes;
{
Tuple *coo = (Tuple*)malloc(sizeof(Tuple) * source.edges);
int idx = 0;
for (int i = 0; i < source.nodes; ++i)
{
for (int j = source.row_offsets[i]; j < source.row_offsets[i+1]; ++j)
{
coo[idx].row = source.column_indices[j];
coo[idx].col = i;
coo[idx++].val = (source.edge_values == NULL) ? 0 : source.edge_values[j];
}
}
if (source.edge_values == NULL)
target.template FromCoo<false>(NULL, coo, nodes, edges);
else
target.template FromCoo<true>(NULL, coo, nodes, edges);
free(coo);
}
}*/
/**
*
* @brief Store graph information into a file.
*
* @param[in] file_name Original graph file path and name.
* @param[in] v Number of vertices in input graph.
* @param[in] e Number of edges in input graph.
* @param[in] row Row-offsets array store row pointers.
* @param[in] col Column-indices array store destinations.
* @param[in] edge_values Per edge weight values associated.
*
*/
/*void WriteBinary(
char *file_name,
SizeT v,
SizeT e,
SizeT *row,
VertexId *col,
Value *edge_values = NULL)
{
std::ofstream fout(file_name);
if (fout.is_open())
{
fout.write(reinterpret_cast<const char*>(&v), sizeof(SizeT));
fout.write(reinterpret_cast<const char*>(&e), sizeof(SizeT));
fout.write(reinterpret_cast<const char*>(row), (v + 1)*sizeof(SizeT));
fout.write(reinterpret_cast<const char*>(col), e * sizeof(VertexId));
if (edge_values != NULL)
{
fout.write(reinterpret_cast<const char*>(edge_values),
e * sizeof(Value));
}
fout.close();
}
}*/
/*
* @brief Write human-readable CSR arrays into 3 files.
* Can be easily used for python interface.
*
* @param[in] file_name Original graph file path and name.
* @param[in] v Number of vertices in input graph.
* @param[in] e Number of edges in input graph.
* @param[in] row_offsets Row-offsets array store row pointers.
* @param[in] col_indices Column-indices array store destinations.
* @param[in] edge_values Per edge weight values associated.
*/
/*void WriteCSR(
char *file_name,
SizeT v, SizeT e,
SizeT *row_offsets,
VertexId *col_indices,
Value *edge_values = NULL)
{
std::cout << file_name << std::endl;
char rows[256], cols[256], vals[256];
sprintf(rows, "%s.rows", file_name);
sprintf(cols, "%s.cols", file_name);
sprintf(vals, "%s.vals", file_name);
std::ofstream rows_output(rows);
if (rows_output.is_open())
{
std::copy(row_offsets, row_offsets + v + 1,
std::ostream_iterator<SizeT>(rows_output, "\n"));
rows_output.close();
}
std::ofstream cols_output(cols);
if (cols_output.is_open())
{
std::copy(col_indices, col_indices + e,
std::ostream_iterator<VertexId>(cols_output, "\n"));
cols_output.close();
}
if (edge_values != NULL)
{
std::ofstream vals_output(vals);
if (vals_output.is_open())
{
std::copy(edge_values, edge_values + e,
std::ostream_iterator<Value>(vals_output, "\n"));
vals_output.close();
}
}
}*/
/*
* @brief Write Ligra input CSR arrays into .adj file.
* Can be easily used for python interface.
*
* @param[in] file_name Original graph file path and name.
* @param[in] v Number of vertices in input graph.
* @param[in] e Number of edges in input graph.
* @param[in] row Row-offsets array store row pointers.
* @param[in] col Column-indices array store destinations.
* @param[in] edge_values Per edge weight values associated.
* @param[in] quiet Don't print out anything.
*/
/*void WriteToLigraFile(
const char *file_name,
SizeT v, SizeT e,
SizeT *row,
VertexId *col,
Value *edge_values = NULL,
bool quiet = false)
{
char adj_name[256];
sprintf(adj_name, "%s.adj", file_name);
if (!quiet)
{
printf("writing to ligra .adj file.\n");
}
std::ofstream fout3(adj_name);
if (fout3.is_open())
{
fout3 << "AdjacencyGraph" << std::endl << v << std::endl << e << std::endl;
for (int i = 0; i < v; ++i)
fout3 << row[i] << std::endl;
for (int i = 0; i < e; ++i)
fout3 << col[i] << std::endl;
if (edge_values != NULL)
{
for (int i = 0; i < e; ++i)
fout3 << edge_values[i] << std::endl;
}
fout3.close();
}
}
void WriteToMtxFile(
const char *file_name,
SizeT v, SizeT e,
SizeT *row,
VertexId *col,
Value *edge_values = NULL,
bool quiet = false)
{
char adj_name[256];
sprintf(adj_name, "%s.mtx", file_name);
if (!quiet)
{
printf("writing to .mtx file.\n");
}
std::ofstream fout3(adj_name);
if (fout3.is_open())
{
fout3 << v << " " << v << " " << e << std::endl;
for (int i = 0; i < v; ++i) {
SizeT begin = row[i];
SizeT end = row[i+1];
for (int j = begin; j < end; ++j) {
fout3 << col[j]+1 << " " << i+1;
if (edge_values != NULL)
{
fout3 << " " << edge_values[j] << std::endl;
}
else
{
fout3 << " " << rand() % 64 << std::endl;
}
}
}
fout3.close();
}
}*/
/**
* @brief Read from stored row_offsets, column_indices arrays.
*
* @tparam LOAD_EDGE_VALUES Whether or not to load edge values.
*
* @param[in] f_in Input file name.
* @param[in] quiet Don't print out anything.
*/
/*template <bool LOAD_EDGE_VALUES>
void FromCsr(char *f_in, bool quiet = false)
{
if (!quiet)
{
printf(" Reading directly from stored binary CSR arrays ...\n");
}
time_t mark1 = time(NULL);
std::ifstream input(f_in);
SizeT v, e;
input.read(reinterpret_cast<char*>(&v), sizeof(SizeT));
input.read(reinterpret_cast<char*>(&e), sizeof(SizeT));
FromScratch<LOAD_EDGE_VALUES, false>(v, e);
input.read(reinterpret_cast<char*>(row_offsets), (v + 1)*sizeof(SizeT));
input.read(reinterpret_cast<char*>(column_indices), e * sizeof(VertexId));
if (LOAD_EDGE_VALUES)
{
input.read(reinterpret_cast<char*>(edge_values), e * sizeof(Value));
}
time_t mark2 = time(NULL);
if (!quiet)
{
printf("Done reading (%ds).\n", (int) (mark2 - mark1));
}
// compute out_nodes
SizeT out_node = 0;
for (SizeT node = 0; node < nodes; node++)
{
if (row_offsets[node + 1] - row_offsets[node] > 0)
{
++out_node;
}
}
out_nodes = out_node;
}*/
/**
* @brief (Specific for SM) Read from stored row_offsets, column_indices arrays.
*
* @tparam LOAD_NODE_VALUES Whether or not to load node values.
*
* @param[in] f_in Input graph file name.
* @param[in] f_label Input label file name.
* @param[in] quiet Don't print out anything.
*/
/*template <bool LOAD_NODE_VALUES>
void FromCsr_SM(char *f_in, char *f_label, bool quiet = false)
{
if (!quiet)
{
printf(" Reading directly from stored binary CSR arrays ...\n");
if(LOAD_NODE_VALUES)
printf(" Reading directly from stored binary label arrays ...\n");
}
time_t mark1 = time(NULL);
std::ifstream input(f_in);
std::ifstream input_label(f_label);
SizeT v, e;
input.read(reinterpret_cast<char*>(&v), sizeof(SizeT));
input.read(reinterpret_cast<char*>(&e), sizeof(SizeT));
FromScratch<false, LOAD_NODE_VALUES>(v, e);
input.read(reinterpret_cast<char*>(row_offsets), (v + 1)*sizeof(SizeT));
input.read(reinterpret_cast<char*>(column_indices), e * sizeof(VertexId));
if (LOAD_NODE_VALUES)
{
input_label.read(reinterpret_cast<char*>(node_values), v * sizeof(Value));
}
// for(int i=0; i<v; i++) printf("%lld ", (long long)node_values[i]); printf("\n");
time_t mark2 = time(NULL);
if (!quiet)
{
printf("Done reading (%ds).\n", (int) (mark2 - mark1));
}
// compute out_nodes
SizeT out_node = 0;
for (SizeT node = 0; node < nodes; node++)
{
if (row_offsets[node + 1] - row_offsets[node] > 0)
{
++out_node;
}
}
out_nodes = out_node;
}*/
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Check values.
*/
/*bool CheckValue()
{
for (SizeT node = 0; node < nodes; ++node)
{
for (SizeT edge = row_offsets[node];
edge < row_offsets[node + 1];
++edge)
{
int src_node = node;
int dst_node = column_indices[edge];
int edge_value = edge_values[edge];
for (SizeT r_edge = row_offsets[dst_node];
r_edge < row_offsets[dst_node + 1];
++r_edge)
{
if (column_indices[r_edge] == src_node)
{
if (edge_values[r_edge] != edge_value)
return false;
}
}
}
}
return true;
}*/
/**
* @brief Find node with largest neighbor list
* @param[in] max_degree Maximum degree in the graph.
*
* \return int the source node with highest degree
*/
/*int GetNodeWithHighestDegree(int& max_degree)
{
int degree = 0;
int src = 0;
for (SizeT node = 0; node < nodes; node++)
{
if (row_offsets[node + 1] - row_offsets[node] > degree)
{
degree = row_offsets[node + 1] - row_offsets[node];
src = node;
}
}
max_degree = degree;
return src;
}*/
/**
* @brief Display the neighbor list of a given node.
*
* @param[in] node Vertex ID to display.
*/
/*void DisplayNeighborList(VertexId node)
{
if (node < 0 || node >= nodes) return;
for (SizeT edge = row_offsets[node];
edge < row_offsets[node + 1];
edge++)
{
util::PrintValue(column_indices[edge]);
printf(", ");
}
printf("\n");
}*/
/**
* @brief Get the degrees of all the nodes in graph
*
* @param[in] node_degrees node degrees to fill in
*/
/*void GetNodeDegree(unsigned long long *node_degrees)
{
for(SizeT node=0; node < nodes; ++node)
{
node_degrees[node] = row_offsets[node+1]-row_offsets[node];
}
}*/
/**
* @brief Get the average node value in graph
*/
/*Value GetAverageNodeValue()
{
if (abs(average_node_value - 0) < 0.001 && node_values != NULL)
{
double mean = 0, count = 0;
for (SizeT node = 0; node < nodes; ++node)
{
if (node_values[node] < UINT_MAX)
{
count += 1;
mean += (node_values[node] - mean) / count;
}
}
average_node_value = static_cast<Value>(mean);
}
return average_node_value;
}*/
/**
* @brief Get the average edge value in graph
*/
/*Value GetAverageEdgeValue()
{
if (abs(average_edge_value - 0) < 0.001 && edge_values != NULL)
{
double mean = 0, count = 0;
for (SizeT edge = 0; edge < edges; ++edge)
{
if (edge_values[edge] < UINT_MAX)
{
count += 1;
mean += (edge_values[edge] - mean) / count;
}
}
}
return average_edge_value;
}*/
/**@}*/
}; // CSR
// Partial specialization of Csr for VALID == false, selected when the graph's
// GraphFlag does not carry a CSR representation. Every member is a no-op stub
// that reports success, so generic graph code can invoke the CSR interface
// unconditionally; this empty shell is what the compiler instantiates when no
// CSR storage exists.
template<
    typename VertexT,
    typename SizeT ,
    typename ValueT ,
    GraphFlag _FLAG ,
    unsigned int cudaHostRegisterFlag>
struct Csr<VertexT, SizeT, ValueT, _FLAG, cudaHostRegisterFlag, false>
{
    // Nothing to free; always succeeds.
    cudaError_t Release(util::Location target = util::LOCATION_ALL)
    {
        return cudaSuccess;
    }

    // Stub conversion from a COO graph; performs no work.
    template <typename CooT_in>
    cudaError_t FromCoo(
        CooT_in &coo,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        bool quiet = false)
    {
        return cudaSuccess;
    }

    // Stub conversion from another CSR graph; performs no work.
    template <typename CsrT_in>
    cudaError_t FromCsr(
        CsrT_in &csr,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        bool quiet = false)
    {
        return cudaSuccess;
    }

    // Stub conversion from a CSC graph; performs no work.
    template <typename CscT_in>
    cudaError_t FromCsc(
        CscT_in &csc,
        util::Location target = util::LOCATION_DEFAULT,
        cudaStream_t stream = 0,
        bool quiet = false)
    {
        return cudaSuccess;
    }

    // Nothing to sort in the disabled specialization.
    cudaError_t Sort()
    {
        return cudaSuccess;
    }

    // Degree query stub; reports zero neighbors for every vertex.
    __device__ __host__ __forceinline__
    SizeT GetNeighborListLength(const VertexT &v) const
    {
        return 0;
    }

    // No data to migrate between host / device locations.
    cudaError_t Move(
        util::Location source,
        util::Location target,
        cudaStream_t stream = 0)
    {
        return cudaSuccess;
    }

    // Nothing to display.
    cudaError_t Display(
        std::string graph_prefix = "",
        SizeT nodes_to_show = 40,
        bool with_edge_values = true)
    {
        return cudaSuccess;
    }
};
} // namespace graph
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| {
"pile_set_name": "Github"
} |
#include "w_system_pch.h"
#include "w_image.h"
#include <turbojpeg.h>
#include <png.h>
namespace wolf
{
namespace system
{
struct w_image_pimp
{
#pragma region png
/*
    Check whether the stream begins with the 8-byte PNG signature.
    On the signature-check path the stream is rewound to the beginning so the
    caller can decode the image afterwards; when the read itself fails the
    stream is left with its fail state set.

    returns W_PASSED when the signature matches, W_FAILED when it does not,
    and W_INVALID_FILE_ATTRIBUTES when the first 8 bytes could not be read.
*/
static W_RESULT is_png_file(_Inout_ std::istream& pStream)
{
    const int _png_paging_size = 8;
    png_byte _header[_png_paging_size];
    pStream.read((char*)_header, _png_paging_size);
    if (!pStream.good()) return W_INVALID_FILE_ATTRIBUTES;
    //check for png: png_sig_cmp returns 0 for a valid signature
    auto _hr = png_sig_cmp(_header, 0, _png_paging_size);
    //seek to first of stream so the caller can re-read from the start
    pStream.seekg(0, std::ios::beg);
    return _hr == 0 ? W_PASSED : W_FAILED;
}
/*
    Check whether the file at pFilePath is a PNG (by signature).

    returns W_PASSED / W_FAILED, or W_INVALID_FILE_ATTRIBUTES when the file
    could not be opened.

    NOTE(review): the result of the stream overload is compared against 0,
    which assumes W_PASSED == 0; any non-passed result from the stream
    overload (including W_INVALID_FILE_ATTRIBUTES from a short read) is
    collapsed to W_FAILED here -- confirm callers do not need that
    distinction.
*/
static W_RESULT is_png_file(_In_z_ const char* pFilePath)
{
    std::ifstream _file(pFilePath, std::ios::binary);
    if (!_file)
    {
        //the file could not be opened (missing, locked or access denied)
        return W_INVALID_FILE_ATTRIBUTES;
    }
    auto _hr = is_png_file(_file);
    _file.close();
    return _hr == 0 ? W_PASSED : W_FAILED;
}
/*
    Decode a PNG image from a binary input stream using libpng.

    The pixel data is normalized to 8 bits per channel: 16-bit channels are
    stripped, palette / low-bit-depth gray images are expanded, a tRNS chunk
    becomes an alpha channel, and an opaque 0xFF filler is appended when the
    source has no alpha at all. Each row is then repacked into the channel
    order requested by pPixelFormat (3 bytes per pixel for RGB/BGR, 4 for
    RGBA/BGRA).

    pColorType / pBitDepth report values of the *original* stream (before
    normalization); pNumberOfPasses is the interlace pass count reported by
    libpng. pState codes:
         0 : succeeded
         1 : stream does not start with a PNG signature
        -2 : read failure or internal libpng error

    Returns a malloc()'d buffer (the caller must free()) or nullptr on error.
*/
static uint8_t* read_png_from_stream(
    _Inout_ std::istream& pStream,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ uint8_t& pColorType,
    _Out_ uint8_t& pBitDepth,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_png_pixel_format& pPixelFormat)
{
    pState = 0;

    //read and verify the 8-byte PNG signature
    const int _png_paging_size = 8;
    png_byte _header[_png_paging_size];
    pStream.read((char*)_header, _png_paging_size);
    if (!pStream.good())
    {
        pState = -2;
        return nullptr;
    }
    //check for png: png_sig_cmp returns 0 for a valid signature
    if (png_sig_cmp(_header, 0, _png_paging_size))
    {
        pState = 1;
        return nullptr;
    }
    //initialize the libpng read / info structs
    auto _png_ptr = png_create_read_struct(
        PNG_LIBPNG_VER_STRING,
        NULL,
        NULL,
        NULL);
    if (!_png_ptr)
    {
        pState = -2;
        return nullptr;
    }
    auto _info_ptr = png_create_info_struct(_png_ptr);
    if (!_info_ptr)
    {
        //NOTE(review): _png_ptr is not destroyed on this path -- confirm leak
        pState = -2;
        return nullptr;
    }
    //libpng reports fatal errors by longjmp'ing back to this setjmp; any
    //error raised by the png_* calls below lands here
    if (setjmp(png_jmpbuf(_png_ptr)))
    {
        pState = -2;
        png_destroy_read_struct(&_png_ptr, &_info_ptr, (png_infopp)0);
        return nullptr;
    }
    //read through our custom callback instead of a FILE*
    png_set_read_fn(_png_ptr, (void*)&pStream, png_user_read_data);//png_init_io(_png_ptr, _file);
    //the 8 signature bytes were already consumed above
    png_set_sig_bytes(_png_ptr, _png_paging_size);
    png_read_info(_png_ptr, _info_ptr);
    pWidth = (int)png_get_image_width(_png_ptr, _info_ptr);
    pHeight = (int)png_get_image_height(_png_ptr, _info_ptr);
    pColorType = png_get_color_type(_png_ptr, _info_ptr);
    pBitDepth = png_get_bit_depth(_png_ptr, _info_ptr);
    pNumberOfPasses = png_set_interlace_handling(_png_ptr);
    //normalize everything to 8-bit RGBA before the repacking loops below
    if (pBitDepth == 16)
    {
        png_set_strip_16(_png_ptr);
    }
    if (pColorType == PNG_COLOR_TYPE_PALETTE)
    {
        png_set_palette_to_rgb(_png_ptr);
    }
    // PNG_COLOR_TYPE_GRAY_ALPHA is always 8 or 16 bit depth.
    if (pColorType == PNG_COLOR_TYPE_GRAY && pBitDepth < 8)
    {
        png_set_expand_gray_1_2_4_to_8(_png_ptr);
    }
    if (png_get_valid(_png_ptr, _info_ptr, PNG_INFO_tRNS))
    {
        png_set_tRNS_to_alpha(_png_ptr);
    }
    // These color_type don't have an alpha channel then fill it with 0xff.
    if (pColorType == PNG_COLOR_TYPE_RGB ||
        pColorType == PNG_COLOR_TYPE_GRAY ||
        pColorType == PNG_COLOR_TYPE_PALETTE)
    {
        png_set_filler(_png_ptr, 0xFF, PNG_FILLER_AFTER);
    }
    if (pColorType == PNG_COLOR_TYPE_GRAY || pColorType == PNG_COLOR_TYPE_GRAY_ALPHA)
    {
        png_set_gray_to_rgb(_png_ptr);
    }
    png_read_update_info(_png_ptr, _info_ptr);
    //now data must be rgba; 3 output components for RGB/BGR, 4 otherwise
    auto _comp = 4;
    if (pPixelFormat == w_png_pixel_format::RGB_PNG ||
        pPixelFormat == w_png_pixel_format::BGR_PNG)
    {
        _comp = 3;
    }
    //allocate the output image and a single-row scratch buffer
    //NOTE(review): both malloc results are used unchecked -- confirm behavior
    //on allocation failure is acceptable
    auto _pixels = (uint8_t*)malloc(_comp * pWidth * pHeight * sizeof(uint8_t));
    auto _bytes_per_row = png_get_rowbytes(_png_ptr, _info_ptr);
    auto _raw_data = (uint8_t*)malloc(_bytes_per_row * sizeof(uint8_t));
    //pixels counter: write index into _pixels, advanced by _comp per pixel
    uint32_t _k = 0;
    //read single row at a time and then convert it to desired pixel format
    switch (pPixelFormat)
    {
    case w_png_pixel_format::RGB_PNG:
        for (auto i = 0; i < pHeight; ++i)
        {
            png_read_row(_png_ptr, (png_bytep)_raw_data, NULL);
            const auto _row_offset = i * pWidth; //unused
            uint32_t _byte_index = 0;
            for (auto j = 0; j < pWidth; ++j)
            {
                const uint32_t _r = _raw_data[_byte_index++];
                const uint32_t _g = _raw_data[_byte_index++];
                const uint32_t _b = _raw_data[_byte_index++];
                const uint32_t _a = _raw_data[_byte_index++];//ignored
                _pixels[_k] = _r;
                _pixels[_k + 1] = _g;
                _pixels[_k + 2] = _b;
                _k += _comp;
            }
        }
        break;
    case w_png_pixel_format::BGR_PNG:
        for (auto i = 0; i < pHeight; ++i)
        {
            png_read_row(_png_ptr, (png_bytep)_raw_data, NULL);
            const auto _row_offset = i * pWidth; //unused
            uint32_t _byte_index = 0;
            for (auto j = 0; j < pWidth; ++j)
            {
                const uint32_t _r = _raw_data[_byte_index++];
                const uint32_t _g = _raw_data[_byte_index++];
                const uint32_t _b = _raw_data[_byte_index++];
                const uint32_t _a = _raw_data[_byte_index++];//ignored
                _pixels[_k] = _b;
                _pixels[_k + 1] = _g;
                _pixels[_k + 2] = _r;
                _k += _comp;
            }
        }
        break;
    case w_png_pixel_format::RGBA_PNG:
        for (auto i = 0; i < pHeight; ++i)
        {
            png_read_row(_png_ptr, (png_bytep)_raw_data, NULL);
            const auto _row_offset = i * pWidth; //unused
            uint32_t _byte_index = 0;
            for (auto j = 0; j < pWidth; ++j)
            {
                const uint32_t _r = _raw_data[_byte_index++];
                const uint32_t _g = _raw_data[_byte_index++];
                const uint32_t _b = _raw_data[_byte_index++];
                const uint32_t _a = _raw_data[_byte_index++];//alpha (kept)
                _pixels[_k] = _r;
                _pixels[_k + 1] = _g;
                _pixels[_k + 2] = _b;
                _pixels[_k + 3] = _a;
                _k += _comp;
            }
        }
        break;
    case w_png_pixel_format::BGRA_PNG:
        for (auto i = 0; i < pHeight; ++i)
        {
            png_read_row(_png_ptr, (png_bytep)_raw_data, NULL);
            const auto _row_offset = i * pWidth; //unused
            uint32_t _byte_index = 0;
            for (auto j = 0; j < pWidth; ++j)
            {
                const uint32_t _r = _raw_data[_byte_index++];
                const uint32_t _g = _raw_data[_byte_index++];
                const uint32_t _b = _raw_data[_byte_index++];
                const uint32_t _a = _raw_data[_byte_index++];//alpha (kept)
                _pixels[_k] = _b;
                _pixels[_k + 1] = _g;
                _pixels[_k + 2] = _r;
                _pixels[_k + 3] = _a;
                _k += _comp;
            }
        }
        break;
    };
    //release libpng state and the scratch row; the caller owns _pixels
    png_destroy_read_struct(&_png_ptr, &_info_ptr, (png_infopp)0);
    free(_raw_data);
    return _pixels;
}
/*
    Decode a PNG image from a file on disk.
    Opens the file in binary mode and delegates the decoding to
    read_png_from_stream; see that function for the meaning of the out
    parameters. pState is set to -1 when the file cannot be opened.
    Returns a malloc()'d pixel buffer (caller frees) or nullptr on error.
*/
static uint8_t* read_png_from_file(
    _In_z_ const char* pFilePath,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ uint8_t& pColorType,
    _Out_ uint8_t& pBitDepth,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_png_pixel_format& pPixelFormat)
{
    pState = 0;

    std::ifstream _stream(pFilePath, std::ios::binary);
    if (!_stream.is_open())
    {
        //could not open the file at all
        pState = -1;
        return nullptr;
    }

    uint8_t* _result = read_png_from_stream(
        _stream,
        pWidth,
        pHeight,
        pColorType,
        pBitDepth,
        pNumberOfPasses,
        pState,
        pPixelFormat);
    _stream.close();

    return _result;
}
//TODO: WRITE PNG FILEs
#pragma region Needs to check
/*
write png to file
pState indicates the state
0 means succeeded
-1 means file could not be opened for writing
-2 means internal function error
*/
// inline void write_png_to_file(
// _In_z_ const char* pFilePath,
// _In_ const uint8_t* pPixels,
// _In_ const int& pWidth,
// _In_ const int& pHeight,
// _In_ const uint8_t& pBitDepth,
// _Out_ int& pState)
// {
// pState = 0;
//
// //create file
//#if defined(__WIN32) || defined(__UWP)
// FILE* _file;
// fopen_s(&_file, pFilePath, "wb");
//
//#else
// FILE* _file = fopen(pFilePath, "wb");
//#endif
// if (!_file)
// {
// pState = -1;
// return;
// }
//
// //initialize stuff
// auto _png_ptr = png_create_write_struct(
// PNG_LIBPNG_VER_STRING,
// NULL,
// NULL,
// NULL);
// if (!_png_ptr)
// {
// pState = -2;
// fclose(_file);
// return;
// }
//
// auto _info_ptr = png_create_info_struct(_png_ptr);
// if (!_info_ptr)
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// return;
// }
//
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// return;
// }
// png_init_io(_png_ptr, _file);
//
// //write header
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// return;
// }
//
// png_set_IHDR(
// _png_ptr,
// _info_ptr,
// pWidth,
// pHeight,
// pBitDepth,
// PNG_COLOR_TYPE_RGBA,
// PNG_INTERLACE_NONE,
// PNG_COMPRESSION_TYPE_BASE,
// PNG_FILTER_TYPE_BASE);
//
// png_write_info(_png_ptr, _info_ptr);
//
// //write bytes
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// return;
// }
// png_write_image(_png_ptr, (png_bytep*)pPixels);
//
// //end write
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// return;
// }
// png_write_end(_png_ptr, NULL);
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// fclose(_file);
// }
/*
write png to stream
pState indicates the state
0 means succeeded
-1 means file could not be opened for writing
-2 means internal function error
*/
//inline void write_png_to_stream(
// _Inout_ std::ostream& pStream,
// _In_ const uint8_t* pPixels,
// _In_ const int& pWidth,
// _In_ const int& pHeight,
// _In_ const uint8_t& pBitDepth,
// _Out_ int& pState)
//{
// pState = 0;
// //initialize stuff
// auto _png_ptr = png_create_write_struct(
// PNG_LIBPNG_VER_STRING,
// NULL,
// NULL,
// NULL);
// if (!_png_ptr)
// {
// pState = -2;
// return;
// }
// auto _info_ptr = png_create_info_struct(_png_ptr);
// if (!_info_ptr)
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// return;
// }
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// return;
// }
// png_set_write_fn(_png_ptr, (void*)&pStream, png_user_write_data, NULL);//png_init_io(_png_ptr, _file);
// //write header
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// return;
// }
// png_set_IHDR(
// _png_ptr,
// _info_ptr,
// pWidth,
// pHeight,
// pBitDepth,
// PNG_COLOR_TYPE_RGBA,
// PNG_INTERLACE_NONE,
// PNG_COMPRESSION_TYPE_BASE,
// PNG_FILTER_TYPE_BASE);
// png_write_info(_png_ptr, _info_ptr);
// //write bytes
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// return;
// }
// png_write_image(_png_ptr, (png_bytep*)pPixels);
// //end write
// if (setjmp(png_jmpbuf(_png_ptr)))
// {
// pState = -2;
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
// return;
// }
// png_write_end(_png_ptr, NULL);
// png_destroy_write_struct(&_png_ptr, &_info_ptr);
//}
#pragma endregion
#pragma endregion
#pragma region jpeg
/*
    Decompress a JPEG image from a binary input stream using TurboJPEG.

    pWidth/pHeight, pSubSample and pColorSpace are filled from the JPEG
    header. pNumberOfPasses is not produced by TurboJPEG and is left
    untouched (kept for interface symmetry with the PNG reader).
    pState codes (unchanged from the original contract):
         0 : succeeded
         1 : the data is not a decodable JPEG (header parse failed)
        -1 : the stream could not be sized or read
        -2 : internal error (bad pixel format, allocation or decoder failure)

    Returns a malloc()'d pixel buffer (3 bytes/pixel for TJPF_RGB/TJPF_BGR,
    4 otherwise) that the caller must free(), or nullptr on error.

    Fixes over the previous revision: the tjAlloc'd compressed buffer is now
    released with tjFree on *every* early-exit path (it leaked on stream-read
    and tjInitDecompress failures, and was wrongly passed to free() -- a
    mismatched allocator -- on header failure); the decompressor handle is
    destroyed on the header-failure path; the output buffer is freed when
    tjDecompress2 fails; and a tellg() of -1 no longer wraps to a huge
    unsigned length.
*/
static uint8_t* read_jpeg_from_stream(
    _In_z_ std::istream& pStream,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ int& pSubSample,
    _Out_ int& pColorSpace,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const TJPF& pPixelFormat = TJPF_RGB)
{
    pState = 0;
    if (pPixelFormat == TJPF_UNKNOWN)
    {
        pState = -2;
        return nullptr;
    }

    //measure the stream; check the signed offset before casting so a
    //tellg() failure (-1) cannot wrap around to a huge unsigned length
    pStream.seekg(0, std::ios::end);
    const std::streamoff _stream_size = pStream.tellg();
    if (_stream_size <= 0)
    {
        pState = -1;
        return nullptr;
    }
    auto _jpeg_buffer_len = static_cast<unsigned long>(_stream_size);
    //go back to the beginning before reading the compressed bytes
    pStream.seekg(0, std::ios::beg);

    auto _jpeg_buffer = (uint8_t*)tjAlloc(_jpeg_buffer_len);
    if (!_jpeg_buffer)
    {
        pState = -2;
        return nullptr;
    }
    pStream.read((char*)_jpeg_buffer, _jpeg_buffer_len);
    if (!pStream.good())
    {
        //release the tjAlloc'd buffer before bailing out (was leaked)
        tjFree(_jpeg_buffer);
        pState = -1;
        return nullptr;
    }

    auto _tj_instance = tjInitDecompress();
    if (!_tj_instance)
    {
        tjFree(_jpeg_buffer); //was leaked
        pState = -2;
        return nullptr;
    }
    if (tjDecompressHeader3(
        _tj_instance,
        _jpeg_buffer,
        _jpeg_buffer_len,
        &pWidth,
        &pHeight,
        &pSubSample,
        &pColorSpace))
    {
        pState = 1;
        //tjFree (not free) because the buffer came from tjAlloc, and the
        //decompressor handle must be destroyed as well
        tjFree(_jpeg_buffer);
        tjDestroy(_tj_instance);
        return nullptr;
    }

    //3 output components for RGB/BGR, 4 for the alpha-carrying formats
    auto _comp = 4;
    switch (pPixelFormat)
    {
    case TJPF_RGB:
    case TJPF_BGR:
        _comp = 3;
        break;
    }

    //TODO: should be alligned_malloc
    auto _pixels = (uint8_t*)malloc(_comp * pWidth * pHeight * sizeof(uint8_t));
    if (!_pixels)
    {
        tjFree(_jpeg_buffer);
        tjDestroy(_tj_instance);
        pState = -2;
        return nullptr;
    }
    auto _hr = tjDecompress2(
        _tj_instance,
        _jpeg_buffer,
        _jpeg_buffer_len,
        _pixels,
        pWidth,
        _comp * pWidth,
        pHeight,
        pPixelFormat,
        0);
    tjFree(_jpeg_buffer);
    tjDestroy(_tj_instance);
    if (_hr)
    {
        free(_pixels); //do not leak the output buffer on decode failure
        pState = -2;
        return nullptr;
    }
    return _pixels;
}
/*
    Decompress a JPEG image from a file on disk.
    Opens the file in binary mode and delegates the decoding to
    read_jpeg_from_stream; see that function for the meaning of the out
    parameters. pState is set to -1 when the file cannot be opened.
    Returns a malloc()'d pixel buffer (caller frees) or nullptr on error.
*/
static uint8_t* read_jpeg_from_file(
    _In_z_ const char* pFilePath,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ int& pSubSample,
    _Out_ int& pColorSpace,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const TJPF& pPixelFormat = TJPF_RGB)
{
    pState = 0;

    std::ifstream _stream(pFilePath, std::ios::binary);
    if (!_stream.is_open())
    {
        //could not open the file at all
        pState = -1;
        return nullptr;
    }

    uint8_t* _result = read_jpeg_from_stream(
        _stream,
        pWidth,
        pHeight,
        pSubSample,
        pColorSpace,
        pNumberOfPasses,
        pState,
        pPixelFormat);
    _stream.close();

    return _result;
}
#pragma endregion
private:
/*
    libpng read callback: pulls pLength bytes into pData from the
    std::istream that was registered via png_set_read_fn.
*/
static void png_user_read_data(
    png_structp pPngPtr,
    png_bytep pData,
    png_size_t pLength)
{
    //cast the user io pointer back to the std::istream it was set from
    png_voidp _io = png_get_io_ptr(pPngPtr);
    ((std::istream*)_io)->read((char*)pData, pLength);
}
/*
    libpng write callback: pushes pLength bytes from pData into the
    std::ostream that was registered via png_set_write_fn.
*/
static void png_user_write_data(
    png_structp pPngPtr,
    png_bytep pData,
    png_size_t pLength)
{
    //cast the user io pointer back to the std::ostream it was set from
    png_voidp _io = png_get_io_ptr(pPngPtr);
    ((std::ostream*)_io)->write((char*)pData, pLength);
}
//static void png_user_flush_data(png_structp png_ptr)
//{
//}
};
}
}
using namespace wolf::system;
w_image_pimp* w_image::_pimp = new (std::nothrow) w_image_pimp();
//Facade over the private implementation; reports failure when the pimp
//object was never allocated.
W_RESULT w_image::is_png_file(_Inout_ std::istream& pStream)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return W_FAILED;
    }
    return _impl->is_png_file(pStream);
}
//Facade over the private implementation; reports failure when the pimp
//object was never allocated.
W_RESULT w_image::is_png_file(_In_z_ const char* pFilePath)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return W_FAILED;
    }
    return _impl->is_png_file(pFilePath);
}
//Facade over the private implementation; returns nullptr when the pimp
//object was never allocated.
uint8_t* w_image::read_png_from_stream(
    _Inout_ std::istream& pStream,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ uint8_t& pColorType,
    _Out_ uint8_t& pBitDepth,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_png_pixel_format& pPixelFormat)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return nullptr;
    }
    return _impl->read_png_from_stream(
        pStream,
        pWidth, pHeight,
        pColorType, pBitDepth,
        pNumberOfPasses, pState,
        pPixelFormat);
}
//Facade over the private implementation; returns nullptr when the pimp
//object was never allocated.
uint8_t* w_image::read_png_from_file(
    _In_z_ const char* pFilePath,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ uint8_t& pColorType,
    _Out_ uint8_t& pBitDepth,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_png_pixel_format& pPixelFormat)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return nullptr;
    }
    return _impl->read_png_from_file(
        pFilePath,
        pWidth, pHeight,
        pColorType, pBitDepth,
        pNumberOfPasses, pState,
        pPixelFormat);
}
//Facade over the private implementation; translates the public
//w_jpeg_pixel_format into the TurboJPEG TJPF enum and returns nullptr when
//the pimp object was never allocated.
uint8_t* w_image::read_jpeg_from_stream(
    _In_z_ std::istream& pStream,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ int& pSubSample,
    _Out_ int& pColorSpace,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_jpeg_pixel_format& pPixelFormat)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return nullptr;
    }
    return _impl->read_jpeg_from_stream(
        pStream,
        pWidth, pHeight,
        pSubSample, pColorSpace,
        pNumberOfPasses, pState,
        static_cast<TJPF>(pPixelFormat));
}
//Facade over the private implementation; translates the public
//w_jpeg_pixel_format into the TurboJPEG TJPF enum and returns nullptr when
//the pimp object was never allocated.
uint8_t* w_image::read_jpeg_from_file(
    _In_z_ const char* pFilePath,
    _Out_ int& pWidth,
    _Out_ int& pHeight,
    _Out_ int& pSubSample,
    _Out_ int& pColorSpace,
    _Out_ int& pNumberOfPasses,
    _Out_ int& pState,
    _In_ const w_jpeg_pixel_format& pPixelFormat)
{
    auto _impl = w_image::_pimp;
    if (_impl == nullptr)
    {
        wolf::logger.error("memory not allocated for w_image_pimp");
        return nullptr;
    }
    return _impl->read_jpeg_from_file(
        pFilePath,
        pWidth, pHeight,
        pSubSample, pColorSpace,
        pNumberOfPasses, pState,
        static_cast<TJPF>(pPixelFormat));
}
| {
"pile_set_name": "Github"
} |
package example.service;
import example.repo.Customer705Repository;
import org.springframework.stereotype.Service;
/**
 * Spring-managed service bean for Customer705 entities.
 * The repository is injected through the constructor; no behavior or state
 * has been added yet.
 */
@Service
public class Customer705Service {

	// Constructor injection: the container supplies the Customer705Repository
	// bean. The reference is currently unused; store it in a field once the
	// service gains behavior.
	public Customer705Service(Customer705Repository repo) {
	}
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) 2005-2013 by Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Eclipse Public License (EPL).
* Please see the license.txt included with this distribution for details.
* Any modifications to this file must keep this entire header intact.
*/
/*
* Created on Jan 17, 2006
*/
package org.python.pydev.ui.wizards.files;
import java.io.ByteArrayInputStream;
import org.eclipse.core.resources.IContainer;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.Path;
import org.python.pydev.core.docutils.StringUtils;
import org.python.pydev.ui.filetypes.FileTypesPreferencesPage;
/**
 * Wizard that creates a new Python package: the full dotted path entered by
 * the user is materialized as nested folders, each seeded with an empty
 * __init__ module, and the deepest __init__ file is returned.
 */
public class PythonPackageWizard extends AbstractPythonWizard {

    public PythonPackageWizard() {
        super("Create a new Python package");
    }

    /** Unique id of this wizard. */
    public static final String WIZARD_ID = "org.python.pydev.ui.wizards.files.PythonPackageWizard";

    @Override
    protected AbstractPythonWizardPage createPathPage() {
        return new AbstractPythonWizardPage(this.description, selection) {

            // This wizard names the package itself, so the extra package
            // selection control on the page is not needed.
            @Override
            protected boolean shouldCreatePackageSelect() {
                return false;
            }

            // On top of the base-class validation, reject a name that
            // already exists inside the validated source folder.
            @Override
            protected String checkNameText(String text) {
                String result = super.checkNameText(text);
                if (result != null) {
                    return result;
                }
                if (getValidatedSourceFolder().findMember(text) != null) {
                    return "The package " + text +
                            " already exists in the source folder " + getValidatedSourceFolder().getName() + ".";
                }
                return null;
            }
        };
    }

    /**
     * We will create the complete package path given by the user (all filled with __init__)
     * and we should return the last __init__ module created.
     */
    @Override
    protected IFile doCreateNew(IProgressMonitor monitor) throws CoreException {
        return createPackage(monitor, filePage.getValidatedSourceFolder(), filePage.getValidatedName());
    }

    /**
     * Creates the complete package path given by the user (all filled with __init__)
     * and returns the last __init__ module created (null when no source
     * folder was validated).
     */
    public static IFile createPackage(IProgressMonitor monitor, IContainer validatedSourceFolder, String packageName)
            throws CoreException {
        IFile lastFile = null;
        if (validatedSourceFolder == null) {
            return null;
        }
        IContainer parent = validatedSourceFolder;
        // Walk each dotted segment, creating the folder and its __init__
        // module only when they do not already exist.
        for (String packagePart : StringUtils.dotSplit(packageName)) {
            IFolder folder = parent.getFolder(new Path(packagePart));
            if (!folder.exists()) {
                folder.create(true, true, monitor);
            }
            parent = folder;
            IFile file = parent.getFile(new Path("__init__"
                    + FileTypesPreferencesPage.getDefaultDottedPythonExtension()));
            if (!file.exists()) {
                file.create(new ByteArrayInputStream(new byte[0]), true, monitor);
            }
            lastFile = file;
        }
        return lastFile;
    }
}
| {
"pile_set_name": "Github"
} |
components {
id: "script"
component: "/factory/factory_test.script"
}
components {
id: "factory"
component: "/factory/factory_test.factory"
}
| {
"pile_set_name": "Github"
} |
# Aggregation methods for whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds
#
# [name]
# pattern = <regex>
# xFilesFactor = <float between 0 and 1>
# aggregationMethod = <average|sum|last|max|min>
#
# name: Arbitrary unique name for the rule
# pattern: Regex pattern to match against the metric name
# xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
# aggregationMethod: function to apply to data points for aggregation
#
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min
[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max
[sum]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum
[default_average]
pattern = .*
xFilesFactor = 0.0
aggregationMethod = average
| {
"pile_set_name": "Github"
} |
Test tool version: 2.1.3
Issuer: https://oidc-certification.ory.sh:8443/
Profile: []
Test ID: OP-Discovery-JWKs
Test description: Keys in OP JWKs well formed
Timestamp: 2018-06-23T10:43:23Z
============================================================
Trace output
0.0 phase <--<-- 0 --- Webfinger -->-->
0.0 not expected to do WebFinger
0.0 phase <--<-- 1 --- Discovery -->-->
0.0 provider_config kwargs:{'issuer': 'https://oidc-certification.ory.sh:8443/'}
0.108 http response url:https://oidc-certification.ory.sh:8443/.well-known/openid-configuration status_code:200
0.11 ProviderConfigurationResponse {
"authorization_endpoint": "https://oidc-certification.ory.sh:8443/oauth2/auth",
"claims_parameter_supported": false,
"claims_supported": [
"sub"
],
"grant_types_supported": [
"authorization_code",
"implicit",
"client_credentials",
"refresh_token"
],
"id_token_signing_alg_values_supported": [
"RS256"
],
"issuer": "https://oidc-certification.ory.sh:8443/",
"jwks_uri": "https://oidc-certification.ory.sh:8443/.well-known/jwks.json",
"registration_endpoint": "https://oidc-certification.ory.sh:8443/clients",
"request_parameter_supported": true,
"request_uri_parameter_supported": true,
"require_request_uri_registration": true,
"response_modes_supported": [
"query",
"fragment"
],
"response_types_supported": [
"code",
"code id_token",
"id_token",
"token id_token",
"token",
"token id_token code"
],
"scopes_supported": [
"offline",
"openid"
],
"subject_types_supported": [
"pairwise",
"public"
],
"token_endpoint": "https://oidc-certification.ory.sh:8443/oauth2/token",
"token_endpoint_auth_methods_supported": [
"client_secret_post",
"client_secret_basic",
"private_key_jwt",
"none"
],
"userinfo_endpoint": "https://oidc-certification.ory.sh:8443/userinfo",
"userinfo_signing_alg_values_supported": [
"none",
"RS256"
],
"version": "3.0"
}
0.11 phase <--<-- 2 --- Done -->-->
0.11 end
0.11 assertion CheckHTTPResponse
0.11 condition check-http-response: status=OK [Checks that the HTTP response status is within the 200 or 300 range. Also does some extra JSON checks]
0.111 assertion VerifyBase64URL
0.212 http response url:https://oidc-certification.ory.sh:8443/.well-known/jwks.json status_code:200
0.213 condition verify-base64url: status=OK [Verifies that the base64 encoded parts of a JWK is in fact base64url encoded and not just base64 encoded]
0.213 condition Done: status=OK
============================================================
Conditions
check-http-response: status=OK [Checks that the HTTP response status is within the 200 or 300 range. Also does some extra JSON checks]
verify-base64url: status=OK [Verifies that the base64 encoded parts of a JWK is in fact base64url encoded and not just base64 encoded]
Done: status=OK
============================================================
RESULT: PASSED
| {
"pile_set_name": "Github"
} |
#!/usr/bin/perl
use strict;
use warnings;
use Getopt::Long;
use Pod::Usage;
use IPC::Open3;
use Time::HiRes qw(usleep);
exit &main();
# Entry point. Reads "user:pass|:|\\server\share\file" records from STDIN
# (the output of SMBList.pl), downloads each file via smbclient, and either
# prints the contents to STDOUT (default) or stores them under --savedir.
# Returns 0 on completion.
sub main() {
	print STDERR <<BLOCKOUT
_____ __ __ ____ _____ _
/ ____| \\/ | _ \\ / ____| | |
| (___ | \\ / | |_) | | __ _ __ __ _| |__
\\___ \\| |\\/| | _ <| | |_ | '__/ _` | '_ \\
____) | | | | |_) | |__| | | | (_| | |_) |
|_____/|_| |_|____/ \\_____|_| \\__,_|_.__/
By Chris King
\@raikiasec
BLOCKOUT
;
	print STDERR "SMBGrab - Chris King\n\n";
	# Parse command line options. $inputRead is declared but never used.
	my ($inputRead, $inputSave, $inputNoEdit, $inputHelp, $inputAll);
	$inputSave = $inputNoEdit = $inputHelp = $inputAll = '';
	GetOptions('savedir=s', \$inputSave,
	'noedit', \$inputNoEdit,
	'all', \$inputAll,
	'help', \$inputHelp);
	pod2usage(-verbose => 1) and exit if ($inputHelp);
	# Refuse to run when STDIN is a terminal: input must be piped in.
	unless (not -t STDIN) {
	print STDERR "ERROR! You must pipe input from a SMBList.pl output! If you don't know how to use this script, try ./SMBGrab.pl -h\n" and exit;
	}
	# Work directory: either the user-chosen save dir or a random /tmp dir
	# that is deleted again at the end of the run.
	my $outputDir = '/tmp/share_read_'.int(rand(10000000));
	$outputDir = $inputSave if ($inputSave ne '');
	# NOTE(review): the credentials file lives at a predictable path under
	# /tmp with default permissions; consider File::Temp with a 0600 mode.
	my $tempAuthFile = '/tmp/share_read_auth_'.int(rand(10000000)).'.txt';
	if (! -d $outputDir) {
	mkdir $outputDir;
	}
	chdir $outputDir;
	# Slurp all records up front so we can enforce the 100-file safety cap.
	my @stdinLines = <STDIN>;
	if ($inputAll eq '' and scalar(@stdinLines) > 100) {
	print "Alert: You're about to grab ".scalar(@stdinLines)." files! If you really want to do this, run with the '-a' flag\n" and exit;
	}
	foreach my $line (@stdinLines) {
	chomp $line;
	# Record format: "user:pass|:|\\server\share\path\to\file"
	my ($userpass, $share) = split('\\|:\\|', $line,2);
	my ($username, $password) = split(/:/, $userpass, 2);
	my ($empty, $empty1, $server, $sharename, $file) = split (/\\/, $share,5);
	# Write a per-record smbclient auth file (removed after each download).
	open (AUTHFILE, '>'.$tempAuthFile) or die("Couldnt create temporary authentication file $tempAuthFile: $!\n");
	print AUTHFILE "username = $username\n";
	print AUTHFILE "password = $password\n";
	close(AUTHFILE);
	my $short_filename = '...'.substr($file, -35);
	printf "%-45s",$short_filename;
	my $unquoted_filename = $file;
	# Escape embedded single quotes so $file can sit inside the
	# single-quoted smbclient -c argument below.
	$file =~ s/'/'"'"'/g;
	# NOTE(review): $server and $sharename are interpolated into the shell
	# command without escaping; hostile input lines could inject commands.
	my @lines = `smbclient -N -A $tempAuthFile '\\\\$server\\$sharename' -c 'get "$file" temp_out.txt' 2> /dev/null`;
	# smbclient is silent on success; any output line signals an error,
	# classified by the NT_STATUS code it carries.
	if (scalar(@lines) != 0) {
	if ($lines[0] =~ /NT_STATUS_FILE_IS_A_DIRECTORY/) {
	printf "%-45s\n", "Error: Directory";
	next;
	}
	elsif ($lines[0] =~ /NT_STATUS_SHARING_VIOLATION/) {
	printf "%-45s\n", "Error: Sharing violation";
	next;
	}
	elsif ($lines[0] =~ /NT_STATUS_ACCESS_DENIED/) {
	printf "%-45s\n", "Error: Access denied error";
	next;
	}
	elsif ($lines[0] =~ /NT_STATUS_OBJECT_NAME_NOT_FOUND/) {
	printf "%-45s\n","Error: Not found";
	next;
	}
	}
	else {
	printf "%-45s\n", "Success";
	}
	# Flatten the remote path into a local file name:
	# server_share_path_with_underscores
	my $new_file_name = $file;
	$new_file_name =~ s/\\/_/g;
	my $new_unquoted_file_name = $unquoted_filename;
	$new_unquoted_file_name =~ s/\\/_/g;
	$new_file_name = $server.'_'.$sharename.'_'.$new_file_name;
	$new_unquoted_file_name = $server.'_'.$sharename.'_'.$new_unquoted_file_name;
	`mv temp_out.txt '$new_file_name'`;
	# Unless --noedit was given, append a provenance trailer (origin path,
	# credentials used, and smbclient "allinfo" metadata) to the file.
	if ($inputNoEdit eq '') {
	open(NEWFILE, ">>$new_unquoted_file_name");
	print NEWFILE "\n# File from \\\\$server\\$sharename\\$unquoted_filename using $username:$password\n";
	my @data_lines = `smbclient -N -A $tempAuthFile '\\\\$server\\$sharename' -c 'allinfo "$file"' 2> /dev/null`;
	for my $data_line (@data_lines) {
	chomp $data_line;
	print NEWFILE "# $data_line\n"
	}
	print NEWFILE "# END\n";
	close(NEWFILE);
	}
	unlink($tempAuthFile);
	# Without --savedir, stream the grabbed file to STDOUT immediately.
	if ($inputSave eq '') {
	open(FILE, "<$new_unquoted_file_name");
	my @output = <FILE>;
	print "\n--------------------------------------\n";
	print "# File from \\\\$server\\$sharename\\$new_unquoted_file_name using $username:$password\n";
	for my $out_line (@output) {
	print $out_line;
	}
	print "\n--------------------------------------\n";
	close(FILE);
	}
	}
	# Clean up the scratch directory, but only when it is the generated
	# /tmp one -- never a user-supplied --savedir.
	if ($inputSave eq '') {
	if ($outputDir =~ /^\/tmp\/share_read_/) {
	`rm -rf '$outputDir'`;
	}
	}
	return 0;
}
__END__
=head1 Name
SMBGrab.pl
=head1 SYNOPSIS
File listings from SMBList.pl can be piped into this utility to grab the files
wanted from the shares. The original listing from SMBList.pl should be "grepped"
through before passing it to this script, otherwise all files will be downloaded.
Example:
cat SMBList_output/ALL_COMBINED_RESULTS.txt | grep 'password.txt' | ./SMBGrab.pl -s savedfiles
=head1 DESCRIPTION
TBD
=head1 Example
=head1 ARGUMENTS
If no arguments are used, files grabbed will be displayed to the screen without saving.
=head1 OPTIONS
-s, --savedir <directory> A directory to save all the grabbed files to. If this
directory does not exist, it will be created.
Using this argument saves the files but prevents the files
from immediately being printed to the screen.
-a, --all Read all files piped in. Without this switch, the script
protects against accidentally downloading massive amounts
of files by limiting the input to 100 files.
-n, --noedit This will preserve the files to their original form. If
this switch is not used, a note will be made at the bottom
of each file containing information about the file.
-h, --help Display this menu
=head1 AUTHOR
Chris King
| {
"pile_set_name": "Github"
} |
{
"name": "tema2",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"build": "NODE_ENV=production webpack -p",
"start": "NODE_ENV=development webpack-dev-server -d --inline --hot"
},
"author": "",
"license": "ISC",
"dependencies": {
"react": "^15.0.1",
"react-dom": "^15.0.1"
},
"devDependencies": {
"babel-core": "^6.1.21",
"babel-loader": "^6.1.0",
"babel-preset-es2015": "^6.1.18",
"babel-preset-react": "^6.1.18",
"babel-preset-stage-0": "^6.5.0",
"react-hot-loader": "^1.3.0",
"webpack": "^1.13.0",
"webpack-dev-server": "^1.14.1"
}
}
| {
"pile_set_name": "Github"
} |
apiVersion: v1
name: workload-sds
version: 1.0
description: Helm chart for workload sds tests
keywords:
- istio
- citadel
- sds
- performance
- qualification
sources:
- http://github.com/istio/istio
engine: gotpl
icon: https://istio.io/favicons/android-192x192.png | {
"pile_set_name": "Github"
} |
var searchData=
[
['radius',['radius',['../classcom_1_1ab_1_1view_1_1progress_1_1_ab_circle_progress_bar.html#a4db3114dfb332d5d93b9410336d88321',1,'com::ab::view::progress::AbCircleProgressBar']]],
['rawtype',['rawType',['../classcom_1_1google_1_1gson_1_1internal_1_1_0B_types_1_1_parameterized_type_impl.html#a202eb30a765923ed435b6ccbb9b0b3cc',1,'com.google.gson.internal.$Types.ParameterizedTypeImpl.rawType()'],['../classcom_1_1google_1_1gson_1_1reflect_1_1_type_token_3_01_t_01_4.html#a19dd92c9f2389f43a407a40715b8e963',1,'com.google.gson.reflect.TypeToken< T >.rawType()']]],
['realcontentsize',['realContentSize',['../classcom_1_1ab_1_1view_1_1app_1_1_ab_popover_view.html#a81cb7ac0dacfec36fbcb7dcc444f6986',1,'com::ab::view::app::AbPopoverView']]],
['rect',['rect',['../classcom_1_1ab_1_1view_1_1calendar_1_1_calendar_cell.html#ab850892049a5ca3081e6001c77c01134',1,'com.ab.view.calendar.CalendarCell.rect()'],['../classcom_1_1ab_1_1view_1_1calendar_1_1_calendar_header.html#a3ed1e21710d749409b84b9d8485c7c27',1,'com.ab.view.calendar.CalendarHeader.rect()'],['../classcom_1_1ab_1_1view_1_1chart_1_1_clickable_area.html#aa5cb99b7e221bc4f999fefd3be49953e',1,'com.ab.view.chart.ClickableArea.rect()']]],
['regular_5ftext_5ffont',['REGULAR_TEXT_FONT',['../classcom_1_1ab_1_1view_1_1chart_1_1_default_renderer.html#a576495c5f1c9d0a05e032e01466bb387',1,'com::ab::view::chart::DefaultRenderer']]],
['remote_5fservice_5fexception',['REMOTE_SERVICE_EXCEPTION',['../classcom_1_1ab_1_1global_1_1_ab_app_config.html#af6b90f2e2a593a38999bd3b0bbcdd3c7',1,'com::ab::global::AbAppConfig']]],
['requireexpose',['requireExpose',['../classcom_1_1google_1_1gson_1_1internal_1_1_excluder.html#ad483a51af18a98b2f2a9b8c2115157d8',1,'com::google::gson::internal::Excluder']]],
['reset',['reset',['../classcom_1_1ab_1_1view_1_1progress_1_1_ab_circle_progress_bar.html#a6a4d6b3e3054ad587712b6434049b134',1,'com::ab::view::progress::AbCircleProgressBar']]],
['resource',['resource',['../classcom_1_1ab_1_1util_1_1_ab_character_parser.html#abbbc108bebf2370aff229d1631dce904',1,'com::ab::util::AbCharacterParser']]],
['response',['response',['../classcom_1_1ab_1_1http_1_1_ab_http_client_1_1_responder_handler.html#a77fa1772dcaa4e6567687f5d82296609',1,'com::ab::http::AbHttpClient::ResponderHandler']]],
['response_5ftimeout_5fcode',['RESPONSE_TIMEOUT_CODE',['../classcom_1_1ab_1_1http_1_1_ab_http_status.html#a878412dec7231202eec07a27b2de1796',1,'com::ab::http::AbHttpStatus']]],
['responseheaders',['responseHeaders',['../classcom_1_1ab_1_1network_1_1toolbox_1_1_cache_1_1_entry.html#a07c06ac9f2e427baeab822a316701051',1,'com.ab.network.toolbox.Cache.Entry.responseHeaders()'],['../classcom_1_1ab_1_1network_1_1toolbox_1_1_disk_based_cache_1_1_cache_header.html#a20896ae2dc930b46a4a428a28f598fd8',1,'com.ab.network.toolbox.DiskBasedCache.CacheHeader.responseHeaders()']]],
['responselistener',['responseListener',['../classcom_1_1ab_1_1http_1_1_ab_http_client_1_1_responder_handler.html#ab6f09b5e485bc683b577b61f39bab6c4',1,'com.ab.http.AbHttpClient.ResponderHandler.responseListener()'],['../classcom_1_1ab_1_1http_1_1_ab_multipart_entity.html#aae36712bd792ed1a2183f3c855c40096',1,'com.ab.http.AbMultipartEntity.responseListener()']]],
['resulr_5ferror',['RESULR_ERROR',['../classcom_1_1ab_1_1model_1_1_ab_result.html#ac4bab121d2c2b8dd85787e5485bd1512',1,'com::ab::model::AbResult']]],
['resulr_5fok',['RESULR_OK',['../classcom_1_1ab_1_1model_1_1_ab_result.html#a60582e1feeaf4bc7b16c90ecdaf58cd9',1,'com::ab::model::AbResult']]],
['result',['result',['../classcom_1_1ab_1_1network_1_1toolbox_1_1_response_3_01_t_01_4.html#a88ecc6a55386d0a47fbc1ba69e69f102',1,'com.ab.network.toolbox.Response< T >.result()'],['../classcom_1_1ab_1_1task_1_1_ab_task.html#a2409153462492edde780910a931cbf77',1,'com.ab.task.AbTask.result()'],['../classcom_1_1ab_1_1task_1_1_ab_task_pool.html#a4d8f4490bdb60200d6f281cd2e1c36ee',1,'com.ab.task.AbTaskPool.result()'],['../classcom_1_1ab_1_1task_1_1_ab_task_queue.html#ade9363e364a384e39a0d078a4faed471',1,'com.ab.task.AbTaskQueue.result()']]],
['resultcode',['resultCode',['../classcom_1_1ab_1_1model_1_1_ab_result.html#a483b57cf6d0d6fa74bbf6f318bf77803',1,'com::ab::model::AbResult']]],
['resultmessage',['resultMessage',['../classcom_1_1ab_1_1model_1_1_ab_result.html#a56a6cb28c40d92aa0dffbee51843c67b',1,'com::ab::model::AbResult']]],
['retry_5fmessage',['RETRY_MESSAGE',['../classcom_1_1ab_1_1http_1_1_ab_http_client.html#a198f641a5c0679554bbc673078962c0d',1,'com::ab::http::AbHttpClient']]],
['right',['RIGHT',['../classcom_1_1ab_1_1view_1_1slidingmenu_1_1_sliding_menu.html#a6103be015c52bc61b28b7eb9c23d673a',1,'com::ab::view::slidingmenu::SlidingMenu']]],
['rightlayout',['rightLayout',['../classcom_1_1ab_1_1view_1_1titlebar_1_1_ab_title_bar.html#ad4b1be084c5fca75b2e2401c3dfc76b6',1,'com::ab::view::titlebar::AbTitleBar']]],
['rightpadding',['rightPadding',['../classcom_1_1ab_1_1view_1_1sample_1_1_ab_text_view.html#a0760f0ce37ade734080d276f13094ddc',1,'com::ab::view::sample::AbTextView']]],
['rightviewlayoutparams',['rightViewLayoutParams',['../classcom_1_1ab_1_1view_1_1titlebar_1_1_ab_title_bar.html#ab0dec07936ceb383fde18ce5dc446151',1,'com::ab::view::titlebar::AbTitleBar']]],
['rootview',['rootView',['../classcom_1_1ab_1_1fragment_1_1_ab_fragment.html#a5655eb595ea689bb80aee5c9e74e5941',1,'com::ab::fragment::AbFragment']]],
['rotate_5fanim_5fduration',['ROTATE_ANIM_DURATION',['../classcom_1_1ab_1_1view_1_1pullview_1_1_ab_list_view_header.html#a55cdca80f41c0c3f20a9cf85cacb047c',1,'com::ab::view::pullview::AbListViewHeader']]],
['rowheight',['rowHeight',['../classcom_1_1ab_1_1view_1_1calendar_1_1_calendar_view.html#abe250dcafac29b4b3088c184ef2fa5b9',1,'com::ab::view::calendar::CalendarView']]],
['runnable',['runnable',['../classcom_1_1ab_1_1view_1_1sliding_1_1_ab_sliding_play_view.html#ae6be9fdfe258e6ec3b00a5fd4b464d0f',1,'com::ab::view::sliding::AbSlidingPlayView']]]
];
| {
"pile_set_name": "Github"
} |
#!/bin/bash
#
# Git update/pre-receive hook: validates pushed branches (protected-branch
# deletion, branch naming, filename sanity, whitespace and C++ coding
# style via cpplint).

# Locate cpplint, preferring 'cpplint' over 'cpplint.py'.
# `command -v` is the POSIX-specified replacement for the non-standard
# `which`; the || chain preserves the original preference order.
lint=$(command -v cpplint || command -v cpplint.py)
if [ -z "$lint" ]; then
    echo "Command 'cpplint' or 'cpplint.py' not found in path"
    exit 1
fi

# Hooks run with the bare repository as the current directory.
repository=$PWD
# check-deletion <oldrev> <newrev> <refspec>
#
# True (status 0) unless a protected branch -- master or a version
# branch -- is being deleted.  A ref deletion is signalled by git with a
# <newrev> consisting entirely of zeros.
function check-deletion() {
    local newrev=$2 refname=$3
    case $refname in
        refs/heads/master|refs/heads/version/*)
            # `expr STR : '0*$'` prints the number of matched characters:
            # the full length when STR is all zeros, 0 otherwise.
            if [ "$(expr "$newrev" : "0*$")" -eq 0 ]; then
                return 0    # real commit id -> not a deletion
            fi
            return 1        # all zeros -> deleting a protected branch
            ;;
    esac
    return 0
}
# check-branch-name <oldrev> <newrev> <refspec>
#
# True if the branch name is acceptable: master, feature/*, issue/* or
# version/<major>.<minor>.  References outside refs/heads/ (tags etc.)
# are not checked here and always pass.
function check-branch-name () {
    case $3 in
        refs/heads/*) # Reference to real branch
            branch=${3##refs/heads/}
            case $branch in
                master|feature/*|issue/*)
                    return 0
                    ;;
                version/*) # Version branch: must look like version/1.2
                    # `expr` exits 0 on a non-empty match and 1 otherwise,
                    # which is exactly the status we want to propagate.
                    # BUG FIX: the previous `return $(expr ...)` returned
                    # the *length* of the match as the exit status, so
                    # every valid version branch (e.g. version/1.2) was
                    # rejected and malformed ones accepted.  The dot is
                    # now escaped so "version/1x2" no longer matches.
                    expr "$branch" : "version/[0-9][0-9]*\.[0-9][0-9]*" >/dev/null
                    return $?
                    ;;
                *)
                    return 1
                    ;;
            esac
            ;;
    esac
    return 0
}
# check-filenames <oldrev> <newrev> <refspec>
#
# Check that all file names are valid and acceptable.
# Succeeds when every file *added* between <oldrev> and <newrev> has a
# name made up only of printable ASCII characters.
function check-filenames () {
    # --diff-filter=A: only newly added files; -z: NUL-separated output so
    # filenames containing newlines cannot split into several records.
    # tr then deletes every printable ASCII byte (the ' -~' range) and the
    # NUL separators; any byte that survives came from a non-printable /
    # non-ASCII filename, and wc -c counts those leftovers.
    files=$(
        git diff --name-only --diff-filter=A -z $1 $2 |
        LC_ALL=C tr -d '[ -~]\0' |
        wc -c
    )
    test $files -eq 0
}
# check-coding-style <oldrev> <newrev> <refspec>
#
# Check that the coding style is followed. We do this by checking out
# a temporary tree.
#
# NOTE: the body is a subshell "( ... )" rather than "{ ... }", so the
# `cd` below does not leak into the caller.  Relies on the globals
# $repository and $lint set at the top of the script.
function check-coding-style () (
    local -r newrev=$2
    local -r branch=${3##refs/heads/}
    # '/' is not allowed in a mktemp template; flatten it.
    local -r safename=`echo $branch | tr '/' '-'`
    local -r workdir=`mktemp --directory --tmpdir $safename.XXX`
    # Only these directories contain C++ sources that must be linted.
    local -r dirs='harness plugins shared examples'
    local -r tmpfile=`mktemp --tmpdir`
    local exit_code=0

    # Ignore references that are not branches
    if [ $(expr $3 : "refs/heads/") -eq 0 ]; then
        return 0
    fi

    # Create the work directory and check out a temporary version of
    # the tree.
    git --git-dir=$repository --work-tree=$workdir checkout $newrev -f -q -- ./
    cd $workdir

    # Run Cpplint on all C++ files and print the offending lines, if
    # there are any.  (The summary "Done"/"Total" lines are filtered out.)
    find $dirs '(' -name '*.cc' -o -name '*.h' ')' \
        -exec $lint {} + >$tmpfile 2>&1
    exit_code=$?
    if [ $exit_code -ne 0 ]; then
        grep -v '^Done\|^Total' $tmpfile
    fi

    rm $tmpfile
    rm -rf $workdir

    return $exit_code
)
# check-branch <oldrev> <newrev> <refspec>
#
# Run every per-branch validation in order, failing on the first check
# that rejects the update.
function check-branch () {
    local step
    for step in check-deletion check-branch-name check-filenames; do
        "$step" "$@" || return 1
    done

    # Check for trailing whitespace (and other whitespace errors) on lines.
    git diff --check $1 $2 || return 1

    check-coding-style "$@" || return 1
}
exit_code=0

# Hook driver: an update/pre-receive style hook receives one
# "<oldrev> <newrev> <refname>" triple per pushed ref on stdin.
while read oldrev newrev refname; do
    case $refname in
        refs/heads/*)
            branch=${refname##refs/heads/}
            # Record the failure but keep reading the remaining refs so
            # the pusher is told about every rejected branch in one push.
            if ! check-branch $oldrev $newrev $refname; then
                exit_code=1
                continue
            fi
            ;;
        *)
            # Non-branch refs (tags, notes, ...) are accepted unchecked.
            ;;
    esac
done

exit $exit_code
| {
"pile_set_name": "Github"
} |
import Foundation
/// A named GraphQL fragment generated for a specific API, declared on a
/// specific schema type, and carrying the selection set it spans.
struct GraphQLFragment: Hashable {
    /// Name of the fragment.
    let name: String
    /// The API this fragment was generated for.
    let api: API
    /// The schema type the fragment is declared on.
    let target: Schema.GraphQLType
    /// The selection set (fields/arguments) making up the fragment body.
    let object: GraphQLObject
}
extension GraphQLFragment {
    /// Arguments used anywhere inside the fragment's selection set,
    /// forwarded from the underlying `GraphQLObject`.
    var arguments: OrderedSet<GraphQLArgument> {
        return object.arguments
    }
}
extension GraphQLFragment {
    /// Two fragments are considered mergeable (`~=`) when they were
    /// generated for the same API and are declared on the same target type.
    static func ~= (lhs: GraphQLFragment, rhs: GraphQLFragment) -> Bool {
        let lhsKey = (lhs.api.name, lhs.target.name)
        let rhsKey = (rhs.api.name, rhs.target.name)
        return lhsKey == rhsKey
    }

    /// Merges two mergeable fragments by combining their selection sets;
    /// name, api and target are taken from the left-hand side.
    static func + (lhs: GraphQLFragment, rhs: GraphQLFragment) -> GraphQLFragment {
        assert(lhs ~= rhs)
        let mergedObject = lhs.object + rhs.object
        return GraphQLFragment(name: lhs.name,
                               api: lhs.api,
                               target: lhs.target,
                               object: mergedObject)
    }
}
| {
"pile_set_name": "Github"
} |
# Nix package for nauty: programs and libraries for computing
# automorphism groups of graphs and digraphs.
{ stdenv
, lib
, fetchurl
}:

stdenv.mkDerivation rec {
  pname = "nauty";
  version = "27r1";

  src = fetchurl {
    url = "http://pallini.di.uniroma1.it/nauty${version}.tar.gz";
    sha256 = "1nym0p2djws8ylkpr0kgpxfa6fxdlh46cmvz0gn5vd02jzgs0aww";
  };

  # Headers and static libraries go into a separate "dev" output.
  outputs = [ "out" "dev" ];

  configureFlags = [
    # Prevent nauty from sniffing some cpu features. While those are very
    # widely available, it can lead to nasty bugs when they are not available:
    # https://groups.google.com/forum/#!topic/sage-packaging/Pe4SRDNYlhA
    "--${if stdenv.hostPlatform.sse4_2Support then "enable" else "disable"}-popcnt"
    "--${if stdenv.hostPlatform.sse4_aSupport then "enable" else "disable"}-clz"
  ];

  # Upstream has no `make install`; copy executables, docs, headers and
  # static libraries by hand.
  installPhase = ''
    mkdir -p "$out"/{bin,share/doc/nauty} "$dev"/{lib,include/nauty}
    find . -type f -perm -111 \! -name '*.*' \! -name configure -exec cp '{}' "$out/bin" \;
    cp [Rr][Ee][Aa][Dd]* COPYRIGHT This* [Cc]hange* "$out/share/doc/nauty"
    cp *.h "$dev/include/nauty"
    for i in *.a; do
      cp "$i" "$dev/lib/lib$i";
    done
  '';

  # NOTE(review): the upstream Makefile's test target is "checks"; tests
  # only run if doCheck is enabled by the caller.
  checkTarget = "checks";

  meta = with lib; {
    inherit version;
    description = ''Programs for computing automorphism groups of graphs and digraphs'';
    license = licenses.asl20;
    maintainers = with maintainers; [ raskin timokau ];
    platforms = platforms.unix;
    # I'm not sure if the filename will remain the same for future changelog or
    # if it will track changes to minor releases. Lets see. Better than nothing
    # in any case.
    changelog = "http://pallini.di.uniroma1.it/changes24-27.txt";
    homepage = "http://pallini.di.uniroma1.it/";
  };
}
"pile_set_name": "Github"
} |
1
00:00:00,000 --> 00:00:09,075
Koncn Tseng Presents
2
00:00:11,056 --> 00:00:13,080
前情摘要
3
00:00:14,017 --> 00:00:15,083
以后你可能不能跟我联络
4
00:00:15,090 --> 00:00:18,010
-为什么?
-我得搞定查裴尔
5
00:00:18,017 --> 00:00:20,031
所以他们怀疑到我了
6
00:00:20,038 --> 00:00:23,044
-他们很快会带我走
-我要跟谁求援?
7
00:00:23,054 --> 00:00:25,093
米歇尔会用远程存取
尽力来协助你
8
00:00:26,001 --> 00:00:28,079
-你被捕了 米歇尔在哪里?
-我知道她在哪里
9
00:00:30,058 --> 00:00:32,069
抓到了 她已被逮捕
10
00:00:33,098 --> 00:00:35,057
金姆? 你好
11
00:00:35,067 --> 00:00:37,036
-你是谁?
-我是凯蒂华纳
12
00:00:37,046 --> 00:00:39,012
你父亲要我来接你
13
00:00:39,022 --> 00:00:42,060
我爸怎么会叫一个
他刚认识的人来接我?
14
00:00:42,071 --> 00:00:44,017
他信任我
15
00:00:44,025 --> 00:00:47,018
我们越早离开
你就越早能跟你父亲相聚
16
00:00:47,025 --> 00:00:49,062
司法部长说你要找我
17
00:00:49,069 --> 00:00:52,030
-他们怎么说的?
-你被卸职了
18
00:00:52,037 --> 00:00:56,046
这个证据
被彼得金斯利所利用
19
00:00:56,053 --> 00:00:58,064
我要你尽全力调查这个人
20
00:00:58,074 --> 00:01:02,045
-怎么帮你?
-只剩最后一招 交出金斯利
21
00:01:02,055 --> 00:01:05,090
他这人极端危险
而且你没办法接近他
22
00:01:05,097 --> 00:01:08,062
说的对 是你不是我去
23
00:01:08,069 --> 00:01:10,093
金斯利不知道修特已死
24
00:01:11,003 --> 00:01:13,068
就说你要交出修特
就会引起他的兴趣
25
00:01:13,078 --> 00:01:16,008
我要我们谈话的录音带
26
00:01:16,018 --> 00:01:17,090
我会用修特做交换
27
00:01:17,097 --> 00:01:21,004
洛城体育馆
18号出入口 30分钟
28
00:01:21,014 --> 00:01:25,026
-反恐小组有什么支援?
-什么都没有
29
00:01:30,032 --> 00:01:34,080
以下事件
发生在早上7点至8点
30
00:01:43,035 --> 00:01:45,032
还好吗?
31
00:01:52,098 --> 00:01:55,022
快呀
32
00:01:55,028 --> 00:01:59,008
车子发不动
我们下车
33
00:02:01,001 --> 00:02:03,073
安全带卡住了
34
00:02:05,027 --> 00:02:08,046
帕默夫人 请你帮我
35
00:02:10,093 --> 00:02:12,062
请不要这样
36
00:02:12,069 --> 00:02:17,064
抱歉 我得顾我自己
等我可以了我会叫救护车
37
00:02:17,075 --> 00:02:21,042
-不可以这样
-别无选择
38
00:02:21,049 --> 00:02:26,022
你很棒 可惜你对善恶太分明
就像大卫一样
39
00:02:26,029 --> 00:02:29,067
-这世界是很复杂的
-是很简单的
40
00:02:29,078 --> 00:02:33,074
一场战争快要开始了
你是唯一可以帮我阻止此事的人
41
00:02:33,081 --> 00:02:37,090
-不为总统吗?
-他离弃了我
42
00:02:37,097 --> 00:02:42,061
25年的生活 建立的家庭
他却离弃了我
43
00:02:42,071 --> 00:02:45,036
他们最后还是会知道是你的
44
00:02:45,046 --> 00:02:47,085
核弹本来是不该引爆的
45
00:02:47,092 --> 00:02:50,086
事情本来不该是这样的
46
00:02:50,093 --> 00:02:55,021
我知道 我相信你
但你可以阻止此事的
47
00:02:55,028 --> 00:02:59,037
帮我捉到金斯利
你丈夫会重新出任总统
48
00:02:59,044 --> 00:03:01,081
那会是你的功劳
49
00:03:01,090 --> 00:03:04,029
帕默夫人 求求你
50
00:03:21,017 --> 00:03:24,004
你要我怎么做?
51
00:03:24,014 --> 00:03:28,030
置物箱里面有小刀
帮我割开
52
00:03:32,037 --> 00:03:33,077
进来吧
53
00:03:33,084 --> 00:03:38,063
这样好多了 比较简单
我们必须想办法说明
54
00:03:38,074 --> 00:03:42,031
-麦克 我有事跟你说
-我五分钟后下来
55
00:03:42,042 --> 00:03:44,014
麦克
56
00:03:44,021 --> 00:03:45,080
很重要
57
00:03:47,085 --> 00:03:49,016
失陪一下
58
00:04:01,026 --> 00:04:02,085
什么事?
59
00:04:02,093 --> 00:04:06,024
这是金斯利的档案
几分钟前调出来的
60
00:04:06,032 --> 00:04:10,015
杰克鲍尔
认为一切都是他主使的
61
00:04:10,025 --> 00:04:15,059
他在约六小时前打电话
给一名叫做乔纳森华的军人
62
00:04:15,069 --> 00:04:18,073
那军人就是
珊瑚蛇突袭队的人
63
00:04:18,080 --> 00:04:21,042
明显的是他杀了自己人
64
00:04:22,045 --> 00:04:24,062
我知道他是谁
65
00:04:24,069 --> 00:04:30,088
轰炸机刚通过最后的哨站
一小时内就会到达目的地
66
00:04:30,096 --> 00:04:33,003
主攻击目标是军事单位
67
00:04:33,013 --> 00:04:37,067
但那是个位于住宅区域
的指挥中心
68
00:04:37,077 --> 00:04:42,076
不论我们武器有多精确
都会产生间接伤害
69
00:04:43,076 --> 00:04:46,069
会伤到平民
70
00:04:48,068 --> 00:04:52,003
-无可避免吗?
-没办法
71
00:04:54,076 --> 00:04:57,090
那我们就只好这么办
72
00:05:02,006 --> 00:05:03,004
我是查裴尔
73
00:05:03,012 --> 00:05:07,065
-我是麦克纳维
-有什么事 长官?
74
00:05:07,072 --> 00:05:12,077
我知道有人对有关塞浦路斯
录音的真假有所争议
75
00:05:12,084 --> 00:05:16,014
我们的鉴定
与朗利那边的是一样的
76
00:05:16,020 --> 00:05:21,029
那鲍尔还在寻找
伪造录音的证据吗?
77
00:05:21,036 --> 00:05:24,029
对 他未经授权
78
00:05:24,036 --> 00:05:26,091
我要你协助他的调查
79
00:05:27,002 --> 00:05:31,018
-长官 我根本不知道他在哪里
-那就找到他
80
00:05:31,024 --> 00:05:36,071
为何要我这么做?
我们不是该准备作战吗?
81
00:05:36,081 --> 00:05:42,047
还没有 30分钟内给我
杰克所找到的结果
82
00:05:42,057 --> 00:05:44,081
-可是长官...
-这是命令
83
00:05:44,087 --> 00:05:48,083
你知道我的联络方式
用这条专线回报给我
84
00:05:52,068 --> 00:05:56,003
-我要过一小时才上飞机
-为何要延迟?
85
00:05:56,014 --> 00:05:59,085
核弹引爆地点不对
所以有些额外的工作
86
00:05:59,091 --> 00:06:02,021
不必担心什么 我希望
87
00:06:02,028 --> 00:06:04,077
不会的
我不想烦你了
88
00:06:04,084 --> 00:06:06,075
你讲
89
00:06:07,088 --> 00:06:10,053
我还得处理修特
90
00:06:10,060 --> 00:06:13,063
修特是唯一的证人了
91
00:06:13,074 --> 00:06:16,070
-你不是说已经处理掉他了
-我找不到他
92
00:06:16,081 --> 00:06:19,001
我会处理的
别担心
93
00:06:27,056 --> 00:06:29,050
你们俩 坐下
94
00:06:29,061 --> 00:06:32,061
你不必对付米歇尔
她只是听命行事
95
00:06:32,068 --> 00:06:36,003
-我自己来讲
-我要你们坐下
96
00:06:42,092 --> 00:06:46,053
我要你们帮我
联络杰克鲍尔
97
00:06:47,075 --> 00:06:53,035
我明白了 杰克上面有人支持
现在你有压力了
98
00:06:53,045 --> 00:06:56,099
-到底办不办得到?
-是怎么一回事?
99
00:06:59,021 --> 00:07:02,069
或许杰克找到的
证据是真的
100
00:07:02,076 --> 00:07:06,008
-所以我们是对的
-不 你们差点害死我
101
00:07:06,015 --> 00:07:08,035
竟敢攻击我
102
00:07:11,005 --> 00:07:13,002
我告诉你
103
00:07:14,012 --> 00:07:19,039
你不要对付我们
我们就会帮你
104
00:07:23,072 --> 00:07:28,019
少来了 雷恩
所有人都会建议你这么做的
105
00:07:33,089 --> 00:07:36,031
-好吧
-白纸黑字
106
00:07:36,039 --> 00:07:40,051
好的 写下来
快帮我找到鲍尔
107
00:07:43,088 --> 00:07:48,038
-不找人支援吗?
-我现在不是听命于反恐小组
108
00:07:48,049 --> 00:07:51,039
-对他们来说我并不存在
-所以我们只能靠自己
109
00:07:51,049 --> 00:07:53,008
完全靠自己
110
00:07:53,016 --> 00:07:56,047
-你还好吗?需要帮助吗?
-他受伤了
111
00:07:56,055 --> 00:07:59,048
-可以送我们到医院去吗?
-好的 当然
112
00:07:59,056 --> 00:08:02,069
-帕默夫人 你来开车
-什么意思?
113
00:08:02,076 --> 00:08:07,001
-抱歉 我们要用你的车
-别紧张 拿去 开走吧
114
00:08:09,092 --> 00:08:12,003
谢谢
115
00:08:30,072 --> 00:08:32,096
已经好了 长官
116
00:08:44,019 --> 00:08:47,074
-我姓华纳 嗨 卡丽你好
-嗨 凯蒂
117
00:08:47,084 --> 00:08:50,046
-你一定就是杰克的女儿
-我爸在哪里?
118
00:08:50,056 --> 00:08:55,003
他还在外面 等一下
我去告诉查裴尔你们来了
119
00:08:55,011 --> 00:08:58,052
-让我来处理
-你不是被看管了
120
00:08:58,059 --> 00:09:01,024
已经释放了
121
00:09:01,089 --> 00:09:06,008
金姆 我知道出了什么事
你还好吧?
122
00:09:06,015 --> 00:09:10,085
应该没事 我想看我爸爸
我要知道他是否有事
123
00:09:10,091 --> 00:09:14,036
他现在不在
要几个小时才会回来
124
00:09:14,043 --> 00:09:17,056
-他知道我在这儿吗?
-他若来电我们会告诉他
125
00:09:17,063 --> 00:09:21,072
-杰克说我们一到达就要打给他
-现在电话接不通
126
00:09:21,079 --> 00:09:25,088
卡丽会带你们到后面休息
我马上就过来
127
00:09:29,034 --> 00:09:31,003
不会有事的
128
00:09:37,025 --> 00:09:39,016
华纳小姐?
129
00:09:40,051 --> 00:09:46,029
不知你是否知道
你父亲被释放后有回来
130
00:09:46,040 --> 00:09:47,035
为什么?
131
00:09:47,042 --> 00:09:50,077
他要在你妹妹被带走前
和她讲几句话
132
00:09:50,088 --> 00:09:54,026
玛莉被带到这边吗?
133
00:10:01,025 --> 00:10:03,090
你欺骗了我?
134
00:10:04,000 --> 00:10:06,058
你杀了理沙?
135
00:10:07,055 --> 00:10:10,068
还想杀你姐姐?
136
00:10:10,075 --> 00:10:16,047
你真的想引爆核弹
害死上千万人?
137
00:10:20,051 --> 00:10:24,067
告诉我为什么?
因为我必须了解
138
00:10:26,091 --> 00:10:31,086
告诉我你是被逼的
你被洗脑了 你受到威胁
139
00:10:31,097 --> 00:10:36,019
告诉我你不知道
自己在做什么
140
00:10:37,079 --> 00:10:40,051
一定是有理由的
141
00:10:41,034 --> 00:10:43,083
没有理由 爸爸
142
00:10:48,054 --> 00:10:51,006
我不能接受
143
00:10:57,031 --> 00:10:59,022
我也不能
144
00:11:00,054 --> 00:11:07,022
当时她举起枪对着我要扣扳机
我看着她的眼睛
145
00:11:11,064 --> 00:11:13,055
相信我 爸爸
146
00:11:14,036 --> 00:11:17,056
她不会回答你的
147
00:11:18,046 --> 00:11:23,003
她无法做到
那是我们无法理解的原因
148
00:11:37,063 --> 00:11:39,022
爸...
149
00:11:43,071 --> 00:11:46,010
好了
我们走
150
00:12:05,031 --> 00:12:06,090
凯蒂
151
00:12:41,030 --> 00:12:44,002
你以为在外面会安全
152
00:12:47,022 --> 00:12:49,013
其实不然
153
00:13:04,031 --> 00:13:07,000
-我是托尼
-你复职了吗?
154
00:13:07,051 --> 00:13:09,055
金姆已经到这里了
155
00:13:09,062 --> 00:13:13,062
-她很安全
-谢谢老天 别告诉她我的事
156
00:13:13,072 --> 00:13:16,043
-就说我很快会回来
-我会的
157
00:13:16,050 --> 00:13:19,080
-我是雷恩 我们得谈谈
-你这浑蛋
158
00:13:19,086 --> 00:13:22,038
是你阻止救援的直升机
159
00:13:22,046 --> 00:13:26,061
-害修特死掉 这要你负责
-现在不一样了
160
00:13:26,071 --> 00:13:30,019
你有完全的支持
告诉我现在的任务
161
00:13:31,048 --> 00:13:36,015
我要去见金斯利
在洛城体育馆18号出入口
162
00:13:36,021 --> 00:13:41,017
帕默夫人会让他确认
事情与那三个国家无关
163
00:13:41,027 --> 00:13:45,013
想办法撑到后援到达
很快就会到了
164
00:13:45,024 --> 00:13:48,062
没时间了
现在就要和金斯利会面了
165
00:13:48,069 --> 00:13:50,060
你需要后援
166
00:13:50,071 --> 00:13:56,062
你想帮忙 就找出金斯利
与帕默夫人的声纹做确认
167
00:13:56,069 --> 00:13:57,087
好了
168
00:13:57,094 --> 00:14:03,041
-还要与白宫现场语音传输
-通到总统吗?
169
00:14:04,044 --> 00:14:06,061
好吧 线路不要断掉
170
00:14:06,071 --> 00:14:11,095
-收到 我们要进体育馆了
-赶快派迅雷小组到那里去
171
00:14:17,088 --> 00:14:20,027
停在那里
172
00:14:22,058 --> 00:14:24,097
在这里等我
173
00:14:38,020 --> 00:14:40,010
过来
174
00:14:45,030 --> 00:14:48,017
我帮你装窃听器
175
00:14:52,047 --> 00:14:55,022
脱掉外套
176
00:14:57,024 --> 00:14:59,018
这是无线发讯器
177
00:14:59,025 --> 00:15:04,068
不会被他们知道
也不会被发现
178
00:15:04,079 --> 00:15:09,039
-这太疯狂了 不能单独行动
-别无选择
179
00:15:09,046 --> 00:15:12,097
金斯利没有见到
修特不会走的
180
00:15:13,007 --> 00:15:16,004
这就是我们的优势
181
00:15:21,087 --> 00:15:24,036
讲话
182
00:15:24,047 --> 00:15:26,044
我好怕
183
00:15:28,059 --> 00:15:30,042
可以用
184
00:15:41,094 --> 00:15:47,034
总统先生 我现在推翻自己
从一开始反对延迟开打的立场
185
00:15:47,041 --> 00:15:53,057
因为现在很有机会取得
伪造塞浦路斯录音的证据
186
00:15:53,065 --> 00:15:56,094
我们都知道
第二波带了核弹入境
187
00:15:57,001 --> 00:16:00,094
也知道这一切
都是那三个国家赞助的
188
00:16:01,001 --> 00:16:04,084
虽然现在还没有细节
但我刚发现一个线索
189
00:16:04,094 --> 00:16:08,059
第二波与一个
叫金斯利的男子有关联
190
00:16:08,066 --> 00:16:10,018
他是谁?
191
00:16:10,026 --> 00:16:15,066
他控制 海油田
192
00:16:15,073 --> 00:16:17,070
是油田的人主使核弹的吗?
193
00:16:17,078 --> 00:16:22,060
鲍尔相信他能让金斯利讲出实话
他要现场语音连线
194
00:16:22,070 --> 00:16:25,096
轰炸机再20分钟就要轰炸了
195
00:16:26,006 --> 00:16:29,000
鲍尔马上要和他会面了
196
00:16:30,025 --> 00:16:32,097
这是现场语音声纹传送
197
00:16:33,007 --> 00:16:37,013
这是纪录上的对照语音档
198
00:16:37,023 --> 00:16:42,060
这可以证明说话的人
的确是雪莉帕默
199
00:16:42,070 --> 00:16:46,044
-确认了
-下面这个是金斯利?
200
00:16:46,054 --> 00:16:48,081
-你们收得到吗?
-很清楚
201
00:16:48,091 --> 00:16:51,081
-多久后援会到?
-11分钟
202
00:16:51,089 --> 00:16:54,085
-太久了 现在就要进去了
-等一下
203
00:16:54,093 --> 00:16:59,050
不行 我们已经迟到10分钟
普雷斯科特准备好了吗?
204
00:16:59,057 --> 00:17:03,021
托尼已经接上白宫
正在设定现场语音传送
205
00:17:03,031 --> 00:17:04,084
好 那一切就搞定了
206
00:17:07,095 --> 00:17:10,025
你那边如何?帕默夫人?
207
00:17:10,035 --> 00:17:13,045
万一金斯利要杀我呢?
208
00:17:13,055 --> 00:17:15,094
没看到修特之前
他不会动手的
209
00:17:16,001 --> 00:17:20,029
但是一旦他发现
修特不在我们手上...
210
00:17:20,037 --> 00:17:22,070
我会掩护你的
211
00:17:22,076 --> 00:17:27,065
你这种状况
让我不能安心
212
00:17:27,072 --> 00:17:30,011
我可能不会活着走出来
213
00:17:32,017 --> 00:17:36,000
我不想骗你
那是有可能的
214
00:17:42,067 --> 00:17:46,044
我这么做是为了大卫
你很清楚 对不对?
215
00:17:48,065 --> 00:17:50,021
对
216
00:17:50,028 --> 00:17:54,011
你会告诉他吗?
万一出了什么事情
217
00:17:56,059 --> 00:18:01,013
我会的 帕默夫人
你过去吧
218
00:18:30,003 --> 00:18:31,040
-麦克
-是的 长官
219
00:18:31,050 --> 00:18:35,052
传输连线已经准备好了
希望这不是浪费时间
220
00:18:35,063 --> 00:18:38,021
相信我 总统先生
我也一样
221
00:18:41,096 --> 00:18:44,003
怎么了 麦克?
222
00:18:45,000 --> 00:18:48,013
总统与我都觉得这件事
应该也让你叁与
223
00:18:48,020 --> 00:18:52,029
普雷斯科特与司法部长
都会一起听现场语音传送
224
00:18:52,036 --> 00:18:55,097
-听什么?
-对付彼得金斯利的秘密行动
225
00:18:56,004 --> 00:19:01,038
他主使核弹之事
并以塞浦路斯录音栽赃
226
00:19:03,034 --> 00:19:05,086
鲍尔找到金斯利
227
00:19:05,093 --> 00:19:09,086
但是要与金斯利对话的
不是鲍尔
228
00:19:10,066 --> 00:19:15,088
只要能让他认罪
也没什么差别
229
00:19:16,068 --> 00:19:18,027
有什么事?麦克
230
00:19:25,032 --> 00:19:28,067
是雪莉要与金斯利对谈
231
00:19:29,070 --> 00:19:31,046
我不懂
232
00:19:31,053 --> 00:19:36,032
看来几个月前
金斯利找了她串通
233
00:19:36,042 --> 00:19:38,098
她也叁与其中
234
00:19:41,032 --> 00:19:44,013
雪莉为金斯利工作?
235
00:19:44,020 --> 00:19:45,089
看似如此 长官
236
00:19:47,021 --> 00:19:51,036
引爆核弹并挑起战争?
不会的
237
00:19:51,046 --> 00:19:55,072
目前不清楚她了解多少
或牵涉有多深
238
00:19:56,052 --> 00:20:00,093
希望她能引出金斯利认罪
239
00:20:01,000 --> 00:20:05,079
现在雪莉与鲍尔配合
要把金斯利引出来吗?
240
00:20:05,089 --> 00:20:08,068
她要冒相当的危险
241
00:21:02,018 --> 00:21:05,005
史考特
我没有指示前别动作
242
00:21:05,012 --> 00:21:07,068
-我们需要修特
-了解
243
00:22:03,004 --> 00:22:06,017
-修特在哪里?
-他很安全
244
00:22:07,046 --> 00:22:08,047
他在哪里?
245
00:22:09,028 --> 00:22:13,011
你骗我 你根本
就计画要引爆核弹
246
00:22:14,027 --> 00:22:17,024
你利用了我与罗杰斯坦顿
247
00:22:18,056 --> 00:22:19,077
是的 没错
248
00:22:19,084 --> 00:22:25,014
现在我被你牵连 也与核弹牵连
我要求自保
249
00:22:25,025 --> 00:22:28,073
-那是你的问题
-现在是你的了
250
00:22:28,080 --> 00:22:31,029
我若出事
你也别想见到修特
251
00:22:31,036 --> 00:22:35,032
-你别想骗我
-你也别当我是呆子
252
00:22:35,039 --> 00:22:39,048
-找到符合金斯利的声音
-雷恩 听到没
253
00:22:40,093 --> 00:22:43,080
不交出修特你就别想走
254
00:22:43,087 --> 00:22:45,091
这不是让你来决定的
255
00:22:49,079 --> 00:22:55,061
等我离开到安全之处
我才会打电话告诉你他在哪里
256
00:22:55,068 --> 00:22:58,055
我不会让你知道我的行踪
257
00:22:58,066 --> 00:23:01,092
-没有我们要听的
-她会问出来的
258
00:23:06,088 --> 00:23:11,013
-录音带的问题要解决
-都在这里
259
00:23:11,020 --> 00:23:14,071
我们之间所有的通话录音
260
00:23:17,060 --> 00:23:21,091
不 我要全部
包括塞浦路斯的
261
00:23:24,057 --> 00:23:26,099
为什么?那与你无关
262
00:23:27,007 --> 00:23:30,010
要保障
263
00:23:30,021 --> 00:23:35,080
只要一开打
政府也不会发表此证据的
264
00:23:35,087 --> 00:23:38,080
修特是唯一能伤害我们的人
265
00:23:38,088 --> 00:23:41,072
-因为录音是他伪造的
-对
266
00:23:41,079 --> 00:23:46,033
-证据都有了 我们走
-让白宫先确认
267
00:23:46,040 --> 00:23:49,088
-她必须赶快抽身
-满意了吗?
268
00:23:49,095 --> 00:23:53,015
反恐小组要知道
这样的证据是否充分
269
00:23:53,025 --> 00:23:57,021
有确认过这声音真的是现场
并通过鉴定?
270
00:23:57,028 --> 00:23:59,048
是的 长官
271
00:24:01,047 --> 00:24:04,025
那就够了
272
00:24:04,032 --> 00:24:07,066
等我降落 我再打电话给你
273
00:24:11,052 --> 00:24:15,048
你在唬我
修特根本就不在你手上
274
00:24:15,055 --> 00:24:18,010
-是真的
-不 是假的
275
00:24:24,048 --> 00:24:27,013
史考特 杀了她
276
00:24:35,052 --> 00:24:37,042
干掉她
277
00:24:44,079 --> 00:24:46,097
雪莉 快跑
278
00:25:46,039 --> 00:25:48,078
快点
279
00:26:00,015 --> 00:26:02,070
雪莉 快离开 快
280
00:27:35,035 --> 00:27:36,039
杰克鲍尔
281
00:27:42,055 --> 00:27:46,022
你今天给我很多麻烦了
282
00:27:50,026 --> 00:27:53,042
你不会交出修特 对不对?
283
00:28:33,020 --> 00:28:36,002
轰炸机离目标只有三分钟
284
00:28:36,011 --> 00:28:40,027
开始探戈一号第六一程序
285
00:28:40,091 --> 00:28:43,030
目标出现 武器锁定
286
00:28:43,098 --> 00:28:46,080
要开始攻击吗?
287
00:28:46,086 --> 00:28:49,071
可以发射了吗?
288
00:28:51,018 --> 00:28:52,091
我们已经待命中
289
00:28:52,098 --> 00:28:54,095
取消攻击
290
00:28:55,002 --> 00:28:58,006
取消 取消
291
00:29:12,018 --> 00:29:15,031
什么意思?金斯利死了?
怎么会这样?
292
00:29:15,038 --> 00:29:17,071
我警告过你不要用他
293
00:29:17,078 --> 00:29:20,001
我们用了他 但失败了
294
00:29:20,011 --> 00:29:22,089
没关系
只要战争一开始就好了
295
00:29:22,099 --> 00:29:26,085
-不会开始了
-为什么?
296
00:29:26,096 --> 00:29:30,050
总统已经知道录音的事
取消攻击了
297
00:29:30,057 --> 00:29:33,044
不会打仗了 麦克斯
298
00:29:35,018 --> 00:29:39,046
我们得用另一种方法来
299
00:29:39,053 --> 00:29:43,018
-你在讲什么?
-你会知道的
300
00:29:43,028 --> 00:29:45,019
今天就会开始
301
00:29:53,052 --> 00:29:56,007
是我 开始吧
302
00:29:59,031 --> 00:30:01,068
内阁取消之前的决议
303
00:30:01,074 --> 00:30:05,067
我已经请特勤组
恢复对您的服务
304
00:30:05,074 --> 00:30:08,046
您现在是美国总统
305
00:30:09,039 --> 00:30:10,070
我明白了
306
00:30:10,080 --> 00:30:15,002
我也已经提出辞呈
即刻生效
307
00:30:15,012 --> 00:30:20,087
所有投票反对您的内阁
也已经都同意辞职
308
00:30:23,031 --> 00:30:25,028
吉姆
309
00:30:27,018 --> 00:30:29,009
各位先生 女士...
310
00:30:32,017 --> 00:30:34,041
今天我们几乎
要发动无谓的战争
311
00:30:35,085 --> 00:30:41,057
大家对核弹爆炸之事
感到情绪化是可以理解的
312
00:30:41,068 --> 00:30:46,056
但身为领袖
必须比常人更具耐心
313
00:30:48,059 --> 00:30:53,003
我们所要采取的行动
314
00:30:53,010 --> 00:30:56,061
应该在所有证据查明后方可执行
315
00:30:58,009 --> 00:31:02,040
必须透过最严苛的求证
316
00:31:03,092 --> 00:31:09,099
你们草率的隔离我
只会降低求证的水准
317
00:31:10,006 --> 00:31:12,058
这是很大的错误
318
00:31:24,023 --> 00:31:28,077
但是我相信
以后绝不会再发生这种错误
319
00:31:29,074 --> 00:31:33,012
所以我不接受你们的辞呈
320
00:31:34,057 --> 00:31:37,012
我们今天要赶快安定国家
321
00:31:39,031 --> 00:31:43,053
你们每一位都担负着
重要的任务
322
00:31:46,057 --> 00:31:48,077
总统先生...
323
00:31:50,063 --> 00:31:53,009
我不知道该说什么
324
00:31:54,022 --> 00:31:56,016
不必说了
325
00:31:57,019 --> 00:31:58,072
还有很多工作要做
326
00:32:00,017 --> 00:32:01,054
就这样子
327
00:32:07,075 --> 00:32:10,002
-珍妮 准备开记者会
-几时?
328
00:32:10,009 --> 00:32:12,071
尽快
我要公开一切
329
00:32:12,078 --> 00:32:17,073
我要直接面对人民
他们需要我给的保障
330
00:32:17,083 --> 00:32:19,084
是的 长官
331
00:32:23,046 --> 00:32:28,073
麦克 我要谢谢你
在最后关头找反恐小组的人
332
00:32:30,054 --> 00:32:33,044
不客气 总统先生
333
00:32:39,043 --> 00:32:41,085
你本应该
与我共同坚持到底
334
00:32:43,085 --> 00:32:46,002
这是我对你的期望
335
00:32:46,085 --> 00:32:49,034
所以我才对你交付重任
336
00:32:58,041 --> 00:33:02,078
现在你可以卸职了
即刻生效
337
00:33:07,069 --> 00:33:09,044
好的 总统先生
338
00:33:26,009 --> 00:33:29,053
谢谢你 长官
我不能占功劳
339
00:33:29,060 --> 00:33:33,070
乔治梅森所带领的人员
都很出色
340
00:33:33,077 --> 00:33:36,000
我知道
谢谢 长官
341
00:33:37,099 --> 00:33:41,009
是总局局长范恩
342
00:33:41,019 --> 00:33:45,018
他对今天我们的表现大感高兴
不知要从哪里开始讲
343
00:33:45,025 --> 00:33:47,036
太棒了
344
00:33:48,077 --> 00:33:51,067
怎么了 朋友?
345
00:33:54,005 --> 00:33:55,096
是...
346
00:33:58,037 --> 00:34:00,044
是这样的
347
00:34:01,019 --> 00:34:02,094
你若不开除我
348
00:34:03,001 --> 00:34:05,043
就请马上离开我的位置
349
00:34:26,005 --> 00:34:28,044
这位置是你的
350
00:34:38,079 --> 00:34:43,042
轮班的人到了
他们都得到交接资料了
351
00:34:43,049 --> 00:34:46,062
我想去接我哥回家
352
00:34:52,013 --> 00:34:56,086
今天的少数人
创造了很大的成绩
353
00:34:58,056 --> 00:35:00,070
你也是其中之一
354
00:35:00,077 --> 00:35:05,024
你下了难以抉择的决定
而且反抗过我 但你是对的
355
00:35:07,084 --> 00:35:09,060
谢谢
356
00:35:10,072 --> 00:35:14,071
赶快回去休息吧
357
00:35:31,068 --> 00:35:33,088
明天见
358
00:35:51,058 --> 00:35:57,073
-我得检查袋子里的东西
-我是凯蒂华纳 这是金姆鲍尔
359
00:36:12,009 --> 00:36:14,007
借过
360
00:36:19,013 --> 00:36:21,052
-嗨 亲爱的
-爸爸
361
00:36:24,000 --> 00:36:26,055
一切都会好的
362
00:36:32,096 --> 00:36:34,087
我知道
363
00:36:38,040 --> 00:36:40,063
我爱你
364
00:36:45,088 --> 00:36:47,079
我也爱你
365
00:37:00,048 --> 00:37:03,029
爸 我要照顾你
366
00:37:14,084 --> 00:37:17,005
我要强烈的要求你们
367
00:37:17,012 --> 00:37:22,083
不可对现在或可能会听到的
不实传闻与谣言加以渲染
368
00:37:23,064 --> 00:37:27,073
一旦全部事实查明后
我会亲自对各位发布
369
00:37:27,080 --> 00:37:33,037
在此之前 大家都要知道自己
很安全 也不会再有任何危险
370
00:37:35,051 --> 00:37:41,066
今天大家可以充分表现出的
爱国精神就是好好的过日子
371
00:37:41,072 --> 00:37:46,061
我呼吁大家 回到工作岗位
回到学校去
372
00:37:47,058 --> 00:37:51,051
因为我们所享受的自由
每天所得到的自由
373
00:37:51,061 --> 00:37:56,046
就是我们用以战胜敌人的一切
也是让我们坚强的理由
374
00:37:57,053 --> 00:37:59,044
天 美国
375
00:38:02,039 --> 00:38:05,021
天 吾民 谢谢
376
00:38:06,039 --> 00:38:09,056
非常谢谢
主保佑大家
377
00:38:10,052 --> 00:38:11,090
谢谢
378
00:38:14,017 --> 00:38:17,017
谢谢 见到你们真好
379
00:38:17,024 --> 00:38:18,067
谢谢
380
00:38:34,014 --> 00:38:36,014
总统先生
381
00:38:39,051 --> 00:38:41,037
嗨
382
00:38:42,071 --> 00:38:45,005
抱歉 总统先生
383
00:38:45,011 --> 00:38:46,035
谢谢
384
00:38:46,046 --> 00:38:48,008
总统先生
385
00:38:49,043 --> 00:38:51,067
-嗨
-谢谢
386
00:38:53,034 --> 00:38:56,020
-主保佑你
-谢谢
387
00:39:58,039 --> 00:40:00,056
-喂
-完成了
388
00:40:00,063 --> 00:40:02,077
谢谢
389
00:40:12,063 --> 00:40:14,054
总统先生?
390
00:40:22,055 --> 00:40:26,092
总统倒下了
快找医护人员 救命
391
00:40:27,099 --> 00:40:30,047
快搜查那个人
392
00:40:30,055 --> 00:40:33,084
-总统先生?
-检查武器
393
00:40:33,091 --> 00:40:36,084
总统先生?
不会有事的
394
00:40:36,091 --> 00:40:39,002
需要救护车 快点
395
00:41:10,083 --> 00:41:13,058
字幕由VisiOntext制作
| {
"pile_set_name": "Github"
} |
/*
LUFA Library
Copyright (C) Dean Camera, 2010.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2010 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaim all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
/** \file
*
* Header file for Descriptors.c.
*/
#ifndef _DESCRIPTORS_H_
#define _DESCRIPTORS_H_

	/* Includes: */
		#include <avr/pgmspace.h>

		#include <LUFA/Drivers/USB/USB.h>

	/* Type Defines: */
		/** Type define for the device configuration descriptor structure. This must be defined in the
		 *  application code, as the configuration descriptor contains several sub-descriptors which
		 *  vary between devices, and which describe the device's usage to the host.
		 */
		typedef struct
		{
			USB_Descriptor_Configuration_Header_t Config;               /**< Standard configuration descriptor header. */
			USB_Descriptor_Interface_t            HID_Interface;        /**< HID interface descriptor. */
			USB_HID_Descriptor_HID_t              HID_GenericHID;       /**< HID class descriptor for the generic HID interface. */
	        USB_Descriptor_Endpoint_t             HID_ReportINEndpoint; /**< Endpoint descriptor for the reporting IN endpoint. */
		} USB_Descriptor_Configuration_t;

	/* Macros: */
		/** Endpoint number of the Generic HID reporting IN endpoint. */
		#define GENERIC_IN_EPNUM          1

		/** Size in bytes of the Generic HID reporting endpoint. */
		#define GENERIC_EPSIZE            8

		/** Size in bytes of the Generic HID reports (including report ID byte). */
		#define GENERIC_REPORT_SIZE       8

	/* Function Prototypes: */
		/* Descriptor lookup callback invoked by the LUFA core in response to
		   GetDescriptor control requests from the host. */
		uint16_t CALLBACK_USB_GetDescriptor(const uint16_t wValue,
		                                    const uint8_t wIndex,
		                                    const void** const DescriptorAddress)
		                                    ATTR_WARN_UNUSED_RESULT ATTR_NON_NULL_PTR_ARG(3);

#endif
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
// First include (the generated) my_config.h, to get correct platform defines.
#include "my_config.h"
#include <gtest/gtest.h>
#include <my_global.h>
#include <my_sys.h>
namespace like_range_unittest {
/*
  Test that like_range() returns well-formed results.

  like_range() computes the minimum and maximum key prefixes that can match
  the LIKE pattern 'src' ('\\' as escape, '_' and '%' as wildcards).  The
  check then runs well_formed_len() over each result and expects it to cover
  the full returned length, i.e. neither min_str nor max_str may end in a
  truncated multi-byte character sequence.
*/
static void
test_like_range_for_charset(CHARSET_INFO *cs, const char *src, size_t src_len)
{
  char min_str[32], max_str[32];
  size_t min_len, max_len, min_well_formed_len, max_well_formed_len;
  int error= 0;  // out-parameter of well_formed_len(); value not asserted here
  cs->coll->like_range(cs, src, src_len, '\\', '_', '%',
                       sizeof(min_str),  min_str, max_str, &min_len, &max_len);
  // diag("min_len=%d\tmax_len=%d\t%s", (int) min_len, (int) max_len, cs->name);
  min_well_formed_len= cs->cset->well_formed_len(cs,
                                                 min_str, min_str + min_len,
                                                 10000, &error);
  max_well_formed_len= cs->cset->well_formed_len(cs,
                                                 max_str, max_str + max_len,
                                                 10000, &error);
  // On failure, report the length mismatch and the first byte past the
  // well-formed prefix to help identify the broken sequence.
  EXPECT_EQ(min_len, min_well_formed_len)
    << "Bad min_str: min_well_formed_len=" << min_well_formed_len
    << " min_str[" << min_well_formed_len << "]="
    << (uchar) min_str[min_well_formed_len];
  EXPECT_EQ(max_len, max_well_formed_len)
    << "Bad max_str: max_well_formed_len=" << max_well_formed_len
    << " max_str[" << max_well_formed_len << "]="
    << (uchar) max_str[max_well_formed_len];
}
// Character sets to exercise (multi-byte and single-byte); each group is
// included only when the corresponding charset was compiled into the server.
static CHARSET_INFO *charset_list[]=
{
#ifdef HAVE_CHARSET_big5
  &my_charset_big5_chinese_ci,
  &my_charset_big5_bin,
#endif
#ifdef HAVE_CHARSET_euckr
  &my_charset_euckr_korean_ci,
  &my_charset_euckr_bin,
#endif
#ifdef HAVE_CHARSET_gb2312
  &my_charset_gb2312_chinese_ci,
  &my_charset_gb2312_bin,
#endif
#ifdef HAVE_CHARSET_gbk
  &my_charset_gbk_chinese_ci,
  &my_charset_gbk_bin,
#endif
#ifdef HAVE_CHARSET_latin1
  &my_charset_latin1,
  &my_charset_latin1_bin,
#endif
#ifdef HAVE_CHARSET_sjis
  &my_charset_sjis_japanese_ci,
  &my_charset_sjis_bin,
#endif
#ifdef HAVE_CHARSET_tis620
  &my_charset_tis620_thai_ci,
  &my_charset_tis620_bin,
#endif
#ifdef HAVE_CHARSET_ujis
  &my_charset_ujis_japanese_ci,
  &my_charset_ujis_bin,
#endif
#ifdef HAVE_CHARSET_utf8
  &my_charset_utf8_general_ci,
  &my_charset_utf8_unicode_ci,
  &my_charset_utf8_bin,
#endif
};
#if defined(GTEST_HAS_PARAM_TEST)
// Value-parameterized fixture: each test instance receives one CHARSET_INFO*
// from charset_list via GetParam().
class LikeRangeTest : public ::testing::TestWithParam<CHARSET_INFO*>
{
protected:
  virtual void SetUp()
  {
    m_charset= GetParam();
  }
  CHARSET_INFO *m_charset;  // charset under test for this parameter value
};

INSTANTIATE_TEST_CASE_P(Foo1, LikeRangeTest,
                        ::testing::ValuesIn(charset_list));

// Run the well-formedness check on a simple prefix pattern ("abc" + '%').
TEST_P(LikeRangeTest, TestLikeRange)
{
  test_like_range_for_charset(m_charset, "abc%", 4);
}
#endif
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2012, The Iconfactory. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of The Iconfactory nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE ICONFACTORY BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import Foundation
// NOTE(review): this extension was machine-converted (see the autogeneration
// warning at the top of the file) and contains constructs that are not valid
// Swift: C-style varargs via va_start/va_end, a `return` of a value from a
// convenience initializer, and truthiness tests such as `if !record` on
// non-Bool values.  The comments below describe the apparent intent only;
// the code will not compile as-is — TODO: port by hand before use.
extension NSObject {
    // Appearance proxy for the receiver regardless of containment.
    class func appearance() -> AnyObject {
        return self.appearanceWhenContainedIn(nil)
    }
    // NOTE(review): in the Objective-C original this was presumably the
    // variadic class method +appearanceWhenContainedIn:, not an initializer;
    // it looks up (or lazily creates) the UIAppearanceProxy keyed by the
    // containment class path — TODO confirm against the ObjC source.
    convenience init(containerClass: UIAppearanceContainer) {
        var appearanceRules = objc_getAssociatedObject(self, UIAppearanceClassAssociatedObjectKey) as? NSDictionary as? [NSObject : AnyObject]
        if appearanceRules == nil {
            appearanceRules = [NSObject : AnyObject](minimumCapacity: 1)
            objc_setAssociatedObject(self, UIAppearanceClassAssociatedObjectKey, appearanceRules, .OBJC_ASSOCIATION_RETAIN)
        }
        var containmentPath: [AnyObject] = [AnyObject]()
        // NOTE(review): garbled varargs loop — originally walked the C
        // va_list of container classes, appending each to containmentPath.
        var args: va_list
        va_start(args, containerClass)
        for ; containerClass != nil; containerClass = {
        }
        args, AnyClass < UIAppearanceContainer >
        containmentPath.append(containerClass)
        va_end(args)
        var record: UIAppearanceProxy = (appearanceRules[containmentPath] as! UIAppearanceProxy)
        if !record {
            record = UIAppearanceProxy(class: self)
            appearanceRules[containmentPath] = record
        }
        return record
    }
    // Default containment: objects have no appearance container unless a
    // subclass (e.g. a view) overrides this.
    func _UIAppearanceContainer() -> AnyObject? {
        return nil
    }
    func _UIAppearancePropertyDidChange(property: AnyObject) {
        // note an overridden property value so we don't override it with a default value in -_UIAppearanceUpdateIfNeeded
        // this occurs when a value is set directly using a setter on an instance (such as "label.textColor = myColor;")
        var changedProperties: Set<AnyObject> = Set<AnyObject>.setWithSet(objc_getAssociatedObject(self, UIAppearanceChangedPropertiesKey))
        objc_setAssociatedObject(self, UIAppearanceChangedPropertiesKey, changedProperties.byAddingObject = property, OBJC_ASSOCIATION_RETAIN)
    }
    func _UIAppearanceUpdateIfNeeded() {
        // check if we are already up to date, if so, return early
        if objc_getAssociatedObject(self, UIAppearancePropertiesAreUpToDateKey) {
            return
        }
        // first go down our own class heirarchy until we find the root of the UIAppearance protocol
        // then we'll start at the bottom and work up while checking each class for all relevant rules
        // that apply to this instance at this time.
        var classes: [AnyObject] = UIAppearanceHierarchyForClass(self)
        var propertiesToSet: [NSObject : AnyObject] = [NSObject : AnyObject](minimumCapacity: 0)
        for klass: AnyClass in classes {
            var rules: [NSObject : AnyObject] = objc_getAssociatedObject(klass, UIAppearanceClassAssociatedObjectKey)
            // sorts the rule keys (which are arrays of classes) by length
            // if the lengths match, it sorts based on the last class being a superclass of the other or vice-versa
            // if the last classes aren't related at all, it marks them equal (I suspect these cases will always be filtered out in the next step)
            var sortedRulePaths: [AnyObject] = rules.allKeys().sortedArrayUsingComparator({(path1: [AnyObject], path2: [AnyObject]) -> NSComparisonResult in
                if path1.count == path2.count {
                    if (path2.lastObject() is path1.lastObject()) {
                        return NSOrderedAscending as! NSComparisonResult
                    }
                    else if (path1.lastObject() is path2.lastObject()) {
                        return NSOrderedDescending as! NSComparisonResult
                    }
                    else {
                        return NSOrderedSame as! NSComparisonResult
                    }
                }
                else if path1.count < path2.count {
                    return NSOrderedAscending as! NSComparisonResult
                }
                else {
                    return NSOrderedDescending as! NSComparisonResult
                }
            })
            // we should now have a list of classes to check for rule settings for this instance, so now we spin
            // through those and fetch the properties and values and add them to the dictionary of things to do.
            // before applying a rule's properties, we must make sure this instance is qualified, so we must check
            // this instance's container hierarchy against ever class that makes up the rule.
            for rule: [AnyObject] in sortedRulePaths {
                var shouldApplyRule: Bool = true
                for klass: AnyClass in rule.reverseObjectEnumerator() {
                    var container: AnyObject = self._UIAppearanceContainer()
                    while container && !(container is klass) {
                        container = container._UIAppearanceContainer()
                    }
                    if !container {
                        shouldApplyRule = false
                    }
                }
                if shouldApplyRule {
                    var proxy: UIAppearanceProxy = (rules[rule] as! UIAppearanceProxy)
                    propertiesToSet.addEntriesFromDictionary(proxy._appearancePropertiesAndValues())
                }
            }
        }
        // before setting the actual properties on the instance, save off a copy of the existing modified properties
        // because the act of setting the UIAppearance properties will end up messing with that set.
        // after we're done actually applying everything, reset the modified properties set to what it was before.
        var originalProperties = (objc_getAssociatedObject(self, UIAppearanceChangedPropertiesKey) as! NSSet) as Set<NSObject>
        // subtract any properties that have been overriden from the list to apply
        propertiesToSet.removeObjectsForKeys(originalProperties.allObjects())
        // now apply everything that's left
        for property: UIAppearanceProperty in propertiesToSet.allValues() {
            property.invokeUsingTarget(self)
        }
        // now reset our set of changes properties to the original set so we don't count the UIAppearance defaults as overrides
        objc_setAssociatedObject(self, UIAppearanceChangedPropertiesKey, originalProperties, .OBJC_ASSOCIATION_RETAIN)
        // done!
        objc_setAssociatedObject(self, UIAppearancePropertiesAreUpToDateKey, 1, .OBJC_ASSOCIATION_RETAIN)
    }
    func _UIAppearanceSetNeedsUpdate() {
        // this removes UIAppearancePropertiesAreUpToDateKey which will trigger _UIAppearanceUpdateIfNeeded to run (if it is called later)
        objc_setAssociatedObject(self, UIAppearancePropertiesAreUpToDateKey, nil, .OBJC_ASSOCIATION_RETAIN)
    }
}
// Associated-object keys used above to stash, per class/instance: the
// appearance rule table, the set of directly-overridden properties, and the
// "already up to date" flag.
let UIAppearanceClassAssociatedObjectKey: String = "UIAppearanceClassAssociatedObjectKey"
let UIAppearanceChangedPropertiesKey: String = "UIAppearanceChangedPropertiesKey"
let UIAppearancePropertiesAreUpToDateKey: String = "UIAppearancePropertiesAreUpToDateKey"
var classes: [AnyObject] = [AnyObject]()
while klass as! AnyObject.conformsToProtocol() {
classes.insertObject(klass, atIndex: 0)
klass = klass.superclass()
}
return classes | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2001 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* video decoding with libavcodec API example
*
* @example decode_video.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#define INBUF_SIZE 4096
/**
 * Write one 8-bit grayscale plane to disk as a binary PGM ("P5") image.
 *
 * @param buf      start of the pixel data, one byte per pixel
 * @param wrap     line stride in bytes (may exceed xsize for padded planes)
 * @param xsize    image width in pixels
 * @param ysize    image height in pixels
 * @param filename output path; the file is created/overwritten
 *
 * Exits the process on failure to open the output file.
 */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    /* "wb": the pixel payload is binary; text mode ("w") would translate
       newline bytes on Windows and corrupt the image */
    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    /* copy row by row, skipping any stride padding beyond xsize */
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}
/*
 * Drain every frame the decoder can produce for one input packet (or for
 * the flush packet when pkt is NULL), writing each decoded frame to
 * "<filename>-<frame number>" via pgm_save().  Exits the process on any
 * decoding error.
 */
static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *filename)
{
    char out_name[1024];
    int err;

    err = avcodec_send_packet(dec_ctx, pkt);
    if (err < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    for (;;) {
        err = avcodec_receive_frame(dec_ctx, frame);
        if (err == AVERROR(EAGAIN) || err == AVERROR_EOF)
            return;   /* decoder needs more input, or stream is finished */
        if (err < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        printf("saving frame %3d\n", dec_ctx->frame_number);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(out_name, sizeof(out_name), "%s-%d",
                 filename, dec_ctx->frame_number);
        pgm_save(frame->data[0], frame->linesize[0],
                 frame->width, frame->height, out_name);
    }
}
/*
 * Demo driver: reads an MPEG-1 elementary stream from argv[1], splits it
 * into packets with the parser, decodes each packet, and writes the frames
 * as PGM files named "<argv[2]>-<n>".
 */
int main(int argc, char **argv)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c= NULL;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t data_size;
    int ret;
    AVPacket *pkt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(0);
    }
    filename    = argv[1];
    outfilename = argv[2];

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* find the MPEG-1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    while (!feof(f)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (!data_size)
            break;

        /* use the parser to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            /* the parser consumes 'ret' bytes and, once it has a whole
               frame, fills pkt->data/pkt->size */
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data      += ret;
            data_size -= ret;

            if (pkt->size)
                decode(c, frame, pkt, outfilename);
        }
    }

    /* flush the decoder: a NULL packet drains any buffered frames */
    decode(c, frame, NULL, outfilename);

    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<odoo>
<data noupdate="1">
<record id="product_category_deliveries" model="product.category">
<field name="parent_id" ref="product.product_category_all"/>
<field name="name">Deliveries</field>
</record>
<record id="product_product_delivery" model="product.product">
<field name="name">Free delivery charges</field>
<field name="default_code">Delivery_007</field>
<field name="type">service</field>
<field name="categ_id" ref="delivery.product_category_deliveries"/>
<field name="sale_ok" eval="False"/>
<field name="purchase_ok" eval="False"/>
<field name="list_price">0.0</field>
</record>
<record id="free_delivery_carrier" model="delivery.carrier">
<field name="name">Free delivery charges</field>
<field name="fixed_price">0.0</field>
<field name="free_over" eval="True"/>
<field name="amount">1000</field>
<field name="sequence">1</field>
<field name="delivery_type">fixed</field>
<field name="product_id" ref="delivery.product_product_delivery"/>
</record>
</data>
</odoo>
| {
"pile_set_name": "Github"
} |
/*
* This file is part of Cleanflight.
*
* Cleanflight is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Cleanflight is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cleanflight. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#ifdef UNIT_TEST
static inline void __set_BASEPRI(uint32_t basePri) {(void)basePri;}
static inline void __set_BASEPRI_MAX(uint32_t basePri) {(void)basePri;}
#endif // UNIT_TEST
// cleanup BASEPRI restore function, with global memory barrier
// Invoked via __attribute__((__cleanup__)) in ATOMIC_BLOCK below: restores
// the BASEPRI value that was saved on block entry, on every exit path
// (normal fall-through, return, break, goto).
static inline void __basepriRestoreMem(uint8_t *val)
{
    __set_BASEPRI(*val);
}
// set BASEPRI_MAX function, with global memory barrier, returns true
// Always returning 1 lets ATOMIC_BLOCK use this as the for-loop condition
// initializer so the elevated priority takes effect exactly once.
static inline uint8_t __basepriSetMemRetVal(uint8_t prio)
{
    __set_BASEPRI_MAX(prio);
    return 1;
}
// The CMSIS provides the function __set_BASEPRI(priority) for changing the value of the BASEPRI register.
// The function uses the hardware convention for the ‘priority’ argument, which means that the priority must
// be shifted left by the number of unimplemented bits (8 – __NVIC_PRIO_BITS).
//
// NOTE: The priority numbering convention used in __set_BASEPRI(priority) is thus different than in the
// NVIC_SetPriority(priority) function, which expects the “priority” argument not shifted.
// Run block with elevated BASEPRI (using BASEPRI_MAX), restoring BASEPRI on exit. All exit paths are handled
// Full memory barrier is placed at start and exit of block
#ifdef UNIT_TEST
#define ATOMIC_BLOCK(prio) {}
#else
#define ATOMIC_BLOCK(prio) for ( uint8_t __basepri_save __attribute__((__cleanup__(__basepriRestoreMem))) = __get_BASEPRI(), \
__ToDo = __basepriSetMemRetVal((prio) << (8U - __NVIC_PRIO_BITS)); __ToDo ; __ToDo = 0 )
#endif // UNIT_TEST
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation (the "GPL").
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 (GPLv2) for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 (GPLv2) along with this source code.
*/
#include <linux/debugfs.h>
#include "cipher.h"
#include "util.h"
/* offset of SPU_OFIFO_CTRL register */
#define SPU_OFIFO_CTRL 0x40
#define SPU_FIFO_WATERMARK 0x1FF
/**
 * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
 * start of a scatterlist.
 * @sg: [in] Start of a scatterlist
 * @skip: [in] Distance from the start of the scatterlist, in bytes
 * @sge: [out] Scatterlist entry at skip bytes from start
 * @sge_offset: [out] Number of bytes from start of sge buffer to get to
 * requested distance.
 *
 * Return: 0 if entry found at requested distance
 * < 0 otherwise
 */
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
		     struct scatterlist **sge, unsigned int *sge_offset)
{
	/* byte index from start of sg to the end of the previous entry */
	unsigned int index = 0;
	/* byte index from start of sg to the end of the current entry */
	unsigned int next_index;

	next_index = sg->length;
	/* Walk forward until the entry containing byte 'skip' is found;
	 * fail if the list ends before reaching that distance.
	 */
	while (next_index <= skip) {
		sg = sg_next(sg);
		index = next_index;
		if (!sg)
			return -EINVAL;
		next_index += sg->length;
	}

	*sge_offset = skip - index;
	*sge = sg;
	return 0;
}
/*
 * Copy len bytes of sg data, starting at offset skip, to a dest buffer.
 * A short copy is only logged (flow_log), not propagated to the caller.
 */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
			 unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(src);

	copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}
/*
 * Copy data into a scatterlist starting at a specified offset in the
 * scatterlist. Specifically, copy len bytes of data in the buffer src
 * into the scatterlist dest, starting skip bytes into the scatterlist.
 * As above, a short copy is only logged, not returned as an error.
 */
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
			   unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(dest);

	copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}
/**
 * spu_sg_count() - Determine number of elements in scatterlist to provide a
 * specified number of bytes.
 * @sg_list: scatterlist to examine
 * @skip: index of starting point
 * @nbytes: consider elements of scatterlist until reaching this number of
 *	    bytes
 *
 * Return: the number of sg entries contributing to nbytes of data
 */
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
{
	struct scatterlist *sg;
	int sg_nents = 0;
	unsigned int offset;

	if (!sg_list)
		return 0;

	/* locate the entry containing byte 'skip'; 0 entries if out of range */
	if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
		return 0;

	/* count entries until nbytes are covered; the first entry only
	 * contributes (length - offset) bytes
	 */
	while (sg && (nbytes > 0)) {
		sg_nents++;
		nbytes -= (sg->length - offset);
		offset = 0;
		sg = sg_next(sg);
	}
	return sg_nents;
}
/**
 * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
 * given length.
 * @to_sg: scatterlist to copy to
 * @from_sg: scatterlist to copy from
 * @from_skip: number of bytes to skip in from_sg. Non-zero when previous
 *	       request included part of the buffer in entry in from_sg.
 *	       Assumes from_skip < from_sg->length.
 * @from_nents number of entries in from_sg
 * @length number of bytes to copy. may reach this limit before exhausting
 *	   from_sg.
 *
 * Copies the entries themselves, not the data in the entries. Assumes to_sg has
 * enough entries. Does not limit the size of an individual buffer in to_sg.
 *
 * to_sg, from_sg, skip are all updated to end of copy
 *
 * Return: Number of bytes copied
 */
u32 spu_msg_sg_add(struct scatterlist **to_sg,
		   struct scatterlist **from_sg, u32 *from_skip,
		   u8 from_nents, u32 length)
{
	struct scatterlist *sg;	/* an entry in from_sg */
	struct scatterlist *to = *to_sg;
	struct scatterlist *from = *from_sg;
	u32 skip = *from_skip;
	u32 offset;
	int i;
	u32 entry_len = 0;
	u32 frag_len = 0;	/* length of entry added to to_sg */
	u32 copied = 0;		/* number of bytes copied so far */

	if (length == 0)
		return 0;

	for_each_sg(from, sg, from_nents, i) {
		/* number of bytes in this from entry not yet used */
		entry_len = sg->length - skip;
		frag_len = min(entry_len, length - copied);
		offset = sg->offset + skip;
		/* reference the same page as the source entry; no data copy */
		if (frag_len)
			sg_set_page(to++, sg_page(sg), frag_len, offset);
		copied += frag_len;
		if (copied == entry_len) {
			/* used up all of from entry */
			skip = 0;	/* start at beginning of next entry */
		}
		if (copied == length)
			break;
	}
	*to_sg = to;
	*from_sg = sg;
	/* if we stopped mid-entry, remember how far into it we got so the
	 * next call can resume there
	 */
	if (frag_len < entry_len)
		*from_skip = skip + frag_len;
	else
		*from_skip = 0;
	return copied;
}
/*
 * add_to_ctr() - Add an increment to a 16-byte big-endian counter block,
 * treating it as two 64-bit big-endian halves and propagating any carry
 * from the low half into the high half.
 * @ctr_pos:   pointer to the 16-byte counter, most significant byte first
 * @increment: amount to add to the counter
 */
void add_to_ctr(u8 *ctr_pos, unsigned int increment)
{
	__be64 *high_be = (__be64 *)ctr_pos;
	__be64 *low_be = high_be + 1;
	u64 orig_low = __be64_to_cpu(*low_be);
	u64 new_low = orig_low + (u64)increment;

	*low_be = __cpu_to_be64(new_low);
	/* unsigned wraparound in the low half means a carry occurred */
	if (new_low < orig_low)
		/* there was a carry from the low 8 bytes */
		*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
}
/*
 * Pairs a shash_desc with its variable-size operational context so both
 * can be obtained from a single allocation sized via
 * sizeof(struct shash_desc) + crypto_shash_descsize() (see do_shash()).
 */
struct sdesc {
	struct shash_desc shash;
	char ctx[];
};
/*
 * do a synchronous decrypt operation
 *
 * @alg_name:  crypto API algorithm name (e.g. "cbc(aes)")
 * @key_ptr:   key material, key_len bytes
 * @iv_ptr:    initialization vector; ivsize bytes are copied from here
 * @src_ptr:   ciphertext input, block_len bytes
 * @dst_ptr:   plaintext output, block_len bytes
 * @block_len: number of bytes to decrypt
 *
 * Return: 0 on success, negative errno on failure.
 */
int do_decrypt(char *alg_name,
	       void *key_ptr, unsigned int key_len,
	       void *iv_ptr, void *src_ptr, void *dst_ptr,
	       unsigned int block_len)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct crypto_blkcipher *tfm =
	    crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
	struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
	int ret = 0;
	void *iv;
	int ivsize;

	flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);

	/* single-entry scatterlists wrapping the flat in/out buffers */
	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src_ptr, block_len);

	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst_ptr, block_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, iv_ptr, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
	crypto_free_blkcipher(tfm);

	if (ret < 0)
		pr_err("aes_decrypt failed %d\n", ret);

	return ret;
}
/**
* do_shash() - Do a synchronous hash operation in software
* @name: The name of the hash algorithm
* @result: Buffer where digest is to be written
* @data1: First part of data to hash. May be NULL.
* @data1_len: Length of data1, in bytes
* @data2: Second part of data to hash. May be NULL.
* @data2_len: Length of data2, in bytes
* @key: Key (if keyed hash)
* @key_len: Length of key, in bytes (or 0 if non-keyed hash)
*
* Note that the crypto API will not select this driver's own transform because
* this driver only registers asynchronous algos.
*
* Return: 0 if hash successfully stored in result
* < 0 otherwise
*/
int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
const u8 *data2, unsigned int data2_len,
const u8 *key, unsigned int key_len)
{
int rc;
unsigned int size;
struct crypto_shash *hash;
struct sdesc *sdesc;
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
pr_err("%s: Memory allocation failure", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
sdesc->shash.flags = 0x0;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
pr_err("%s: Could not setkey %s shash", __func__, name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
pr_err("%s: Could not init %s shash", __func__, name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
pr_err("%s: Could not update1", __func__);
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
pr_err("%s: Could not update2", __func__);
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
pr_err("%s: Could not genereate %s hash", __func__, name);
do_shash_err:
crypto_free_shash(hash);
kfree(sdesc);
return rc;
}
/*
 * Dump len bytes of a scatterlist starting at skip bytes into the sg.
 * Output goes to the kernel log in 16-byte rows; a no-op unless
 * packet_debug_logging is enabled. Optionally sleeps after dumping to
 * throttle log output (debug_logging_sleep ms).
 */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
	u8 dbuf[16];
	unsigned int idx = skip;
	unsigned int num_out = 0;	/* number of bytes dumped so far */
	unsigned int count;

	if (packet_debug_logging) {
		while (num_out < len) {
			/* final row may be shorter than 16 bytes */
			count = (len - num_out > 16) ? 16 : len - num_out;
			sg_copy_part_to_buf(sg, dbuf, count, idx);
			num_out += count;
			print_hex_dump(KERN_ALERT, "  sg: ", DUMP_PREFIX_NONE,
				       4, 1, dbuf, count, false);
			idx += 16;
		}
	}
	if (debug_logging_sleep)
		msleep(debug_logging_sleep);
}
/*
 * Returns the crypto API name string for a given cipher alg/mode pair.
 * Unrecognized modes fall back to the bare algorithm name; unrecognized
 * algorithms fall back to "other".
 */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
{
	if (alg == CIPHER_ALG_RC4)
		return "rc4";

	if (alg == CIPHER_ALG_AES) {
		if (mode == CIPHER_MODE_CBC)
			return "cbc(aes)";
		if (mode == CIPHER_MODE_ECB)
			return "ecb(aes)";
		if (mode == CIPHER_MODE_OFB)
			return "ofb(aes)";
		if (mode == CIPHER_MODE_CFB)
			return "cfb(aes)";
		if (mode == CIPHER_MODE_CTR)
			return "ctr(aes)";
		if (mode == CIPHER_MODE_XTS)
			return "xts(aes)";
		if (mode == CIPHER_MODE_GCM)
			return "gcm(aes)";
		return "aes";
	}

	if (alg == CIPHER_ALG_DES) {
		if (mode == CIPHER_MODE_CBC)
			return "cbc(des)";
		if (mode == CIPHER_MODE_ECB)
			return "ecb(des)";
		if (mode == CIPHER_MODE_CTR)
			return "ctr(des)";
		return "des";
	}

	if (alg == CIPHER_ALG_3DES) {
		if (mode == CIPHER_MODE_CBC)
			return "cbc(des3_ede)";
		if (mode == CIPHER_MODE_ECB)
			return "ecb(des3_ede)";
		if (mode == CIPHER_MODE_CTR)
			return "ctr(des3_ede)";
		return "3des";
	}

	return "other";
}
/*
 * .read handler for the debugfs "stats" file: formats the driver's counters
 * into a temporary buffer and copies the requested window to userspace.
 *
 * NOTE(review): snprintf() returns the length that *would* have been written,
 * so on truncation out_offset can exceed out_count and subsequent
 * "out_count - out_offset" size arguments would underflow. The final clamp
 * only protects the copy length. Kernel code typically uses scnprintf() for
 * this pattern — TODO confirm and convert.
 */
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct device_private *ipriv;
	char *buf;
	ssize_t ret, out_offset, out_count;
	int i;
	u32 fifo_len;
	u32 spu_ofifo_ctrl;
	u32 alg;
	u32 mode;
	u32 op_cnt;

	out_count = 2048;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ipriv = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Number of SPUs.........%u\n",
			       ipriv->spu.num_spu);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Current sessions.......%u\n",
			       atomic_read(&ipriv->session_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Session count..........%u\n",
			       atomic_read(&ipriv->stream_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Cipher setkey..........%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Cipher Ops.............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
	/* per alg/mode cipher counters; only non-zero entries are printed */
	for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
		for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
			op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
			if (op_cnt) {
				out_offset += snprintf(buf + out_offset,
						       out_count - out_offset,
			       "  %-13s%11u\n",
			       spu_alg_name(alg, mode), op_cnt);
			}
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Hash Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
		       "  %-13s%11u\n",
		       hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "HMAC setkey............%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "HMAC Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
		       "  %-13s%11u\n",
		       hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "AEAD setkey............%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "AEAD Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
	for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
		       "  %-13s%11u\n",
		       aead_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Bytes of req data......%llu\n",
			       (u64)atomic64_read(&ipriv->bytes_out));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Bytes of resp data.....%llu\n",
			       (u64)atomic64_read(&ipriv->bytes_in));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Mailbox full...........%u\n",
			       atomic_read(&ipriv->mb_no_spc));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Mailbox send failures..%u\n",
			       atomic_read(&ipriv->mb_send_fail));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Check ICV errors.......%u\n",
			       atomic_read(&ipriv->bad_icv));
	/* SPU-M only: read each SPU's output FIFO high-water mark register */
	if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
		for (i = 0; i < ipriv->spu.num_spu; i++) {
			spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
						  SPU_OFIFO_CTRL);
			fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
				       "SPU %d output FIFO high water.....%u\n",
				       i, fifo_len);
		}

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}
/* File operations for the debugfs "stats" file (read-only). */
static const struct file_operations spu_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = spu_debugfs_read,
};
/*
 * Create the debug FS directories. If the top-level directory has not yet
 * been created, create it now. Create a stats file in this directory for
 * a SPU. Safe to call more than once; existing entries are reused.
 */
void spu_setup_debugfs(void)
{
	/* nothing to do when the kernel was built without debugfs */
	if (!debugfs_initialized())
		return;

	if (!iproc_priv.debugfs_dir)
		iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
							    NULL);

	if (!iproc_priv.debugfs_stats)
		/* Create file with permissions S_IRUSR */
		debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
				    &iproc_priv, &spu_debugfs_stats);
}
/*
 * Remove the debugfs tree created by spu_setup_debugfs() and forget the
 * dentry. Safe to call even if setup never ran:
 * debugfs_remove_recursive(NULL) is documented as a no-op.
 */
void spu_free_debugfs(void)
{
	debugfs_remove_recursive(iproc_priv.debugfs_dir);
	iproc_priv.debugfs_dir = NULL;
}
/**
 * format_value_ccm() - Write @val right-aligned into a fixed-width byte
 *                      field, as required by the SPU CCM spec (e.g. the
 *                      same value may go into a 4-byte or a 12-byte field).
 *
 * @val: value to write (up to max of unsigned int)
 * @buf: (pointer to) buffer to write the value
 * @len: number of bytes to use (0 to 255)
 *
 * Bytes beyond the significant ones are zero padding; the least
 * significant byte of @val lands in buf[len - 1].
 */
void format_value_ccm(unsigned int val, u8 *buf, u8 len)
{
	/* val carries at most 32 bits, so never emit more than 4 data bytes */
	int nbytes = (len < 4) ? len : 4;
	int i;

	/* Zero the whole field first; high-order padding stays zero */
	memset(buf, 0, len);

	/* Fill from the right: byte i holds bits [8i, 8i+8) of val */
	for (i = 0; i < nbytes; i++)
		buf[len - 1 - i] = (u8)(val >> (8 * i));
}
| {
"pile_set_name": "Github"
} |
using System;
using Appium.Net.Integration.Tests.helpers;
using NUnit.Framework;
using OpenQA.Selenium;
using OpenQA.Selenium.Appium;
using OpenQA.Selenium.Appium.Android;
using OpenQA.Selenium.Appium.Enums;
namespace Appium.Net.Integration.Tests.Android.Device
{
internal class NetworkTests
{
private AppiumDriver<IWebElement> _driver;
private AppiumOptions _androidOptions;
[OneTimeSetUp]
public void SetUp()
{
_androidOptions = Caps.GetAndroidUIAutomatorCaps(Apps.Get(Apps.androidApiDemos));
_driver = new AndroidDriver<IWebElement>(
Env.ServerIsLocal() ? AppiumServers.LocalServiceUri : AppiumServers.RemoteServerUri,
_androidOptions);
_driver.Manage().Timeouts().ImplicitWait = TimeSpan.FromSeconds(10);
}
[OneTimeTearDown]
public void TearDown()
{
_driver.Dispose();
}
[Test]
public void CanToggleDataTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
androidDriver.ToggleData();
androidDriver.ToggleData();
}
[Test]
public void CanToggleAirplaneModeTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
androidDriver.ToggleAirplaneMode();
var currentConnectionType = androidDriver.ConnectionType;
Assert.That(currentConnectionType, Is.EqualTo(ConnectionType.AirplaneMode));
androidDriver.ToggleAirplaneMode();
}
[Test]
public void CanToggleWifiTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
var beforeToggleConnectionType = androidDriver.ConnectionType;
androidDriver.ToggleWifi();
var currentConnectionType = androidDriver.ConnectionType;
Assert.That(currentConnectionType, Is.Not.EqualTo(beforeToggleConnectionType));
androidDriver.ToggleWifi();
}
[Test]
public void CanMakeGsmCallTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
Assert.Multiple(() =>
{
Assert.DoesNotThrow(() => androidDriver.MakeGsmCall("5551234567", GsmCallActions.Call));
Assert.DoesNotThrow(() => androidDriver.MakeGsmCall("5551234567", GsmCallActions.Accept));
Assert.DoesNotThrow(() => androidDriver.MakeGsmCall("5551234567", GsmCallActions.Cancel));
Assert.DoesNotThrow(() => androidDriver.MakeGsmCall("5551234567", GsmCallActions.Hold));
});
}
[Test]
public void CanSetGsmSignalStrengthTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
Assert.Multiple(() =>
{
Assert.DoesNotThrow(() => androidDriver.SetGsmSignalStrength(GsmSignalStrength.NoneOrUnknown));
Assert.DoesNotThrow(() => androidDriver.SetGsmSignalStrength(GsmSignalStrength.Poor));
Assert.DoesNotThrow(() => androidDriver.SetGsmSignalStrength(GsmSignalStrength.Good));
Assert.DoesNotThrow(() => androidDriver.SetGsmSignalStrength(GsmSignalStrength.Moderate));
Assert.DoesNotThrow(() => androidDriver.SetGsmSignalStrength(GsmSignalStrength.Great));
});
}
[Test]
public void CanSetGsmVoiceStateTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
Assert.Multiple(() =>
{
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.Unregistered));
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.Home));
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.Roaming));
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.Denied));
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.Off));
Assert.DoesNotThrow(() =>
androidDriver.SetGsmVoice(GsmVoiceState.On));
}
);
}
[Test]
public void CanSendSmsTest()
{
var androidDriver = (AndroidDriver<IWebElement>) _driver;
Assert.DoesNotThrow(() => androidDriver.SendSms("5551234567", "Hey lol"));
}
}
}
| {
"pile_set_name": "Github"
} |
/* Copyright (C) 2014-2020 FastoGT. All right reserved.
This file is part of FastoNoSQL.
FastoNoSQL is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FastoNoSQL is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FastoNoSQL. If not, see <http://www.gnu.org/licenses/>.
*/
#include "gui/workers/statistic_sender.h"
#include <common/net/socket_tcp.h>
#include "proxy/server_config.h"
namespace fastonosql {
namespace {
#if defined(PRO_VERSION) || defined(ENTERPRISE_VERSION)
common::Error sendUserStatisticRoutine(const std::string& login, const std::string& build_strategy) {
CHECK(!login.empty());
typedef common::net::SocketGuard<common::net::ClientSocketTcp> ClientSocket;
#if defined(FASTONOSQL)
ClientSocket client(common::net::HostAndPort(FASTONOSQL_HOST, SERVER_REQUESTS_PORT));
#elif defined(FASTOREDIS)
ClientSocket client(common::net::HostAndPort(FASTOREDIS_HOST, SERVER_REQUESTS_PORT));
#else
#error please specify url and port to send statistic information
#endif
common::ErrnoError err = client.Connect();
if (err) {
return common::make_error_from_errno(err);
}
std::string request;
common::Error request_gen_err = proxy::GenStatisticRequest(login, build_strategy, &request);
if (request_gen_err) {
return request_gen_err;
}
size_t nwrite = 0;
err = client.Write(request.data(), request.size(), &nwrite);
if (err) {
return common::make_error_from_errno(err);
}
common::char_buffer_t stat_reply;
err = client.ReadToBuffer(&stat_reply, 256);
if (err) {
return common::make_error_from_errno(err);
}
return proxy::ParseSendStatisticResponse(stat_reply.as_string());
}
#endif
common::Error sendAnonymousStatisticRoutine() {
typedef common::net::SocketGuard<common::net::ClientSocketTcp> ClientSocket;
#if defined(FASTONOSQL)
ClientSocket client(common::net::HostAndPort(FASTONOSQL_HOST, SERVER_REQUESTS_PORT));
#elif defined(FASTOREDIS)
ClientSocket client(common::net::HostAndPort(FASTOREDIS_HOST, SERVER_REQUESTS_PORT));
#else
#error please specify url and port to send statistic information
#endif
common::ErrnoError err = client.Connect();
if (err) {
return common::make_error_from_errno(err);
}
std::string request;
common::Error request_gen_err = proxy::GenAnonymousStatisticRequest(&request);
if (request_gen_err) {
return request_gen_err;
}
size_t nwrite = 0;
err = client.Write(request.data(), request.size(), &nwrite);
if (err) {
return common::make_error_from_errno(err);
}
common::char_buffer_t stat_reply;
err = client.ReadToBuffer(&stat_reply, 256);
if (err) {
return common::make_error_from_errno(err);
}
return proxy::ParseSendStatisticResponse(stat_reply.as_string());
}
} // namespace
namespace gui {
// Registers common::Error with Qt's meta-type system so that
// statisticSended(common::Error) can be delivered across threads via
// queued signal/slot connections.
AnonymousStatisticSender::AnonymousStatisticSender(QObject* parent) : QObject(parent) {
  qRegisterMetaType<common::Error>("common::Error");
}
// Slot-style entry point: performs a single statistic send by dispatching to
// the virtual sendStatistic(), so subclasses (e.g. StatisticSender) can swap
// in their own report type.
void AnonymousStatisticSender::routine() {
  sendStatistic();
}
// Sends an anonymous usage report (blocking network I/O) and reports the
// outcome via statisticSended() — presumably a Qt signal; declaration not
// visible here, confirm in the header.
void AnonymousStatisticSender::sendStatistic() {
  const common::Error err = sendAnonymousStatisticRoutine();
  statisticSended(err);
}
#if defined(PRO_VERSION) || defined(ENTERPRISE_VERSION)
// Licensed builds report per-user statistics keyed by login and build strategy.
StatisticSender::StatisticSender(const std::string& login, const std::string& build_strategy, QObject* parent)
    : base_class(parent), login_(login), build_strategy_(build_strategy) {}
// Overrides the anonymous variant: sends a user-identified report instead,
// then reports the outcome via statisticSended().
void StatisticSender::sendStatistic() {
  const common::Error err = sendUserStatisticRoutine(login_, build_strategy_);
  statisticSended(err);
}
#endif
} // namespace gui
} // namespace fastonosql
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.