code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
package cn.edu.sdut.softlab.oopbasic.objref; /** * Created by subaochen on 17-2-26. */ public class Client1 { public static void main(String[] args) { Person zhangsan1 = new Person("zhangsan","123"); Person zhangsan2 = new Person("zhangsan","123"); Person zhangsan3 = zhangsan1; System.out.println("zhangsan1 == zhangsan2 ? " + (zhangsan1 == zhangsan2)); System.out.println("zhangsan3 == zhangsan1 ? " + (zhangsan3 == zhangsan1)); } }
JamesLiAndroid/java-tutorial
guide/code/oopbasic/src/cn/edu/sdut/softlab/oopbasic/objref/Client1.java
Java
apache-2.0
450
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.avro; import java.math.RoundingMode; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericEnumSymbol; import org.apache.avro.generic.GenericFixed; import org.apache.avro.generic.IndexedRecord; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.UUID; public class Conversions { public static class UUIDConversion extends Conversion<UUID> { @Override public Class<UUID> getConvertedType() { return UUID.class; } @Override public Schema getRecommendedSchema() { return LogicalTypes.uuid().addToSchema(Schema.create(Schema.Type.STRING)); } @Override public String getLogicalTypeName() { return "uuid"; } @Override public UUID fromCharSequence(CharSequence value, Schema schema, LogicalType type) { return UUID.fromString(value.toString()); } @Override public CharSequence toCharSequence(UUID value, Schema schema, LogicalType type) { return value.toString(); } } public static class DecimalConversion extends Conversion<BigDecimal> { @Override public Class<BigDecimal> getConvertedType() { return BigDecimal.class; } @Override public Schema 
getRecommendedSchema() { throw new UnsupportedOperationException("No recommended schema for decimal (scale is required)"); } @Override public String getLogicalTypeName() { return "decimal"; } @Override public BigDecimal fromBytes(ByteBuffer value, Schema schema, LogicalType type) { int scale = ((LogicalTypes.Decimal) type).getScale(); // always copy the bytes out because BigInteger has no offset/length ctor byte[] bytes = new byte[value.remaining()]; value.duplicate().get(bytes); return new BigDecimal(new BigInteger(bytes), scale); } @Override public ByteBuffer toBytes(BigDecimal value, Schema schema, LogicalType type) { value = validate((LogicalTypes.Decimal) type, value); return ByteBuffer.wrap(value.unscaledValue().toByteArray()); } @Override public BigDecimal fromFixed(GenericFixed value, Schema schema, LogicalType type) { int scale = ((LogicalTypes.Decimal) type).getScale(); return new BigDecimal(new BigInteger(value.bytes()), scale); } @Override public GenericFixed toFixed(BigDecimal value, Schema schema, LogicalType type) { value = validate((LogicalTypes.Decimal) type, value); byte fillByte = (byte) (value.signum() < 0 ? 
0xFF : 0x00); byte[] unscaled = value.unscaledValue().toByteArray(); byte[] bytes = new byte[schema.getFixedSize()]; int offset = bytes.length - unscaled.length; // Fill the front of the array and copy remaining with unscaled values Arrays.fill(bytes, 0, offset, fillByte); System.arraycopy(unscaled, 0, bytes, offset, bytes.length - offset); return new GenericData.Fixed(schema, bytes); } private static BigDecimal validate(final LogicalTypes.Decimal decimal, BigDecimal value) { final int scale = decimal.getScale(); final int valueScale = value.scale(); boolean scaleAdjusted = false; if (valueScale != scale) { try { value = value.setScale(scale, RoundingMode.UNNECESSARY); scaleAdjusted = true; } catch (ArithmeticException aex) { throw new AvroTypeException( "Cannot encode decimal with scale " + valueScale + " as scale " + scale + " without rounding"); } } int precision = decimal.getPrecision(); int valuePrecision = value.precision(); if (valuePrecision > precision) { if (scaleAdjusted) { throw new AvroTypeException("Cannot encode decimal with precision " + valuePrecision + " as max precision " + precision + ". This is after safely adjusting scale from " + valueScale + " to required " + scale); } else { throw new AvroTypeException( "Cannot encode decimal with precision " + valuePrecision + " as max precision " + precision); } } return value; } } /** * Convert a underlying representation of a logical type (such as a ByteBuffer) * to a higher level object (such as a BigDecimal). * * @param datum The object to be converted. * @param schema The schema of datum. Cannot be null if datum is not null. * @param type The {@link org.apache.avro.LogicalType} of datum. Cannot be * null if datum is not null. * @param conversion The tool used to finish the conversion. Cannot be null if * datum is not null. * @return The result object, which is a high level object of the logical type. * If a null datum is passed in, a null value will be returned. 
* @throws IllegalArgumentException if a null schema, type or conversion is * passed in while datum is not null. */ public static Object convertToLogicalType(Object datum, Schema schema, LogicalType type, Conversion<?> conversion) { if (datum == null) { return null; } if (schema == null || type == null || conversion == null) { throw new IllegalArgumentException("Parameters cannot be null! Parameter values:" + Arrays.deepToString(new Object[] { datum, schema, type, conversion })); } try { switch (schema.getType()) { case RECORD: return conversion.fromRecord((IndexedRecord) datum, schema, type); case ENUM: return conversion.fromEnumSymbol((GenericEnumSymbol) datum, schema, type); case ARRAY: return conversion.fromArray((Collection) datum, schema, type); case MAP: return conversion.fromMap((Map<?, ?>) datum, schema, type); case FIXED: return conversion.fromFixed((GenericFixed) datum, schema, type); case STRING: return conversion.fromCharSequence((CharSequence) datum, schema, type); case BYTES: return conversion.fromBytes((ByteBuffer) datum, schema, type); case INT: return conversion.fromInt((Integer) datum, schema, type); case LONG: return conversion.fromLong((Long) datum, schema, type); case FLOAT: return conversion.fromFloat((Float) datum, schema, type); case DOUBLE: return conversion.fromDouble((Double) datum, schema, type); case BOOLEAN: return conversion.fromBoolean((Boolean) datum, schema, type); } return datum; } catch (ClassCastException e) { throw new AvroRuntimeException( "Cannot convert " + datum + ":" + datum.getClass().getSimpleName() + ": expected generic type", e); } } /** * Convert a high level representation of a logical type (such as a BigDecimal) * to the its underlying representation object (such as a ByteBuffer) * * @param datum The object to be converted. * @param schema The schema of datum. Cannot be null if datum is not null. * @param type The {@link org.apache.avro.LogicalType} of datum. Cannot be * null if datum is not null. 
* @param conversion The tool used to finish the conversion. Cannot be null if * datum is not null. * @return The result object, which is an underlying representation object of * the logical type. If the input param datum is null, a null value will * be returned. * @throws IllegalArgumentException if a null schema, type or conversion is * passed in while datum is not null. */ public static <T> Object convertToRawType(Object datum, Schema schema, LogicalType type, Conversion<T> conversion) { if (datum == null) { return null; } if (schema == null || type == null || conversion == null) { throw new IllegalArgumentException("Parameters cannot be null! Parameter values:" + Arrays.deepToString(new Object[] { datum, schema, type, conversion })); } try { Class<T> fromClass = conversion.getConvertedType(); switch (schema.getType()) { case RECORD: return conversion.toRecord(fromClass.cast(datum), schema, type); case ENUM: return conversion.toEnumSymbol(fromClass.cast(datum), schema, type); case ARRAY: return conversion.toArray(fromClass.cast(datum), schema, type); case MAP: return conversion.toMap(fromClass.cast(datum), schema, type); case FIXED: return conversion.toFixed(fromClass.cast(datum), schema, type); case STRING: return conversion.toCharSequence(fromClass.cast(datum), schema, type); case BYTES: return conversion.toBytes(fromClass.cast(datum), schema, type); case INT: return conversion.toInt(fromClass.cast(datum), schema, type); case LONG: return conversion.toLong(fromClass.cast(datum), schema, type); case FLOAT: return conversion.toFloat(fromClass.cast(datum), schema, type); case DOUBLE: return conversion.toDouble(fromClass.cast(datum), schema, type); case BOOLEAN: return conversion.toBoolean(fromClass.cast(datum), schema, type); } return datum; } catch (ClassCastException e) { throw new AvroRuntimeException( "Cannot convert " + datum + ":" + datum.getClass().getSimpleName() + ": expected logical type", e); } } }
apache/avro
lang/java/avro/src/main/java/org/apache/avro/Conversions.java
Java
apache-2.0
10,400
REPO = git@github.com:twitter/hogan.js.git BUILD := build VERSION = ${shell node -e 'var s = JSON.parse(require("fs").readFileSync("package.json").toString()).version; console.log(s.substring(0, s.indexOf("-")));'} # # Run command line tests # test: @ node test/index.js # # Run Mustache spec tests # spec: @ node test/spec.js # # Run benchmark # benchmark: @ node benchmark/console/index.js clean: @ rm -rf dist/* # # Make a new version of Hogan from the current dev version. # release: clean @ echo "Creating a new version of Hogan." @ mkdir -p dist/nodejs @ cp -R lib dist/nodejs/lib @ node tools/release.js @ mkdir -p web/builds/$(VERSION) @ cp dist/*.* web/builds/$(VERSION)/. # # Make the gh-pages jeesite # # This target builds the hogan.js github jeesite using hogan.js. # # cd into build/gh-pages to check in the new site. # GH_PAGES = $(BUILD)/gh-pages web: | pages @cp -R web/* $(GH_PAGES) @@ node tools/web_templates.js @echo @echo "Website built in $(GH_PAGES)." # # Checkout the gh-pages branch. # pages: | $(BUILD) @if [ ! -d "$(GH_PAGES)" ]; then \ git clone -b gh-pages $(REPO) $(GH_PAGES); \ rm -rf $(GH_PAGES)/*; \ fi; @mkdir -p $(GH_PAGES)/images $(BUILD): mkdir -p $(BUILD) .PHONY: test spec benchmark web release
lianying/some
src/main/webapp/static/bootstrap/2.0.4/docs/build/node_modules/hogan.js/Makefile
Makefile
apache-2.0
1,262
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/micro/micro_allocator.h" #include <cstdint> #include "tensorflow/lite/micro/memory_helpers.h" #include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/simple_memory_allocator.h" #include "tensorflow/lite/micro/test_helpers.h" #include "tensorflow/lite/micro/testing/micro_test.h" #include "tensorflow/lite/micro/testing/test_conv_model.h" namespace tflite { namespace testing { namespace { constexpr int kExpectedAlignment = 4; constexpr int t0 = 0; constexpr int t1 = 1; constexpr int t2 = 2; constexpr int t3 = 3; constexpr int t4 = 4; constexpr int t5 = 5; void VerifyMockTfLiteTensor(TfLiteTensor* tensor, bool is_variable = false) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, tensor->type); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(is_variable, tensor->is_variable); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), tensor->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(0), (reinterpret_cast<std::uintptr_t>(tensor->data.raw) % kExpectedAlignment)); } void VerifyMockWeightTfLiteTensor(TfLiteTensor* tensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, 
tensor->dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), tensor->bytes); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); } void VerifyMockTfLiteEvalTensor(TfLiteEvalTensor* tensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, tensor->type); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]); size_t buffer_size; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::TfLiteEvalTensorByteLength(tensor, &buffer_size)); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), buffer_size); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(0), (reinterpret_cast<std::uintptr_t>(tensor->data.raw) % kExpectedAlignment)); } void VerifyMockWeightTfLiteEvalTensor(TfLiteEvalTensor* tensor) { TF_LITE_MICRO_EXPECT_EQ(kTfLiteUInt8, tensor->type); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->size); TF_LITE_MICRO_EXPECT_EQ(1, tensor->dims->data[0]); size_t buffer_size; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::TfLiteEvalTensorByteLength(tensor, &buffer_size)); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), buffer_size); TF_LITE_MICRO_EXPECT_NE(nullptr, tensor->data.raw); } void VerifyMockTensor(const Model* model, MicroAllocator* allocator, TfLiteEvalTensor* eval_tensors, int tensor_idx, bool is_variable = false) { VerifyMockTfLiteTensor(allocator->AllocatePersistentTfLiteTensor( model, eval_tensors, tensor_idx), is_variable); VerifyMockTfLiteEvalTensor(&eval_tensors[tensor_idx]); } void VerifyMockWeightTensor(const Model* model, MicroAllocator* allocator, TfLiteEvalTensor* eval_tensors, int tensor_idx) { VerifyMockWeightTfLiteTensor(allocator->AllocatePersistentTfLiteTensor( model, eval_tensors, tensor_idx)); VerifyMockWeightTfLiteEvalTensor(&eval_tensors[tensor_idx]); } void EnsureUniqueVariableTensorBuffer(const Model* model, TfLiteEvalTensor* eval_tensors, const int variable_tensor_idx) { for (size_t i = 0; i < GetModelTensorCount(model); i++) { if (i != 
static_cast<size_t>(variable_tensor_idx)) { TF_LITE_MICRO_EXPECT_NE(eval_tensors[variable_tensor_idx].data.raw, eval_tensors[i].data.raw); } } } void VerifyRegistrationAndNodeAllocation( NodeAndRegistration* node_and_registration, size_t count) { for (size_t i = 0; i < count; i++) { TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].registration); TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].node.inputs); TF_LITE_MICRO_EXPECT_NE(nullptr, node_and_registration[i].node.outputs); } } } // namespace } // namespace testing } // namespace tflite TF_LITE_MICRO_TESTS_BEGIN TF_LITE_MICRO_TEST(TestInitializeRuntimeTensor) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), arena, arena_size); const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100); const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers = tflite::testing::CreateFlatbufferBuffers(); TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( simple_allocator, /*allocate_temp=*/false, *tensor, buffers, tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_tensor.bytes); TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type); simple_allocator->~SimpleMemoryAllocator(); } // TODO(b/162311891): Drop this test when InitializeTfLiteTensorFromFlatbuffer() // always allocates from temp (interpreter returns buffers from // TfLiteEvalTensor): TF_LITE_MICRO_TEST(TestInitializeTempRuntimeTensor) { constexpr size_t arena_size = 1024; uint8_t 
arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), arena, arena_size); const tflite::Tensor* tensor = tflite::testing::Create1dFlatbufferTensor(100); const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers = tflite::testing::CreateFlatbufferBuffers(); TfLiteTensor allocated_temp_tensor; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( simple_allocator, /*allocate_temp=*/true, *tensor, buffers, tflite::GetMicroErrorReporter(), &allocated_temp_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_temp_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_temp_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_temp_tensor.dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_temp_tensor.bytes); TF_LITE_MICRO_EXPECT(nullptr == allocated_temp_tensor.data.i32); TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_temp_tensor.allocation_type); simple_allocator->~SimpleMemoryAllocator(); } TF_LITE_MICRO_TEST(TestInitializeQuantizedTensor) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), arena, arena_size); const tflite::Tensor* tensor = tflite::testing::CreateQuantizedFlatbufferTensor(100); const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers = tflite::testing::CreateFlatbufferBuffers(); TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( simple_allocator, /*allocate_temp=*/false, *tensor, buffers, tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), 
allocated_tensor.bytes); TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); TF_LITE_MICRO_EXPECT_EQ(kTfLiteArenaRw, allocated_tensor.allocation_type); simple_allocator->~SimpleMemoryAllocator(); } TF_LITE_MICRO_TEST(TestMissingQuantization) { constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::SimpleMemoryAllocator* simple_allocator = tflite::SimpleMemoryAllocator::Create(tflite::GetMicroErrorReporter(), arena, arena_size); const tflite::Tensor* tensor = tflite::testing::CreateMissingQuantizationFlatbufferTensor(100); const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers = tflite::testing::CreateFlatbufferBuffers(); TfLiteTensor allocated_tensor; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, tflite::internal::InitializeTfLiteTensorFromFlatbuffer( simple_allocator, /*allocate_temp=*/false, *tensor, buffers, tflite::GetMicroErrorReporter(), &allocated_tensor)); TF_LITE_MICRO_EXPECT_EQ(kTfLiteInt32, allocated_tensor.type); TF_LITE_MICRO_EXPECT_EQ(1, allocated_tensor.dims->size); TF_LITE_MICRO_EXPECT_EQ(100, allocated_tensor.dims->data[0]); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(400), allocated_tensor.bytes); TF_LITE_MICRO_EXPECT(nullptr == allocated_tensor.data.i32); } TF_LITE_MICRO_TEST(TestFailsWhenModelStartsTwice) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); TfLiteEvalTensor* eval_tensors = nullptr; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteError, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); 
} TF_LITE_MICRO_TEST(TestFailsWithWrongSequence) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); // We can't finish allocation before it ever got started. TF_LITE_MICRO_EXPECT_EQ( kTfLiteError, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); // Start twice is not allowed. TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteError, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); } TF_LITE_MICRO_TEST(TestMockModelAllocation) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); size_t model_tensor_size = tflite::testing::GetModelTensorCount(model); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), 
model_tensor_size); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 0); tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 1); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 2); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 3); TF_LITE_MICRO_EXPECT_NE(eval_tensors[1].data.raw, eval_tensors[0].data.raw); TF_LITE_MICRO_EXPECT_NE(eval_tensors[2].data.raw, eval_tensors[0].data.raw); TF_LITE_MICRO_EXPECT_NE(eval_tensors[1].data.raw, eval_tensors[2].data.raw); TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[0].data.raw); TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[1].data.raw); TF_LITE_MICRO_EXPECT_NE(eval_tensors[3].data.raw, eval_tensors[2].data.raw); TF_LITE_MICRO_EXPECT_LE(allocator->used_bytes(), 856 + 100); // SimpleMockModel has 2 operators: tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration, /*count=*/2); } TF_LITE_MICRO_TEST(TestMultiTenantAllocation) { // The `OpResolver` is shared among different models in this test for // simplicity but in practice you could have different `OpResolver`. tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); // Create a shared allocator. constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; // Allocate for model 1. We use ComplexMockModel here to cover the code path // allocatig variables. 
const tflite::Model* model1 = tflite::testing::GetComplexMockModel(); tflite::NodeAndRegistration* node_and_registration1; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model1, op_resolver, &node_and_registration1, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model1, eval_tensors, &scratch_buffer_handles)); const size_t single_model_used_bytes = allocator->used_bytes(); // Allocate for model 2. const tflite::Model* model2 = tflite::testing::GetComplexMockModel(); tflite::NodeAndRegistration* node_and_registration2; TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model2, op_resolver, &node_and_registration2, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model2, eval_tensors, &scratch_buffer_handles)); // Allocation for two instances of the same model takes less memory as `head` // of the arena is reused. TF_LITE_MICRO_EXPECT_LE(allocator->used_bytes(), 2 * single_model_used_bytes); } TF_LITE_MICRO_TEST(TestAllocationForModelsWithBranches) { const tflite::Model* model = tflite::testing::GetSimpleModelWithBranch(); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(nullptr, allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; // Check test_helpers.cc BuildSimpleModelWithBranch for model structure. 
// t0 is the first tensor, so place it in offset 0. TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); // bytes = 2 * 2 * 3 * sizeof(float32) = 48, same for other tensors. size_t buffer_size; TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::TfLiteEvalTensorByteLength( &eval_tensors[0], &buffer_size)); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), buffer_size); // t1 can't reuse any memory, as n0 requires both t0 and t1. TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[1].data.uint8 - start); // t2 can't reuse any memory, as n1 requires both t0 and t2. Also n2 requires // both t1 and t2. TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start); // t3 reuses the same memory from t0 as t0 is not an input to any node. TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start); // SimpleModelWithBranch has 3 operators: tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration, /*count=*/3); } TF_LITE_MICRO_TEST(TestAllocationForComplexModelAllocation) { const tflite::Model* model = tflite::testing::GetComplexMockModel(); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; constexpr size_t arena_size = 2048; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(nullptr != allocator); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); size_t model_tensor_size = tflite::testing::GetModelTensorCount(model); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(10), model_tensor_size); // NOTE: Tensor indexes match the values in GetComplexMockModel(). 
tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 0); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 1, /*is_variable=*/true); tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 2); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 3); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 4, /*is_variable=*/true); tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 5); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 6); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 7, /*is_variable=*/true); tflite::testing::VerifyMockWeightTensor(model, allocator, eval_tensors, 8); tflite::testing::VerifyMockTensor(model, allocator, eval_tensors, 9); // // Ensure that variable tensors have unique address tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 1); tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 4); tflite::testing::EnsureUniqueVariableTensorBuffer(model, eval_tensors, 7); // ComplexMockModel has 3 operators: tflite::testing::VerifyRegistrationAndNodeAllocation(node_and_registration, /*count=*/3); } TF_LITE_MICRO_TEST(OfflinePlannerBranchesAllOnline) { int version = 1; int subgraph = 0; constexpr int number_tensors = 4; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {version, subgraph, number_tensors, // header // memory offsets: -1, -1, -1, -1}; // The structure is identical to the one in // TestAllocationForModelsWithBranches int number_connections = 3; tflite::testing::NodeConnection node_list[3] = {{ {0}, // input {1} // output }, { {0}, // input {2} // output }, { {1, 2}, // input1, input2 {3} // output }}; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( number_tensors, metadata_buffer, 
node_list, number_connections); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); // Since all of the tensors are online planned and the model structure is // identical to that in TestAllocationForModelsWithBranches, // the offsets be should identical to that test. uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); size_t buffer_size; TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, tflite::TfLiteEvalTensorByteLength( &eval_tensors[0], &buffer_size)); TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), buffer_size); TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start); } TF_LITE_MICRO_TEST(OfflinePlannerBasic) { constexpr int number_tensors = 4; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {1, 0, number_tensors, /*t0=*/0, /*t1=*/48, /*t2=*/0, /*t3=*/48}; constexpr int number_connections = 3; tflite::testing::NodeConnection node_list[number_connections] = { {/*input=*/{tflite::testing::t0}, /*output=*/{tflite::testing::t1}}, {/*input=*/{tflite::testing::t1}, /*output=*/{tflite::testing::t2}}, {/*input=*/{tflite::testing::t2}, /*output=*/{tflite::testing::t3}}}; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( 
number_tensors, metadata_buffer, node_list, number_connections); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[3].data.uint8 - start); } TF_LITE_MICRO_TEST(OfflinePlannerOverlappingAllocation) { constexpr int number_tensors = 4; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {/*version=*/1, /*subgraph=*/0, number_tensors, /*t0=*/0, /*t1=*/0, /*t2=*/48, /*t3=*/-1}; int number_connections = 2; tflite::testing::NodeConnection node_list[2] = { {/*input, scratch=*/{tflite::testing::t0, tflite::testing::t1}, /*output=*/{tflite::testing::t2}}, {/*input=*/{tflite::testing::t2}, /*output=*/{tflite::testing::t3}}, }; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( number_tensors, metadata_buffer, node_list, number_connections); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); 
TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start); // TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(48), context.tensors[0].bytes); } TF_LITE_MICRO_TEST(OfflinePlannerOfflineOnline) { constexpr int number_tensors = 5; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {/*version=*/1, /*subgraph=*/0, number_tensors, /*t0=*/0, /*t1=*/48, /*t2=*/-1, /*t3=*/0, /*t4=*/-1}; constexpr int number_connections = 2; tflite::testing::NodeConnection node_list[number_connections] = { { /*input, scratch=*/{tflite::testing::t0, tflite::testing::t1}, /*output=*/{tflite::testing::t2}, }, { /*input=*/{tflite::testing::t2}, /*output1, output2=*/{tflite::testing::t3, tflite::testing::t4}, }, }; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( number_tensors, metadata_buffer, node_list, number_connections); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, 
&scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(96, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[4].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start); } TF_LITE_MICRO_TEST(TestAllocatePersistentTfLiteTensor) { const tflite::Model* model = tflite::GetModel(kTestConvModelData); constexpr size_t arena_size = 1024 * 12; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocatePersistentTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/1); TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr); TF_LITE_MICRO_EXPECT_NE(tensor1->quantization.params, nullptr); TF_LITE_MICRO_EXPECT_FALSE(tensor1->is_variable); TfLiteTensor* tensor2 = allocator->AllocatePersistentTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/2); TF_LITE_MICRO_EXPECT_NE(tensor2, nullptr); TF_LITE_MICRO_EXPECT_NE(tensor2->quantization.params, nullptr); TF_LITE_MICRO_EXPECT_FALSE(tensor2->is_variable); // The address of tensor1 should be higher than the address of tensor2 since // persistent allocations take place in the tail which grows downward. 
TF_LITE_MICRO_EXPECT_GT(tensor1, tensor2); } TF_LITE_MICRO_TEST(TestAllocateSingleTempTfLiteTensor) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/1); TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr); } TF_LITE_MICRO_TEST(TestAllocateChainOfTfLiteTensor) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_NE(allocator, nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/1); TF_LITE_MICRO_EXPECT_NE(tensor1, nullptr); TfLiteTensor* tensor2 = allocator->AllocateTempTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/2); TF_LITE_MICRO_EXPECT_NE(tensor2, nullptr); // The address of tensor2 should be higher than the address of tensor1 // (chained allocations): TF_LITE_MICRO_EXPECT_GT(tensor2, tensor1); } TF_LITE_MICRO_TEST(TestAllocateTfLiteTensorWithReset) { const tflite::Model* model = tflite::testing::GetSimpleMockModel(); constexpr size_t arena_size = 1024; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT(allocator != nullptr); TfLiteTensor* tensor1 = allocator->AllocateTempTfLiteTensor( model, /*eval_tensors=*/nullptr, /*tensor_index=*/1); TF_LITE_MICRO_EXPECT(tensor1 != nullptr); allocator->ResetTempAllocations(); TfLiteTensor* tensor2 = allocator->AllocateTempTfLiteTensor( model, 
/*eval_tensors=*/nullptr, /*tensor_index=*/2); TF_LITE_MICRO_EXPECT(tensor2 != nullptr); // The address of tensor2 should be equal than the address of tensor1 since // allocations were not chained: TF_LITE_MICRO_EXPECT(tensor2 == tensor1); } TF_LITE_MICRO_TEST(TestOperatorInputsNotInSubgraphInputs) { constexpr int number_tensors = 5; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {/*version=*/1, /*subgraph=*/0, number_tensors, /*t0=*/0, /*t1=*/0, /*t2=*/0, /*t3=*/48, /*t4=*/-1}; constexpr int number_connections = 2; tflite::testing::NodeConnection node_list[number_connections] = { {// t0: input (actual input part of subgraph inputs as // well as operator inputs) // t1: scratch1 (only in operator inputs) // t2: scratch2 (only in operator inputs) {tflite::testing::t0, tflite::testing::t1, tflite::testing::t2}, /*t3: output=*/{tflite::testing::t3}}, {/*t3: input=*/{tflite::testing::t3}, /*t4: output=*/{tflite::testing::t4}}, }; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( number_tensors, metadata_buffer, node_list, number_connections, /*Only first tensor (t0) is in subgraph input list=*/1); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, 
eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[3].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[4].data.uint8 - start); } TF_LITE_MICRO_TEST(TestTypicalFirstOpAndSecondOpWithScratchTensors) { constexpr int number_tensors = 6; tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver(); tflite::NodeAndRegistration* node_and_registration; const int32_t metadata_buffer[tflite::testing::kOfflinePlannerHeaderSize + number_tensors] = {/*version=*/1, /*subgraph=*/0, number_tensors, /*t0=*/0, /*t1=*/0, /*t2=*/0, /*t3=*/0, /*t4=*/48, /*t5=*/-1}; constexpr int number_connections = 3; tflite::testing::NodeConnection node_list[number_connections] = { {/*t0: input (subgraph and operator input)=*/{tflite::testing::t0}, /*t1: output=*/{tflite::testing::t1}}, {// t1: input // t2: scratch1 (only in operator inputs) // t3: scratch2 (only in operator inputs) {tflite::testing::t1, tflite::testing::t2, tflite::testing::t3}, /*t4: output=*/{tflite::testing::t4}}, {/*t4: input=*/{tflite::testing::t4}, /*t5: output=*/{tflite::testing::t5}}, }; const tflite::Model* model = tflite::testing::GetModelWithOfflinePlanning( number_tensors, metadata_buffer, node_list, number_connections, /*Only first tensor (t0) is in subgraph input list=*/1); TfLiteEvalTensor* eval_tensors = nullptr; tflite::ScratchBufferHandle* scratch_buffer_handles = nullptr; constexpr size_t arena_size = 4096; uint8_t arena[arena_size]; tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create( arena, arena_size, tflite::GetMicroErrorReporter()); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->StartModelAllocation(model, op_resolver, &node_and_registration, &eval_tensors)); TF_LITE_MICRO_EXPECT_EQ( kTfLiteOk, allocator->FinishModelAllocation(model, eval_tensors, &scratch_buffer_handles)); uint8_t* start = eval_tensors[0].data.uint8; TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[0].data.uint8 - start); 
TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[1].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[2].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[3].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(48, eval_tensors[4].data.uint8 - start); TF_LITE_MICRO_EXPECT_EQ(0, eval_tensors[5].data.uint8 - start); } TF_LITE_MICRO_TESTS_END
annarev/tensorflow
tensorflow/lite/micro/micro_allocator_test.cc
C++
apache-2.0
39,778
var controllers = require('../model/backend-controllers-manager'); var mongoose = require('../model/backend-database').mongoose; var generatePassword = require("password-maker"); //var User = mongoose.model('User'); var promise = require('../model/utils').promise; var validate = require('../model/validator').validate; var handleMissingKeys = require('../model/validator').handleMissingKeys; var actions = require('../model/backend-mongoose-wrapper').create('User'); var Log = require('../model/backend-mongoose-wrapper').create('Log'); var Order = require('../model/backend-mongoose-wrapper').create('Order'); var Balance = require('../model/backend-mongoose-wrapper').create('Balance'); var BalanceItem = require('../model/backend-mongoose-wrapper').create('BalanceItem'); //var email = require('./handlers.email').actions; var _ = require('lodash'); var moment = require('moment'); var Notif = controllers.notification; //User.methods.name = ()=>{return }; const MODULE = 'USER'; var logger = require('../model/logger')(MODULE); function everyAdmin(cb) { controllers.diagsUser.getAll({ userType: 'admin' }, (err, _admins) => { if (err) return cb(err); _admins.forEach((_admin) => { cb(null, _admin); }); }); } function departmentCoveredBy(data, cb) { actions.log('departmentCoveredBy=' + JSON.stringify(data)); if (!data.department) return cb("department required"); var User = controllers.diagsUser; User.getAll({ __select: "departments", __rules: { disabled: { $ne: true } //exclude disabled diags } }, (err, _users) => { if (err) return cb(err); //actions.log('departmentCoveredBy=_users:len=' + JSON.stringify(_users.length)); for (var x in _users) { //actions.log('departmentCoveredBy=looping='+x+'='+JSON.stringify(_users[x].departments||[])); if (_users[x].departments) { if (_.includes(_users[x].departments, data.department)) { // actions.log('departmentCoveredBy=check=' + JSON.stringify(_users[x].departments) + ' contra ' + data.department); return cb(null, true); } } } return 
cb(null, false); }); } function balance(data, cb) { data.period = data.period || 'year'; actions.log('balance=' + JSON.stringify(data)); data._calculate = data._calculate && data._calculate.toString() == 'true' || false; if (!data._id) return cb("_id required"); if (data._calculate) { _calculate(null, null, true); } else { _retrieve(); } // function _calculate(_err, _user, firstTime) { if (!_user && firstTime == true) return actions.model.findById(data._id, _calculate); if (!_user) return cb("balance:calculate=User not found:" + data._id); // if (_user.userType == 'admin') { return cb('Admin balance unsupported'); } // actions.log('balance:_calculate'); var balanceKeys = ['_user', 'amount']; var balanceMatch = ['_user']; var balanceItemKeys = ['_user', '_order', 'pending', 'amount', 'description']; var balanceItemMatch = ['_user', '_order']; // Balance.createUpdate({ _user: _user._id, amount: 0 }, (err, bal) => { if (err) return cb(err); actions.log('balance:_calculate:creteUpdate:rta', JSON.stringify(bal)); BalanceItem.removeAll({ _user: _user._id }, (err, rr) => { if (err) return cb(err); bal.items = []; _calculateBalance(null, bal); }); }, balanceMatch, balanceKeys); // function _calculateBalance(err, _balance) { actions.log('balance:_calculateBalance', JSON.stringify(_balance)); if (err) return cb(err, _balance); if (!_balance) return cb('balance:create:error'); // //remove prev balance items // Order.getAll(_orderRules(), (err, _orders) => { actions.log('balance:_calculateBalance:orders=', _orders.length); if (err) return cb(err, _orders); if (!_orders || _orders.length == 0) { _balance.amount = 0; _balance.save((_err, r) => { _retrieve(); }); } else { var balanceAmount = 0; var _stackSaving = []; var exit = false; _orders.forEach(_order => { //validate period var now = moment(); if (!now.isSame(moment(_order.diagStart), data.period)) { actions.log('balance:_calculateBalance:excluding order=' + _order._id); return; // exclude } _stackSaving.push(_order._id); 
var d = {}; d.pending = !_.includes(['prepaid', 'completed'], _order.status); d.description = _order.address + ' (' + moment(_order.diagStart).format('DD-MM-YY') + ', ' + moment(_order.diagStart).format('HH:mm') + ' - ' + moment(_order.diagEnd).format('HH:mm') + ')'; d.amount = _order.price; //diag //-_user.diagWebsiteComission (admin decide it) (-) //-_order.fastDiagComm (+) if (_user.userType == 'diag') { var diagWebsiteComission = ((_order.price * _user.comission) / 100) * -1; d.amount = _order.price + diagWebsiteComission; var fastDiagComm = (d.amount * _order.fastDiagComm) / 100; d.amount += fastDiagComm; } //admin //-diag price (-) //-client disccount (-) //-stripe % (-) if (_user.userType == 'admin') { cb('Admin balance unsupported'); exit = true; return false; } //client //just the order price d._order = _order._id; d._user = _user._id; // balanceAmount += d.amount; BalanceItem.createUpdate(d, (_err, _balanceItem) => { _stackSaving = _stackSaving.slice(1); //_balance.save(); //async actions.log('balance:items:remain-for-saving', _stackSaving.length); }, balanceItemMatch, balanceItemKeys).on('created', (_err, _balanceItem) => { //_balance.items = _balance.items || []; _balance.items.push(_balanceItem); actions.log('balance:item:created **'); }).on('updated', (_err, _balanceItem) => { actions.log('balance:item:updated **'); }); }); if (exit) return; //headers alredy sent; _balance.amount = balanceAmount; var waitChilds = setInterval(() => { if (_stackSaving.length === 0) { clearInterval(waitChilds); _balance.save((_err, r) => { _retrieve(); }); } }, 50); } }); } function _orderRules() { if (_user.userType == 'diag') return { _diag: _user._id }; if (_user.userType == 'client') return { _client: _user._id }; if (_user.userType == 'admin') return {}; } } function _retrieve() { actions.log('balance:retrieve'); Balance.get({ _user: data._id, __populate: { 'items': '_user _order pending amount description' } }, (err, _balance) => { return cb(err, _balance); }); } } 
function _preCreateWallet(data, cb,next) { if (!data.wallet && (data.userType == 'client' || data.userType == 'diag')) { return controllers.lemonway.registerWallet({ clientMail: data.email, clientFirstName: data.firstName, clientLastName: data.lastName, postCode: data.postCode, mobileNumber: data.cellPhone }, (err, res) => { if (!err && res && res.WALLET) { data.wallet = res.WALLET.ID; logger.info('LEMONWAY WALLET (automatic registration before saving user)', data.wallet); } if (err) { logger.error('LEMONWAY WALLET (automatic registration before saving user)', err); LogSave('LEMONWAY WALLET (automatic registration before saving user)', 'error', err); } return next(data, cb); }); } return next(data, cb); } function save(data, cb) { if (!_.includes(['diag', 'client', 'admin'], data.userType)) { return cb("invalid userType " + data.userType); } _preCreateWallet(data, cb,__save); function __save(data, cb) { actions.createUpdate(data, cb, { email: data.email, userType: data.userType }, ['userType', 'email']).on('created', postCreate_notifications); } function postCreate_notifications(err, _user) { switch (_user.userType) { case 'admin': { Notif.trigger(Notif.NOTIFICATION.ADMIN_ADMIN_ACCOUNT_CREATED, { _user: _user }, (_err, r) => handleNewAccount(_user, err, r)); } break; case 'client': { Notif.trigger(Notif.CLIENT_CLIENT_NEW_ACCOUNT, { _user: _user }, (_err, r) => handleNewAccount(_user, err, r)); everyAdmin((err, _admin) => { if (err) return cb && cb(err) || LogSave(JSON.stringify(err), 'error', err); Notif.trigger(Notif.ADMIN_CLIENT_ACCOUNT_CREATED, { _user: _user, _admin: _admin }, (_err, r) => handleNewAccount(_user, err, r)); }) } break; } } } function LogSave(msg, type, data) { Log.save({ message: msg, type: type, data: data }); } function handleNewAccount(_user, err, r) { if (err) return LogSave(err.message, 'error', err); if (r && r.ok) { actions.log(_user.email + ':passwordSended'); _user.passwordSended = true; _user.save(); } else { actions.log(_user.email + 
' passwordSended email fail ' + JSON.stringify(r)); LogSave(r.message, 'warning', r); } } function create(data, cb) { _preCreateWallet(data,cb, __create); function __create(data, cb) { actions.create(data, cb, ['email', 'userType', 'password']); } } function createUser(data, cb) { actions.log('createUser=' + JSON.stringify(data)); data.password = data.password || generatePassword(8); data.userType = data.userType || 'admin'; create(data, cb); } function createDiag(data, cb) { actions.log('createDiag=' + JSON.stringify(data)); data.userType = 'diag'; createUser(data, (err, _user) => { if (err) return cb(err, null); Notif.DIAG_NEW_ACCOUNT(_user, (err, r) => { //async (write log on error) if (r.ok) { actions.log(_user.email + ' new account email sended' + JSON.stringify(r)); _user.passwordSended = true; _user.save((err, r) => { if (!err) actions.log(_user.email + ' passwordSended=true'); }); } else { actions.log(_user.email + ' new account email sended failed'); actions.log(JSON.stringify(err)); } }); return cb(err, _user); }); } function createClient(data, cb) { actions.log('createClient=' + JSON.stringify(data)); data.userType = 'client'; data.clientType = data.clientType || 'landlord'; createUser(data, (err, _user) => { if (err) return cb(err, null); sendAccountsDetails(_user); return cb(err, _user); }); } function sendAccountsDetails(_user) { Notif.CLIENT_CLIENT_NEW_ACCOUNT({ _user: _user }, (err, r) => { //async (write log on error) if (r.ok) { actions.log(_user.email + ' new account email sended' + JSON.stringify(r)); _user.passwordSended = true; _user.save((err, r) => { if (!err) actions.log(_user.email + ' passwordSended=true'); }); } else { actions.log(_user.email + ' new account email sended failed'); actions.log(JSON.stringify(err)); } }); } function createClientIfNew(data, cb) { actions.log('createClientIfNew=' + JSON.stringify(data)); actions.check(data, ['email'], (err, r) => { if (err) return cb(err, null); actions.get({ email: data.email }, (err, r) => 
{ if (err) return cb(err, null); if (!r) { createClient(data, cb); } else { //in 10 seconds, try send account details if passwordSended==false setTimeout(function() { if (!r.passwordSended) { sendAccountsDetails(r); } }, 10000); cb(null, r); } }); }); } function login(data, cb) { console.log('USER:login=' + JSON.stringify(data)); actions.model.findOne(actions.toRules({ email: data.email, password: data.password })).exec(cb); } function passwordReset(data, cb) { actions.check(data, ['email'], (err, r) => { if (err) return cb(err, r); actions.get({ email: data.email }, (err, _user) => { if (err) return cb(err, _user); if (_user) { _user.password = generatePassword(8); _user.save(); Notif.trigger('USER_PASSWORD_RESET', _user, (err, r) => { return cb(err, r); }) } }) }); } module.exports = { //custom departmentCoveredBy: departmentCoveredBy, balance: balance, save: save, createClientIfNew: createClientIfNew, createClient: createClient, login: login, createDiag: createDiag, passwordReset: passwordReset, //heredado existsById: actions.existsById, existsByField: actions.existsByField, createUpdate: actions.createUpdate, getAll: actions.getAll, remove: actions.remove, result: actions.result, get: actions.get, check: actions.check, removeAll: actions.removeAll, toRules: actions.toRules, find: actions.find, create: create, log: actions.log, _configure: (hook) => { hook('preSave', preSave); } }; function preSave(data) { //ADMIN#1 OK ctrl.user if (data.notifications && data.userType == 'admin' && !data.notifications.ADMIN_ADMIN_ACCOUNT_CREATED) { Notif.trigger(Notif.NOTIFICATION.ADMIN_ADMIN_ACCOUNT_CREATED, { _user: data }); } //ADMIN//#2 OK ctrl.user if (data.notifications && data.userType == 'client' && !data.notifications.ADMIN_CLIENT_ACCOUNT_CREATED) { everyAdmin((err, _admin) => { if (err) return LogSave(JSON.stringify(err), 'error', err); Notif.trigger(Notif.NOTIFICATION.ADMIN_CLIENT_ACCOUNT_CREATED, { _user: _.cloneDeep(data), _admin: _admin }); }); 
data.notifications.ADMIN_CLIENT_ACCOUNT_CREATED = true; } //DIAG//#1 OK ctrl.user app.diag.complete if (data.notifications && data.userType == 'diag' && data.disabled == false && !data.notifications.DIAG_DIAG_ACCOUNT_CREATED) { Notif.trigger(Notif.NOTIFICATION.DIAG_DIAG_ACCOUNT_CREATED, { _user: data }); } //ADMIN//#3 OK ctrl.user if (data.notifications && data.userType == 'diag' && data.disabled == true && !data.notifications.ADMIN_DIAG_ACCOUNT_CREATED) { everyAdmin((err, _admin) => { if (err) return LogSave(JSON.stringify(err), 'error', err); console.log(JSON.stringify(_admin)); Notif.trigger(Notif.NOTIFICATION.ADMIN_DIAG_ACCOUNT_CREATED, { _user: _.cloneDeep(data), _admin: _admin }); }); } return data; }
javimosch/bastack
lib/backend/deprecated/controllers/diags-user-controller.js
JavaScript
apache-2.0
17,489
package org.apache.tapestry5.internal.plastic; import org.apache.tapestry5.internal.plastic.asm.tree.AnnotationNode; import org.apache.tapestry5.plastic.MethodParameter; import java.util.List; class MethodParameterImpl extends PlasticMember implements MethodParameter { private final String type; private final int index; MethodParameterImpl(PlasticClassImpl plasticClass, List<AnnotationNode> visibleAnnotations, String type, int index) { super(plasticClass, visibleAnnotations); this.type = type; this.index = index; } @Override public String getType() { plasticClass.check(); return type; } @Override public int getIndex() { plasticClass.check(); return index; } }
apache/tapestry-5
plastic/src/main/java/org/apache/tapestry5/internal/plastic/MethodParameterImpl.java
Java
apache-2.0
786
/** * FreeRDP: A Remote Desktop Protocol Implementation * Clipboard Virtual Channel * * Copyright 2009-2011 Jay Sorg * Copyright 2010-2011 Vic Lee * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <winpr/print.h> #include <freerdp/types.h> #include <freerdp/constants.h> #include <freerdp/client/cliprdr.h> #include "cliprdr_main.h" #include "cliprdr_format.h" const char* const CB_MSG_TYPE_STRINGS[] = { "", "CB_MONITOR_READY", "CB_FORMAT_LIST", "CB_FORMAT_LIST_RESPONSE", "CB_FORMAT_DATA_REQUEST", "CB_FORMAT_DATA_RESPONSE", "CB_TEMP_DIRECTORY", "CB_CLIP_CAPS", "CB_FILECONTENTS_REQUEST", "CB_FILECONTENTS_RESPONSE", "CB_LOCK_CLIPDATA" "CB_UNLOCK_CLIPDATA" }; CliprdrClientContext* cliprdr_get_client_interface(cliprdrPlugin* cliprdr) { CliprdrClientContext* pInterface; pInterface = (CliprdrClientContext*) cliprdr->channelEntryPoints.pInterface; return pInterface; } wStream* cliprdr_packet_new(UINT16 msgType, UINT16 msgFlags, UINT32 dataLen) { wStream* s; s = Stream_New(NULL, dataLen + 8); Stream_Write_UINT16(s, msgType); Stream_Write_UINT16(s, msgFlags); /* Write actual length after the entire packet has been constructed. 
*/ Stream_Seek(s, 4); return s; } void cliprdr_packet_send(cliprdrPlugin* cliprdr, wStream* s) { int pos; UINT32 dataLen; UINT32 status = 0; pos = Stream_GetPosition(s); dataLen = pos - 8; Stream_SetPosition(s, 4); Stream_Write_UINT32(s, dataLen); Stream_SetPosition(s, pos); #ifdef WITH_DEBUG_CLIPRDR WLog_DBG(TAG, "Cliprdr Sending (%d bytes)", dataLen + 8); winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), dataLen + 8); #endif if (!cliprdr) { status = CHANNEL_RC_BAD_INIT_HANDLE; } else { status = cliprdr->channelEntryPoints.pVirtualChannelWrite(cliprdr->OpenHandle, Stream_Buffer(s), (UINT32) Stream_GetPosition(s), s); } if (status != CHANNEL_RC_OK) { Stream_Free(s, TRUE); WLog_ERR(TAG, "VirtualChannelWrite failed with %s [%08X]", WTSErrorToString(status), status); } } void cliprdr_print_general_capability_flags(UINT32 flags) { WLog_INFO(TAG, "generalFlags (0x%08X) {", flags); if (flags & CB_USE_LONG_FORMAT_NAMES) WLog_INFO(TAG, "\tCB_USE_LONG_FORMAT_NAMES"); if (flags & CB_STREAM_FILECLIP_ENABLED) WLog_INFO(TAG, "\tCB_STREAM_FILECLIP_ENABLED"); if (flags & CB_FILECLIP_NO_FILE_PATHS) WLog_INFO(TAG, "\tCB_FILECLIP_NO_FILE_PATHS"); if (flags & CB_CAN_LOCK_CLIPDATA) WLog_INFO(TAG, "\tCB_CAN_LOCK_CLIPDATA"); WLog_INFO(TAG, "}"); } static int cliprdr_process_general_capability(cliprdrPlugin* cliprdr, wStream* s) { UINT32 version; UINT32 generalFlags; CLIPRDR_CAPABILITIES capabilities; CLIPRDR_GENERAL_CAPABILITY_SET generalCapabilitySet; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); Stream_Read_UINT32(s, version); /* version (4 bytes) */ Stream_Read_UINT32(s, generalFlags); /* generalFlags (4 bytes) */ DEBUG_CLIPRDR("Version: %d", version); #ifdef WITH_DEBUG_CLIPRDR cliprdr_print_general_capability_flags(generalFlags); #endif if (cliprdr->useLongFormatNames) cliprdr->useLongFormatNames = (generalFlags & CB_USE_LONG_FORMAT_NAMES) ? 
TRUE : FALSE; if (cliprdr->streamFileClipEnabled) cliprdr->streamFileClipEnabled = (generalFlags & CB_STREAM_FILECLIP_ENABLED) ? TRUE : FALSE; if (cliprdr->fileClipNoFilePaths) cliprdr->fileClipNoFilePaths = (generalFlags & CB_FILECLIP_NO_FILE_PATHS) ? TRUE : FALSE; if (cliprdr->canLockClipData) cliprdr->canLockClipData = (generalFlags & CB_CAN_LOCK_CLIPDATA) ? TRUE : FALSE; cliprdr->capabilitiesReceived = TRUE; if (!context->custom) return -1; capabilities.cCapabilitiesSets = 1; capabilities.capabilitySets = (CLIPRDR_CAPABILITY_SET*) &(generalCapabilitySet); generalCapabilitySet.capabilitySetType = CB_CAPSTYPE_GENERAL; generalCapabilitySet.capabilitySetLength = 12; generalCapabilitySet.version = version; generalCapabilitySet.generalFlags = generalFlags; if (context->ServerCapabilities) context->ServerCapabilities(context, &capabilities); return 1; } static int cliprdr_process_clip_caps(cliprdrPlugin* cliprdr, wStream* s, UINT16 length, UINT16 flags) { UINT16 index; UINT16 lengthCapability; UINT16 cCapabilitiesSets; UINT16 capabilitySetType; Stream_Read_UINT16(s, cCapabilitiesSets); /* cCapabilitiesSets (2 bytes) */ Stream_Seek_UINT16(s); /* pad1 (2 bytes) */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ServerCapabilities"); for (index = 0; index < cCapabilitiesSets; index++) { Stream_Read_UINT16(s, capabilitySetType); /* capabilitySetType (2 bytes) */ Stream_Read_UINT16(s, lengthCapability); /* lengthCapability (2 bytes) */ switch (capabilitySetType) { case CB_CAPSTYPE_GENERAL: cliprdr_process_general_capability(cliprdr, s); break; default: WLog_ERR(TAG, "unknown cliprdr capability set: %d", capabilitySetType); break; } } return 1; } static int cliprdr_process_monitor_ready(cliprdrPlugin* cliprdr, wStream* s, UINT16 length, UINT16 flags) { CLIPRDR_MONITOR_READY monitorReady; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); WLog_Print(cliprdr->log, WLOG_DEBUG, "MonitorReady"); if (!context->custom) return -1; if (!cliprdr->capabilitiesReceived) { /** 
* The clipboard capabilities pdu from server to client is optional, * but a server using it must send it before sending the monitor ready pdu. * When the server capabilities pdu is not used, default capabilities * corresponding to a generalFlags field set to zero are assumed. */ cliprdr->useLongFormatNames = FALSE; cliprdr->streamFileClipEnabled = FALSE; cliprdr->fileClipNoFilePaths = TRUE; cliprdr->canLockClipData = FALSE; } monitorReady.msgType = CB_MONITOR_READY; monitorReady.msgFlags = flags; monitorReady.dataLen = length; if (context->MonitorReady) context->MonitorReady(context, &monitorReady); return 1; } static int cliprdr_process_filecontents_request(cliprdrPlugin* cliprdr, wStream* s, UINT32 length, UINT16 flags) { CLIPRDR_FILE_CONTENTS_REQUEST request; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); WLog_Print(cliprdr->log, WLOG_DEBUG, "FileContentsRequest"); if (!context->custom) return -1; if (Stream_GetRemainingLength(s) < 28) return -1; request.msgType = CB_FILECONTENTS_REQUEST; request.msgFlags = flags; request.dataLen = length; Stream_Read_UINT32(s, request.streamId); /* streamId (4 bytes) */ Stream_Read_UINT32(s, request.listIndex); /* listIndex (4 bytes) */ Stream_Read_UINT32(s, request.dwFlags); /* dwFlags (4 bytes) */ Stream_Read_UINT32(s, request.nPositionLow); /* nPositionLow (4 bytes) */ Stream_Read_UINT32(s, request.nPositionHigh); /* nPositionHigh (4 bytes) */ Stream_Read_UINT32(s, request.cbRequested); /* cbRequested (4 bytes) */ Stream_Read_UINT32(s, request.clipDataId); /* clipDataId (4 bytes) */ if (context->ServerFileContentsRequest) context->ServerFileContentsRequest(context, &request); return 1; } static int cliprdr_process_filecontents_response(cliprdrPlugin* cliprdr, wStream* s, UINT32 length, UINT16 flags) { CLIPRDR_FILE_CONTENTS_RESPONSE response; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); WLog_Print(cliprdr->log, WLOG_DEBUG, "FileContentsResponse"); if (!context->custom) return 
-1; if (Stream_GetRemainingLength(s) < 4) return -1; response.msgType = CB_FILECONTENTS_RESPONSE; response.msgFlags = flags; response.dataLen = length; Stream_Read_UINT32(s, response.streamId); /* streamId (4 bytes) */ response.cbRequested = length - 4; response.requestedData = Stream_Pointer(s); /* requestedFileContentsData */ if (context->ServerFileContentsResponse) context->ServerFileContentsResponse(context, &response); return 1; } static int cliprdr_process_lock_clipdata(cliprdrPlugin* cliprdr, wStream* s, UINT32 length, UINT16 flags) { CLIPRDR_LOCK_CLIPBOARD_DATA lockClipboardData; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); WLog_Print(cliprdr->log, WLOG_DEBUG, "LockClipData"); if (!context->custom) return -1; if (Stream_GetRemainingLength(s) < 4) return -1; lockClipboardData.msgType = CB_LOCK_CLIPDATA; lockClipboardData.msgFlags = flags; lockClipboardData.dataLen = length; Stream_Read_UINT32(s, lockClipboardData.clipDataId); /* clipDataId (4 bytes) */ if (context->ServerLockClipboardData) context->ServerLockClipboardData(context, &lockClipboardData); return 1; } static int cliprdr_process_unlock_clipdata(cliprdrPlugin* cliprdr, wStream* s, UINT32 length, UINT16 flags) { CLIPRDR_UNLOCK_CLIPBOARD_DATA unlockClipboardData; CliprdrClientContext* context = cliprdr_get_client_interface(cliprdr); WLog_Print(cliprdr->log, WLOG_DEBUG, "UnlockClipData"); if (!context->custom) return -1; if (Stream_GetRemainingLength(s) < 4) return -1; unlockClipboardData.msgType = CB_UNLOCK_CLIPDATA; unlockClipboardData.msgFlags = flags; unlockClipboardData.dataLen = length; Stream_Read_UINT32(s, unlockClipboardData.clipDataId); /* clipDataId (4 bytes) */ if (context->ServerUnlockClipboardData) context->ServerUnlockClipboardData(context, &unlockClipboardData); return 1; } static void cliprdr_order_recv(cliprdrPlugin* cliprdr, wStream* s) { UINT16 msgType; UINT16 msgFlags; UINT32 dataLen; Stream_Read_UINT16(s, msgType); /* msgType (2 bytes) */ 
Stream_Read_UINT16(s, msgFlags); /* msgFlags (2 bytes) */ Stream_Read_UINT32(s, dataLen); /* dataLen (4 bytes) */ DEBUG_CLIPRDR("msgType: %s (%d), msgFlags: %d dataLen: %d", CB_MSG_TYPE_STRINGS[msgType], msgType, msgFlags, dataLen); #ifdef WITH_DEBUG_CLIPRDR winpr_HexDump(TAG, WLOG_DEBUG, Stream_Buffer(s), dataLen + 8); #endif switch (msgType) { case CB_CLIP_CAPS: cliprdr_process_clip_caps(cliprdr, s, dataLen, msgFlags); break; case CB_MONITOR_READY: cliprdr_process_monitor_ready(cliprdr, s, dataLen, msgFlags); break; case CB_FORMAT_LIST: cliprdr_process_format_list(cliprdr, s, dataLen, msgFlags); break; case CB_FORMAT_LIST_RESPONSE: cliprdr_process_format_list_response(cliprdr, s, dataLen, msgFlags); break; case CB_FORMAT_DATA_REQUEST: cliprdr_process_format_data_request(cliprdr, s, dataLen, msgFlags); break; case CB_FORMAT_DATA_RESPONSE: cliprdr_process_format_data_response(cliprdr, s, dataLen, msgFlags); break; case CB_FILECONTENTS_REQUEST: cliprdr_process_filecontents_request(cliprdr, s, dataLen, msgFlags); break; case CB_FILECONTENTS_RESPONSE: cliprdr_process_filecontents_response(cliprdr, s, dataLen, msgFlags); break; case CB_LOCK_CLIPDATA: cliprdr_process_lock_clipdata(cliprdr, s, dataLen, msgFlags); break; case CB_UNLOCK_CLIPDATA: cliprdr_process_unlock_clipdata(cliprdr, s, dataLen, msgFlags); break; default: WLog_ERR(TAG, "unknown msgType %d", msgType); break; } Stream_Free(s, TRUE); } /** * Callback Interface */ int cliprdr_client_capabilities(CliprdrClientContext* context, CLIPRDR_CAPABILITIES* capabilities) { wStream* s; CLIPRDR_GENERAL_CAPABILITY_SET* generalCapabilitySet; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; s = cliprdr_packet_new(CB_CLIP_CAPS, 0, 4 + CB_CAPSTYPE_GENERAL_LEN); Stream_Write_UINT16(s, 1); /* cCapabilitiesSets */ Stream_Write_UINT16(s, 0); /* pad1 */ generalCapabilitySet = (CLIPRDR_GENERAL_CAPABILITY_SET*) capabilities->capabilitySets; Stream_Write_UINT16(s, generalCapabilitySet->capabilitySetType); /* 
capabilitySetType */ Stream_Write_UINT16(s, generalCapabilitySet->capabilitySetLength); /* lengthCapability */ Stream_Write_UINT32(s, generalCapabilitySet->version); /* version */ Stream_Write_UINT32(s, generalCapabilitySet->generalFlags); /* generalFlags */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientCapabilities"); cliprdr_packet_send(cliprdr, s); return 0; } int cliprdr_temp_directory(CliprdrClientContext* context, CLIPRDR_TEMP_DIRECTORY* tempDirectory) { int length; wStream* s; WCHAR* wszTempDir = NULL; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; s = cliprdr_packet_new(CB_TEMP_DIRECTORY, 0, 520 * 2); length = ConvertToUnicode(CP_UTF8, 0, tempDirectory->szTempDir, -1, &wszTempDir, 0); if (length < 0) return -1; if (length > 520) length = 520; Stream_Write(s, tempDirectory->szTempDir, length * 2); Stream_Zero(s, (520 - length) * 2); free(wszTempDir); WLog_Print(cliprdr->log, WLOG_DEBUG, "TempDirectory: %s", tempDirectory->szTempDir); cliprdr_packet_send(cliprdr, s); return 1; } int cliprdr_client_format_list(CliprdrClientContext* context, CLIPRDR_FORMAT_LIST* formatList) { wStream* s; UINT32 index; int length = 0; int cchWideChar; LPWSTR lpWideCharStr; int formatNameSize; int formatNameLength; char* szFormatName; WCHAR* wszFormatName; BOOL asciiNames = FALSE; CLIPRDR_FORMAT* format; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; if (!cliprdr->useLongFormatNames) { length = formatList->numFormats * 36; s = cliprdr_packet_new(CB_FORMAT_LIST, 0, length); for (index = 0; index < formatList->numFormats; index++) { format = (CLIPRDR_FORMAT*) &(formatList->formats[index]); Stream_Write_UINT32(s, format->formatId); /* formatId (4 bytes) */ formatNameSize = 0; formatNameLength = 0; szFormatName = format->formatName; if (asciiNames) { if (szFormatName) formatNameLength = strlen(szFormatName); if (formatNameLength > 31) formatNameLength = 31; Stream_Write(s, szFormatName, formatNameLength); Stream_Zero(s, 32 - formatNameLength); } else { 
wszFormatName = NULL; if (szFormatName) formatNameSize = ConvertToUnicode(CP_UTF8, 0, szFormatName, -1, &wszFormatName, 0); if (formatNameSize > 15) formatNameSize = 15; if (wszFormatName) Stream_Write(s, wszFormatName, formatNameSize * 2); Stream_Zero(s, 32 - (formatNameSize * 2)); free(wszFormatName); } } } else { for (index = 0; index < formatList->numFormats; index++) { format = (CLIPRDR_FORMAT*) &(formatList->formats[index]); length += 4; formatNameSize = 2; if (format->formatName) formatNameSize = MultiByteToWideChar(CP_UTF8, 0, format->formatName, -1, NULL, 0) * 2; length += formatNameSize; } s = cliprdr_packet_new(CB_FORMAT_LIST, 0, length); for (index = 0; index < formatList->numFormats; index++) { format = (CLIPRDR_FORMAT*) &(formatList->formats[index]); Stream_Write_UINT32(s, format->formatId); /* formatId (4 bytes) */ if (format->formatName) { lpWideCharStr = (LPWSTR) Stream_Pointer(s); cchWideChar = (Stream_Capacity(s) - Stream_GetPosition(s)) / 2; formatNameSize = MultiByteToWideChar(CP_UTF8, 0, format->formatName, -1, lpWideCharStr, cchWideChar) * 2; Stream_Seek(s, formatNameSize); } else { Stream_Write_UINT16(s, 0); } } } WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFormatList: numFormats: %d", formatList->numFormats); cliprdr_packet_send(cliprdr, s); return 0; } int cliprdr_client_format_list_response(CliprdrClientContext* context, CLIPRDR_FORMAT_LIST_RESPONSE* formatListResponse) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; formatListResponse->msgType = CB_FORMAT_LIST_RESPONSE; formatListResponse->dataLen = 0; s = cliprdr_packet_new(formatListResponse->msgType, formatListResponse->msgFlags, formatListResponse->dataLen); WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFormatListResponse"); cliprdr_packet_send(cliprdr, s); return 0; } int cliprdr_client_lock_clipboard_data(CliprdrClientContext* context, CLIPRDR_LOCK_CLIPBOARD_DATA* lockClipboardData) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; s = 
cliprdr_packet_new(CB_LOCK_CLIPDATA, 0, 4); Stream_Write_UINT32(s, lockClipboardData->clipDataId); /* clipDataId (4 bytes) */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientLockClipboardData: clipDataId: 0x%04X", lockClipboardData->clipDataId); cliprdr_packet_send(cliprdr, s); return 1; } int cliprdr_client_unlock_clipboard_data(CliprdrClientContext* context, CLIPRDR_UNLOCK_CLIPBOARD_DATA* unlockClipboardData) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; s = cliprdr_packet_new(CB_UNLOCK_CLIPDATA, 0, 4); Stream_Write_UINT32(s, unlockClipboardData->clipDataId); /* clipDataId (4 bytes) */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientUnlockClipboardData: clipDataId: 0x%04X", unlockClipboardData->clipDataId); cliprdr_packet_send(cliprdr, s); return 1; } int cliprdr_client_format_data_request(CliprdrClientContext* context, CLIPRDR_FORMAT_DATA_REQUEST* formatDataRequest) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; formatDataRequest->msgType = CB_FORMAT_DATA_REQUEST; formatDataRequest->msgFlags = 0; formatDataRequest->dataLen = 4; s = cliprdr_packet_new(formatDataRequest->msgType, formatDataRequest->msgFlags, formatDataRequest->dataLen); Stream_Write_UINT32(s, formatDataRequest->requestedFormatId); /* requestedFormatId (4 bytes) */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFormatDataRequest"); cliprdr_packet_send(cliprdr, s); return 0; } int cliprdr_client_format_data_response(CliprdrClientContext* context, CLIPRDR_FORMAT_DATA_RESPONSE* formatDataResponse) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; formatDataResponse->msgType = CB_FORMAT_DATA_RESPONSE; s = cliprdr_packet_new(formatDataResponse->msgType, formatDataResponse->msgFlags, formatDataResponse->dataLen); Stream_Write(s, formatDataResponse->requestedFormatData, formatDataResponse->dataLen); WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFormatDataResponse"); cliprdr_packet_send(cliprdr, s); return 0; } int 
cliprdr_client_file_contents_request(CliprdrClientContext* context, CLIPRDR_FILE_CONTENTS_REQUEST* fileContentsRequest) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; s = cliprdr_packet_new(CB_FILECONTENTS_REQUEST, 0, 28); Stream_Write_UINT32(s, fileContentsRequest->streamId); /* streamId (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->listIndex); /* listIndex (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->dwFlags); /* dwFlags (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->nPositionLow); /* nPositionLow (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->nPositionHigh); /* nPositionHigh (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->cbRequested); /* cbRequested (4 bytes) */ Stream_Write_UINT32(s, fileContentsRequest->clipDataId); /* clipDataId (4 bytes) */ WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFileContentsRequest: streamId: 0x%04X", fileContentsRequest->streamId); cliprdr_packet_send(cliprdr, s); return 1; } int cliprdr_client_file_contents_response(CliprdrClientContext* context, CLIPRDR_FILE_CONTENTS_RESPONSE* fileContentsResponse) { wStream* s; cliprdrPlugin* cliprdr = (cliprdrPlugin*) context->handle; if (fileContentsResponse->dwFlags & FILECONTENTS_SIZE) fileContentsResponse->cbRequested = sizeof(UINT64); s = cliprdr_packet_new(CB_FILECONTENTS_REQUEST, 0, 4 + fileContentsResponse->cbRequested); Stream_Write_UINT32(s, fileContentsResponse->streamId); /* streamId (4 bytes) */ /** * requestedFileContentsData: * FILECONTENTS_SIZE: file size as UINT64 * FILECONTENTS_RANGE: file data from requested range */ Stream_Write(s, fileContentsResponse->requestedData, fileContentsResponse->cbRequested); WLog_Print(cliprdr->log, WLOG_DEBUG, "ClientFileContentsResponse: streamId: 0x%04X", fileContentsResponse->streamId); cliprdr_packet_send(cliprdr, s); return 1; } /****************************************************************************************/ static wListDictionary* g_InitHandles = NULL; 
static wListDictionary* g_OpenHandles = NULL; void cliprdr_add_init_handle_data(void* pInitHandle, void* pUserData) { if (!g_InitHandles) g_InitHandles = ListDictionary_New(TRUE); ListDictionary_Add(g_InitHandles, pInitHandle, pUserData); } void* cliprdr_get_init_handle_data(void* pInitHandle) { void* pUserData = NULL; pUserData = ListDictionary_GetItemValue(g_InitHandles, pInitHandle); return pUserData; } void cliprdr_remove_init_handle_data(void* pInitHandle) { ListDictionary_Remove(g_InitHandles, pInitHandle); if (ListDictionary_Count(g_InitHandles) < 1) { ListDictionary_Free(g_InitHandles); g_InitHandles = NULL; } } void cliprdr_add_open_handle_data(DWORD openHandle, void* pUserData) { void* pOpenHandle = (void*) (size_t) openHandle; if (!g_OpenHandles) g_OpenHandles = ListDictionary_New(TRUE); ListDictionary_Add(g_OpenHandles, pOpenHandle, pUserData); } void* cliprdr_get_open_handle_data(DWORD openHandle) { void* pUserData = NULL; void* pOpenHandle = (void*) (size_t) openHandle; pUserData = ListDictionary_GetItemValue(g_OpenHandles, pOpenHandle); return pUserData; } void cliprdr_remove_open_handle_data(DWORD openHandle) { void* pOpenHandle = (void*) (size_t) openHandle; ListDictionary_Remove(g_OpenHandles, pOpenHandle); if (ListDictionary_Count(g_OpenHandles) < 1) { ListDictionary_Free(g_OpenHandles); g_OpenHandles = NULL; } } static void cliprdr_virtual_channel_event_data_received(cliprdrPlugin* cliprdr, void* pData, UINT32 dataLength, UINT32 totalLength, UINT32 dataFlags) { wStream* data_in; if ((dataFlags & CHANNEL_FLAG_SUSPEND) || (dataFlags & CHANNEL_FLAG_RESUME)) { return; } if (dataFlags & CHANNEL_FLAG_FIRST) { if (cliprdr->data_in) Stream_Free(cliprdr->data_in, TRUE); cliprdr->data_in = Stream_New(NULL, totalLength); } data_in = cliprdr->data_in; Stream_EnsureRemainingCapacity(data_in, (int) dataLength); Stream_Write(data_in, pData, dataLength); if (dataFlags & CHANNEL_FLAG_LAST) { if (Stream_Capacity(data_in) != Stream_GetPosition(data_in)) { 
WLog_ERR(TAG, "cliprdr_plugin_process_received: read error"); } cliprdr->data_in = NULL; Stream_SealLength(data_in); Stream_SetPosition(data_in, 0); MessageQueue_Post(cliprdr->queue, NULL, 0, (void*) data_in, NULL); } } static VOID VCAPITYPE cliprdr_virtual_channel_open_event(DWORD openHandle, UINT event, LPVOID pData, UINT32 dataLength, UINT32 totalLength, UINT32 dataFlags) { cliprdrPlugin* cliprdr; cliprdr = (cliprdrPlugin*) cliprdr_get_open_handle_data(openHandle); if (!cliprdr) { WLog_ERR(TAG, "cliprdr_virtual_channel_open_event: error no match"); return; } switch (event) { case CHANNEL_EVENT_DATA_RECEIVED: cliprdr_virtual_channel_event_data_received(cliprdr, pData, dataLength, totalLength, dataFlags); break; case CHANNEL_EVENT_WRITE_COMPLETE: Stream_Free((wStream*) pData, TRUE); break; case CHANNEL_EVENT_USER: break; } } static void* cliprdr_virtual_channel_client_thread(void* arg) { wStream* data; wMessage message; cliprdrPlugin* cliprdr = (cliprdrPlugin*) arg; while (1) { if (!MessageQueue_Wait(cliprdr->queue)) break; if (MessageQueue_Peek(cliprdr->queue, &message, TRUE)) { if (message.id == WMQ_QUIT) break; if (message.id == 0) { data = (wStream*) message.wParam; cliprdr_order_recv(cliprdr, data); } } } ExitThread(0); return NULL; } static void cliprdr_virtual_channel_event_connected(cliprdrPlugin* cliprdr, LPVOID pData, UINT32 dataLength) { UINT32 status; status = cliprdr->channelEntryPoints.pVirtualChannelOpen(cliprdr->InitHandle, &cliprdr->OpenHandle, cliprdr->channelDef.name, cliprdr_virtual_channel_open_event); cliprdr_add_open_handle_data(cliprdr->OpenHandle, cliprdr); if (status != CHANNEL_RC_OK) { WLog_ERR(TAG, "pVirtualChannelOpen failed with %s [%08X]", WTSErrorToString(status), status); return; } cliprdr->queue = MessageQueue_New(NULL); cliprdr->thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) cliprdr_virtual_channel_client_thread, (void*) cliprdr, 0, NULL); } static void cliprdr_virtual_channel_event_disconnected(cliprdrPlugin* cliprdr) { 
UINT rc; MessageQueue_PostQuit(cliprdr->queue, 0); WaitForSingleObject(cliprdr->thread, INFINITE); MessageQueue_Free(cliprdr->queue); CloseHandle(cliprdr->thread); rc = cliprdr->channelEntryPoints.pVirtualChannelClose(cliprdr->OpenHandle); if (CHANNEL_RC_OK != rc) { WLog_ERR(TAG, "pVirtualChannelClose failed with %s [%08X]", WTSErrorToString(rc), rc); } if (cliprdr->data_in) { Stream_Free(cliprdr->data_in, TRUE); cliprdr->data_in = NULL; } cliprdr_remove_open_handle_data(cliprdr->OpenHandle); } static void cliprdr_virtual_channel_event_terminated(cliprdrPlugin* cliprdr) { cliprdr_remove_init_handle_data(cliprdr->InitHandle); free(cliprdr); } static VOID VCAPITYPE cliprdr_virtual_channel_init_event(LPVOID pInitHandle, UINT event, LPVOID pData, UINT dataLength) { cliprdrPlugin* cliprdr; cliprdr = (cliprdrPlugin*) cliprdr_get_init_handle_data(pInitHandle); if (!cliprdr) { WLog_ERR(TAG, "cliprdr_virtual_channel_init_event: error no match"); return; } switch (event) { case CHANNEL_EVENT_CONNECTED: cliprdr_virtual_channel_event_connected(cliprdr, pData, dataLength); break; case CHANNEL_EVENT_DISCONNECTED: cliprdr_virtual_channel_event_disconnected(cliprdr); break; case CHANNEL_EVENT_TERMINATED: cliprdr_virtual_channel_event_terminated(cliprdr); break; } } /* cliprdr is always built-in */ #define VirtualChannelEntry cliprdr_VirtualChannelEntry BOOL VCAPITYPE VirtualChannelEntry(PCHANNEL_ENTRY_POINTS pEntryPoints) { UINT rc; cliprdrPlugin* cliprdr; CliprdrClientContext* context; CHANNEL_ENTRY_POINTS_FREERDP* pEntryPointsEx; cliprdr = (cliprdrPlugin*) calloc(1, sizeof(cliprdrPlugin)); cliprdr->channelDef.options = CHANNEL_OPTION_INITIALIZED | CHANNEL_OPTION_ENCRYPT_RDP | CHANNEL_OPTION_COMPRESS_RDP | CHANNEL_OPTION_SHOW_PROTOCOL; strcpy(cliprdr->channelDef.name, "cliprdr"); pEntryPointsEx = (CHANNEL_ENTRY_POINTS_FREERDP*) pEntryPoints; if ((pEntryPointsEx->cbSize >= sizeof(CHANNEL_ENTRY_POINTS_FREERDP)) && (pEntryPointsEx->MagicNumber == FREERDP_CHANNEL_MAGIC_NUMBER)) { 
context = (CliprdrClientContext*) calloc(1, sizeof(CliprdrClientContext)); context->handle = (void*) cliprdr; context->custom = NULL; context->ClientCapabilities = cliprdr_client_capabilities; context->TempDirectory = cliprdr_temp_directory; context->ClientFormatList = cliprdr_client_format_list; context->ClientFormatListResponse = cliprdr_client_format_list_response; context->ClientLockClipboardData = cliprdr_client_lock_clipboard_data; context->ClientUnlockClipboardData = cliprdr_client_unlock_clipboard_data; context->ClientFormatDataRequest = cliprdr_client_format_data_request; context->ClientFormatDataResponse = cliprdr_client_format_data_response; context->ClientFileContentsRequest = cliprdr_client_file_contents_request; context->ClientFileContentsResponse = cliprdr_client_file_contents_response; *(pEntryPointsEx->ppInterface) = (void*) context; cliprdr->context = context; } cliprdr->log = WLog_Get("com.freerdp.channels.cliprdr.client"); cliprdr->useLongFormatNames = TRUE; cliprdr->streamFileClipEnabled = FALSE; cliprdr->fileClipNoFilePaths = TRUE; cliprdr->canLockClipData = FALSE; WLog_Print(cliprdr->log, WLOG_DEBUG, "VirtualChannelEntry"); CopyMemory(&(cliprdr->channelEntryPoints), pEntryPoints, sizeof(CHANNEL_ENTRY_POINTS_FREERDP)); rc = cliprdr->channelEntryPoints.pVirtualChannelInit(&cliprdr->InitHandle, &cliprdr->channelDef, 1, VIRTUAL_CHANNEL_VERSION_WIN2000, cliprdr_virtual_channel_init_event); if (CHANNEL_RC_OK != rc) { WLog_ERR(TAG, "pVirtualChannelInit failed with %s [%08X]", WTSErrorToString(rc), rc); free(cliprdr); return -1; } cliprdr->channelEntryPoints.pInterface = *(cliprdr->channelEntryPoints.ppInterface); cliprdr->channelEntryPoints.ppInterface = &(cliprdr->channelEntryPoints.pInterface); cliprdr_add_init_handle_data(cliprdr->InitHandle, (void*) cliprdr); return 1; }
vworkspace/FreeRDP
channels/cliprdr/client/cliprdr_main.c
C
apache-2.0
28,694
/* * Copyright (c) 2010 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.zkybase.kite; import org.springframework.beans.factory.BeanNameAware; import org.springframework.jmx.export.annotation.ManagedAttribute; /** * Abstract base class for implementing guards. * * @author Willie Wheeler (willie.wheeler@gmail.com) * @since 1.0 */ public abstract class AbstractGuard implements Guard, BeanNameAware { private String name; @ManagedAttribute(description = "Guard name") public String getName() { return name; } /* (non-Javadoc) * @see org.springframework.beans.factory.BeanNameAware#setBeanName(java.lang.String) */ @Override public void setBeanName(String beanName) { this.name = beanName; } }
jamesanto/kite-test
src/main/java/org/zkybase/kite/AbstractGuard.java
Java
apache-2.0
1,275
#!/usr/bin/env bash # Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if [[ -n "${JENKINS_HOME}" ]] then exec ./build/jenkins_e2e.sh fi echo ">> starting cAdvisor locally" sudo ./cadvisor & echo ">> running integration tests against local cAdvisor" godep go test github.com/google/cadvisor/integration/tests/... --vmodule=*=2 if [ $? -ne 0 ] then echo "Integration tests failed" fi echo ">> stopping cAdvisor" sudo pkill -9 cadvisor
rhuss/gofabric8
vendor/github.com/google/cadvisor/build/integration.sh
Shell
apache-2.0
991
/* * Copyright (c) 2019 STMicroelectronics * * SPDX-License-Identifier: Apache-2.0 */ /* SoC level DTS fixup file */ #define DT_NUM_IRQ_PRIO_BITS DT_ARM_V7M_NVIC_E000E100_ARM_NUM_IRQ_PRIORITY_BITS #define DT_GPIO_STM32_GPIOA_BASE_ADDRESS \ DT_ST_STM32_GPIO_50002000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOA_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50002000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOA_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50002000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOA_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50002000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOA_LABEL \ DT_ST_STM32_GPIO_50002000_LABEL #define DT_GPIO_STM32_GPIOA_SIZE \ DT_ST_STM32_GPIO_50002000_SIZE #define DT_GPIO_STM32_GPIOA_CLOCK_BITS \ DT_ST_STM32_GPIO_50002000_CLOCK_BITS #define DT_GPIO_STM32_GPIOA_CLOCK_BUS \ DT_ST_STM32_GPIO_50002000_CLOCK_BUS #define DT_GPIO_STM32_GPIOB_BASE_ADDRESS \ DT_ST_STM32_GPIO_50003000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOB_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50003000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOB_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50003000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOB_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50003000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOB_LABEL \ DT_ST_STM32_GPIO_50003000_LABEL #define DT_GPIO_STM32_GPIOB_SIZE \ DT_ST_STM32_GPIO_50003000_SIZE #define DT_GPIO_STM32_GPIOB_CLOCK_BITS \ DT_ST_STM32_GPIO_50003000_CLOCK_BITS #define DT_GPIO_STM32_GPIOB_CLOCK_BUS \ DT_ST_STM32_GPIO_50003000_CLOCK_BUS #define DT_GPIO_STM32_GPIOC_BASE_ADDRESS \ DT_ST_STM32_GPIO_50004000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOC_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50004000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOC_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50004000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOC_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50004000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOC_LABEL \ DT_ST_STM32_GPIO_50004000_LABEL #define DT_GPIO_STM32_GPIOC_SIZE \ DT_ST_STM32_GPIO_50004000_SIZE #define DT_GPIO_STM32_GPIOC_CLOCK_BITS \ DT_ST_STM32_GPIO_50004000_CLOCK_BITS #define 
DT_GPIO_STM32_GPIOC_CLOCK_BUS \ DT_ST_STM32_GPIO_50004000_CLOCK_BUS #define DT_GPIO_STM32_GPIOD_BASE_ADDRESS \ DT_ST_STM32_GPIO_50005000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOD_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50005000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOD_CLOCK_BUS_0 \ T_ST_STM32_GPIO_50005000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOD_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50005000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOD_LABEL \ DT_ST_STM32_GPIO_50005000_LABEL #define DT_GPIO_STM32_GPIOD_SIZE \ DT_ST_STM32_GPIO_50005000_SIZE #define DT_GPIO_STM32_GPIOD_CLOCK_BITS \ DT_ST_STM32_GPIO_50005000_CLOCK_BITS #define DT_GPIO_STM32_GPIOD_CLOCK_BUS \ DT_ST_STM32_GPIO_50005000_CLOCK_BUS #define DT_GPIO_STM32_GPIOE_BASE_ADDRESS \ DT_ST_STM32_GPIO_50006000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOE_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50006000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOE_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50006000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOE_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50006000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOE_LABEL \ DT_ST_STM32_GPIO_50006000_LABEL #define DT_GPIO_STM32_GPIOE_SIZE \ DT_ST_STM32_GPIO_50006000_SIZE #define DT_GPIO_STM32_GPIOE_CLOCK_BITS \ DT_ST_STM32_GPIO_50006000_CLOCK_BITS #define DT_GPIO_STM32_GPIOE_CLOCK_BUS \ DT_ST_STM32_GPIO_50006000_CLOCK_BUS #define DT_GPIO_STM32_GPIOF_BASE_ADDRESS \ DT_ST_STM32_GPIO_50007000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOF_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50007000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOF_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50007000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOF_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50007000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOF_LABEL \ DT_ST_STM32_GPIO_50007000_LABEL #define DT_GPIO_STM32_GPIOF_SIZE \ DT_ST_STM32_GPIO_50007000_SIZE #define DT_GPIO_STM32_GPIOF_CLOCK_BITS \ DT_ST_STM32_GPIO_50007000_CLOCK_BITS #define DT_GPIO_STM32_GPIOF_CLOCK_BUS \ DT_ST_STM32_GPIO_50007000_CLOCK_BUS #define DT_GPIO_STM32_GPIOG_BASE_ADDRESS \ 
DT_ST_STM32_GPIO_50008000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOG_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50008000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOG_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50008000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOG_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50008000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOG_LABEL \ DT_ST_STM32_GPIO_50008000_LABEL #define DT_GPIO_STM32_GPIOG_SIZE \ DT_ST_STM32_GPIO_50008000_SIZE #define DT_GPIO_STM32_GPIOG_CLOCK_BITS \ DT_ST_STM32_GPIO_50008000_CLOCK_BITS #define DT_GPIO_STM32_GPIOG_CLOCK_BUS \ DT_ST_STM32_GPIO_50008000_CLOCK_BUS #define DT_GPIO_STM32_GPIOH_BASE_ADDRESS \ DT_ST_STM32_GPIO_50009000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOH_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_50009000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOH_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_50009000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOH_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_50009000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOH_LABEL \ DT_ST_STM32_GPIO_50009000_LABEL #define DT_GPIO_STM32_GPIOH_SIZE \ DT_ST_STM32_GPIO_50009000_SIZE #define DT_GPIO_STM32_GPIOH_CLOCK_BITS \ DT_ST_STM32_GPIO_50009000_CLOCK_BITS #define DT_GPIO_STM32_GPIOH_CLOCK_BUS \ DT_ST_STM32_GPIO_50009000_CLOCK_BUS #define DT_GPIO_STM32_GPIOI_BASE_ADDRESS \ DT_ST_STM32_GPIO_5000A000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOI_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_5000A000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOI_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_5000A000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOI_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_5000A000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOI_LABEL \ DT_ST_STM32_GPIO_5000A000_LABEL #define DT_GPIO_STM32_GPIOI_SIZE \ DT_ST_STM32_GPIO_5000A000_SIZE #define DT_GPIO_STM32_GPIOI_CLOCK_BITS \ DT_ST_STM32_GPIO_5000A000_CLOCK_BITS #define DT_GPIO_STM32_GPIOI_CLOCK_BUS \ DT_ST_STM32_GPIO_5000A000_CLOCK_BUS #define DT_GPIO_STM32_GPIOJ_BASE_ADDRESS \ DT_ST_STM32_GPIO_5000B000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOJ_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_5000B000_CLOCK_BITS_0 #define 
DT_GPIO_STM32_GPIOJ_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_5000B000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOJ_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_5000B000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOJ_LABEL \ DT_ST_STM32_GPIO_5000B000_LABEL #define DT_GPIO_STM32_GPIOJ_SIZE \ DT_ST_STM32_GPIO_5000B000_SIZE #define DT_GPIO_STM32_GPIOJ_CLOCK_BITS \ DT_ST_STM32_GPIO_5000B000_CLOCK_BITS #define DT_GPIO_STM32_GPIOJ_CLOCK_BUS \ DT_ST_STM32_GPIO_5000B000_CLOCK_BUS #define DT_GPIO_STM32_GPIOK_BASE_ADDRESS \ DT_ST_STM32_GPIO_5000C000_BASE_ADDRESS #define DT_GPIO_STM32_GPIOK_CLOCK_BITS_0 \ DT_ST_STM32_GPIO_5000C000_CLOCK_BITS_0 #define DT_GPIO_STM32_GPIOK_CLOCK_BUS_0 \ DT_ST_STM32_GPIO_5000C000_CLOCK_BUS_0 #define DT_GPIO_STM32_GPIOK_CLOCK_CONTROLLER \ DT_ST_STM32_GPIO_5000C000_CLOCK_CONTROLLER #define DT_GPIO_STM32_GPIOK_LABEL \ DT_ST_STM32_GPIO_5000C000_LABEL #define DT_GPIO_STM32_GPIOK_SIZE \ DT_ST_STM32_GPIO_5000C000_SIZE #define DT_GPIO_STM32_GPIOK_CLOCK_BITS \ DT_ST_STM32_GPIO_5000C000_CLOCK_BITS #define DT_GPIO_STM32_GPIOK_CLOCK_BUS \ DT_ST_STM32_GPIO_5000C000_CLOCK_BUS #define DT_UART_STM32_USART_2_BASE_ADDRESS \ DT_ST_STM32_USART_4000E000_BASE_ADDRESS #define DT_UART_STM32_USART_2_BAUD_RATE \ DT_ST_STM32_USART_4000E000_CURRENT_SPEED #define DT_UART_STM32_USART_2_IRQ_PRI \ DT_ST_STM32_USART_4000E000_IRQ_0_PRIORITY #define DT_UART_STM32_USART_2_NAME \ DT_ST_STM32_USART_4000E000_LABEL #define DT_USART_2_IRQ \ DT_ST_STM32_USART_4000E000_IRQ_0 #define DT_UART_STM32_USART_2_CLOCK_BITS \ DT_ST_STM32_USART_4000E000_CLOCK_BITS #define DT_UART_STM32_USART_2_CLOCK_BUS \ DT_ST_STM32_USART_4000E000_CLOCK_BUS #define DT_UART_STM32_USART_2_HW_FLOW_CONTROL \ DT_ST_STM32_USART_4000E000_HW_FLOW_CONTROL #define DT_UART_STM32_USART_3_BASE_ADDRESS \ DT_ST_STM32_USART_4000F000_BASE_ADDRESS #define DT_UART_STM32_USART_3_BAUD_RATE \ DT_ST_STM32_USART_4000F000_CURRENT_SPEED #define DT_UART_STM32_USART_3_IRQ_PRI \ DT_ST_STM32_USART_4000F000_IRQ_0_PRIORITY #define DT_UART_STM32_USART_3_NAME \ 
DT_ST_STM32_USART_4000F000_LABEL #define DT_USART_3_IRQ \ DT_ST_STM32_USART_4000F000_IRQ_0 #define DT_UART_STM32_USART_3_CLOCK_BITS \ DT_ST_STM32_USART_4000F000_CLOCK_BITS #define DT_UART_STM32_USART_3_CLOCK_BUS \ DT_ST_STM32_USART_4000F000_CLOCK_BUS #define DT_UART_STM32_USART_3_HW_FLOW_CONTROL \ DT_ST_STM32_USART_4000F000_HW_FLOW_CONTROL #define DT_UART_STM32_UART_4_BASE_ADDRESS \ DT_ST_STM32_UART_40010000_BASE_ADDRESS #define DT_UART_STM32_UART_4_BAUD_RATE \ DT_ST_STM32_UART_40010000_CURRENT_SPEED #define DT_UART_STM32_UART_4_IRQ_PRI \ DT_ST_STM32_UART_40010000_IRQ_0_PRIORITY #define DT_UART_STM32_UART_4_NAME \ DT_ST_STM32_UART_40010000_LABEL #define DT_UART_4_IRQ \ DT_ST_STM32_UART_40010000_IRQ_0 #define DT_UART_STM32_UART_4_CLOCK_BITS \ DT_ST_STM32_UART_40010000_CLOCK_BITS #define DT_UART_STM32_UART_4_CLOCK_BUS \ DT_ST_STM32_UART_40010000_CLOCK_BUS #define DT_UART_STM32_UART_4_HW_FLOW_CONTROL \ DT_ST_STM32_UART_40010000_HW_FLOW_CONTROL #define DT_UART_STM32_UART_5_BASE_ADDRESS \ DT_ST_STM32_UART_40011000_BASE_ADDRESS #define DT_UART_STM32_UART_5_BAUD_RATE \ DT_ST_STM32_UART_40011000_CURRENT_SPEED #define DT_UART_STM32_UART_5_IRQ_PRI \ DT_ST_STM32_UART_40011000_IRQ_0_PRIORITY #define DT_UART_STM32_UART_5_NAME \ DT_ST_STM32_UART_40011000_LABEL #define DT_UART_5_IRQ \ DT_ST_STM32_UART_40011000_IRQ_0 #define DT_UART_STM32_UART_5_CLOCK_BITS \ DT_ST_STM32_UART_40011000_CLOCK_BITS #define DT_UART_STM32_UART_5_CLOCK_BUS \ DT_ST_STM32_UART_40011000_CLOCK_BUS #define DT_UART_STM32_UART_5_HW_FLOW_CONTROL \ DT_ST_STM32_UART_40011000_HW_FLOW_CONTROL #define DT_UART_STM32_USART_6_BASE_ADDRESS \ DT_ST_STM32_USART_44003000_BASE_ADDRESS #define DT_UART_STM32_USART_6_BAUD_RATE \ DT_ST_STM32_USART_44003000_CURRENT_SPEED #define DT_UART_STM32_USART_6_IRQ_PRI \ DT_ST_STM32_USART_44003000_IRQ_0_PRIORITY #define DT_UART_STM32_USART_6_NAME \ DT_ST_STM32_USART_44003000_LABEL #define DT_USART_6_IRQ \ DT_ST_STM32_USART_44003000_IRQ_0 #define DT_UART_STM32_USART_6_CLOCK_BITS \ 
DT_ST_STM32_USART_44003000_CLOCK_BITS #define DT_UART_STM32_USART_6_CLOCK_BUS \ DT_ST_STM32_USART_44003000_CLOCK_BUS #define DT_UART_STM32_USART_6_HW_FLOW_CONTROL \ DT_ST_STM32_USART_40003000_HW_FLOW_CONTROL #define DT_UART_STM32_UART_7_BASE_ADDRESS \ DT_ST_STM32_UART_40018000_BASE_ADDRESS #define DT_UART_STM32_UART_7_BAUD_RATE \ DT_ST_STM32_UART_40018000_CURRENT_SPEED #define DT_UART_STM32_UART_7_IRQ_PRI \ DT_ST_STM32_UART_40018000_IRQ_0_PRIORITY #define DT_UART_STM32_UART_7_NAME \ DT_ST_STM32_UART_40018000_LABEL #define DT_UART_7_IRQ \ DT_ST_STM32_UART_40018000_IRQ_0 #define DT_UART_STM32_UART_7_CLOCK_BITS \ DT_ST_STM32_UART_40018000_CLOCK_BITS #define DT_UART_STM32_UART_7_CLOCK_BUS \ DT_ST_STM32_UART_40018000_CLOCK_BUS #define DT_UART_STM32_UART_7_HW_FLOW_CONTROL \ DT_ST_STM32_UART_40018000_HW_FLOW_CONTROL #define DT_UART_STM32_UART_8_BASE_ADDRESS \ DT_ST_STM32_UART_40019000_BASE_ADDRESS #define DT_UART_STM32_UART_8_BAUD_RATE \ DT_ST_STM32_UART_40019000_CURRENT_SPEED #define DT_UART_STM32_UART_8_IRQ_PRI \ DT_ST_STM32_UART_40019000_IRQ_0_PRIORITY #define DT_UART_STM32_UART_8_NAME \ DT_ST_STM32_UART_40019000_LABEL #define DT_UART_8_IRQ \ DT_ST_STM32_UART_40019000_IRQ_0 #define DT_UART_STM32_UART_8_CLOCK_BITS \ DT_ST_STM32_UART_40019000_CLOCK_BITS #define DT_UART_STM32_UART_8_CLOCK_BUS \ DT_ST_STM32_UART_40019000_CLOCK_BUS #define DT_UART_STM32_UART_8_HW_FLOW_CONTROL \ DT_ST_STM32_UART_40019000_HW_FLOW_CONTROL /* End of SoC Level DTS fixup file */
ldts/zephyr
soc/arm/st_stm32/stm32mp1/dts_fixup.h
C
apache-2.0
12,870
package main import ( "crypto/x509" "encoding/json" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "net/http" "os" "strings" "github.com/dustinkirkland/golang-petname" "github.com/gorilla/websocket" log "gopkg.in/inconshreveable/log15.v2" "github.com/lxc/lxd/lxd/archive" "github.com/lxc/lxd/lxd/backup" "github.com/lxc/lxd/lxd/cluster" "github.com/lxc/lxd/lxd/db" deviceConfig "github.com/lxc/lxd/lxd/device/config" "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/instance/instancetype" "github.com/lxc/lxd/lxd/instance/operationlock" "github.com/lxc/lxd/lxd/operations" "github.com/lxc/lxd/lxd/project" "github.com/lxc/lxd/lxd/request" "github.com/lxc/lxd/lxd/response" "github.com/lxc/lxd/lxd/revert" storagePools "github.com/lxc/lxd/lxd/storage" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" "github.com/lxc/lxd/shared/logger" "github.com/lxc/lxd/shared/osarch" ) func createFromImage(d *Daemon, r *http.Request, projectName string, req *api.InstancesPost) response.Response { if d.cluster.LocalNodeIsEvacuated() { return response.Forbidden(fmt.Errorf("Cluster member is evacuated")) } hash, err := instance.ResolveImage(d.State(), projectName, req.Source) if err != nil { return response.BadRequest(err) } dbType, err := instancetype.New(string(req.Type)) if err != nil { return response.BadRequest(err) } run := func(op *operations.Operation) error { args := db.InstanceArgs{ Project: projectName, Config: req.Config, Type: dbType, Description: req.Description, Devices: deviceConfig.NewDevices(req.Devices), Ephemeral: req.Ephemeral, Name: req.Name, Profiles: req.Profiles, } err := instance.ValidName(args.Name, args.Snapshot) if err != nil { return err } var info *api.Image if req.Source.Server != "" { var autoUpdate bool p, err := d.cluster.GetProject(projectName) if err != nil { return err } if p.Config["images.auto_update_cached"] != "" { autoUpdate = shared.IsTrue(p.Config["images.auto_update_cached"]) } else { autoUpdate, err 
= cluster.ConfigGetBool(d.cluster, "images.auto_update_cached") if err != nil { return err } } // Detect image type based on instance type requested. imgType := "container" if req.Type == "virtual-machine" { imgType = "virtual-machine" } var budget int64 err = d.cluster.Transaction(func(tx *db.ClusterTx) error { budget, err = project.GetImageSpaceBudget(tx, projectName) return err }) if err != nil { return err } info, err = d.ImageDownload(r, op, &ImageDownloadArgs{ Server: req.Source.Server, Protocol: req.Source.Protocol, Certificate: req.Source.Certificate, Secret: req.Source.Secret, Alias: hash, SetCached: true, Type: imgType, AutoUpdate: autoUpdate, Public: false, PreferCached: true, ProjectName: projectName, Budget: budget, }) if err != nil { return err } } else { _, info, err = d.cluster.GetImage(hash, db.ImageFilter{Project: &projectName}) if err != nil { return err } } args.Architecture, err = osarch.ArchitectureId(info.Architecture) if err != nil { return err } _, err = instanceCreateFromImage(d, r, args, info.Fingerprint, op) return err } resources := map[string][]string{} resources["instances"] = []string{req.Name} if dbType == instancetype.Container { resources["containers"] = resources["instances"] } op, err := operations.OperationCreate(d.State(), projectName, operations.OperationClassTask, db.OperationInstanceCreate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) } return operations.OperationResponse(op) } func createFromNone(d *Daemon, r *http.Request, projectName string, req *api.InstancesPost) response.Response { if d.cluster.LocalNodeIsEvacuated() { return response.Forbidden(fmt.Errorf("Cluster member is evacuated")) } dbType, err := instancetype.New(string(req.Type)) if err != nil { return response.BadRequest(err) } args := db.InstanceArgs{ Project: projectName, Config: req.Config, Type: dbType, Description: req.Description, Devices: deviceConfig.NewDevices(req.Devices), Ephemeral: req.Ephemeral, Name: 
req.Name, Profiles: req.Profiles, } if req.Architecture != "" { architecture, err := osarch.ArchitectureId(req.Architecture) if err != nil { return response.InternalError(err) } args.Architecture = architecture } run := func(op *operations.Operation) error { _, err := instanceCreateAsEmpty(d, args) return err } resources := map[string][]string{} resources["instances"] = []string{req.Name} if dbType == instancetype.Container { resources["containers"] = resources["instances"] } op, err := operations.OperationCreate(d.State(), projectName, operations.OperationClassTask, db.OperationInstanceCreate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) } return operations.OperationResponse(op) } func createFromMigration(d *Daemon, r *http.Request, projectName string, req *api.InstancesPost) response.Response { if d.cluster.LocalNodeIsEvacuated() && r.Context().Value(request.CtxProtocol) != "cluster" { return response.Forbidden(fmt.Errorf("Cluster member is evacuated")) } // Validate migration mode. if req.Source.Mode != "pull" && req.Source.Mode != "push" { return response.NotImplemented(fmt.Errorf("Mode '%s' not implemented", req.Source.Mode)) } // Parse the architecture name architecture, err := osarch.ArchitectureId(req.Architecture) if err != nil { return response.BadRequest(err) } // Pre-fill default profile. if req.Profiles == nil { req.Profiles = []string{"default"} } dbType, err := instancetype.New(string(req.Type)) if err != nil { return response.BadRequest(err) } if dbType != instancetype.Container && dbType != instancetype.VM { return response.BadRequest(fmt.Errorf("Instance type not supported %q", req.Type)) } // Prepare the instance creation request. 
args := db.InstanceArgs{ Project: projectName, Architecture: architecture, BaseImage: req.Source.BaseImage, Config: req.Config, Type: dbType, Devices: deviceConfig.NewDevices(req.Devices), Description: req.Description, Ephemeral: req.Ephemeral, Name: req.Name, Profiles: req.Profiles, Stateful: req.Stateful, } // Early profile validation. profiles, err := d.cluster.GetProfileNames(projectName) if err != nil { return response.InternalError(err) } for _, profile := range args.Profiles { if !shared.StringInSlice(profile, profiles) { return response.BadRequest(fmt.Errorf("Requested profile '%s' doesn't exist", profile)) } } storagePool, storagePoolProfile, localRootDiskDeviceKey, localRootDiskDevice, resp := instanceFindStoragePool(d, projectName, req) if resp != nil { return resp } if storagePool == "" { return response.BadRequest(fmt.Errorf("Can't find a storage pool for the instance to use")) } if localRootDiskDeviceKey == "" && storagePoolProfile == "" { // Give the container it's own local root disk device with a pool property. rootDev := map[string]string{} rootDev["type"] = "disk" rootDev["path"] = "/" rootDev["pool"] = storagePool if args.Devices == nil { args.Devices = deviceConfig.Devices{} } // Make sure that we do not overwrite a device the user is currently using under the // name "root". rootDevName := "root" for i := 0; i < 100; i++ { if args.Devices[rootDevName] == nil { break } rootDevName = fmt.Sprintf("root%d", i) continue } args.Devices[rootDevName] = rootDev } else if localRootDiskDeviceKey != "" && localRootDiskDevice["pool"] == "" { args.Devices[localRootDiskDeviceKey]["pool"] = storagePool } var inst instance.Instance var instOp *operationlock.InstanceOperation // Early check for refresh. if req.Source.Refresh { // Check if the instance exists. 
inst, err = instance.LoadByProjectAndName(d.State(), projectName, req.Name) if err != nil { req.Source.Refresh = false } else if inst.IsRunning() { return response.BadRequest(fmt.Errorf("Cannot refresh a running instance")) } } revert := revert.New() defer revert.Fail() instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly if !req.Source.Refresh { _, err := storagePools.GetPoolByName(d.State(), storagePool) if err != nil { return response.InternalError(err) } // Create the instance and storage DB records for main instance. // Note: At this stage we do not yet know if snapshots are going to be received and so we cannot // create their DB records. This will be done if needed in the migrationSink.Do() function called // as part of the operation below. inst, instOp, err = instance.CreateInternal(d.State(), args, true, nil, revert) if err != nil { return response.InternalError(fmt.Errorf("Failed creating instance record: %w", err)) } defer instOp.Done(err) } var cert *x509.Certificate if req.Source.Certificate != "" { certBlock, _ := pem.Decode([]byte(req.Source.Certificate)) if certBlock == nil { return response.InternalError(fmt.Errorf("Invalid certificate")) } cert, err = x509.ParseCertificate(certBlock.Bytes) if err != nil { return response.InternalError(err) } } config, err := shared.GetTLSConfig("", "", "", cert) if err != nil { return response.InternalError(err) } push := false if req.Source.Mode == "push" { push = true } migrationArgs := MigrationSinkArgs{ Url: req.Source.Operation, Dialer: websocket.Dialer{ TLSClientConfig: config, NetDial: shared.RFC3493Dialer}, Instance: inst, Secrets: req.Source.Websockets, Push: push, Live: req.Source.Live, InstanceOnly: instanceOnly, Refresh: req.Source.Refresh, } sink, err := newMigrationSink(&migrationArgs) if err != nil { return response.InternalError(err) } // Copy reverter so far so we can use it inside run after this function has finished. 
runRevert := revert.Clone() run := func(op *operations.Operation) error { defer runRevert.Fail() // And finally run the migration. err = sink.Do(d.State(), runRevert, op) if err != nil { return fmt.Errorf("Error transferring instance data: %w", err) } err = inst.DeferTemplateApply(instance.TemplateTriggerCopy) if err != nil { return err } runRevert.Success() return nil } resources := map[string][]string{} resources["instances"] = []string{req.Name} if dbType == instancetype.Container { resources["containers"] = resources["instances"] } var op *operations.Operation if push { op, err = operations.OperationCreate(d.State(), projectName, operations.OperationClassWebsocket, db.OperationInstanceCreate, resources, sink.Metadata(), run, nil, sink.Connect, r) if err != nil { return response.InternalError(err) } } else { op, err = operations.OperationCreate(d.State(), projectName, operations.OperationClassTask, db.OperationInstanceCreate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) } } revert.Success() return operations.OperationResponse(op) } func createFromCopy(d *Daemon, r *http.Request, projectName string, req *api.InstancesPost) response.Response { if d.cluster.LocalNodeIsEvacuated() { return response.Forbidden(fmt.Errorf("Cluster member is evacuated")) } if req.Source.Source == "" { return response.BadRequest(fmt.Errorf("Must specify a source instance")) } sourceProject := req.Source.Project if sourceProject == "" { sourceProject = projectName } targetProject := projectName source, err := instance.LoadByProjectAndName(d.State(), sourceProject, req.Source.Source) if err != nil { return response.SmartError(err) } // Check if we need to redirect to migration clustered, err := cluster.Enabled(d.db) if err != nil { return response.SmartError(err) } // When clustered, use the node name, otherwise use the hostname. 
if clustered { var serverName string err = d.cluster.Transaction(func(tx *db.ClusterTx) error { serverName, err = tx.GetLocalNodeName() return err }) if err != nil { return response.SmartError(err) } if serverName != source.Location() { // Check if we are copying from a ceph-based container. _, rootDevice, _ := shared.GetRootDiskDevice(source.ExpandedDevices().CloneNative()) sourcePoolName := rootDevice["pool"] destPoolName, _, _, _, resp := instanceFindStoragePool(d, targetProject, req) if resp != nil { return resp } if sourcePoolName != destPoolName { // Redirect to migration return clusterCopyContainerInternal(d, r, source, projectName, req) } _, pool, _, err := d.cluster.GetStoragePoolInAnyState(sourcePoolName) if err != nil { err = fmt.Errorf("Failed to fetch instance's pool info: %w", err) return response.SmartError(err) } if pool.Driver != "ceph" { // Redirect to migration return clusterCopyContainerInternal(d, r, source, projectName, req) } } } // Config override sourceConfig := source.LocalConfig() if req.Config == nil { req.Config = make(map[string]string) } for key, value := range sourceConfig { if !shared.InstanceIncludeWhenCopying(key, false) { logger.Debug("Skipping key from copy source", log.Ctx{"key": key, "sourceProject": source.Project(), "sourceInstance": source.Name(), "project": targetProject, "instance": req.Name}) continue } _, exists := req.Config[key] if exists { continue } req.Config[key] = value } // Devices override sourceDevices := source.LocalDevices() if req.Devices == nil { req.Devices = make(map[string]map[string]string) } for key, value := range sourceDevices { _, exists := req.Devices[key] if exists { continue } req.Devices[key] = value } // Profiles override if req.Profiles == nil { req.Profiles = source.Profiles() } if req.Stateful { sourceName, _, _ := shared.InstanceGetParentAndSnapshotName(source.Name()) if sourceName != req.Name { return response.BadRequest(fmt.Errorf("Copying stateful instances requires that source %q and 
target %q name be identical", sourceName, req.Name)) } } // Early check for refresh if req.Source.Refresh { // Check if the container exists c, err := instance.LoadByProjectAndName(d.State(), targetProject, req.Name) if err != nil { req.Source.Refresh = false } else if c.IsRunning() { return response.BadRequest(fmt.Errorf("Cannot refresh a running instance")) } } dbType, err := instancetype.New(string(req.Type)) if err != nil { return response.BadRequest(err) } // If type isn't specified, match the source type. if req.Type == "" { dbType = source.Type() } if dbType != instancetype.Any && dbType != source.Type() { return response.BadRequest(fmt.Errorf("Instance type should not be specified or should match source type")) } args := db.InstanceArgs{ Project: targetProject, Architecture: source.Architecture(), BaseImage: req.Source.BaseImage, Config: req.Config, Type: source.Type(), Description: req.Description, Devices: deviceConfig.NewDevices(req.Devices), Ephemeral: req.Ephemeral, Name: req.Name, Profiles: req.Profiles, Stateful: req.Stateful, } run := func(op *operations.Operation) error { _, err := instanceCreateAsCopy(d.State(), instanceCreateAsCopyOpts{ sourceInstance: source, targetInstance: args, instanceOnly: req.Source.InstanceOnly || req.Source.ContainerOnly, refresh: req.Source.Refresh, applyTemplateTrigger: true, allowInconsistent: req.Source.AllowInconsistent, }, op) if err != nil { return err } return nil } resources := map[string][]string{} resources["instances"] = []string{req.Name, req.Source.Source} if dbType == instancetype.Container { resources["containers"] = resources["instances"] } op, err := operations.OperationCreate(d.State(), targetProject, operations.OperationClassTask, db.OperationInstanceCreate, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) } return operations.OperationResponse(op) } func createFromBackup(d *Daemon, r *http.Request, projectName string, data io.Reader, pool string, instanceName 
string) response.Response { revert := revert.New() defer revert.Fail() // Create temporary file to store uploaded backup data. backupFile, err := ioutil.TempFile(shared.VarPath("backups"), fmt.Sprintf("%s_", backup.WorkingDirPrefix)) if err != nil { return response.InternalError(err) } defer os.Remove(backupFile.Name()) revert.Add(func() { backupFile.Close() }) // Stream uploaded backup data into temporary file. _, err = io.Copy(backupFile, data) if err != nil { return response.InternalError(err) } // Detect squashfs compression and convert to tarball. backupFile.Seek(0, 0) _, algo, decomArgs, err := shared.DetectCompressionFile(backupFile) if err != nil { return response.InternalError(err) } if algo == ".squashfs" { // Pass the temporary file as program argument to the decompression command. decomArgs := append(decomArgs, backupFile.Name()) // Create temporary file to store the decompressed tarball in. tarFile, err := ioutil.TempFile(shared.VarPath("backups"), fmt.Sprintf("%s_decompress_", backup.WorkingDirPrefix)) if err != nil { return response.InternalError(err) } defer os.Remove(tarFile.Name()) // Decompress to tarFile temporary file. err = archive.ExtractWithFds(decomArgs[0], decomArgs[1:], nil, nil, d.State().OS, tarFile) if err != nil { return response.InternalError(err) } // We don't need the original squashfs file anymore. backupFile.Close() os.Remove(backupFile.Name()) // Replace the backup file handle with the handle to the tar file. backupFile = tarFile } // Parse the backup information. backupFile.Seek(0, 0) logger.Debug("Reading backup file info") bInfo, err := backup.GetInfo(backupFile, d.State().OS, backupFile.Name()) if err != nil { return response.BadRequest(err) } bInfo.Project = projectName // Override pool. if pool != "" { bInfo.Pool = pool } // Override instance name. 
if instanceName != "" { bInfo.Name = instanceName } logger.Debug("Backup file info loaded", log.Ctx{ "type": bInfo.Type, "name": bInfo.Name, "project": bInfo.Project, "backend": bInfo.Backend, "pool": bInfo.Pool, "optimized": *bInfo.OptimizedStorage, "snapshots": bInfo.Snapshots, }) // Check storage pool exists. _, _, _, err = d.State().Cluster.GetStoragePoolInAnyState(bInfo.Pool) if errors.Is(err, db.ErrNoSuchObject) { // The storage pool doesn't exist. If backup is in binary format (so we cannot alter // the backup.yaml) or the pool has been specified directly from the user restoring // the backup then we cannot proceed so return an error. if *bInfo.OptimizedStorage || pool != "" { return response.InternalError(fmt.Errorf("Storage pool not found: %w", err)) } // Otherwise try and restore to the project's default profile pool. _, profile, err := d.State().Cluster.GetProfile(bInfo.Project, "default") if err != nil { return response.InternalError(fmt.Errorf("Failed to get default profile: %w", err)) } _, v, err := shared.GetRootDiskDevice(profile.Devices) if err != nil { return response.InternalError(fmt.Errorf("Failed to get root disk device: %w", err)) } // Use the default-profile's root pool. bInfo.Pool = v["pool"] } else if err != nil { return response.InternalError(err) } // Copy reverter so far so we can use it inside run after this function has finished. runRevert := revert.Clone() run := func(op *operations.Operation) error { defer backupFile.Close() defer runRevert.Fail() pool, err := storagePools.GetPoolByName(d.State(), bInfo.Pool) if err != nil { return err } // Check if the backup is optimized that the source pool driver matches the target pool driver. if *bInfo.OptimizedStorage && pool.Driver().Info().Name != bInfo.Backend { return fmt.Errorf("Optimized backup storage driver %q differs from the target storage pool driver %q", bInfo.Backend, pool.Driver().Info().Name) } // Dump tarball to storage. 
Because the backup file is unpacked and restored onto the storage // device before the instance is created in the database it is necessary to return two functions; // a post hook that can be run once the instance has been created in the database to run any // storage layer finalisations, and a revert hook that can be run if the instance database load // process fails that will remove anything created thus far. postHook, revertHook, err := pool.CreateInstanceFromBackup(*bInfo, backupFile, nil) if err != nil { return fmt.Errorf("Create instance from backup: %w", err) } runRevert.Add(revertHook) err = internalImportFromBackup(d, bInfo.Project, bInfo.Name, true, instanceName != "") if err != nil { return fmt.Errorf("Failed importing backup: %w", err) } inst, err := instance.LoadByProjectAndName(d.State(), bInfo.Project, bInfo.Name) if err != nil { return fmt.Errorf("Load instance: %w", err) } // Clean up created instance if the post hook fails below. runRevert.Add(func() { inst.Delete(true) }) // Run the storage post hook to perform any final actions now that the instance has been created // in the database (this normally includes unmounting volumes that were mounted). if postHook != nil { err = postHook(inst) if err != nil { return fmt.Errorf("Post hook failed: %w", err) } } runRevert.Success() return nil } resources := map[string][]string{} resources["instances"] = []string{bInfo.Name} resources["containers"] = resources["instances"] op, err := operations.OperationCreate(d.State(), bInfo.Project, operations.OperationClassTask, db.OperationBackupRestore, resources, nil, run, nil, nil, r) if err != nil { return response.InternalError(err) } revert.Success() return operations.OperationResponse(op) } // swagger:operation POST /1.0/instances instances instances_post // // Create a new instance // // Creates a new instance on LXD. 
// Depending on the source, this can create an instance from an existing
// local image, remote image, existing local instance or snapshot, remote
// migration stream or backup file.
//
// ---
// consumes:
//   - application/json
// produces:
//   - application/json
// parameters:
//   - in: query
//     name: project
//     description: Project name
//     type: string
//     example: default
//   - in: query
//     name: target
//     description: Cluster member
//     type: string
//     example: default
//   - in: body
//     name: instance
//     description: Instance request
//     required: false
//     schema:
//       $ref: "#/definitions/InstancesPost"
//   - in: body
//     name: raw_backup
//     description: Raw backup file
//     required: false
// responses:
//   "202":
//     $ref: "#/responses/Operation"
//   "400":
//     $ref: "#/responses/BadRequest"
//   "403":
//     $ref: "#/responses/Forbidden"
//   "500":
//     $ref: "#/responses/InternalServerError"
func instancesPost(d *Daemon, r *http.Request) response.Response {
	targetProjectName := projectParam(r)
	logger.Debugf("Responding to instance create")

	// If we're getting binary content, process separately (raw backup restore path).
	if r.Header.Get("Content-Type") == "application/octet-stream" {
		return createFromBackup(d, r, targetProjectName, r.Body, r.Header.Get("X-LXD-pool"), r.Header.Get("X-LXD-name"))
	}

	// Parse the request
	req := api.InstancesPost{}
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		return response.BadRequest(err)
	}

	// Set type from URL if missing
	urlType, err := urlInstanceTypeDetect(r)
	if err != nil {
		return response.InternalError(err)
	}

	if req.Type == "" && urlType != instancetype.Any {
		req.Type = api.InstanceType(urlType.String())
	}

	var targetProject *db.Project
	targetNode := queryParam(r, "target")

	// Load the target project and check the caller is allowed to use ?target=.
	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
		targetProject, err = tx.GetProject(targetProjectName)
		if err != nil {
			return fmt.Errorf("Failed loading project: %w", err)
		}

		return project.CheckClusterTargetRestriction(tx, r, targetProject, targetNode)
	})
	if err != nil {
		return response.SmartError(err)
	}

	// Check if clustered.
	clustered, err := cluster.Enabled(d.db)
	if err != nil {
		return response.InternalError(fmt.Errorf("Failed to check for cluster state: %w", err))
	}

	if clustered && (targetNode == "" || strings.HasPrefix(targetNode, "@")) {
		// If no target node was specified, pick the node with the
		// least number of containers. If there's just one node, or if
		// the selected node is the local one, this is effectively a
		// no-op, since GetNodeWithLeastInstances() will return an empty
		// string.
		// If the target is a cluster group, find a suitable node.
		group := ""
		if strings.HasPrefix(targetNode, "@") {
			group = strings.TrimPrefix(targetNode, "@")
		}

		// Load restricted groups from project.
		var allowedGroups []string
		if !isClusterNotification(r) && shared.IsTrue(targetProject.Config["restricted"]) {
			allowedGroups = util.SplitNTrimSpace(targetProject.Config["restricted.cluster.groups"], ",", -1, true)
		} else {
			allowedGroups = nil
		}

		if group != "" {
			var groupExists bool

			// Check if the group exists.
			err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
				groupExists, err = tx.ClusterGroupExists(group)
				if err != nil {
					return err
				}

				return nil
			})
			if err != nil {
				return response.SmartError(err)
			}

			if !groupExists {
				return response.BadRequest(fmt.Errorf("Cluster group %q doesn't exist", group))
			}

			// Validate restrictions: a restricted project may only target groups it was granted.
			if !isClusterNotification(r) && shared.IsTrue(targetProject.Config["restricted"]) {
				found := false
				for _, entry := range allowedGroups {
					if group == entry {
						found = true
						break
					}
				}

				if !found {
					return response.Forbidden(fmt.Errorf("Project isn't allowed to use this cluster group"))
				}
			}
		}

		// Restrict scheduling to members that can actually run the requested image/type.
		architectures, err := instance.SuitableArchitectures(d.State(), targetProjectName, req)
		if err != nil {
			return response.BadRequest(err)
		}

		err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
			// Project-level default architecture wins over the cluster-wide setting.
			defaultArch := ""
			if targetProject.Config["images.default_architecture"] != "" {
				defaultArch = targetProject.Config["images.default_architecture"]
			} else {
				config, err := cluster.ConfigLoad(tx)
				if err != nil {
					return err
				}

				defaultArch = config.ImagesDefaultArchitecture()
			}

			defaultArchID := -1
			if defaultArch != "" {
				defaultArchID, err = osarch.ArchitectureId(defaultArch)
				if err != nil {
					return err
				}
			}

			var err error
			targetNode, err = tx.GetNodeWithLeastInstances(architectures, defaultArchID, group, allowedGroups)
			return err
		})
		if err != nil {
			return response.SmartError(err)
		}

		if targetNode == "" {
			return response.BadRequest(fmt.Errorf("No suitable cluster member could be found"))
		}
	}

	if targetNode != "" {
		address, err := cluster.ResolveTarget(d.cluster, targetNode)
		if err != nil {
			return response.SmartError(err)
		}

		// Non-empty address means the chosen member is remote: forward the whole request.
		if address != "" {
			client, err := cluster.Connect(address, d.endpoints.NetworkCert(), d.serverCert(), r, false)
			if err != nil {
				return response.SmartError(err)
			}

			client = client.UseProject(targetProjectName)
			client = client.UseTarget(targetNode)

			logger.Debugf("Forward instance post request to %s", address)
			op, err := client.CreateInstance(req)
			if err != nil {
				return response.SmartError(err)
			}

			opAPI := op.Get()
			return operations.ForwardedOperationResponse(targetProjectName, &opAPI)
		}
	}

	if req.Devices == nil {
		req.Devices = map[string]map[string]string{}
	}

	if req.Config == nil {
		req.Config = map[string]string{}
	}

	// Expand a shorthand instance type (e.g. "c2.micro") into config keys,
	// without overriding anything the caller set explicitly.
	if req.InstanceType != "" {
		conf, err := instanceParseType(req.InstanceType)
		if err != nil {
			return response.BadRequest(err)
		}

		for k, v := range conf {
			if req.Config[k] == "" {
				req.Config[k] = v
			}
		}
	}

	if strings.Contains(req.Name, shared.SnapshotDelimiter) {
		return response.BadRequest(fmt.Errorf("Invalid instance name: %q is reserved for snapshots", shared.SnapshotDelimiter))
	}

	// Check that the project's limits are not violated. Also, possibly
	// automatically assign a name.
	//
	// Note this check is performed after automatically generated config
	// values (such as the ones from an InstanceType) have been set.
	err = d.cluster.Transaction(func(tx *db.ClusterTx) error {
		if req.Type == "" {
			switch req.Source.Type {
			case "copy":
				if req.Source.Source == "" {
					return fmt.Errorf("Must specify a source instance")
				}

				if req.Source.Project == "" {
					req.Source.Project = targetProjectName
				}

				// Inherit the instance type (container/VM) from the copy source.
				source, err := instance.LoadInstanceDatabaseObject(tx, req.Source.Project, req.Source.Source)
				if err != nil {
					return fmt.Errorf("Load source instance from database: %w", err)
				}

				req.Type = api.InstanceType(source.Type.String())
			case "migration":
				req.Type = api.InstanceTypeContainer // Default to container if not specified.
			}
		}

		err := project.AllowInstanceCreation(tx, targetProjectName, req)
		if err != nil {
			return err
		}

		// No name supplied: generate a random pet name, retrying on collisions.
		if req.Name == "" {
			names, err := tx.GetInstanceNames(targetProjectName)
			if err != nil {
				return err
			}

			i := 0
			for {
				i++
				req.Name = strings.ToLower(petname.Generate(2, "-"))
				if !shared.StringInSlice(req.Name, names) {
					break
				}

				if i > 100 {
					return fmt.Errorf("Couldn't generate a new unique name after 100 tries")
				}
			}

			logger.Debugf("No name provided, creating %s", req.Name)
		}

		return nil
	})
	if err != nil {
		return response.SmartError(err)
	}

	// Dispatch to the source-specific creation path.
	switch req.Source.Type {
	case "image":
		return createFromImage(d, r, targetProjectName, &req)
	case "none":
		return createFromNone(d, r, targetProjectName, &req)
	case "migration":
		return createFromMigration(d, r, targetProjectName, &req)
	case "copy":
		return createFromCopy(d, r, targetProjectName, &req)
	default:
		return response.BadRequest(fmt.Errorf("Unknown source type %s", req.Source.Type))
	}
}

// instanceFindStoragePool resolves which storage pool a new instance should use,
// checking (in order) the request's local root disk device, the profile chain,
// and finally a lone configured pool. Returns the pool name, the profile it came
// from (if any), the local root disk device key/config, and a non-nil response on error.
func instanceFindStoragePool(d *Daemon, projectName string, req *api.InstancesPost) (string, string, string, map[string]string, response.Response) {
	// Grab the container's root device if one is specified
	storagePool := ""
	storagePoolProfile := ""

	localRootDiskDeviceKey, localRootDiskDevice, _ := shared.GetRootDiskDevice(req.Devices)
	if localRootDiskDeviceKey != "" {
		storagePool = localRootDiskDevice["pool"]
	}

	// Handle copying/moving between two storage-api LXD instances.
	if storagePool != "" {
		_, err := d.cluster.GetStoragePoolID(storagePool)
		if err == db.ErrNoSuchObject {
			storagePool = ""
			// Unset the local root disk device storage pool if not
			// found.
			localRootDiskDevice["pool"] = ""
		}
	}

	// If we don't have a valid pool yet, look through profiles
	if storagePool == "" {
		for _, pName := range req.Profiles {
			_, p, err := d.cluster.GetProfile(projectName, pName)
			if err != nil {
				return "", "", "", nil, response.SmartError(err)
			}

			k, v, _ := shared.GetRootDiskDevice(p.Devices)
			if k != "" && v["pool"] != "" {
				// Keep going as we want the last one in the profile chain
				storagePool = v["pool"]
				storagePoolProfile = pName
			}
		}
	}

	// If there is just a single pool in the database, use that
	if storagePool == "" {
		logger.Debugf("No valid storage pool in the container's local root disk device and profiles found")
		pools, err := d.cluster.GetStoragePoolNames()
		if err != nil {
			if err == db.ErrNoSuchObject {
				return "", "", "", nil, response.BadRequest(fmt.Errorf("This LXD instance does not have any storage pools configured"))
			}

			return "", "", "", nil, response.SmartError(err)
		}

		if len(pools) == 1 {
			storagePool = pools[0]
		}
	}

	return storagePool, storagePoolProfile, localRootDiskDeviceKey, localRootDiskDevice, nil
}

// clusterCopyContainerInternal handles copying an instance whose source lives on
// another cluster member: it starts a migration operation on the source member,
// then rewrites the request as a "migration" source and re-dispatches it locally.
func clusterCopyContainerInternal(d *Daemon, r *http.Request, source instance.Instance, projectName string, req *api.InstancesPost) response.Response {
	name := req.Source.Source

	// Locate the source of the container
	var nodeAddress string
	err := d.cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error

		// Load source node.
		nodeAddress, err = tx.GetNodeAddressOfInstance(projectName, name, db.InstanceTypeFilter(source.Type()))
		if err != nil {
			return fmt.Errorf("Failed to get address of instance's member: %w", err)
		}

		return nil
	})
	if err != nil {
		return response.SmartError(err)
	}

	if nodeAddress == "" {
		return response.BadRequest(fmt.Errorf("The source instance is currently offline"))
	}

	// Connect to the container source
	client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), d.serverCert(), r, false)
	if err != nil {
		return response.SmartError(err)
	}

	client = client.UseProject(source.Project())

	// Setup websockets: ask the source member to start the migration and
	// collect the websocket secrets from the resulting operation.
	var opAPI api.Operation
	if shared.IsSnapshot(req.Source.Source) {
		cName, sName, _ := shared.InstanceGetParentAndSnapshotName(req.Source.Source)

		pullReq := api.InstanceSnapshotPost{
			Migration: true,
			Live:      req.Source.Live,
			Name:      req.Name,
		}

		op, err := client.MigrateInstanceSnapshot(cName, sName, pullReq)
		if err != nil {
			return response.SmartError(err)
		}

		opAPI = op.Get()
	} else {
		instanceOnly := req.Source.InstanceOnly || req.Source.ContainerOnly

		pullReq := api.InstancePost{
			Migration:     true,
			Live:          req.Source.Live,
			ContainerOnly: instanceOnly,
			InstanceOnly:  instanceOnly,
			Name:          req.Name,
		}

		op, err := client.MigrateInstance(req.Source.Source, pullReq)
		if err != nil {
			return response.SmartError(err)
		}

		opAPI = op.Get()
	}

	websockets := map[string]string{}
	for k, v := range opAPI.Metadata {
		websockets[k] = v.(string)
	}

	// Reset the source for a migration
	req.Source.Type = "migration"
	req.Source.Certificate = string(d.endpoints.NetworkCert().PublicKey())
	req.Source.Mode = "pull"
	req.Source.Operation = fmt.Sprintf("https://%s/1.0/operations/%s", nodeAddress, opAPI.ID)
	req.Source.Websockets = websockets
	req.Source.Source = ""
	req.Source.Project = ""

	// Run the migration
	return createFromMigration(d, nil, projectName, req)
}
lxc/lxd
lxd/instances_post.go
GO
apache-2.0
34,848
# If you are in a hurry For language-specific installation instructions for gRPC runtime, please refer to these documents * [C++](examples/cpp): Currently to install gRPC for C++, you need to build from source as described below. * [C#](src/csharp): NuGet package `Grpc` * [Go](https://github.com/grpc/grpc-go): `go get google.golang.org/grpc` * [Java](https://github.com/grpc/grpc-java) * [Node](src/node): `npm install grpc` * [Objective-C](src/objective-c) * [PHP](src/php): `pecl install grpc` * [Python](src/python/grpcio): `pip install grpcio` * [Ruby](src/ruby): `gem install grpc` # Pre-requisites ## Linux ```sh $ [sudo] apt-get install build-essential autoconf libtool ``` If you plan to build from source and run tests, install the following as well: ```sh $ [sudo] apt-get install libgflags-dev libgtest-dev $ [sudo] apt-get install clang libc++-dev ``` ## macOS On a Mac, you will first need to install Xcode or [Command Line Tools for Xcode](https://developer.apple.com/download/more/) and then run the following command from a terminal: ```sh $ [sudo] xcode-select --install ``` To build gRPC from source, you may also need to install the following packages, which you can get from [Homebrew](https://brew.sh): ```sh $ brew install autoconf automake libtool shtool ``` If you plan to build from source and run tests, install the following as well: ```sh $ brew install gflags ``` *Tip*: when building, you *may* want to explicitly set the `LIBTOOL` and `LIBTOOLIZE` environment variables when running `make` to ensure the version installed by `brew` is being used: ```sh $ LIBTOOL=glibtool LIBTOOLIZE=glibtoolize make ``` ## Protoc By default gRPC uses [protocol buffers](https://github.com/google/protobuf), you will need the `protoc` compiler to generate stub server and client code. 
If you compile gRPC from source, as described below, the Makefile will automatically try to compile the `protoc` in third_party if you cloned the repository recursively and it detects that you don't already have it installed.

# Build from Source

For developers who are interested in contributing, here is how to compile the gRPC C Core library.

```sh
$ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc
$ cd grpc
$ git submodule update --init
$ make
$ [sudo] make install
```

## Windows

There are several ways to build under Windows, of varying complexity depending on experience with the tools involved.

### Building using CMake (RECOMMENDED)

Builds gRPC C and C++ with boringssl.
- Install [CMake](https://cmake.org/download/).
- Install [Active State Perl](http://www.activestate.com/activeperl/) (`choco install activeperl`)
- Install [Ninja](https://ninja-build.org/) (`choco install ninja`)
- Install [Go](https://golang.org/dl/) (`choco install golang`)
- Install [yasm](http://yasm.tortall.net/) and add it to `PATH` (`choco install yasm`)
- Run these commands in the repo root directory

Using Ninja (faster build, supports boringssl's assembly optimizations)
```
> md .build
> cd .build
> call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" x64
> cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release
> cmake --build .
```

Using Visual Studio 2015 (can only build with OPENSSL_NO_ASM)
```
> md .build
> cd .build
> cmake .. -G "Visual Studio 14 2015" -DCMAKE_BUILD_TYPE=Release
> cmake --build .
```

### msys2 (with mingw)

The Makefile (and source code) should support msys2's mingw32 and mingw64 compilers. Building with msys2's native compiler is also possible, but difficult.

This approach requires having [msys2](https://msys2.github.io/) installed.
``` # Install prerequisites MSYS2$ pacman -S autoconf automake gcc libtool mingw-w64-x86_64-toolchain perl pkg-config zlib MSYS2$ pacman -S mingw-w64-x86_64-gflags ``` ``` # From mingw shell MINGW64$ export CPPFLAGS="-D_WIN32_WINNT=0x0600" MINGW64$ make ``` NOTE: While most of the make targets are buildable under Mingw, some haven't been ported to Windows yet and may fail to build (mostly trying to include POSIX headers not available on Mingw). ### Pre-generated Visual Studio solution (DEPRECATED) *WARNING: This used to be the recommended way to build on Windows, but because of significant limitations (hard to build dependencies including boringssl, .proto codegen is hard to support, ..), it is no longer recommended. Use cmake to build on Windows instead.* The pre-generated VS projects & solution are checked into the repository under the [vsprojects](/vsprojects) directory.
yang-g/grpc
INSTALL.md
Markdown
apache-2.0
4,522
/* * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. * * * * * * * * * * * * * * * * * * * * */ package com.sun.corba.se.impl.resolver; import java.util.List ; import java.util.Map ; import java.util.Comparator ; import java.util.Iterator ; import java.util.HashMap ; import java.util.ArrayList ; import java.util.Collections ; import org.omg.CosNaming.NamingContextExt ; import org.omg.CosNaming.NamingContextExtHelper ; import sun.corba.EncapsInputStreamFactory; import com.sun.corba.se.spi.ior.IOR; import com.sun.corba.se.spi.ior.IORTemplate; import com.sun.corba.se.spi.ior.ObjectKey; import com.sun.corba.se.spi.ior.IORFactories; import com.sun.corba.se.spi.ior.ObjectKeyFactory ; import com.sun.corba.se.spi.ior.iiop.IIOPAddress; import com.sun.corba.se.spi.ior.iiop.IIOPProfile ; import com.sun.corba.se.spi.ior.iiop.IIOPProfileTemplate ; import com.sun.corba.se.spi.ior.iiop.IIOPFactories ; import com.sun.corba.se.spi.ior.iiop.GIOPVersion; import com.sun.corba.se.spi.ior.iiop.AlternateIIOPAddressComponent; import com.sun.corba.se.spi.logging.CORBALogDomains ; import com.sun.corba.se.spi.orb.Operation; import com.sun.corba.se.spi.orb.ORB; import com.sun.corba.se.spi.resolver.Resolver; import com.sun.corba.se.impl.encoding.EncapsInputStream; import com.sun.corba.se.impl.logging.ORBUtilSystemException; import com.sun.corba.se.impl.logging.OMGSystemException; import com.sun.corba.se.impl.naming.namingutil.INSURLHandler; import com.sun.corba.se.impl.naming.namingutil.IIOPEndpointInfo; import com.sun.corba.se.impl.naming.namingutil.INSURL; import com.sun.corba.se.impl.naming.namingutil.CorbalocURL; import com.sun.corba.se.impl.naming.namingutil.CorbanameURL; import com.sun.corba.se.impl.orbutil.ORBConstants; import com.sun.corba.se.impl.orbutil.ORBUtility; /** * This class provides an Operation that converts from CORBA INS URL strings into * CORBA object references. 
 It will eventually become extensible, but for now it
 * simply encapsulates the existing implementation.  Once the full extensibility
 * is in place, we want this operation to convert string to INSURL, which has mainly
 * a public resolver method that returns an object reference.
 *
 * @author Hemanth
 * @author Ken
 */
public class INSURLOperationImpl implements Operation
{
    // ORB used for stream creation and bootstrap resolution.
    ORB orb;
    ORBUtilSystemException wrapper ;
    OMGSystemException omgWrapper ;
    Resolver bootstrapResolver ;

    // Root Naming Context for default resolution of names.
    private NamingContextExt rootNamingContextExt;
    // Guards lazy initialisation and invalidation of rootNamingContextExt.
    private Object rootContextCacheLock = new Object() ;

    // The URLHandler to parse INS URL's
    private INSURLHandler insURLHandler = INSURLHandler.getINSURLHandler() ;

    public INSURLOperationImpl( ORB orb, Resolver bootstrapResolver )
    {
        this.orb = orb ;
        wrapper = ORBUtilSystemException.get( orb,
            CORBALogDomains.ORB_RESOLVER ) ;
        omgWrapper = OMGSystemException.get( orb,
            CORBALogDomains.ORB_RESOLVER ) ;
        this.bootstrapResolver = bootstrapResolver ;
    }

    // Two hex characters (nibbles) encode one byte of the stringified IOR.
    private static final int NIBBLES_PER_BYTE = 2 ;
    private static final int UN_SHIFT = 4 ; // "UPPER NIBBLE" shift factor for <<

    /** This static method takes a Stringified IOR and converts it into IOR object.
      * It is the caller's responsibility to only pass strings that start with "IOR:".
      */
    private org.omg.CORBA.Object getIORFromString( String str )
    {
        // Length must be even for str to be valid
        if ( (str.length() & 1) == 1 )
            throw wrapper.badStringifiedIorLen() ;

        // Decode the hex payload (everything after the "IOR:" prefix) into bytes.
        byte[] buf = new byte[(str.length() - ORBConstants.STRINGIFY_PREFIX.length()) / NIBBLES_PER_BYTE];
        for (int i=ORBConstants.STRINGIFY_PREFIX.length(), j=0; i < str.length(); i +=NIBBLES_PER_BYTE, j++) {
             buf[j] = (byte)((ORBUtility.hexOf(str.charAt(i)) << UN_SHIFT) & 0xF0);
             buf[j] |= (byte)(ORBUtility.hexOf(str.charAt(i+1)) & 0x0F);
        }
        EncapsInputStream s = EncapsInputStreamFactory.newEncapsInputStream(orb, buf, buf.length,
            orb.getORBData().getGIOPVersion());
        s.consumeEndian();
        return s.read_Object() ;
    }

    // Converts a String argument (stringified IOR or INS URL) into an object
    // reference; throws a wrapped system exception for non-String arguments.
    public Object operate( Object arg )
    {
        if (arg instanceof String) {
            String str = (String)arg ;

            if (str.startsWith( ORBConstants.STRINGIFY_PREFIX ))
                // XXX handle this as just another URL scheme
                return getIORFromString( str ) ;
            else {
                INSURL insURL = insURLHandler.parseURL( str ) ;
                if (insURL == null)
                    throw omgWrapper.soBadSchemeName() ;
                return resolveINSURL( insURL ) ;
            }
        }

        throw wrapper.stringExpected() ;
    }

    // Dispatches to the corbaname: or corbaloc: resolution path.
    private org.omg.CORBA.Object resolveINSURL( INSURL theURLObject ) {
        // XXX resolve should be a method on INSURL
        if( theURLObject.isCorbanameURL() ) {
            return resolveCorbaname( (CorbanameURL)theURLObject );
        } else {
            return resolveCorbaloc( (CorbalocURL)theURLObject );
        }
    }

    /**
     *  resolves a corbaloc: url that is encapsulated in a CorbalocURL object.
     *
     *  @return the CORBA.Object if resolution is successful
     */
    private org.omg.CORBA.Object resolveCorbaloc(
        CorbalocURL theCorbaLocObject )
    {
        org.omg.CORBA.Object result = null;
        // If RIR flag is true use the Bootstrap protocol
        if( theCorbaLocObject.getRIRFlag( ) )  {
            result = bootstrapResolver.resolve(theCorbaLocObject.getKeyString());
        } else {
            result = getIORUsingCorbaloc( theCorbaLocObject );
        }

        return result;
    }

    /**
     *  resolves a corbaname: url that is encapsulated in a CorbanameURL object.
     *
     *  @return the CORBA.Object if resolution is successful
     */
    private org.omg.CORBA.Object resolveCorbaname( CorbanameURL theCorbaName ) {
        org.omg.CORBA.Object result = null;

        try {
            NamingContextExt theNamingContext = null;

            if( theCorbaName.getRIRFlag( ) ) {
                // Case 1 of corbaname: rir#
                theNamingContext = getDefaultRootNamingContext( );
            } else {
                // Case 2 of corbaname: ::hostname#
                org.omg.CORBA.Object corbalocResult =
                    getIORUsingCorbaloc( theCorbaName );
                if( corbalocResult == null ) {
                    return null;
                }

                theNamingContext =
                    NamingContextExtHelper.narrow( corbalocResult );
            }

            String StringifiedName = theCorbaName.getStringifiedName( );

            if( StringifiedName == null ) {
                // This means return the Root Naming context
                return theNamingContext;
            } else {
                return theNamingContext.resolve_str( StringifiedName );
            }
        } catch( Exception e ) {
            // NOTE(review): exceptions are deliberately swallowed here and the
            // cached root context dropped; callers receive null on failure.
            clearRootNamingContextCache( );
            return null;
        }
     }

    /**
     *  This is an internal method to get the IOR from the CorbalocURL object.
     *
     *  @return the CORBA.Object if resolution is successful
     */
    private org.omg.CORBA.Object getIORUsingCorbaloc( INSURL corbalocObject )
    {
        // Profiles for GIOP > 1.0, keyed by GIOP version; extra endpoints for a
        // version become AlternateIIOPAddress components on the first profile.
        Map     profileMap = new HashMap();
        List    profileList1_0 = new ArrayList();

        // corbalocObject cannot be null, because it's validated during
        // parsing. So no null check is required.
        List theEndpointInfo = corbalocObject.getEndpointInfo();
        String theKeyString = corbalocObject.getKeyString();
        // If there is no KeyString then it's invalid
        if( theKeyString == null ) {
            return null;
        }

        ObjectKey key = orb.getObjectKeyFactory().create(
            theKeyString.getBytes() );
        IORTemplate iortemp = IORFactories.makeIORTemplate( key.getTemplate() );

        Iterator iterator = theEndpointInfo.iterator( );
        while( iterator.hasNext( ) ) {
            IIOPEndpointInfo element = (IIOPEndpointInfo) iterator.next( );
            IIOPAddress addr = IIOPFactories.makeIIOPAddress( orb, element.getHost(),
                element.getPort() );
            GIOPVersion giopVersion = GIOPVersion.getInstance( (byte)element.getMajor(),
                                             (byte)element.getMinor());

            IIOPProfileTemplate profileTemplate = null;
            if (giopVersion.equals(GIOPVersion.V1_0)) {
                // GIOP 1.0 does not support tagged components, so each endpoint
                // needs its own profile.
                profileTemplate = IIOPFactories.makeIIOPProfileTemplate(
                    orb, giopVersion, addr);
                profileList1_0.add(profileTemplate);
            } else {
                if (profileMap.get(giopVersion) == null) {
                    profileTemplate = IIOPFactories.makeIIOPProfileTemplate(
                        orb, giopVersion, addr);
                    profileMap.put(giopVersion, profileTemplate);
                } else {
                    profileTemplate = (IIOPProfileTemplate)profileMap.get(giopVersion);
                    AlternateIIOPAddressComponent iiopAddressComponent =
                                IIOPFactories.makeAlternateIIOPAddressComponent(addr);
                    profileTemplate.add(iiopAddressComponent);
                }
            }
        }

        GIOPVersion giopVersion = orb.getORBData().getGIOPVersion();
        IIOPProfileTemplate pTemplate = (IIOPProfileTemplate)profileMap.get(giopVersion);
        if (pTemplate != null) {
            iortemp.add(pTemplate); // Add profile for GIOP version used by this ORB
            profileMap.remove(giopVersion); // Now remove this value from the map
        }

        // Create a comparator that can sort in decending order (1.2, 1.1, ...)
        Comparator comp = new Comparator() {
            public int compare(Object o1, Object o2) {
                GIOPVersion gv1 = (GIOPVersion)o1;
                GIOPVersion gv2 = (GIOPVersion)o2;
                return (gv1.lessThan(gv2) ? 1 : (gv1.equals(gv2) ? 0 : -1));
            };
        };

        // Now sort using the above comparator
        List list = new ArrayList(profileMap.keySet());
        Collections.sort(list, comp);

        // Add the profiles in the sorted order
        Iterator iter = list.iterator();
        while (iter.hasNext()) {
            IIOPProfileTemplate pt = (IIOPProfileTemplate)profileMap.get(iter.next());
            iortemp.add(pt);
        }

        // Finally add the 1.0 profiles
        iortemp.addAll(profileList1_0);

        IOR ior = iortemp.makeIOR( orb, "", key.getId() ) ;
        return ORBUtility.makeObjectReference( ior ) ;
    }

    /**
     *  This is required for corbaname: resolution. Currently we
     *  are not caching RootNamingContext as the reference to rootNamingContext
     *  may not be Persistent in all the implementations.
     *  _REVISIT_ to clear the rootNamingContext in case of COMM_FAILURE.
     *
     *  @return the org.omg.COSNaming.NamingContextExt if resolution is
     *   successful
     *
     */
    private NamingContextExt getDefaultRootNamingContext( ) {
        synchronized( rootContextCacheLock ) {
            if( rootNamingContextExt == null ) {

                try {
                    rootNamingContextExt =
                        NamingContextExtHelper.narrow(
                        orb.getLocalResolver().resolve( "NameService" ) );
                } catch( Exception e ) {
                    rootNamingContextExt = null;
                }
            }
        }

        return rootNamingContextExt;
    }

    /**
     *  A utility method to clear the RootNamingContext, if there is an
     *  exception in resolving CosNaming:Name from the RootNamingContext,
     */
    private void clearRootNamingContextCache( ) {
        synchronized( rootContextCacheLock ) {
            rootNamingContextExt = null;
        }
    }
}
wangsongpeng/jdk-src
src/main/java/com/sun/corba/se/impl/resolver/INSURLOperationImpl.java
Java
apache-2.0
12,101
/* ====================================================================
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for Additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
==================================================================== */
using NPOI.OpenXmlFormats.Spreadsheet;
using System.IO;
using System.Xml;
using System.Collections.Generic;
using System;
using NPOI.OpenXmlFormats;
using NPOI.Util;
using NPOI.OpenXml4Net.OPC;
using NPOI.OpenXmlFormats.Dml;

namespace NPOI.XSSF.UserModel
{
    /**
     * High level representation of Sheet Parts that are of type 'chartsheet'.
     * <p>
     *  Chart sheet is a special kind of Sheet that Contains only chart and no data.
     * </p>
     *
     * @author Yegor Kozlov
     */
    public class XSSFChartSheet : XSSFSheet
    {
        // Serialized empty worksheet used to initialise the XSSFSheet base class,
        // since a chartsheet part carries no cell data of its own.
        private static byte[] BLANK_WORKSHEET = blankWorksheet();

        // Underlying OOXML bean holding this chartsheet's XML content.
        protected CT_Chartsheet chartsheet;

        /**
         * @since POI 3.14-Beta1
         */
        protected XSSFChartSheet(PackagePart part)
            : base(part)
        {

        }

        [Obsolete("deprecated in POI 3.14, scheduled for removal in POI 3.16")]
        protected XSSFChartSheet(PackagePart part, PackageRelationship rel)
            : base(part)
        {

        }

        internal override void Read(Stream is1)
        {
            //Initialize the superclass with a blank worksheet
            base.Read(new MemoryStream(BLANK_WORKSHEET));

            // Then parse the actual chartsheet XML into the CT_Chartsheet bean.
            try
            {
                XmlDocument doc = ConvertStreamToXml(is1);
                chartsheet = ChartsheetDocument.Parse(doc, XSSFSheet.NamespaceManager).GetChartsheet();
            }
            catch (XmlException e)
            {
                throw new POIXMLException(e);
            }
        }

        /**
         * Provide access to the CTChartsheet bean holding this sheet's data
         *
         * @return the CTChartsheet bean holding this sheet's data
         */
        public CT_Chartsheet GetCTChartsheet()
        {
            return chartsheet;
        }

        protected override NPOI.OpenXmlFormats.Spreadsheet.CT_Drawing GetCTDrawing()
        {
            return chartsheet.drawing;
        }

        protected override NPOI.OpenXmlFormats.Spreadsheet.CT_LegacyDrawing GetCTLegacyDrawing()
        {
            return chartsheet.legacyDrawing;
        }

        internal override void Write(Stream out1)
        {
            // Serialise the chartsheet bean (not the blank worksheet) to the part stream.
            new ChartsheetDocument(this.chartsheet).Save(out1);
        }

        // Builds the serialized blank worksheet used by the Read() bootstrap above.
        private static byte[] blankWorksheet()
        {
            MemoryStream out1 = new MemoryStream();
            try
            {
                new XSSFSheet().Write(out1);
            }
            catch (IOException e)
            {
                throw new RuntimeException(e);
            }
            return out1.ToArray();
        }
    }
}
tonyqus/npoi
ooxml/XSSF/UserModel/XSSFChartSheet.cs
C#
apache-2.0
3,555
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.geode.tools.pulse.internal.service;

import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import javax.servlet.http.HttpServletRequest;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;

import org.apache.geode.tools.pulse.internal.data.Cluster;
import org.apache.geode.tools.pulse.internal.data.PulseConstants;
import org.apache.geode.tools.pulse.internal.data.Repository;

/**
 * Class ClusterMembersRGraphService
 *
 * This class contains implementations of getting List of Cluster members and their details,
 * rendered as the nested host/member topology JSON consumed by the Pulse UI's R-Graph widget.
 *
 * @since GemFire version 7.5
 */
@Component
@Service("ClusterMembersRGraph")
@Scope("singleton")
public class ClusterMembersRGraphService implements PulseService {

  private final ObjectMapper mapper = new ObjectMapper();

  // String constants used for forming a json response.
  // NOTE(review): "clustor" (sic) is the key the UI reads on the wire; do not "fix" the
  // spelling without changing the consumer as well.
  private final String CLUSTER = "clustor";
  private final String MEMBER_COUNT = "memberCount";
  private final String ID = "id";
  private final String NAME = "name";
  private final String DATA = "data";
  private final String MEMORY_USAGE = "memoryUsage";
  private final String CPU_USAGE = "cpuUsage";
  private final String REGIONS = "regions";
  private final String HOST = "host";
  private final String PORT = "port";
  private final String CLIENTS = "clients";
  private final String GC_PAUSES = "gcPauses";
  private final String GATEWAY_SENDER = "gatewaySender";
  private final String GATEWAY_RECEIVER = "gatewayReceiver";
  private final String LOAD_AVG = "loadAvg";
  private final String SOCKETS = "sockets";
  private final String THREADS = "threads";
  private final String NUM_THREADS = "numThreads";

  // Suffix-style state names used both for member icon selection and host status.
  private final String MEMBER_NODE_TYPE_NORMAL = "Normal";
  private final String MEMBER_NODE_TYPE_WARNING = "Warning";
  private final String MEMBER_NODE_TYPE_ERROR = "Error";
  private final String MEMBER_NODE_TYPE_SEVERE = "Severe";
  private final String CHILDREN = "children";

  // traversing the alert array list and members which have severe, error or
  // warnings
  // alerts saving them in three different arraylists
  // NOTE(review): mutable instance state populated by updateAlertLists() and read by
  // getPhysicalServerJson(); this service is a singleton, so concurrent execute() calls
  // would share these lists — confirm Pulse serializes requests to this service.
  private List<String> severeAlertList;
  private List<String> errorAlertsList;
  private List<String> warningAlertsList;

  /**
   * Builds the response for the ClusterMembersRGraph endpoint.
   *
   * @param request incoming HTTP request (not read by this implementation)
   * @return JSON object containing the host/member topology under "clustor" and the total
   *         member count under "memberCount"
   */
  public ObjectNode execute(final HttpServletRequest request) throws Exception {

    // Reference to repository
    Repository repository = Repository.get();

    // get cluster object
    Cluster cluster = repository.getCluster();

    // json object to be sent as response
    ObjectNode responseJSON = mapper.createObjectNode();

    // cluster's Members
    responseJSON.put(this.CLUSTER,
        getPhysicalServerJson(cluster, repository.getJmxHost(), repository.getJmxPort()));
    responseJSON.put(this.MEMBER_COUNT, cluster.getMemberCount());

    // Send json response
    return responseJSON;
  }

  /**
   * function used for getting all members details in format of JSON Object array defined under a
   * cluster. This function create json based on the relation of physical host and members related
   * to it.
   *
   * NOTE(review): the host and port parameters are accepted but never used in this method —
   * confirm whether they can be dropped from callers or were meant to be emitted.
   *
   * @param cluster cluster whose physical-host-to-member mapping is rendered
   * @param host JMX host (currently unused)
   * @param port JMX port (currently unused)
   * @return JSON tree: cluster node -> host nodes -> member nodes, for the R-Graph widget
   */
  private ObjectNode getPhysicalServerJson(Cluster cluster, String host, String port) {
    Map<String, List<Cluster.Member>> physicalToMember = cluster.getPhysicalToMember();

    ObjectNode clusterTopologyJSON = mapper.createObjectNode();

    clusterTopologyJSON.put(this.ID, cluster.getClusterId());
    clusterTopologyJSON.put(this.NAME, cluster.getClusterId());
    ObjectNode data1 = mapper.createObjectNode();
    clusterTopologyJSON.put(this.DATA, data1);
    ArrayNode childHostArray = mapper.createArrayNode();
    DecimalFormat df2 = new DecimalFormat(PulseConstants.DECIMAL_FORMAT_PATTERN);

    // Refresh the severe/error/warning member-name lists from the cluster's current alerts.
    updateAlertLists(cluster);

    for (Map.Entry<String, List<Cluster.Member>> physicalToMem : physicalToMember.entrySet()) {
      String hostName = physicalToMem.getKey();

      // Per-host aggregates; hostMemoryUsage is summed across members, but see the
      // NOTE(review) below on the other metrics.
      double hostCpuUsage = 0.0;
      long hostMemoryUsage = 0;
      double hostLoadAvg = 0.0;
      int hostNumThreads = 0;
      long hostSockets = 0;
      boolean hostSevere = false;
      boolean hostError = false;
      boolean hostWarning = false;
      String hostStatus;

      ObjectNode childHostObject = mapper.createObjectNode();
      childHostObject.put(this.ID, hostName);
      childHostObject.put(this.NAME, hostName);

      ArrayNode membersArray = mapper.createArrayNode();

      List<Cluster.Member> memberList = physicalToMem.getValue();
      for (Cluster.Member member : memberList) {
        ObjectNode memberJSONObj = mapper.createObjectNode();
        memberJSONObj.put(this.ID, member.getId());
        memberJSONObj.put(this.NAME, member.getName());

        ObjectNode memberData = mapper.createObjectNode();

        memberData.put("gemfireVersion", member.getGemfireVersion());

        Long currentHeap = member.getCurrentHeapSize();
        Long usedHeapSize = cluster.getUsedHeapSize();

        // Member memory usage = member heap as a percentage of the cluster-wide used heap;
        // 0 when the cluster total is not (yet) known, to avoid division by zero.
        if (usedHeapSize > 0) {
          double heapUsage = (currentHeap.doubleValue() / usedHeapSize.doubleValue()) * 100;

          memberData.put(this.MEMORY_USAGE, Double.valueOf(df2.format(heapUsage)));
        } else
          memberData.put(this.MEMORY_USAGE, 0);

        double currentCPUUsage = member.getCpuUsage();

        memberData.put(this.CPU_USAGE, Double.valueOf(df2.format(currentCPUUsage)));
        memberData.put(this.REGIONS, member.getMemberRegions().size());
        memberData.put(this.HOST, member.getHost());
        // "-" is the UI's placeholder for an unknown port.
        if ((member.getMemberPort() == null) || (member.getMemberPort().equals(""))) {
          memberData.put(this.PORT, "-");
        } else {
          memberData.put(this.PORT, member.getMemberPort());
        }

        memberData.put(this.CLIENTS, member.getMemberClientsHMap().size());
        memberData.put(this.GC_PAUSES, member.getGarbageCollectionCount());
        memberData.put(this.NUM_THREADS, member.getNumThreads());

        // Host CPU Usage is aggregate of all members cpu usage
        // hostCpuUsage = hostCpuUsage + currentCPUUsage;
        hostCpuUsage = member.getHostCpuUsage();
        hostMemoryUsage = hostMemoryUsage + member.getCurrentHeapSize();
        // NOTE(review): loadAvg/numThreads/sockets (and hostCpuUsage above) are overwritten on
        // every iteration, so the host node reports the LAST member's values. That is correct
        // only if these are host-level metrics identical across co-located members — confirm
        // against Cluster.Member's accessors.
        hostLoadAvg = member.getLoadAverage();
        hostNumThreads = member.getNumThreads();
        hostSockets = member.getTotalFileDescriptorOpen();

        // defining the status of Member Icons for R Graph based on the alerts
        // created for that member
        String memberNodeType = "";
        // for severe alert
        if (severeAlertList.contains(member.getName())) {
          memberNodeType = getMemberNodeType(member, this.MEMBER_NODE_TYPE_SEVERE);
          if (!hostSevere) {
            hostSevere = true;
          }
        } else if (errorAlertsList.contains(member.getName())) {
          // for error alerts
          memberNodeType = getMemberNodeType(member, this.MEMBER_NODE_TYPE_ERROR);
          if (!hostError) {
            hostError = true;
          }
        }
        // for warning alerts
        else if (warningAlertsList.contains(member.getName())) {
          memberNodeType = getMemberNodeType(member, this.MEMBER_NODE_TYPE_WARNING);
          if (!hostWarning) {
            hostWarning = true;
          }
        } else {
          memberNodeType = getMemberNodeType(member, this.MEMBER_NODE_TYPE_NORMAL);
        }

        memberData.put("nodeType", memberNodeType);
        memberData.put("$type", memberNodeType);
        memberData.put(this.GATEWAY_SENDER, member.getGatewaySenderList().size());
        // Receiver count is rendered as 0/1, not an actual count.
        if (member.getGatewayReceiver() != null) {
          memberData.put(this.GATEWAY_RECEIVER, 1);
        } else {
          memberData.put(this.GATEWAY_RECEIVER, 0);
        }
        memberJSONObj.put(this.DATA, memberData);
        // Members are leaves in the graph, hence an empty children array.
        memberJSONObj.put(this.CHILDREN, mapper.createArrayNode());
        membersArray.add(memberJSONObj);
      }

      ObjectNode data = mapper.createObjectNode();

      data.put(this.LOAD_AVG, Double.valueOf(df2.format(hostLoadAvg)));
      data.put(this.SOCKETS, hostSockets);
      data.put(this.THREADS, hostNumThreads);
      data.put(this.CPU_USAGE, Double.valueOf(df2.format(hostCpuUsage)));
      data.put(this.MEMORY_USAGE, hostMemoryUsage);

      String hostNodeType;
      // setting physical host status: worst member state wins
      // (severe > error > warning > normal).
      if (hostSevere) {
        hostStatus = this.MEMBER_NODE_TYPE_SEVERE;
        hostNodeType = "hostSevereNode";
      } else if (hostError) {
        hostStatus = this.MEMBER_NODE_TYPE_ERROR;
        hostNodeType = "hostErrorNode";
      } else if (hostWarning) {
        hostStatus = this.MEMBER_NODE_TYPE_WARNING;
        hostNodeType = "hostWarningNode";
      } else {
        hostStatus = this.MEMBER_NODE_TYPE_NORMAL;
        hostNodeType = "hostNormalNode";
      }

      data.put("hostStatus", hostStatus);
      data.put("$type", hostNodeType);

      childHostObject.put(this.DATA, data);
      childHostObject.put(this.CHILDREN, membersArray);
      childHostArray.add(childHostObject);
    }

    clusterTopologyJSON.put(this.CHILDREN, childHostArray);

    return clusterTopologyJSON;
  }

  /**
   * used for getting member node type based on member's current state
   *
   * The returned string is a CSS/sprite key of the form
   * "member[Locator][Manager][Server]&lt;state&gt;Node" reflecting which roles the member plays.
   *
   * @param member Member
   * @param memberState i.e Severe, Error, Warning or Normal
   * @return icon/node-type identifier for the R-Graph renderer
   */
  private String getMemberNodeType(Cluster.Member member, String memberState) {
    String memberNodeType = "";

    if ((member.isLocator()) && (member.isServer()) && (member.isManager())) {
      memberNodeType = "memberLocatorManagerServer" + memberState + "Node";
    } else if ((member.isLocator()) && (member.isServer()) && !(member.isManager())) {
      memberNodeType = "memberLocatorServer" + memberState + "Node";
    } else if ((member.isLocator()) && !(member.isServer()) && (member.isManager())) {
      memberNodeType = "memberLocatorManager" + memberState + "Node";
    } else if ((member.isLocator()) && !(member.isServer()) && !(member.isManager())) {
      memberNodeType = "memberLocator" + memberState + "Node";
    } else if (!(member.isLocator()) && (member.isServer()) && (member.isManager())) {
      memberNodeType = "memberManagerServer" + memberState + "Node";
    } else if (!(member.isLocator()) && (member.isServer()) && !(member.isManager())) {
      memberNodeType = "memberServer" + memberState + "Node";
    } else if (!(member.isLocator()) && !(member.isServer()) && (member.isManager())) {
      memberNodeType = "memberManager" + memberState + "Node";
    } else if (!(member.isLocator()) && !(member.isServer()) && !(member.isManager())) {
      memberNodeType = "member" + memberState + "Node";
    }
    return memberNodeType;
  }

  /**
   * function used for getting list of all the alerts and save the member names in respective
   * error, warning and severe alert lists
   *
   * Each member appears in at most one list; a higher-severity alert for a member evicts (or
   * blocks) the member from the lower-severity lists.
   *
   * @param cluster cluster whose current alerts are scanned
   */
  private void updateAlertLists(Cluster cluster) {

    severeAlertList = new ArrayList<String>();
    errorAlertsList = new ArrayList<String>();
    warningAlertsList = new ArrayList<String>();

    Cluster.Alert[] alertsList = cluster.getAlertsList();
    for (Cluster.Alert alert : alertsList) {
      // if alert is severe: promote the member out of the error/warning lists.
      if (alert.getSeverity() == Cluster.Alert.SEVERE) {
        if (errorAlertsList.contains(alert.getMemberName())) {
          errorAlertsList.remove(alert.getMemberName());
        } else if (warningAlertsList.contains(alert.getMemberName())) {
          warningAlertsList.remove(alert.getMemberName());
        }
        if (!severeAlertList.contains(alert.getMemberName())) {
          severeAlertList.add(alert.getMemberName());
        }
      }
      // if alert is error: only recorded when the member is not already severe.
      else if (alert.getSeverity() == Cluster.Alert.ERROR) {
        if (!severeAlertList.contains(alert.getMemberName())) {
          if (warningAlertsList.contains(alert.getMemberName())) {
            warningAlertsList.remove(alert.getMemberName());
          }
          if (!errorAlertsList.contains(alert.getMemberName())) {
            errorAlertsList.add(alert.getMemberName());
          }
        }
      }
      // if alert is warning: lowest priority, only when not severe and not error.
      else if (alert.getSeverity() == Cluster.Alert.WARNING) {
        if (!severeAlertList.contains(alert.getMemberName())) {
          if (!errorAlertsList.contains(alert.getMemberName())) {
            if (!warningAlertsList.contains(alert.getMemberName())) {
              warningAlertsList.add(alert.getMemberName());
            }
          }
        }
      }
    }
  }
}
prasi-in/geode
geode-pulse/src/main/java/org/apache/geode/tools/pulse/internal/service/ClusterMembersRGraphService.java
Java
apache-2.0
13,585
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import profile class Profile(profile.Profile): def __init__(self, region, plugins=None, **kwargs): super(Profile, self).__init__(plugins=plugins or ['rackspace']) self.set_region(self.ALL, region) global_services = ('cloudMetrics', 'cloudMetricsIngest', 'cloudMonitoring', 'rackCDN') for service in self.get_services(): if service.service_name in global_services: service.region = None
briancurtin/rackspace-sdk-plugin
rackspace/profile.py
Python
apache-2.0
1,044
/**
 * Copyright 2014 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

'use strict';

var assert = require('assert');
var googleapis = require('../lib/googleapis.js');
var google, drive, authClient, OAuth2;

// Verifies how request params are split between the URL path and the query
// string by the generated Drive v2 client.
describe('Query params', function() {
  // Callback placeholder; these tests only inspect the built request object.
  function noop() {}

  beforeEach(function() {
    // Fresh client per test so credentials/state cannot leak between cases.
    google = new googleapis.GoogleApis();
    OAuth2 = google.auth.OAuth2;
    authClient = new OAuth2('CLIENT_ID', 'CLIENT_SECRET', 'REDIRECT_URL');
    authClient.setCredentials({ access_token: 'abc123' });
    drive = google.drive('v2');
  });

  it('should not append ? with no query parameters', function() {
    var uri = drive.files.get({ fileId: 'ID' }, noop).uri;
    assert.equal(-1, uri.href.indexOf('?'));
  });

  it('should be null if no object passed', function() {
    var req = drive.files.list(noop);
    assert.equal(req.uri.query, null);
  });

  it('should be null if params passed are in path', function() {
    // fileId is a path template parameter, so no query string is produced.
    var req = drive.files.get({ fileId: '123' }, noop);
    assert.equal(req.uri.query, null);
  });

  it('should be set if params passed are optional query params', function() {
    var req = drive.files.get({ fileId: '123', updateViewedDate: true }, noop);
    assert.equal(req.uri.query, 'updateViewedDate=true');
  });

  it('should be set if params passed are unknown params', function() {
    // Unknown params are passed through to the query string untouched.
    var req = drive.files.get({ fileId: '123', madeThisUp: 'hello' }, noop);
    assert.equal(req.uri.query, 'madeThisUp=hello');
  });

  it('should chain together with & in order', function() {
    var req = drive.files.get({
      fileId: '123',
      madeThisUp: 'hello',
      thisToo: 'world'
    }, noop);
    assert.equal(req.uri.query, 'madeThisUp=hello&thisToo=world');
  });

  it('should not include auth if auth is an OAuth2Client object', function() {
    // The auth client must be consumed for signing, never serialized as a param.
    var req = drive.files.get({ fileId: '123', auth: authClient }, noop);
    assert.equal(req.uri.query, null);
  });
});
naskogithub/google-api-nodejs-client
test/test.query.js
JavaScript
apache-2.0
2,474
# Copyright (C) 2013-2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013-2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# there are two representations of value and mask this module deal with.
#
# "user"
#   (value, mask) or value.  the latter means no mask.
#   value and mask are strings.
#
# "internal"
#   value and mask are on-wire bytes.
#   mask is None if no mask.

# There are two types of OXM/NXM headers.
#
#   32-bit OXM/NXM header
#   +-------------------------------+-------------+-+---------------+
#   | class                         | field       |m| length        |
#   +-------------------------------+-------------+-+---------------+
#
#   64-bit experimenter OXM header
#   +-------------------------------+-------------+-+---------------+
#   | class (OFPXMC_EXPERIMENTER)   | field       |m| length        |
#   +-------------------------------+-------------+-+---------------+
#   | experimenter ID                                               |
#   +---------------------------------------------------------------+
#
# NOTE: EXT-256 had a variation of experimenter OXM header.
# It has been rectified since then.  Currently this implementation
# supports only the old version.
#
#   ONF EXT-256 (old, exp_type = 2560)
#   +-------------------------------+-------------+-+---------------+
#   | class (OFPXMC_EXPERIMENTER)   | ?????       |m| length        |
#   +-------------------------------+-------------+-+---------------+
#   | experimenter ID (ONF_EXPERIMENTER_ID)                         |
#   +-------------------------------+---------------+---------------+
#   | exp_type (PBB_UCA=2560)       | pbb_uca       |
#   +-------------------------------+---------------+
#
#   ONF EXT-256 (new, oxm_field = 41)
#   +-------------------------------+-------------+-+---------------+
#   | class (OFPXMC_EXPERIMENTER)   | PBB_UCA=41  |m| length        |
#   +-------------------------------+-------------+-+---------------+
#   | experimenter ID (ONF_EXPERIMENTER_ID)                         |
#   +-------------------------------+---------------+---------------+
#   | reserved, should be zero      | pbb_uca       |
#   +-------------------------------+---------------+

import itertools
import struct
from ryu.ofproto import ofproto_common
from ryu.lib.pack_utils import msg_pack_into
from ryu.lib import type_desc

# OXM class identifiers (top 16 bits of the OXM header).
OFPXMC_NXM_0 = 0  # Nicira Extended Match (NXM_OF_)
OFPXMC_NXM_1 = 1  # Nicira Extended Match (NXM_NX_)
OFPXMC_OPENFLOW_BASIC = 0x8000
OFPXMC_PACKET_REGS = 0x8001
OFPXMC_EXPERIMENTER = 0xffff


class _OxmClass(object):
    """Base descriptor for one OXM field: name, numeric type and codec type."""

    def __init__(self, name, num, type_):
        self.name = name
        # oxm_type packs class|field: 7-bit field number below the class bits.
        self.oxm_type = num | (self._class << 7)
        # TODO(yamamoto): Clean this up later.
        # Probably when we drop EXT-256 style experimenter OXMs.
        self.num = self.oxm_type
        self.type = type_


class OpenFlowBasic(_OxmClass):
    _class = OFPXMC_OPENFLOW_BASIC


class PacketRegs(_OxmClass):
    _class = OFPXMC_PACKET_REGS


class _Experimenter(_OxmClass):
    """Experimenter OXMs key their ``num`` by (experimenter_id, oxm_type)."""

    _class = OFPXMC_EXPERIMENTER

    def __init__(self, name, num, type_):
        super(_Experimenter, self).__init__(name, num, type_)
        self.num = (self.experimenter_id, self.oxm_type)


class ONFExperimenter(_Experimenter):
    experimenter_id = ofproto_common.ONF_EXPERIMENTER_ID


class OldONFExperimenter(_Experimenter):
    # This class is for the old version of EXT-256
    experimenter_id = ofproto_common.ONF_EXPERIMENTER_ID

    def __init__(self, name, num, type_):
        # oxm_field is forced to 0; the real discriminator is exp_type.
        super(OldONFExperimenter, self).__init__(name, 0, type_)
        self.num = (self.experimenter_id, num)
        self.exp_type = num


class OpenStateExperimenter(_Experimenter):
    experimenter_id = ofproto_common.OPENSTATE_EXPERIMENTER_ID


class NiciraExperimenter(_Experimenter):
    experimenter_id = ofproto_common.NX_EXPERIMENTER_ID


class NiciraExtended0(_OxmClass):
    """Nicira Extended Match (NXM_0)

    NXM header format is same as 32-bit (non-experimenter) OXMs.
    """

    _class = OFPXMC_NXM_0


class NiciraExtended1(_OxmClass):
    """Nicira Extended Match (NXM_1)

    NXM header format is same as 32-bit (non-experimenter) OXMs.
    """

    _class = OFPXMC_NXM_1


def generate(modname):
    """Install OXM constants and codec functions into module ``modname``.

    The target module must define ``oxm_types`` (a list of _OxmClass
    descriptors) and ``oxm_tlv_header``/``oxm_tlv_header_w`` helpers; this
    function derives OFPXMT_OFB_*/OXM_OF_* constants and binds the private
    codec functions below via functools.partial.
    """
    import sys
    import functools

    mod = sys.modules[modname]

    def add_attr(k, v):
        setattr(mod, k, v)

    for i in mod.oxm_types:
        uk = i.name.upper()
        if isinstance(i.num, tuple):
            # Experimenter OXMs get no OFPXMT_OFB_* constants.
            continue
        oxm_class = i.num >> 7
        if oxm_class != OFPXMC_OPENFLOW_BASIC:
            continue
        ofpxmt = i.num & 0x3f
        td = i.type
        add_attr('OFPXMT_OFB_' + uk, ofpxmt)
        add_attr('OXM_OF_' + uk, mod.oxm_tlv_header(ofpxmt, td.size))
        add_attr('OXM_OF_' + uk + '_W', mod.oxm_tlv_header_w(ofpxmt, td.size))

    # Lookup tables used by the user<->internal converters.
    name_to_field = dict((f.name, f) for f in mod.oxm_types)
    num_to_field = dict((f.num, f) for f in mod.oxm_types)
    add_attr('oxm_from_user', functools.partial(_from_user, name_to_field))
    add_attr('oxm_from_user_header',
             functools.partial(_from_user_header, name_to_field))
    add_attr('oxm_to_user', functools.partial(_to_user, num_to_field))
    add_attr('oxm_to_user_header',
             functools.partial(_to_user_header, num_to_field))
    add_attr('_oxm_field_desc', functools.partial(_field_desc, num_to_field))
    add_attr('oxm_normalize_user', functools.partial(_normalize_user, mod))
    add_attr('oxm_parse', functools.partial(_parse, mod))
    add_attr('oxm_parse_header', functools.partial(_parse_header, mod))
    add_attr('oxm_serialize', functools.partial(_serialize, mod))
    add_attr('oxm_serialize_header',
             functools.partial(_serialize_header, mod))
    add_attr('oxm_to_jsondict', _to_jsondict)
    add_attr('oxm_from_jsondict', _from_jsondict)


def _get_field_info_by_name(name_to_field, name):
    """Return (num, type) for a field name; 'field_N' names map to raw N."""
    try:
        f = name_to_field[name]
        t = f.type
        num = f.num
    except KeyError:
        t = type_desc.UnknownType
        if name.startswith('field_'):
            num = int(name.split('_')[1])
        else:
            raise KeyError('unknown match field ' + name)
    return num, t


def _from_user_header(name_to_field, name):
    (num, t) = _get_field_info_by_name(name_to_field, name)
    return num


def _from_user(name_to_field, name, user_value):
    """Convert a user (value[, mask]) pair into internal on-wire bytes."""
    (num, t) = _get_field_info_by_name(name_to_field, name)
    # the 'list' case below is a bit hack; json.dumps silently maps
    # python tuples into json lists.
    if isinstance(user_value, (tuple, list)):
        (value, mask) = user_value
    else:
        value = user_value
        mask = None
    if value is not None:
        value = t.from_user(value)
    if mask is not None:
        mask = t.from_user(mask)
    return num, value, mask


def _get_field_info_by_number(num_to_field, n):
    """Return (name, type) for a field number; unknown numbers get 'field_N'."""
    try:
        f = num_to_field[n]
        t = f.type
        name = f.name
    except KeyError:
        t = type_desc.UnknownType
        name = 'field_%d' % (n,)
    return name, t


def _to_user_header(num_to_field, n):
    (name, t) = _get_field_info_by_number(num_to_field, n)
    return name


def _to_user(num_to_field, n, v, m):
    """Convert internal bytes (value v, mask m) back to the user form."""
    (name, t) = _get_field_info_by_number(num_to_field, n)
    if v is not None:
        # Guard against truncated/oversized payloads for fixed-size fields.
        if hasattr(t, 'size') and t.size != len(v):
            raise Exception(
                'Unexpected OXM payload length %d for %s (expected %d)'
                % (len(v), name, t.size))
        value = t.to_user(v)
    else:
        value = None
    if m is None:
        user_value = value
    else:
        user_value = (value, t.to_user(m))
    return name, user_value


def _field_desc(num_to_field, n):
    return num_to_field[n]


def _normalize_user(mod, k, uv):
    """Round-trip a user value through the codec, applying the mask to the value."""
    (n, v, m) = mod.oxm_from_user(k, uv)
    # apply mask
    # NOTE(review): itertools.izip and chr/ord on byte strings are
    # Python 2 only — confirm this module is not expected to run on Python 3.
    if m is not None:
        v = ''.join(chr(ord(x) & ord(y)) for (x, y) in itertools.izip(v, m))
    (k2, uv2) = mod.oxm_to_user(n, v, m)
    assert k2 == k
    return (k2, uv2)


def _parse_header_impl(mod, buf, offset):
    """Decode one OXM header at buf[offset].

    Returns (num, total_hdr_len, hasmask, value_len, field_len) where
    field_len is the full TLV length including the 32-bit base header.
    """
    hdr_pack_str = '!I'
    (header, ) = struct.unpack_from(hdr_pack_str, buf, offset)
    hdr_len = struct.calcsize(hdr_pack_str)
    oxm_type = header >> 9  # class|field
    oxm_hasmask = mod.oxm_tlv_header_extract_hasmask(header)
    oxm_class = oxm_type >> 7
    oxm_length = header & 0xff
    if oxm_class == OFPXMC_EXPERIMENTER:
        # Experimenter OXMs have 64-bit header.  (vs 32-bit for other OXMs)
        exp_hdr_pack_str = '!I'  # experimenter_id
        (exp_id, ) = struct.unpack_from(exp_hdr_pack_str, buf,
                                        offset + hdr_len)
        exp_hdr_len = struct.calcsize(exp_hdr_pack_str)
        assert exp_hdr_len == 4
        oxm_field = oxm_type & 0x7f
        if exp_id == ofproto_common.ONF_EXPERIMENTER_ID and oxm_field == 0:
            # XXX
            # This block implements EXT-256 style experimenter OXM.
            onf_exp_type_pack_str = '!H'
            (exp_type, ) = struct.unpack_from(onf_exp_type_pack_str, buf,
                                              offset + hdr_len + exp_hdr_len)
            exp_hdr_len += struct.calcsize(onf_exp_type_pack_str)
            assert exp_hdr_len == 4 + 2
            num = (exp_id, exp_type)
        elif exp_id == ofproto_common.OPENSTATE_EXPERIMENTER_ID:
            num = oxm_type
        else:
            num = (exp_id, oxm_type)
    else:
        num = oxm_type
        exp_hdr_len = 0
    # On-wire length covers the experimenter header; halve for value+mask.
    value_len = oxm_length - exp_hdr_len
    if oxm_hasmask:
        value_len //= 2
    assert value_len > 0
    field_len = hdr_len + oxm_length
    total_hdr_len = hdr_len + exp_hdr_len
    return num, total_hdr_len, oxm_hasmask, value_len, field_len


def _parse_header(mod, buf, offset):
    (oxm_type_num, total_hdr_len, hasmask, value_len,
     field_len) = _parse_header_impl(mod, buf, offset)
    return oxm_type_num, field_len - value_len


def _parse(mod, buf, offset):
    """Decode a full OXM TLV; returns (num, value, mask_or_None, field_len)."""
    (oxm_type_num, total_hdr_len, hasmask, value_len,
     field_len) = _parse_header_impl(mod, buf, offset)
    # Note: OXM payload length (oxm_len) includes Experimenter ID
    # (exp_hdr_len) for experimenter OXMs.
    value_offset = offset + total_hdr_len
    value_pack_str = '!%ds' % value_len
    assert struct.calcsize(value_pack_str) == value_len
    (value, ) = struct.unpack_from(value_pack_str, buf, value_offset)
    if hasmask:
        (mask, ) = struct.unpack_from(value_pack_str, buf,
                                      value_offset + value_len)
    else:
        mask = None
    return oxm_type_num, value, mask, field_len


def _make_exp_hdr(mod, n):
    """Build the experimenter sub-header (if any) for field key ``n``.

    Returns (32-bit oxm_type, header bytes); the header is empty for
    non-experimenter fields.
    """
    exp_hdr = bytearray()
    try:
        desc = mod._oxm_field_desc(n)
    except KeyError:
        return n, exp_hdr
    if isinstance(desc, _Experimenter):  # XXX
        (exp_id, exp_type) = n
        assert desc.experimenter_id == exp_id
        if isinstance(desc, OldONFExperimenter):  # XXX
            # XXX
            # This block implements EXT-256 style experimenter OXM.
            exp_hdr_pack_str = '!IH'  # experimenter_id, exp_type
            msg_pack_into(exp_hdr_pack_str, exp_hdr, 0,
                          desc.experimenter_id, desc.exp_type)
        else:
            assert desc.oxm_type == exp_type
            exp_hdr_pack_str = '!I'  # experimenter_id
            msg_pack_into(exp_hdr_pack_str, exp_hdr, 0,
                          desc.experimenter_id)
        assert len(exp_hdr) == struct.calcsize(exp_hdr_pack_str)
        n = desc.oxm_type
        assert (n >> 7) == OFPXMC_EXPERIMENTER
    return n, exp_hdr


def _serialize_header(mod, n, buf, offset):
    """Write just the OXM header (mask bit clear); returns bytes written."""
    try:
        desc = mod._oxm_field_desc(n)
        value_len = desc.type.size
    except KeyError:
        value_len = 0
    n, exp_hdr = _make_exp_hdr(mod, n)
    exp_hdr_len = len(exp_hdr)
    pack_str = "!I%ds" % (exp_hdr_len,)
    msg_pack_into(pack_str, buf, offset,
                  (n << 9) | (0 << 8) | (exp_hdr_len + value_len),
                  bytes(exp_hdr))
    return struct.calcsize(pack_str)


def _serialize(mod, n, value, mask, buf, offset):
    """Write a full OXM TLV (header + value [+ mask]); returns bytes written."""
    n, exp_hdr = _make_exp_hdr(mod, n)
    exp_hdr_len = len(exp_hdr)
    value_len = len(value)
    if mask:
        assert value_len == len(mask)
        pack_str = "!I%ds%ds%ds" % (exp_hdr_len, value_len, len(mask))
        msg_pack_into(pack_str, buf, offset,
                      (n << 9) | (1 << 8) | (exp_hdr_len + value_len * 2),
                      bytes(exp_hdr), value, mask)
    else:
        pack_str = "!I%ds%ds" % (exp_hdr_len, value_len,)
        msg_pack_into(pack_str, buf, offset,
                      (n << 9) | (0 << 8) | (exp_hdr_len + value_len),
                      bytes(exp_hdr), value)
    return struct.calcsize(pack_str)


def _to_jsondict(k, uv):
    """Encode one (field, user-value) match as Ryu's OXMTlv JSON dict."""
    if isinstance(uv, tuple):
        (value, mask) = uv
    else:
        value = uv
        mask = None
    return {"OXMTlv": {"field": k, "value": value, "mask": mask}}


def _from_jsondict(j):
    """Inverse of _to_jsondict: rebuild (field, value-or-(value, mask))."""
    tlv = j['OXMTlv']
    field = tlv['field']
    value = tlv['value']
    mask = tlv.get('mask')
    if mask is None:
        uv = value
    else:
        uv = (value, mask)
    return (field, uv)
Tesi-Luca-Davide/ryu
ryu/ofproto/oxm_fields.py
Python
apache-2.0
13,622
package com.olegych.scastie.web.oauth2

import com.olegych.scastie.api.User

import akka.http.scaladsl._
import server._

import com.softwaremill.session._
import SessionDirectives._
import SessionOptions._

import scala.concurrent.ExecutionContext

/**
 * Akka-HTTP directives for resolving the currently logged-in GitHub user.
 *
 * @param session session helper providing cookie-backed session lookup and
 *                the `getUser` resolver (imported into scope below)
 */
class UserDirectives(
    session: GithubUserSession
)(implicit val executionContext: ExecutionContext) {
  import session._

  /**
   * Extracts the user from the (refreshable, cookie-stored) session if one
   * exists; yields None instead of rejecting when no session is present,
   * so routes can serve anonymous visitors.
   */
  def optionalLogin: Directive1[Option[User]] =
    optionalSession(refreshable, usingCookies).map(getUser)
}
scalacenter/scastie
server/src/main/scala/com.olegych.scastie.web/oauth2/UserDirectives.scala
Scala
apache-2.0
486
/*
   Copyright 2011 Michael Edwards
 
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
 
       http://www.apache.org/licenses/LICENSE-2.0
 
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Reflection;
using Sitecore.Data.Items;
using System.Collections;
using Glass.Sitecore.Mapper.Configuration;

namespace Glass.Sitecore.Mapper.Proxies
{
    /// <summary>
    /// Creates lazy Castle DynamicProxy instances for mapped Sitecore classes.
    /// </summary>
    public class ProxyGenerator
    {
        // Shared generator and options; the hook decides which members to intercept.
        private static readonly Castle.DynamicProxy.ProxyGenerator _generator =
            new Castle.DynamicProxy.ProxyGenerator();

        private static readonly Castle.DynamicProxy.ProxyGenerationOptions _options =
            new Castle.DynamicProxy.ProxyGenerationOptions(new ProxyGeneratorHook());

        /// <summary>
        /// Builds a proxy for the configured type backed by the given Sitecore item.
        /// Interface types get a target-less interface proxy; concrete classes get
        /// a class proxy with lazy member interception.
        /// </summary>
        /// <param name="config">Mapping configuration holding the target type.</param>
        /// <param name="service">Service used by the interceptors to resolve values.</param>
        /// <param name="item">Sitecore item backing the proxy.</param>
        /// <param name="inferType">Whether the class interceptor should infer the concrete type.</param>
        /// <returns>The generated proxy instance.</returns>
        public static object CreateProxy(SitecoreClassConfig config, ISitecoreService service, Item item, bool inferType)
        {
            Type type = config.Type;

            if (type.IsInterface)
            {
                return _generator.CreateInterfaceProxyWithoutTarget(
                    type,
                    new InterfaceMethodInterceptor(config, item, service));
            }

            return _generator.CreateClassProxy(
                type,
                _options,
                new ProxyClassInterceptor(type, service, item, inferType));
        }
    }
}
photomoose/Glass.Sitecore.Mapper
Source/Glass.Sitecore.Mapper/Proxies/ProxyGenerator.cs
C#
apache-2.0
1,832
# # Cookbook Name:: coopr_hosts # Attributes:: default # # Copyright © 2013-2015 Cask Data, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Possible combinations: [], ['access_v4'], ['bind_v4'], ['access_v4','bind_v4'], ['bind_v4','access_v4'] default['coopr_hosts']['address_types'] = ['bind_v4']
caskdata/coopr-provisioner
lib/provisioner/worker/plugins/automators/chef_solo_automator/resources/cookbooks/coopr_hosts/attributes/default.rb
Ruby
apache-2.0
807
### Prerequisites

* Azure SDK 2.8
* Azure Compute Emulator v2.8

### How to run

* Build solution and make sure there are no build errors due to missing packages
* Choose Deployment project to be a "Startup project"
* Hit F5 to deploy everything into emulator
* In compute emulator you should see 2 roles: Client (web role) running 1 instance, and Cluster (worker role) running 1 instance
* Client is an ASP.NET application and after the role is started up, the Visual Studio should open the entry page in your browser
* Enter the count of publisher grains you want to spawn and click "Spawn"
* After some delay you will be redirected to notifications page where you can observe all events generated by all grains in a cluster
* You can click "Back" in a browser and spawn more grains any time you wish

There could be some glitches:

* You may receive `System.TimeoutException: Task.WaitWithThrow has timed out after 00:00:03` - just Continue
* Trying to open Compute Emulator UI may give you "Unable to determine the emulator running mode. Please ensure you have sufficient permissions". See the SO answer [here](http://stackoverflow.com/a/18911982).

### Have fun!

P.S. If you want to run Orleans cluster in multi-node setup you will need to:

* Open Deployment project properties
* On Web tab select Use Full Emulator
* Restart VS in elevated mode
* Also make sure to restart Compute Emulator in elevated mode as well
* Hit F5 to deploy roles into emulator
* This time there will be 3 instances of Cluster worker role
* Now you can play again.
* Observe that grain notifications now come from different nodes

### Have even more fun!
AntyaDev/Orleankka
Source/Example.Azure.Cluster/README.md
Markdown
apache-2.0
1,638
/**
 * All channels used by angular's WebWorker components are listed here.
 * You should not use these channels in your application code.
 */
// Handshake/bootstrap channel for worker setup.
export const SETUP_CHANNEL = "ng-WebWorkerSetup";
// Rendering instructions between UI thread and worker.
export const RENDERER_CHANNEL = "ng-Renderer";
// Proxied XHR requests issued from the worker.
export const XHR_CHANNEL = "ng-XHR";
// DOM event forwarding from the UI thread to the worker.
export const EVENT_CHANNEL = "ng-events";
//# sourceMappingURL=messaging_api.js.map
aayushkapoor206/whatshot
node_modules/angular2/es6/prod/src/web_workers/shared/messaging_api.js
JavaScript
apache-2.0
361
/**********************************************************************
 *
 *  stopword.c
 *
 *  Author: Stephen V. Rice
 *
 * Copyright 1996 The Board of Regents of the Nevada System of Higher
 * Education, on behalf, of the University of Nevada, Las Vegas,
 * Information Science Research Institute
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 **********************************************************************/

#include "stopword.h"
#include "table.h"
#include "word.h"

/* Hash-table entry/table types generated by the table.h macros; a
   Stopword entry carries only its key (the word itself). */
BEGIN_ENTRY(Stopword)
END_ENTRY(Stopword);

BEGIN_TABLE_OF(Stopword, Stopwordlist)
END_TABLE(Stopwordtable);

/* Module-level state: the stopword set and a one-shot init guard. */
static Stopwordtable stopwordtable;
static Boolean initialized = False;

/* Options for reading a user-supplied stopword file; field meanings are
   defined by Textopt in word.h/text headers (not visible here). */
static Textopt textopt = { False, False, 0, True, True, True };
static Text text;
static Wordlist wordlist;

/* Built-in English stopword list, used when no file is supplied. */
static char *default_stopword[] =
{
   "a", "about", "after", "all", "also", "an", "and", "any", "are",
   "as", "at", "back", "be", "because", "been", "but", "by", "can",
   "could", "did", "do", "does", "down", "each", "first", "for",
   "from", "get", "good", "had", "has", "have", "he", "her", "him",
   "his", "how", "i", "if", "in", "into", "is", "it", "its", "just",
   "know", "like", "little", "long", "made", "make", "man", "many",
   "may", "me", "more", "most", "my", "new", "no", "not", "now", "of",
   "on", "one", "only", "or", "other", "our", "out", "over", "said",
   "same", "see", "she", "so", "some", "than", "that", "the", "their",
   "them", "then", "there", "these", "they", "this", "to", "too",
   "two", "up", "us", "used", "very", "was", "way", "we", "were",
   "what", "when", "where", "which", "who", "why", "will", "with",
   "woman", "would", "you", "your"
};

/**********************************************************************/

/* Inserts `key` into the stopword table; reports (but continues past)
   duplicates.  The key string is stored by reference, not copied. */
static void save_stopword(key)
char *key;
{
   Stopword *stopword;
   stopword = table_lookup(&stopwordtable, key);
   if (stopword)
      error_string("duplicate stopword", key, Continue);
   else
   {
      stopword = NEW(Stopword);
      stopword->key = key;
      table_insert(&stopwordtable, stopword);
   }
}

/**********************************************************************/

/* Loads the stopword set, either from `filename` (one pass of the
   text/word tokenizer) or from the built-in default list when
   `filename` is NULL.  Must be called exactly once; a second call is a
   fatal error. */
void init_stopwords(filename)
char *filename;
{
   Word *word;
   short i;
   if (initialized)
      error("stopwords already initialized", Exit);
   if (filename)
   {
      read_text(&text, filename, &textopt);
      find_words(&wordlist, &text);
      /* Free the raw text; the word strings in `wordlist` are retained
         and referenced by the table entries. */
      list_empty(&text, free);
      for (word = wordlist.first; word; word = word->next)
         save_stopword(word->string);
   }
   else
      for (i = 0; i < sizeof(default_stopword) / sizeof(char *); i++)
         save_stopword(default_stopword[i]);
   initialized = True;
}

/**********************************************************************/

/* Returns True iff `string` is in the loaded stopword set.  Fatal error
   if init_stopwords() has not been called. */
Boolean is_stopword(string)
unsigned char *string;
{
   if (!initialized)
      error("stopwords not initialized", Exit);
   return(table_lookup(&stopwordtable, string) ? True : False);
}
SmartDOC-MOC/ocr-evaluation-tools
Modules/stopword.c
C
apache-2.0
3,425
/* * %CopyrightBegin% * * Copyright Ericsson AB 2001-2020. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * %CopyrightEnd% */ #include "ei_runner.h" #include <string.h> #include <stdlib.h> /* * Purpose: Tests the ei_print() function. * Author: Jakob */ static void send_printed_buf(ei_x_buff* x) { char* b = NULL; char fn[256]; char *tmp = getenv("temp"); FILE* f; int n, index = 0, ver; if (tmp == NULL) { tmp = "/tmp"; } strcpy(fn, tmp); strcat(fn, "/ei_print_test.txt"); f = fopen(fn, "w+"); ei_decode_version(x->buff, &index, &ver); n = ei_print_term(f, x->buff, &index); if (n < 0) { fclose(f); x->index = 0; ei_x_format(x, "~s", "ERROR: term decoding failed"); send_bin_term(x); } else { fseek(f, 0, SEEK_SET); b = malloc(n+1); fread(b, 1, n, f); b[n] = '\0'; fclose(f); x->index = 0; ei_x_format(x, "~s", b); send_bin_term(x); free(b); } } static void send_printed3(char* format, char* p1, char* p2, int fl) { char* b = NULL; char fn[100], * tmp = getenv("temp"); FILE* f; int n, index = 0, ver; ei_x_buff x; ei_x_new(&x); if (fl) { ei_x_format(&x, format, *(float*)p1, *(float*)p2); } else { ei_x_format(&x, format, p1, p2); } send_printed_buf(&x); ei_x_free(&x); } static void send_printed(char* format) { send_printed3(format, NULL, NULL, 0); } static void send_printed2(char* format, char* p) { send_printed3(format, p, NULL, 0); } static void send_printed3f(char* format, float f1, float f2) { send_printed3(format, (char*)&f1, (char*)&f2, 1); } TESTCASE(atoms) { 
ei_init(); send_printed("''"); send_printed("'a'"); send_printed("'A'"); send_printed("'abc'"); send_printed("'Abc'"); send_printed("'ab@c'"); send_printed("'The rain in Spain stays mainly in the plains'"); send_printed("a"); send_printed("ab"); send_printed("abc"); send_printed("ab@c"); send_printed(" abcdefghijklmnopq "); send_printed2("~a", ""); send_printed2("~a", "a"); send_printed2("~a", "A"); send_printed2("~a", "abc"); send_printed2("~a", "Abc"); send_printed2("~a", "ab@c"); send_printed2("~a", "The rain in Spain stays mainly in the plains"); send_printed2("~a", "a"); send_printed2("~a", "ab"); send_printed2("~a", "abc"); send_printed2("~a","ab@c"); send_printed2("~a", " abcdefghijklmnopq "); report(1); } TESTCASE(tuples) { ei_init(); send_printed("{}"); send_printed("{a}"); send_printed("{a, b}"); send_printed("{a, b, c}"); send_printed("{1}"); send_printed("{[]}"); send_printed("{[], []}"); send_printed("{[], a, b, c}"); send_printed("{[], a, [], b, c}"); send_printed("{[], a, '', b, c}"); report(1); } TESTCASE(lists) { ei_x_buff x; ei_init(); send_printed("[]"); send_printed("[a]"); send_printed("[a, b]"); send_printed("[a, b, c]"); send_printed("[1]"); send_printed("[[]]"); send_printed("[[], []]"); send_printed("[[], a, b, c]"); send_printed("[[], a, [], b, c]"); send_printed("[[], a, '', b, c]"); send_printed("[[x, 2], [y, 3], [z, 4]]"); /* more tests needed */ send_printed3f("[{pi, ~f}, {'cos(70)', ~f}]", (float)3.1415, (float)0.34202); send_printed3f("[[pi, ~f], ['cos(70)', ~f]]", (float)3.1415, (float)0.34202); send_printed2("[~i]", (char*)-1); report(1); } TESTCASE(strings) { ei_x_buff x; ei_init(); send_printed("\"\n\""); send_printed("\"\r\n\""); send_printed("\"a\""); send_printed("\"A\""); send_printed("\"0\""); send_printed("\"9\""); send_printed("\"The rain in Spain stays mainly in the plains\""); send_printed("\" abcdefghijklmnopq \""); report(1); } TESTCASE(maps) { ei_x_buff x; ei_init(); ei_x_new_with_version(&x); 
ei_x_encode_map_header(&x, 0); send_printed_buf(&x); ei_x_free(&x); ei_x_new_with_version(&x); ei_x_encode_map_header(&x, 1); ei_x_encode_atom(&x, "key"); ei_x_encode_atom(&x, "value"); send_printed_buf(&x); ei_x_free(&x); ei_x_new_with_version(&x); ei_x_encode_map_header(&x, 2); ei_x_encode_atom(&x, "key"); ei_x_encode_atom(&x, "value"); ei_x_encode_atom(&x, "another_key"); ei_x_encode_tuple_header(&x, 2); ei_x_encode_atom(&x, "ok"); ei_x_encode_long(&x, 42L); send_printed_buf(&x); ei_x_free(&x); report(1); } TESTCASE(funs) { ei_x_buff x; erlang_pid self; erlang_fun fun; strcpy(self.node, "node@host"); self.num = 9; self.serial = 99; self.creation = 1; ei_init(); ei_x_new_with_version(&x); fun.arity = -1; /* Will encode as FUN_EXT */ strcpy(fun.module, "some_module"); fun.type = EI_FUN_CLOSURE; fun.u.closure.pid = self; fun.u.closure.index = fun.u.closure.old_index = 42; fun.u.closure.uniq = 0xDEADBEEF; fun.u.closure.n_free_vars = 0; fun.u.closure.free_var_len = 0; ei_x_encode_fun(&x, &fun); send_printed_buf(&x); ei_x_free(&x); ei_x_new_with_version(&x); fun.arity = 0; /* Will encode as NEW_FUN_EXT */ strcpy(fun.module, "some_module"); fun.type = EI_FUN_CLOSURE; fun.u.closure.pid = self; fun.u.closure.index = fun.u.closure.old_index = 37; fun.u.closure.uniq = 0xBADBEEF; fun.u.closure.n_free_vars = 0; fun.u.closure.free_var_len = 0; ei_x_encode_fun(&x, &fun); send_printed_buf(&x); ei_x_free(&x); ei_x_new_with_version(&x); fun.arity = 1; strcpy(fun.module, "erlang"); fun.type = EI_FUN_EXPORT; fun.u.exprt.func = "abs"; ei_x_encode_fun(&x, &fun); send_printed_buf(&x); ei_x_free(&x); report(1); } TESTCASE(binaries) { char *buf; long len; int err, n, index; ei_x_buff x; ei_init(); for (n = 5; n; n--) { buf = read_packet(NULL); index = 0; err = ei_decode_version(buf, &index, NULL); if (err != 0) fail1("ei_decode_version returned %d", err); err = ei_decode_binary(buf, &index, NULL, &len); if (err != 0) fail1("ei_decode_binary returned %d", err); ei_x_new(&x); 
ei_x_append_buf(&x, buf, index); send_printed_buf(&x); ei_x_free(&x); free_packet(buf); } report(1); } TESTCASE(bitstrings) { char *buf; long len; int err, n, index; ei_x_buff x; ei_init(); for (n = 7; n; n--) { buf = read_packet(NULL); index = 0; err = ei_decode_version(buf, &index, NULL); if (err != 0) fail1("ei_decode_version returned %d", err); err = ei_decode_bitstring(buf, &index, NULL, NULL, NULL); if (err != 0) fail1("ei_decode_bitstring returned %d", err); ei_x_new(&x); ei_x_append_buf(&x, buf, index); send_printed_buf(&x); ei_x_free(&x); free_packet(buf); } report(1); }
electricimp/otp
lib/erl_interface/test/ei_print_SUITE_data/ei_print_test.c
C
apache-2.0
7,718
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Service for operating on sharing profiles via the REST API. */ angular.module('rest').factory('sharingProfileService', ['$injector', function sharingProfileService($injector) { // Required services var $http = $injector.get('$http'); var authenticationService = $injector.get('authenticationService'); var cacheService = $injector.get('cacheService'); var service = {}; /** * Makes a request to the REST API to get a single sharing profile, * returning a promise that provides the corresponding @link{SharingProfile} * if successful. * * @param {String} id The ID of the sharing profile. * * @returns {Promise.<SharingProfile>} * A promise which will resolve with a @link{SharingProfile} upon * success. 
* * @example * * sharingProfileService.getSharingProfile('mySharingProfile').success(function(sharingProfile) { * // Do something with the sharing profile * }); */ service.getSharingProfile = function getSharingProfile(dataSource, id) { // Build HTTP parameters set var httpParameters = { token : authenticationService.getCurrentToken() }; // Retrieve sharing profile return $http({ cache : cacheService.connections, method : 'GET', url : 'api/session/data/' + encodeURIComponent(dataSource) + '/sharingProfiles/' + encodeURIComponent(id), params : httpParameters }); }; /** * Makes a request to the REST API to get the parameters of a single * sharing profile, returning a promise that provides the corresponding * map of parameter name/value pairs if successful. * * @param {String} id * The identifier of the sharing profile. * * @returns {Promise.<Object.<String, String>>} * A promise which will resolve with an map of parameter name/value * pairs upon success. */ service.getSharingProfileParameters = function getSharingProfileParameters(dataSource, id) { // Build HTTP parameters set var httpParameters = { token : authenticationService.getCurrentToken() }; // Retrieve sharing profile parameters return $http({ cache : cacheService.connections, method : 'GET', url : 'api/session/data/' + encodeURIComponent(dataSource) + '/sharingProfiles/' + encodeURIComponent(id) + '/parameters', params : httpParameters }); }; /** * Makes a request to the REST API to save a sharing profile, returning a * promise that can be used for processing the results of the call. If the * sharing profile is new, and thus does not yet have an associate * identifier, the identifier will be automatically set in the provided * sharing profile upon success. * * @param {SharingProfile} sharingProfile * The sharing profile to update. * * @returns {Promise} * A promise for the HTTP call which will succeed if and only if the * save operation is successful. 
*/ service.saveSharingProfile = function saveSharingProfile(dataSource, sharingProfile) { // Build HTTP parameters set var httpParameters = { token : authenticationService.getCurrentToken() }; // If sharing profile is new, add it and set the identifier automatically if (!sharingProfile.identifier) { return $http({ method : 'POST', url : 'api/session/data/' + encodeURIComponent(dataSource) + '/sharingProfiles', params : httpParameters, data : sharingProfile }) // Set the identifier on the new sharing profile and clear the cache .success(function sharingProfileCreated(newSharingProfile){ sharingProfile.identifier = newSharingProfile.identifier; cacheService.connections.removeAll(); // Clear users cache to force reload of permissions for this // newly created sharing profile cacheService.users.removeAll(); }); } // Otherwise, update the existing sharing profile else { return $http({ method : 'PUT', url : 'api/session/data/' + encodeURIComponent(dataSource) + '/sharingProfiles/' + encodeURIComponent(sharingProfile.identifier), params : httpParameters, data : sharingProfile }) // Clear the cache .success(function sharingProfileUpdated(){ cacheService.connections.removeAll(); // Clear users cache to force reload of permissions for this // newly updated sharing profile cacheService.users.removeAll(); }); } }; /** * Makes a request to the REST API to delete a sharing profile, * returning a promise that can be used for processing the results of the call. * * @param {SharingProfile} sharingProfile * The sharing profile to delete. * * @returns {Promise} * A promise for the HTTP call which will succeed if and only if the * delete operation is successful. 
*/ service.deleteSharingProfile = function deleteSharingProfile(dataSource, sharingProfile) { // Build HTTP parameters set var httpParameters = { token : authenticationService.getCurrentToken() }; // Delete sharing profile return $http({ method : 'DELETE', url : 'api/session/data/' + encodeURIComponent(dataSource) + '/sharingProfiles/' + encodeURIComponent(sharingProfile.identifier), params : httpParameters }) // Clear the cache .success(function sharingProfileDeleted(){ cacheService.connections.removeAll(); }); }; return service; }]);
softpymesJeffer/incubator-guacamole-client
guacamole/src/main/webapp/app/rest/services/sharingProfileService.js
JavaScript
apache-2.0
7,066
// Copyright 2019 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package constraint import ( "fmt" "github.com/hashicorp/go-multierror" ) // ExactlyOne item in a collection must match all check Constraints type ExactlyOne struct { Constraints []Check } var _ Range = &ExactlyOne{} // ValidateItems implements Range func (e *ExactlyOne) ValidateItems(arr []interface{}, p Params) error { var matches int var err error mainloop: for _, a := range arr { for _, c := range e.Constraints { er := c.ValidateItem(a, p) if er != nil { err = multierror.Append(err, er) continue mainloop } } matches++ } switch matches { case 0: err = multierror.Append(err, fmt.Errorf("no item matched constraints: %v", arr)) return multierror.Flatten(err) case 1: return nil default: return fmt.Errorf("multiple items(%d) matched constraints: %v", matches, arr) } }
smawson/istio
pkg/test/conformance/constraint/exactlyone.go
GO
apache-2.0
1,416
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2019 Serge Rider (serge@jkiss.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.mysql.model.plan; import org.jkiss.dbeaver.model.exec.plan.DBCPlanCostNode; import org.jkiss.dbeaver.model.impl.plan.AbstractExecutionPlanNode; /** * Abstract plan node */ public abstract class MySQLPlanNode extends AbstractExecutionPlanNode implements DBCPlanCostNode { }
liuyuanyuan/dbeaver
plugins/org.jkiss.dbeaver.ext.mysql/src/org/jkiss/dbeaver/ext/mysql/model/plan/MySQLPlanNode.java
Java
apache-2.0
971
using System; using System.Collections.Generic; using System.Linq; using System.Text; using NUnit.Framework; using Sage.Platform.Application; using OpenSlx.Lib.Utility; namespace OpenSlx.Lib.UnitTest { [SetUpFixture] public class TestSuiteSetup { SlxAppSetup _setup; /// <summary> /// Time zone used for testing (make sure this is different from the local time zone) /// </summary> public const String TIMEZONE = "Pacific Standard Time (Mexico)"; [SetUp] public void Setup() { // app root - this needs to be updated if the project is deployed somewhere else _setup = new SlxAppSetup(@"\inetpub\WWWRoot\SlxEval\SlxClient", ""); try { _setup.Open(); } catch (Exception) { throw; } // setup a default time zone - this is used in some business rules // normally passed in from the client browser. ApplicationContext.Current.Services.Get<Sage.Platform.Application.IContextService>().SetContext("TimeZone", new Sage.Platform.TimeZones().FindTimeZone(TIMEZONE)); } [TearDown] public void Teardown() { _setup.Close(); } } }
ngaller/OpenSlx
OpenSlx.Lib.UnitTest/TestSuiteSetup.cs
C#
apache-2.0
1,401
package org.elasticsearch.examples.nativescript.script; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import java.util.ArrayList; import java.util.List; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.sort.SortBuilders; import org.junit.Test; /** */ public class RandomSortScriptTests extends AbstractSearchScriptTests { @Test public void testPseudoRandomScript() throws Exception { // Create a new index String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") .startObject("name").field("type", "string").endObject() .endObject().endObject().endObject() .string(); assertAcked(prepareCreate("test") .addMapping("type", mapping)); List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>(); // Index 100 records (0..99) for (int i = 0; i < 100; i++) { indexBuilders.add( client().prepareIndex("test", "type", Integer.toString(i)) .setSource(XContentFactory.jsonBuilder().startObject() .field("name", "rec " + i) .endObject())); } indexRandom(true, indexBuilders); // Retrieve first 10 records SearchResponse searchResponse = client().prepareSearch("test") .setQuery(matchAllQuery()) .addField("name") .setSize(10) .addSort(SortBuilders.scriptSort("random", "number").lang("native").setParams(MapBuilder.<String, Object>newMapBuilder().put("salt", "1234").map())) .execute().actionGet(); assertNoFailures(searchResponse); // Check that random order was applied 
assertThat(searchResponse.getHits().getAt(0).field("name").getValue().toString(), not(equalTo("rec0"))); String[] records = new String[10]; // Store sort order for (int i = 0; i < 10; i++) { records[i] = searchResponse.getHits().getAt(i).field("name").getValue().toString(); } // Retrieve first 10 records again searchResponse = client().prepareSearch("test") .setQuery(matchAllQuery()) .addField("name") .setSize(10) .addSort(SortBuilders.scriptSort("random", "number").lang("native").setParams(MapBuilder.<String, Object>newMapBuilder().put("salt", "1234").map())) .execute().actionGet(); assertNoFailures(searchResponse); // Verify the same sort order for (int i = 0; i < 10; i++) { assertThat(searchResponse.getHits().getAt(i).field("name").getValue().toString(), equalTo(records[i])); } // Retrieve first 10 records without salt searchResponse = client().prepareSearch("test") .setQuery(matchAllQuery()) .addField("name") .setSize(10) .addSort(SortBuilders.scriptSort("random", "number").lang("native")) .execute().actionGet(); assertNoFailures(searchResponse); // Verify different sort order boolean different = false; for (int i = 0; i < 10; i++) { if (!records[i].equals(searchResponse.getHits().getAt(i).field("name").getValue().toString())) { different = true; break; } } assertThat(different, equalTo(true)); } }
AndreiArion/elasticsearch-loghash-plugin
src/test/java/org/elasticsearch/examples/nativescript/script/RandomSortScriptTests.java
Java
apache-2.0
4,084
package com.yammer.metrics.scala import java.util.concurrent.TimeUnit import com.yammer.metrics.Metrics import com.yammer.metrics.core.{MetricsRegistry, Gauge} /** * A helper class for creating and registering metrics. */ class MetricsGroup(val klass: Class[_], val metricsRegistry: MetricsRegistry = Metrics.defaultRegistry()) { /** * Registers a new gauge metric. * * @param name the name of the gauge * @param scope the scope of the gauge * @param registry the registry for the gauge */ def gauge[A](name: String, scope: String = null, registry: MetricsRegistry = metricsRegistry)(f: => A) = { registry.newGauge(klass, name, scope, new Gauge[A] { def getValue = f }) } /** * Creates a new counter metric. * * @param name the name of the counter * @param scope the scope of the gauge * @param registry the registry for the gauge */ def counter(name: String, scope: String = null, registry: MetricsRegistry = metricsRegistry) = new Counter(registry.newCounter(klass, name, scope)) /** * Creates a new histogram metrics. * * @param name the name of the histogram * @param scope the scope of the histogram * @param biased whether or not to use a biased sample * @param registry the registry for the gauge */ def histogram(name: String, scope: String = null, biased: Boolean = false, registry: MetricsRegistry = metricsRegistry) = new Histogram(registry.newHistogram(klass, name, scope, biased)) /** * Creates a new meter metric. * * @param name the name of the meter * @param eventType the plural name of the type of events the meter is * measuring (e.g., "requests") * @param scope the scope of the meter * @param unit the time unit of the meter * @param registry the registry for the gauge */ def meter(name: String, eventType: String, scope: String = null, unit: TimeUnit = TimeUnit.SECONDS, registry: MetricsRegistry = metricsRegistry) = new Meter(registry.newMeter(klass, name, scope, eventType, unit)) /** * Creates a new timer metric. 
* * @param name the name of the timer * @param scope the scope of the timer * @param durationUnit the time unit for measuring duration * @param rateUnit the time unit for measuring rate * @param registry the registry for the gauge */ def timer(name: String, scope: String = null, durationUnit: TimeUnit = TimeUnit.MILLISECONDS, rateUnit: TimeUnit = TimeUnit.SECONDS, registry: MetricsRegistry = metricsRegistry) = new Timer(registry.newTimer(klass, name, scope, durationUnit, rateUnit)) }
germanborbolla/metrics-scala
src/main/scala/com/yammer/metrics/scala/MetricsGroup.scala
Scala
apache-2.0
2,766
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.13"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <title>ColorPatternTracker: patterntracker.ColorGridTracker.Paird Class Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="navtree.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="resize.js"></script> <script type="text/javascript" src="navtreedata.js"></script> <script type="text/javascript" src="navtree.js"></script> <script type="text/javascript"> $(document).ready(initResizable); </script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/searchdata.js"></script> <script type="text/javascript" src="search/search.js"></script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! 
--> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">ColorPatternTracker </div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.13 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <script type="text/javascript" src="menudata.js"></script> <script type="text/javascript" src="menu.js"></script> <script type="text/javascript"> $(function() { initMenu('',true,false,'search.php','Search'); $(document).ready(function() { init_search(); }); }); </script> <div id="main-nav"></div> </div><!-- top --> <div id="side-nav" class="ui-resizable side-nav-resizable"> <div id="nav-tree"> <div id="nav-tree-contents"> <div id="nav-sync" class="sync"></div> </div> </div> <div id="splitbar" style="-moz-user-select:none;" class="ui-resizable-handle"> </div> </div> <script type="text/javascript"> $(document).ready(function(){initNavTree('classpatterntracker_1_1_color_grid_tracker_1_1_paird.html','');}); </script> <div id="doc-content"> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> </div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div class="header"> <div class="summary"> <a href="#pub-methods">Public Member Functions</a> &#124; <a href="#pub-attribs">Public Attributes</a> &#124; <a href="classpatterntracker_1_1_color_grid_tracker_1_1_paird-members.html">List of all members</a> </div> <div class="headertitle"> <div class="title">patterntracker.ColorGridTracker.Paird Class Reference</div> </div> </div><!--header--> <div 
class="contents"> <div class="dynheader"> Inheritance diagram for patterntracker.ColorGridTracker.Paird:</div> <div class="dyncontent"> <div class="center"><img src="classpatterntracker_1_1_color_grid_tracker_1_1_paird__inherit__graph.png" border="0" usemap="#patterntracker_8_color_grid_tracker_8_paird_inherit__map" alt="Inheritance graph"/></div> <center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div> <div class="dynheader"> Collaboration diagram for patterntracker.ColorGridTracker.Paird:</div> <div class="dyncontent"> <div class="center"><img src="classpatterntracker_1_1_color_grid_tracker_1_1_paird__coll__graph.png" border="0" usemap="#patterntracker_8_color_grid_tracker_8_paird_coll__map" alt="Collaboration graph"/></div> <center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div> <table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a> Public Member Functions</h2></td></tr> <tr class="memitem:acc7000ec9978de6c36a01d718f9672ef"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#acc7000ec9978de6c36a01d718f9672ef">Paird</a> (int <a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#a174a9d23d1685dd4ae5e0056c439228c">index</a>, double <a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#ad0de261e2ae502ec42d068e4a82fca36">value</a>)</td></tr> <tr class="separator:acc7000ec9978de6c36a01d718f9672ef"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ab1fddad44021625d55084f9514a60d7f"><td class="memItemLeft" align="right" valign="top">int&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#ab1fddad44021625d55084f9514a60d7f">compareTo</a> (<a 
class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html">Paird</a> other)</td></tr> <tr class="separator:ab1fddad44021625d55084f9514a60d7f"><td class="memSeparator" colspan="2">&#160;</td></tr> </table><table class="memberdecls"> <tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-attribs"></a> Public Attributes</h2></td></tr> <tr class="memitem:a174a9d23d1685dd4ae5e0056c439228c"><td class="memItemLeft" align="right" valign="top">final int&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#a174a9d23d1685dd4ae5e0056c439228c">index</a></td></tr> <tr class="separator:a174a9d23d1685dd4ae5e0056c439228c"><td class="memSeparator" colspan="2">&#160;</td></tr> <tr class="memitem:ad0de261e2ae502ec42d068e4a82fca36"><td class="memItemLeft" align="right" valign="top">final double&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html#ad0de261e2ae502ec42d068e4a82fca36">value</a></td></tr> <tr class="separator:ad0de261e2ae502ec42d068e4a82fca36"><td class="memSeparator" colspan="2">&#160;</td></tr> </table> <a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2> <div class="textblock"><p>The Class <a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html">Paird</a> for sorting. 
</p> <p class="definition">Definition at line <a class="el" href="_color_grid_tracker_8java_source.html#l00396">396</a> of file <a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a>.</p> </div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2> <a id="acc7000ec9978de6c36a01d718f9672ef"></a> <h2 class="memtitle"><span class="permalink"><a href="#acc7000ec9978de6c36a01d718f9672ef">&#9670;&nbsp;</a></span>Paird()</h2> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">patterntracker.ColorGridTracker.Paird.Paird </td> <td>(</td> <td class="paramtype">int&#160;</td> <td class="paramname"><em>index</em>, </td> </tr> <tr> <td class="paramkey"></td> <td></td> <td class="paramtype">double&#160;</td> <td class="paramname"><em>value</em>&#160;</td> </tr> <tr> <td></td> <td>)</td> <td></td><td></td> </tr> </table> </div><div class="memdoc"> <p>Instantiates a new paird.</p> <dl class="params"><dt>Parameters</dt><dd> <table class="params"> <tr><td class="paramname">index</td><td>the index </td></tr> <tr><td class="paramname">value</td><td>the value </td></tr> </table> </dd> </dl> <p class="definition">Definition at line <a class="el" href="_color_grid_tracker_8java_source.html#l00410">410</a> of file <a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a>.</p> </div> </div> <h2 class="groupheader">Member Function Documentation</h2> <a id="ab1fddad44021625d55084f9514a60d7f"></a> <h2 class="memtitle"><span class="permalink"><a href="#ab1fddad44021625d55084f9514a60d7f">&#9670;&nbsp;</a></span>compareTo()</h2> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">int patterntracker.ColorGridTracker.Paird.compareTo </td> <td>(</td> <td class="paramtype"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html">Paird</a>&#160;</td> <td class="paramname"><em>other</em></td><td>)</td> <td></td> </tr> 
</table> </div><div class="memdoc"> <p class="definition">Definition at line <a class="el" href="_color_grid_tracker_8java_source.html#l00419">419</a> of file <a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a>.</p> </div> </div> <h2 class="groupheader">Member Data Documentation</h2> <a id="a174a9d23d1685dd4ae5e0056c439228c"></a> <h2 class="memtitle"><span class="permalink"><a href="#a174a9d23d1685dd4ae5e0056c439228c">&#9670;&nbsp;</a></span>index</h2> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">final int patterntracker.ColorGridTracker.Paird.index</td> </tr> </table> </div><div class="memdoc"> <p>The index. </p> <p class="definition">Definition at line <a class="el" href="_color_grid_tracker_8java_source.html#l00399">399</a> of file <a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a>.</p> </div> </div> <a id="ad0de261e2ae502ec42d068e4a82fca36"></a> <h2 class="memtitle"><span class="permalink"><a href="#ad0de261e2ae502ec42d068e4a82fca36">&#9670;&nbsp;</a></span>value</h2> <div class="memitem"> <div class="memproto"> <table class="memname"> <tr> <td class="memname">final double patterntracker.ColorGridTracker.Paird.value</td> </tr> </table> </div><div class="memdoc"> <p>The value. </p> <p class="definition">Definition at line <a class="el" href="_color_grid_tracker_8java_source.html#l00402">402</a> of file <a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a>.</p> </div> </div> <hr/>The documentation for this class was generated from the following file:<ul> <li>C:/Research/projects/ColorPatternTracker/PatternTracker/app/src/main/java/com/samsung/dtl/patterntracker/<a class="el" href="_color_grid_tracker_8java_source.html">ColorGridTracker.java</a></li> </ul> </div><!-- contents --> </div><!-- doc-content --> <!-- start footer part --> <div id="nav-path" class="navpath"><!-- id is needed for treeview function! 
--> <ul> <li class="navelem"><a class="el" href="namespacepatterntracker.html">patterntracker</a></li><li class="navelem"><a class="el" href="classpatterntracker_1_1_color_grid_tracker.html">ColorGridTracker</a></li><li class="navelem"><a class="el" href="classpatterntracker_1_1_color_grid_tracker_1_1_paird.html">Paird</a></li> <li class="footer">Generated by <a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.13 </li> </ul> </div> </body> </html>
Samsung/ColorPatternTracker
PatternTracker/doxygen/html/classpatterntracker_1_1_color_grid_tracker_1_1_paird.html
HTML
apache-2.0
11,689
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (9-Debian) on Thu Sep 28 23:13:23 GMT 2017 --> <title>Uses of Class dollar.internal.runtime.script.parser.SymbolDef (dollar-script 0.4.5195 API)</title> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <meta name="date" content="2017-09-28"> <link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style"> <link rel="stylesheet" type="text/css" href="../../../../../../jquery/jquery-ui.css" title="Style"> <script type="text/javascript" src="../../../../../../script.js"></script> <script type="text/javascript" src="../../../../../../jquery/jszip/dist/jszip.min.js"></script> <script type="text/javascript" src="../../../../../../jquery/jszip-utils/dist/jszip-utils.min.js"></script> <!--[if IE]> <script type="text/javascript" src="../../../../../../jquery/jszip-utils/dist/jszip-utils-ie.min.js"></script> <![endif]--> <script type="text/javascript" src="../../../../../../jquery/jquery-1.10.2.js"></script> <script type="text/javascript" src="../../../../../../jquery/jquery-ui.js"></script> </head> <body> <script type="text/javascript"><!-- try { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class dollar.internal.runtime.script.parser.SymbolDef (dollar-script 0.4.5195 API)"; } } catch(err) { } //--> var pathtoroot = "../../../../../../";loadScripts(document, 'script');</script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <div class="fixedNav"> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar.top"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.top.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a 
href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?dollar/internal/runtime/script/parser/class-use/SymbolDef.html" target="_top">Frames</a></li> <li><a href="SymbolDef.html" target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <ul class="navListSearch"> <li><span>SEARCH:&nbsp;</span> <input type="text" id="search" value=" " disabled="disabled"> <input type="reset" id="reset" value=" " disabled="disabled"> </li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> </div> <a name="skip.navbar.top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> </div> <div class="navPadding">&nbsp;</div> <script type="text/javascript"><!-- $('.navPadding').css('padding-top', $('.fixedNav').css("height")); //--> </script> <div class="header"> <h2 title="Uses of Class dollar.internal.runtime.script.parser.SymbolDef" class="title">Uses of Class<br>dollar.internal.runtime.script.parser.SymbolDef</h2> </div> <div class="classUseContainer"> <ul 
class="blockList"> <li class="blockList"> <table class="useSummary" summary="Use table, listing packages, and an explanation"> <caption><span>Packages that use <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Package</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <th class="colFirst" scope="row"><a href="#dollar.internal.runtime.script.parser">dollar.internal.runtime.script.parser</a></th> <td class="colLast">&nbsp;</td> </tr> </tbody> </table> </li> <li class="blockList"> <ul class="blockList"> <li class="blockList"><a name="dollar.internal.runtime.script.parser"> <!-- --> </a> <h3>Uses of <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a> in <a href="../../../../../../dollar/internal/runtime/script/parser/package-summary.html">dollar.internal.runtime.script.parser</a></h3> <table class="useSummary" summary="Use table, listing fields, and an explanation"> <caption><span>Fields in <a href="../../../../../../dollar/internal/runtime/script/parser/package-summary.html">dollar.internal.runtime.script.parser</a> declared as <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></span><span class="tabEnd">&nbsp;</span></caption> <tr> <th class="colFirst" scope="col">Modifier and Type</th> <th class="colSecond" scope="col">Field</th> <th class="colLast" scope="col">Description</th> </tr> <tbody> <tr class="altColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" 
scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#COMMA">COMMA</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#DOLLAR">DOLLAR</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#LEFT_BRACE">LEFT_BRACE</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#LEFT_BRACKET">LEFT_BRACKET</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" 
scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#LEFT_PAREN">LEFT_PAREN</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#NEWLINE">NEWLINE</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#RIGHT_BRACE">RIGHT_BRACE</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#RIGHT_BRACKET">RIGHT_BRACKET</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="altColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th 
class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#RIGHT_PAREN">RIGHT_PAREN</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> <tr class="rowColor"> <td class="colFirst"><code>static @NotNull <a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">SymbolDef</a></code></td> <th class="colSecond" scope="row"><span class="typeNameLabel">Symbols.</span><code><span class="memberNameLink"><a href="../../../../../../dollar/internal/runtime/script/parser/Symbols.html#SEMI_COLON">SEMI_COLON</a></span></code></th> <td class="colLast">&nbsp;</td> </tr> </tbody> </table> </li> </ul> </li> </ul> </div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar.bottom"> <!-- --> </a> <div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div> <a name="navbar.bottom.firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../../../dollar/internal/runtime/script/parser/SymbolDef.html" title="class in dollar.internal.runtime.script.parser">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../../../index-all.html">Index</a></li> <li><a href="../../../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../../../index.html?dollar/internal/runtime/script/parser/class-use/SymbolDef.html" target="_top">Frames</a></li> <li><a href="SymbolDef.html" 
target="_top">No&nbsp;Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../../../allclasses-noframe.html">All&nbsp;Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> </div> <a name="skip.navbar.bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small>Copyright &#169; 2017. All rights reserved.</small></p> </body> </html>
neilellis/dollar
docs/dev/dollar-script/apidocs/dollar/internal/runtime/script/parser/class-use/SymbolDef.html
HTML
apache-2.0
12,675
-------------------------------------------------------------------------------- ## Treebank Statistics (UD_Danish) This feature is language-specific. It occurs with 2 different values: `Arch`, `Form`. 36 tokens (0%) have a non-empty value of `Style`. 6 types (0%) occur at least once with a non-empty value of `Style`. 2 lemmas (0%) occur at least once with a non-empty value of `Style`. The feature is used with 2 part-of-speech tags: [da-pos/DET]() (35; 0% instances), [da-pos/PRON]() (1; 0% instances). ### `DET` 35 [da-pos/DET]() tokens (1% of all `DET` tokens) have a non-empty value of `Style`. The most frequent other feature values with which `DET` and `Style` co-occurred: <tt><a href="Person.html">Person</a>=1</tt> (34; 97%), <tt><a href="PronType.html">PronType</a>=Prs</tt> (34; 97%), <tt><a href="Poss.html">Poss</a>=Yes</tt> (34; 97%), <tt><a href="Number[psor].html">Number[psor]</a>=Plur</tt> (34; 97%), <tt><a href="Number.html">Number</a>=Plur</tt> (19; 54%), <tt><a href="Gender.html">Gender</a>=EMPTY</tt> (19; 54%). `DET` tokens may have the following values of `Style`: * `Arch` (1; 3% of non-empty `Style`): <em>somme</em> * `Form` (34; 97% of non-empty `Style`): <em>vore, vor, vort</em> ### `PRON` 1 [da-pos/PRON]() tokens (0% of all `PRON` tokens) have a non-empty value of `Style`. The most frequent other feature values with which `PRON` and `Style` co-occurred: <tt><a href="PronType.html">PronType</a>=Prs</tt> (1; 100%), <tt><a href="Number.html">Number</a>=Plur</tt> (1; 100%), <tt><a href="PartType.html">PartType</a>=EMPTY</tt> (1; 100%), <tt><a href="Gender.html">Gender</a>=EMPTY</tt> (1; 100%), <tt><a href="Person.html">Person</a>=1</tt> (1; 100%), <tt><a href="Case.html">Case</a>=EMPTY</tt> (1; 100%). `PRON` tokens may have the following values of `Style`: * `Form` (1; 100% of non-empty `Style`): <em>vore</em>
fginter/docs-fginterfork
_includes/stats/da/feat/Style.md
Markdown
apache-2.0
1,871
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/c/c_api.h" #include <algorithm> #include <cstddef> #include <iterator> #include <memory> #include <vector> #include "tensorflow/c/c_test_util.h" #include "tensorflow/cc/saved_model/signature_constants.h" #include "tensorflow/cc/saved_model/tag_constants.h" #include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/feature.pb.h" #include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/graph.pb_text.h" #include "tensorflow/core/framework/kernel_def.pb.h" #include "tensorflow/core/framework/node_def.pb_text.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/core/error_codes.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" 
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/meta_graph.pb.h" #include "tensorflow/core/util/equal_graph_def.h" namespace tensorflow { TF_Tensor* TF_TensorFromTensor(const Tensor& src, TF_Status* status); Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst); namespace { static void ExpectHasSubstr(StringPiece s, StringPiece expected) { EXPECT_TRUE(absl::StrContains(s, expected)) << "'" << s << "' does not contain '" << expected << "'"; } // Returns the GPU device name if there is one (with arbitrary tie breaking if // there are more than one), or "" otherwise. string GPUDeviceName(TF_Session* session) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); TF_Status* s = status.get(); std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> list( TF_SessionListDevices(session, s), TF_DeleteDeviceList); TF_DeviceList* device_list = list.get(); CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); const int num_devices = TF_DeviceListCount(device_list); LOG(INFO) << "There are " << num_devices << " devices."; for (int i = 0; i < num_devices; ++i) { const char* device_name = TF_DeviceListName(device_list, i, s); CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); const char* device_type = TF_DeviceListType(device_list, i, s); CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); LOG(INFO) << "Device " << i << " has name " << device_name << ", type " << device_type; if (string(device_type) == DEVICE_GPU) { return device_name; } } // No GPU device found. 
return ""; } string GPUDeviceName() { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); TF_Status* s = status.get(); std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> graph(TF_NewGraph(), TF_DeleteGraph); TF_SessionOptions* opts = TF_NewSessionOptions(); TF_Session* sess = TF_NewSession(graph.get(), opts, s); TF_DeleteSessionOptions(opts); const string gpu_device_name = GPUDeviceName(sess); TF_DeleteSession(sess, s); CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); return gpu_device_name; } TEST(CAPI, Version) { EXPECT_STRNE("", TF_Version()); } TEST(CAPI, Status) { TF_Status* s = TF_NewStatus(); EXPECT_EQ(TF_OK, TF_GetCode(s)); EXPECT_EQ(string(), TF_Message(s)); TF_SetStatus(s, TF_CANCELLED, "cancel"); EXPECT_EQ(TF_CANCELLED, TF_GetCode(s)); EXPECT_EQ(string("cancel"), TF_Message(s)); TF_DeleteStatus(s); } void Deallocator(void* data, size_t, void* arg) { tensorflow::cpu_allocator()->DeallocateRaw(data); *reinterpret_cast<bool*>(arg) = true; } TEST(CAPI, Tensor) { const int num_bytes = 6 * sizeof(float); float* values = reinterpret_cast<float*>(tensorflow::cpu_allocator()->AllocateRaw( EIGEN_MAX_ALIGN_BYTES, num_bytes)); int64_t dims[] = {2, 3}; bool deallocator_called = false; TF_Tensor* t = TF_NewTensor(TF_FLOAT, dims, 2, values, num_bytes, &Deallocator, &deallocator_called); EXPECT_FALSE(deallocator_called); EXPECT_EQ(TF_FLOAT, TF_TensorType(t)); EXPECT_EQ(2, TF_NumDims(t)); EXPECT_EQ(dims[0], TF_Dim(t, 0)); EXPECT_EQ(dims[1], TF_Dim(t, 1)); EXPECT_EQ(num_bytes, TF_TensorByteSize(t)); EXPECT_EQ(static_cast<void*>(values), TF_TensorData(t)); TF_DeleteTensor(t); EXPECT_TRUE(deallocator_called); } void NoOpDeallocator(void* data, size_t, void*) {} TEST(CAPI, MalformedTensor) { // See https://github.com/tensorflow/tensorflow/issues/7394 // num_dims = 0 implies a scalar, so should be backed by at least 4 bytes of // data. 
TF_Tensor* t = TF_NewTensor(TF_FLOAT, nullptr, 0, nullptr, 0, &NoOpDeallocator, nullptr); ASSERT_TRUE(t == nullptr); } TEST(CAPI, AllocateTensor) { const int num_bytes = 6 * sizeof(float); int64_t dims[] = {2, 3}; TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, num_bytes); EXPECT_EQ(TF_FLOAT, TF_TensorType(t)); EXPECT_EQ(2, TF_NumDims(t)); EXPECT_EQ(dims[0], TF_Dim(t, 0)); EXPECT_EQ(dims[1], TF_Dim(t, 1)); EXPECT_EQ(num_bytes, TF_TensorByteSize(t)); EXPECT_EQ(6, TF_TensorElementCount(t)); TF_DeleteTensor(t); } TEST(CAPI, MaybeMove) { const int num_bytes = 6 * sizeof(float); float* values = reinterpret_cast<float*>(tensorflow::cpu_allocator()->AllocateRaw( EIGEN_MAX_ALIGN_BYTES, num_bytes)); int64_t dims[] = {2, 3}; bool deallocator_called = false; TF_Tensor* t = TF_NewTensor(TF_FLOAT, dims, 2, values, num_bytes, &Deallocator, &deallocator_called); TF_Tensor* o = TF_TensorMaybeMove(t); ASSERT_TRUE(o == nullptr); // It is unsafe to move memory TF might not own. TF_DeleteTensor(t); EXPECT_TRUE(deallocator_called); } TEST(CAPI, LibraryLoadFunctions) { // TODO(b/73318067): Fix linking for the GPU test generated by the // tf_cuda_cc_test() bazel rule and remove the next line. if (!GPUDeviceName().empty()) return; #if !defined(TENSORFLOW_NO_SHARED_OBJECTS) { // Load the library. TF_Status* status = TF_NewStatus(); TF_Library* lib = TF_LoadLibrary("tensorflow/c/test_op1.so", status); TF_Code code = TF_GetCode(status); string status_msg(TF_Message(status)); TF_DeleteStatus(status); ASSERT_EQ(TF_OK, code) << status_msg; // Test op list. 
TF_Buffer op_list_buf = TF_GetOpList(lib); tensorflow::OpList op_list; EXPECT_TRUE(op_list.ParseFromArray(op_list_buf.data, op_list_buf.length)); ASSERT_EQ(op_list.op_size(), 1); EXPECT_EQ("TestCApi1", op_list.op(0).name()); TF_DeleteLibraryHandle(lib); } #endif // !defined(TENSORFLOW_NO_SHARED_OBJECTS) { TF_Buffer* op_list_buffer = TF_GetAllOpList(); tensorflow::OpList op_list; op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length); ASSERT_GE(op_list.op_size(), 1); typedef tensorflow::protobuf::RepeatedPtrField<tensorflow::OpDef> OpDefs; const OpDefs& ops = op_list.op(); bool found = std::find_if(ops.begin(), ops.end(), [](const tensorflow::OpDef& op_def) { return op_def.name() == "TestCApi"; }) != ops.end(); EXPECT_TRUE(found); TF_DeleteBuffer(op_list_buffer); } } void TestEncodeDecode(int line, const std::vector<string>& data) { const tensorflow::int64 n = data.size(); TF_Status* status = TF_NewStatus(); for (const std::vector<tensorflow::int64>& dims : std::vector<std::vector<tensorflow::int64>>{ {n}, {1, n}, {n, 1}, {n / 2, 2}}) { // Create C++ Tensor Tensor src(tensorflow::DT_STRING, TensorShape(dims)); for (tensorflow::int64 i = 0; i < src.NumElements(); ++i) { src.flat<string>()(i) = data[i]; } TF_Tensor* dst = TF_TensorFromTensor(src, status); ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); // Convert back to a C++ Tensor and ensure we get expected output. 
Tensor output; ASSERT_EQ(Status::OK(), TF_TensorToTensor(dst, &output)) << line; ASSERT_EQ(src.NumElements(), output.NumElements()) << line; for (tensorflow::int64 i = 0; i < src.NumElements(); ++i) { ASSERT_EQ(data[i], output.flat<string>()(i)) << line; } TF_DeleteTensor(dst); } TF_DeleteStatus(status); } TEST(CAPI, TensorEncodeDecodeStrings) { TestEncodeDecode(__LINE__, {}); TestEncodeDecode(__LINE__, {"hello"}); TestEncodeDecode(__LINE__, {"the", "quick", "brown", "fox", "jumped", "over"}); string big(1000, 'a'); TestEncodeDecode(__LINE__, {"small", big, "small2"}); } TEST(CAPI, SessionOptions) { TF_SessionOptions* opt = TF_NewSessionOptions(); TF_DeleteSessionOptions(opt); } TEST(CAPI, DeprecatedSession) { TF_Status* s = TF_NewStatus(); TF_SessionOptions* opt = TF_NewSessionOptions(); TF_DeprecatedSession* session = TF_NewDeprecatedSession(opt, s); TF_DeleteSessionOptions(opt); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); TF_Buffer* run_options = TF_NewBufferFromString("", 0); TF_Buffer* run_metadata = TF_NewBuffer(); TF_Run(session, run_options, nullptr, nullptr, 0, nullptr, nullptr, 0, nullptr, 0, run_metadata, s); EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ("Session was not created with a graph before Run()!", string(TF_Message(s))); TF_DeleteBuffer(run_metadata); TF_DeleteBuffer(run_options); TF_DeleteDeprecatedSession(session, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); TF_DeleteStatus(s); } TEST(CAPI, DataTypeEnum) { EXPECT_EQ(TF_FLOAT, static_cast<TF_DataType>(tensorflow::DT_FLOAT)); EXPECT_EQ(TF_DOUBLE, static_cast<TF_DataType>(tensorflow::DT_DOUBLE)); EXPECT_EQ(TF_INT32, static_cast<TF_DataType>(tensorflow::DT_INT32)); EXPECT_EQ(TF_UINT8, static_cast<TF_DataType>(tensorflow::DT_UINT8)); EXPECT_EQ(TF_INT16, static_cast<TF_DataType>(tensorflow::DT_INT16)); EXPECT_EQ(TF_INT8, static_cast<TF_DataType>(tensorflow::DT_INT8)); EXPECT_EQ(TF_STRING, static_cast<TF_DataType>(tensorflow::DT_STRING)); 
EXPECT_EQ(TF_COMPLEX64, static_cast<TF_DataType>(tensorflow::DT_COMPLEX64)); EXPECT_EQ(TF_COMPLEX, TF_COMPLEX64); EXPECT_EQ(TF_INT64, static_cast<TF_DataType>(tensorflow::DT_INT64)); EXPECT_EQ(TF_BOOL, static_cast<TF_DataType>(tensorflow::DT_BOOL)); EXPECT_EQ(TF_QINT8, static_cast<TF_DataType>(tensorflow::DT_QINT8)); EXPECT_EQ(TF_QUINT8, static_cast<TF_DataType>(tensorflow::DT_QUINT8)); EXPECT_EQ(TF_QINT32, static_cast<TF_DataType>(tensorflow::DT_QINT32)); EXPECT_EQ(TF_BFLOAT16, static_cast<TF_DataType>(tensorflow::DT_BFLOAT16)); EXPECT_EQ(TF_QINT16, static_cast<TF_DataType>(tensorflow::DT_QINT16)); EXPECT_EQ(TF_QUINT16, static_cast<TF_DataType>(tensorflow::DT_QUINT16)); EXPECT_EQ(TF_UINT16, static_cast<TF_DataType>(tensorflow::DT_UINT16)); EXPECT_EQ(TF_COMPLEX128, static_cast<TF_DataType>(tensorflow::DT_COMPLEX128)); EXPECT_EQ(TF_HALF, static_cast<TF_DataType>(tensorflow::DT_HALF)); EXPECT_EQ(TF_DataTypeSize(TF_DOUBLE), tensorflow::DataTypeSize(tensorflow::DT_DOUBLE)); EXPECT_EQ(TF_DataTypeSize(TF_STRING), tensorflow::DataTypeSize(tensorflow::DT_STRING)); // Test with invalid type; should always return 0 as documented EXPECT_EQ(TF_DataTypeSize(static_cast<TF_DataType>(0)), 0); } TEST(CAPI, StatusEnum) { EXPECT_EQ(TF_OK, static_cast<TF_Code>(tensorflow::error::OK)); EXPECT_EQ(TF_CANCELLED, static_cast<TF_Code>(tensorflow::error::CANCELLED)); EXPECT_EQ(TF_UNKNOWN, static_cast<TF_Code>(tensorflow::error::UNKNOWN)); EXPECT_EQ(TF_INVALID_ARGUMENT, static_cast<TF_Code>(tensorflow::error::INVALID_ARGUMENT)); EXPECT_EQ(TF_DEADLINE_EXCEEDED, static_cast<TF_Code>(tensorflow::error::DEADLINE_EXCEEDED)); EXPECT_EQ(TF_NOT_FOUND, static_cast<TF_Code>(tensorflow::error::NOT_FOUND)); EXPECT_EQ(TF_ALREADY_EXISTS, static_cast<TF_Code>(tensorflow::error::ALREADY_EXISTS)); EXPECT_EQ(TF_PERMISSION_DENIED, static_cast<TF_Code>(tensorflow::error::PERMISSION_DENIED)); EXPECT_EQ(TF_UNAUTHENTICATED, static_cast<TF_Code>(tensorflow::error::UNAUTHENTICATED)); EXPECT_EQ(TF_RESOURCE_EXHAUSTED, 
static_cast<TF_Code>(tensorflow::error::RESOURCE_EXHAUSTED)); EXPECT_EQ(TF_FAILED_PRECONDITION, static_cast<TF_Code>(tensorflow::error::FAILED_PRECONDITION)); EXPECT_EQ(TF_ABORTED, static_cast<TF_Code>(tensorflow::error::ABORTED)); EXPECT_EQ(TF_OUT_OF_RANGE, static_cast<TF_Code>(tensorflow::error::OUT_OF_RANGE)); EXPECT_EQ(TF_UNIMPLEMENTED, static_cast<TF_Code>(tensorflow::error::UNIMPLEMENTED)); EXPECT_EQ(TF_INTERNAL, static_cast<TF_Code>(tensorflow::error::INTERNAL)); EXPECT_EQ(TF_UNAVAILABLE, static_cast<TF_Code>(tensorflow::error::UNAVAILABLE)); EXPECT_EQ(TF_DATA_LOSS, static_cast<TF_Code>(tensorflow::error::DATA_LOSS)); } TEST(CAPI, GetAllOpList) { TF_Buffer* buf = TF_GetAllOpList(); tensorflow::OpList op_list; EXPECT_TRUE(op_list.ParseFromArray(buf->data, buf->length)); EXPECT_GT(op_list.op_size(), 0); TF_DeleteBuffer(buf); } TEST(CAPI, SetShape) { TF_Status* s = TF_NewStatus(); TF_Graph* graph = TF_NewGraph(); TF_Operation* feed = Placeholder(graph, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); TF_Output feed_out_0 = TF_Output{feed, 0}; int num_dims; // Fetch the shape, it should be completely unknown. num_dims = TF_GraphGetTensorNumDims(graph, feed_out_0, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(-1, num_dims); // Set the shape to be unknown, expect no change. TF_GraphSetTensorShape(graph, feed_out_0, /*dims=*/nullptr, -1, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); num_dims = TF_GraphGetTensorNumDims(graph, feed_out_0, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(-1, num_dims); // Set the shape to be 2 x Unknown int64_t dims[] = {2, -1}; TF_GraphSetTensorShape(graph, feed_out_0, dims, 2, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Fetch the shape and validate it is 2 by -1. num_dims = TF_GraphGetTensorNumDims(graph, feed_out_0, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(2, num_dims); // Resize the dimension vector appropriately. 
int64_t returned_dims[2]; TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(dims[0], returned_dims[0]); EXPECT_EQ(dims[1], returned_dims[1]); // Set to a new valid shape: [2, 3] dims[1] = 3; TF_GraphSetTensorShape(graph, feed_out_0, dims, 2, s); EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Fetch and see that the new value is returned. TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(dims[0], returned_dims[0]); EXPECT_EQ(dims[1], returned_dims[1]); // Try to set 'unknown' with unknown rank on the shape and see that // it doesn't change. TF_GraphSetTensorShape(graph, feed_out_0, /*dims=*/nullptr, -1, s); EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(2, num_dims); EXPECT_EQ(2, returned_dims[0]); EXPECT_EQ(3, returned_dims[1]); // Try to set 'unknown' with same rank on the shape and see that // it doesn't change. dims[0] = -1; dims[1] = -1; TF_GraphSetTensorShape(graph, feed_out_0, dims, 2, s); EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Fetch and see that the new value is returned. TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, num_dims, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(2, num_dims); EXPECT_EQ(2, returned_dims[0]); EXPECT_EQ(3, returned_dims[1]); // Try to fetch a shape with the wrong num_dims TF_GraphGetTensorShape(graph, feed_out_0, returned_dims, 5, s); EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s)) << TF_Message(s); // Try to set an invalid shape (cannot change 2x3 to a 2x5). dims[1] = 5; TF_GraphSetTensorShape(graph, feed_out_0, dims, 2, s); EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s)) << TF_Message(s); // Test for a scalar. 
TF_Operation* three = ScalarConst(3, graph, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); TF_Output three_out_0 = TF_Output{three, 0}; num_dims = TF_GraphGetTensorNumDims(graph, three_out_0, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(0, num_dims); TF_GraphGetTensorShape(graph, three_out_0, returned_dims, num_dims, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Clean up TF_DeleteGraph(graph); TF_DeleteStatus(s); } TEST(CAPI, Graph) { TF_Status* s = TF_NewStatus(); TF_Graph* graph = TF_NewGraph(); // Make a placeholder operation. TF_Operation* feed = Placeholder(graph, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Test TF_Operation*() query functions. EXPECT_EQ(string("feed"), string(TF_OperationName(feed))); EXPECT_EQ(string("Placeholder"), string(TF_OperationOpType(feed))); EXPECT_EQ(string(""), string(TF_OperationDevice(feed))); EXPECT_EQ(1, TF_OperationNumOutputs(feed)); EXPECT_EQ(TF_INT32, TF_OperationOutputType(TF_Output{feed, 0})); EXPECT_EQ(1, TF_OperationOutputListLength(feed, "output", s)); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(0, TF_OperationNumInputs(feed)); EXPECT_EQ(0, TF_OperationOutputNumConsumers(TF_Output{feed, 0})); EXPECT_EQ(0, TF_OperationNumControlInputs(feed)); EXPECT_EQ(0, TF_OperationNumControlOutputs(feed)); tensorflow::AttrValue attr_value; ASSERT_TRUE(GetAttrValue(feed, "dtype", &attr_value, s)) << TF_Message(s); EXPECT_EQ(attr_value.type(), tensorflow::DT_INT32); // Test not found errors in TF_Operation*() query functions. EXPECT_EQ(-1, TF_OperationOutputListLength(feed, "bogus", s)); EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s)); ASSERT_FALSE(GetAttrValue(feed, "missing", &attr_value, s)); EXPECT_EQ(string("Operation 'feed' has no attr named 'missing'."), string(TF_Message(s))); // Make a constant oper with the scalar "3". TF_Operation* three = ScalarConst(3, graph, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Add oper. 
TF_Operation* add = Add(feed, three, graph, s); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); // Test TF_Operation*() query functions. EXPECT_EQ(string("add"), string(TF_OperationName(add))); EXPECT_EQ(string("AddN"), string(TF_OperationOpType(add))); EXPECT_EQ(string(""), string(TF_OperationDevice(add))); EXPECT_EQ(1, TF_OperationNumOutputs(add)); EXPECT_EQ(TF_INT32, TF_OperationOutputType(TF_Output{add, 0})); EXPECT_EQ(1, TF_OperationOutputListLength(add, "sum", s)); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(2, TF_OperationNumInputs(add)); EXPECT_EQ(2, TF_OperationInputListLength(add, "inputs", s)); ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); EXPECT_EQ(TF_INT32, TF_OperationInputType(TF_Input{add, 0})); EXPECT_EQ(TF_INT32, TF_OperationInputType(TF_Input{add, 1})); TF_Output add_in_0 = TF_OperationInput(TF_Input{add, 0}); EXPECT_EQ(feed, add_in_0.oper); EXPECT_EQ(0, add_in_0.index); TF_Output add_in_1 = TF_OperationInput(TF_Input{add, 1}); EXPECT_EQ(three, add_in_1.oper); EXPECT_EQ(0, add_in_1.index); EXPECT_EQ(0, TF_OperationOutputNumConsumers(TF_Output{add, 0})); EXPECT_EQ(0, TF_OperationNumControlInputs(add)); EXPECT_EQ(0, TF_OperationNumControlOutputs(add)); ASSERT_TRUE(GetAttrValue(add, "T", &attr_value, s)) << TF_Message(s); EXPECT_EQ(attr_value.type(), tensorflow::DT_INT32); ASSERT_TRUE(GetAttrValue(add, "N", &attr_value, s)) << TF_Message(s); EXPECT_EQ(attr_value.i(), 2); // Placeholder oper now has a consumer. ASSERT_EQ(1, TF_OperationOutputNumConsumers(TF_Output{feed, 0})); TF_Input feed_port; EXPECT_EQ(1, TF_OperationOutputConsumers(TF_Output{feed, 0}, &feed_port, 1)); EXPECT_EQ(add, feed_port.oper); EXPECT_EQ(0, feed_port.index); // The scalar const oper also has a consumer. 
ASSERT_EQ(1, TF_OperationOutputNumConsumers(TF_Output{three, 0}));
  TF_Input three_port;
  EXPECT_EQ(1,
            TF_OperationOutputConsumers(TF_Output{three, 0}, &three_port, 1));
  EXPECT_EQ(add, three_port.oper);
  EXPECT_EQ(1, three_port.index);

  // Serialize to GraphDef.
  GraphDef graph_def;
  ASSERT_TRUE(GetGraphDef(graph, &graph_def));

  // Validate GraphDef is what we expect: exactly one placeholder, one scalar
  // const of 3, and one 2-input AddN, each seen exactly once.
  bool found_placeholder = false;
  bool found_scalar_const = false;
  bool found_add = false;
  for (const auto& n : graph_def.node()) {
    if (IsPlaceholder(n)) {
      EXPECT_FALSE(found_placeholder);
      found_placeholder = true;
    } else if (IsScalarConst(n, 3)) {
      EXPECT_FALSE(found_scalar_const);
      found_scalar_const = true;
    } else if (IsAddN(n, 2)) {
      EXPECT_FALSE(found_add);
      found_add = true;
    } else {
      ADD_FAILURE() << "Unexpected NodeDef: " << ProtoDebugString(n);
    }
  }
  EXPECT_TRUE(found_placeholder);
  EXPECT_TRUE(found_scalar_const);
  EXPECT_TRUE(found_add);

  // Add another oper to the graph.
  TF_Operation* neg = Neg(add, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Serialize to NodeDef.
  NodeDef node_def;
  ASSERT_TRUE(GetNodeDef(neg, &node_def));

  // Validate NodeDef is what we expect.
  EXPECT_TRUE(IsNeg(node_def, "add"));

  // Serialize to GraphDef.
  GraphDef graph_def2;
  ASSERT_TRUE(GetGraphDef(graph, &graph_def2));

  // Compare with first GraphDef + added NodeDef: appending the Neg NodeDef to
  // the pre-Neg GraphDef must reproduce the new serialization exactly.
  NodeDef* added_node = graph_def.add_node();
  *added_node = node_def;
  EXPECT_EQ(ProtoDebugString(graph_def), ProtoDebugString(graph_def2));

  // Look up some nodes by name.
TF_Operation* neg2 = TF_GraphOperationByName(graph, "neg");
  EXPECT_TRUE(neg == neg2);
  NodeDef node_def2;
  ASSERT_TRUE(GetNodeDef(neg2, &node_def2));
  EXPECT_EQ(ProtoDebugString(node_def), ProtoDebugString(node_def2));

  TF_Operation* feed2 = TF_GraphOperationByName(graph, "feed");
  EXPECT_TRUE(feed == feed2);
  ASSERT_TRUE(GetNodeDef(feed, &node_def));
  ASSERT_TRUE(GetNodeDef(feed2, &node_def2));
  EXPECT_EQ(ProtoDebugString(node_def), ProtoDebugString(node_def2));

  // Test iterating through the nodes of a graph: each of the four ops must be
  // visited exactly once, and nothing else.
  found_placeholder = false;
  found_scalar_const = false;
  found_add = false;
  bool found_neg = false;
  size_t pos = 0;
  TF_Operation* oper;
  while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) {
    if (oper == feed) {
      EXPECT_FALSE(found_placeholder);
      found_placeholder = true;
    } else if (oper == three) {
      EXPECT_FALSE(found_scalar_const);
      found_scalar_const = true;
    } else if (oper == add) {
      EXPECT_FALSE(found_add);
      found_add = true;
    } else if (oper == neg) {
      EXPECT_FALSE(found_neg);
      found_neg = true;
    } else {
      ASSERT_TRUE(GetNodeDef(oper, &node_def));
      ADD_FAILURE() << "Unexpected Node: " << ProtoDebugString(node_def);
    }
  }
  EXPECT_TRUE(found_placeholder);
  EXPECT_TRUE(found_scalar_const);
  EXPECT_TRUE(found_add);
  EXPECT_TRUE(found_neg);

  // Clean up
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

/*
TODO(skyewm): this test currently DCHECKs, change to bad status

TEST(CAPI, InputFromDifferentGraphError) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* g1 = TF_NewGraph();
  TF_Graph* g2 = TF_NewGraph();

  TF_Operation* feed = Placeholder(g1, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Attempt to create node in g2 with input from g1
  Neg(feed, g2, s);
  EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s));
  EXPECT_STREQ("foo", TF_Message(s));

  TF_DeleteGraph(g1);
  TF_DeleteGraph(g2);
  TF_DeleteStatus(s);
}
*/

// Round-trips a three-node graph (feed, scalar, neg) through GraphDef and
// exercises TF_GraphImportGraphDef options: prefixes, input mappings, return
// outputs/operations, and control dependencies (plain and remapped).
TEST(CAPI, ImportGraphDef) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Create a simple graph.
Placeholder(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "feed") != nullptr);
  TF_Operation* oper = ScalarConst(3, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "scalar") != nullptr);
  Neg(oper, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "neg") != nullptr);

  // Export to a GraphDef.
  TF_Buffer* graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(graph, graph_def, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Import it, with a prefix, in a fresh graph.
  TF_DeleteGraph(graph);
  graph = TF_NewGraph();
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
  TF_GraphImportGraphDef(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // All three nodes should appear under the "imported/" prefix.
  TF_Operation* scalar = TF_GraphOperationByName(graph, "imported/scalar");
  TF_Operation* feed = TF_GraphOperationByName(graph, "imported/feed");
  TF_Operation* neg = TF_GraphOperationByName(graph, "imported/neg");
  ASSERT_TRUE(scalar != nullptr);
  ASSERT_TRUE(feed != nullptr);
  ASSERT_TRUE(neg != nullptr);

  // Test basic structure of the imported graph.
  EXPECT_EQ(0, TF_OperationNumInputs(scalar));
  EXPECT_EQ(0, TF_OperationNumInputs(feed));
  ASSERT_EQ(1, TF_OperationNumInputs(neg));
  TF_Output neg_input = TF_OperationInput({neg, 0});
  EXPECT_EQ(scalar, neg_input.oper);
  EXPECT_EQ(0, neg_input.index);

  // Test that we can't see control edges involving the source and sink nodes.
TF_Operation* control_ops[100];
  EXPECT_EQ(0, TF_OperationNumControlInputs(scalar));
  EXPECT_EQ(0, TF_OperationGetControlInputs(scalar, control_ops, 100));
  EXPECT_EQ(0, TF_OperationNumControlOutputs(scalar));
  EXPECT_EQ(0, TF_OperationGetControlOutputs(scalar, control_ops, 100));
  EXPECT_EQ(0, TF_OperationNumControlInputs(feed));
  EXPECT_EQ(0, TF_OperationGetControlInputs(feed, control_ops, 100));
  EXPECT_EQ(0, TF_OperationNumControlOutputs(feed));
  EXPECT_EQ(0, TF_OperationGetControlOutputs(feed, control_ops, 100));
  EXPECT_EQ(0, TF_OperationNumControlInputs(neg));
  EXPECT_EQ(0, TF_OperationGetControlInputs(neg, control_ops, 100));
  EXPECT_EQ(0, TF_OperationNumControlOutputs(neg));
  EXPECT_EQ(0, TF_OperationGetControlOutputs(neg, control_ops, 100));

  // Import it again, with an input mapping, return outputs, and a return
  // operation, into the same graph.
  TF_DeleteImportGraphDefOptions(opts);
  opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported2");
  // Map the GraphDef's "scalar" node to the already-imported imported/scalar.
  TF_ImportGraphDefOptionsAddInputMapping(opts, "scalar", 0, {scalar, 0});
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "feed", 0);
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "scalar", 0);
  EXPECT_EQ(2, TF_ImportGraphDefOptionsNumReturnOutputs(opts));
  TF_ImportGraphDefOptionsAddReturnOperation(opts, "scalar");
  EXPECT_EQ(1, TF_ImportGraphDefOptionsNumReturnOperations(opts));
  TF_ImportGraphDefResults* results =
      TF_GraphImportGraphDefWithResults(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_Operation* scalar2 = TF_GraphOperationByName(graph, "imported2/scalar");
  TF_Operation* feed2 = TF_GraphOperationByName(graph, "imported2/feed");
  TF_Operation* neg2 = TF_GraphOperationByName(graph, "imported2/neg");
  ASSERT_TRUE(scalar2 != nullptr);
  ASSERT_TRUE(feed2 != nullptr);
  ASSERT_TRUE(neg2 != nullptr);

  // Check input mapping
  // NOTE(review): this re-queries `neg` (from the first, unmapped import),
  // whose input was already verified above; querying `neg2` here would
  // actually exercise the "scalar" input mapping — confirm intent.
  neg_input = TF_OperationInput({neg, 0});
  EXPECT_EQ(scalar, neg_input.oper);
  EXPECT_EQ(0, neg_input.index);

  // Check return outputs
  TF_Output*
return_outputs;
  int num_return_outputs;
  TF_ImportGraphDefResultsReturnOutputs(results, &num_return_outputs,
                                        &return_outputs);
  ASSERT_EQ(2, num_return_outputs);
  EXPECT_EQ(feed2, return_outputs[0].oper);
  EXPECT_EQ(0, return_outputs[0].index);
  EXPECT_EQ(scalar, return_outputs[1].oper);  // remapped
  EXPECT_EQ(0, return_outputs[1].index);

  // Check return operation
  TF_Operation** return_opers;
  int num_return_opers;
  TF_ImportGraphDefResultsReturnOperations(results, &num_return_opers,
                                           &return_opers);
  ASSERT_EQ(1, num_return_opers);
  EXPECT_EQ(scalar2, return_opers[0]);  // not remapped
  TF_DeleteImportGraphDefResults(results);

  // Import again, with control dependencies, into the same graph.
  TF_DeleteImportGraphDefOptions(opts);
  opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported3");
  TF_ImportGraphDefOptionsAddControlDependency(opts, feed);
  TF_ImportGraphDefOptionsAddControlDependency(opts, feed2);
  TF_GraphImportGraphDef(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_Operation* scalar3 = TF_GraphOperationByName(graph, "imported3/scalar");
  TF_Operation* feed3 = TF_GraphOperationByName(graph, "imported3/feed");
  TF_Operation* neg3 = TF_GraphOperationByName(graph, "imported3/neg");
  ASSERT_TRUE(scalar3 != nullptr);
  ASSERT_TRUE(feed3 != nullptr);
  ASSERT_TRUE(neg3 != nullptr);

  // Check that newly-imported scalar and feed have control deps (neg3 will
  // inherit them from input)
  TF_Operation* control_inputs[100];
  int num_control_inputs = TF_OperationGetControlInputs(
      scalar3, control_inputs, TF_OperationNumControlInputs(scalar3));
  ASSERT_EQ(2, num_control_inputs);
  EXPECT_EQ(feed, control_inputs[0]);
  EXPECT_EQ(feed2, control_inputs[1]);

  num_control_inputs = TF_OperationGetControlInputs(
      feed3, control_inputs, TF_OperationNumControlInputs(feed3));
  ASSERT_EQ(2, num_control_inputs);
  EXPECT_EQ(feed, control_inputs[0]);
  EXPECT_EQ(feed2, control_inputs[1]);

  // Export to a graph def so we can import a graph with control dependencies
  TF_DeleteBuffer(graph_def);
  graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(graph, graph_def, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Import again, with remapped control dependency, into the same graph
  TF_DeleteImportGraphDefOptions(opts);
  opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported4");
  TF_ImportGraphDefOptionsRemapControlDependency(opts, "imported/feed", feed);
  TF_GraphImportGraphDef(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_Operation* scalar4 =
      TF_GraphOperationByName(graph, "imported4/imported3/scalar");
  TF_Operation* feed4 =
      TF_GraphOperationByName(graph, "imported4/imported2/feed");

  // Check that imported `imported3/scalar` has remapped control dep from
  // original graph and imported control dep
  num_control_inputs = TF_OperationGetControlInputs(
      scalar4, control_inputs, TF_OperationNumControlInputs(scalar4));
  ASSERT_EQ(2, num_control_inputs);
  EXPECT_EQ(feed, control_inputs[0]);
  EXPECT_EQ(feed4, control_inputs[1]);

  TF_DeleteImportGraphDefOptions(opts);
  TF_DeleteBuffer(graph_def);

  // Can add nodes to the imported graph without trouble.
  Add(feed, scalar, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// Imports a GraphDef (no prefix) and retrieves requested outputs via
// TF_GraphImportGraphDefWithReturnOutputs into a caller-provided array.
TEST(CAPI, ImportGraphDef_WithReturnOutputs) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Create a graph with two nodes: x and 3
  Placeholder(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "feed") != nullptr);
  TF_Operation* oper = ScalarConst(3, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "scalar") != nullptr);
  Neg(oper, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "neg") != nullptr);

  // Export to a GraphDef.
TF_Buffer* graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(graph, graph_def, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Import it in a fresh graph with return outputs.
  TF_DeleteGraph(graph);
  graph = TF_NewGraph();
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "feed", 0);
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "scalar", 0);
  EXPECT_EQ(2, TF_ImportGraphDefOptionsNumReturnOutputs(opts));
  TF_Output return_outputs[2];
  TF_GraphImportGraphDefWithReturnOutputs(graph, graph_def, opts,
                                          return_outputs, 2, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_Operation* scalar = TF_GraphOperationByName(graph, "scalar");
  TF_Operation* feed = TF_GraphOperationByName(graph, "feed");
  TF_Operation* neg = TF_GraphOperationByName(graph, "neg");
  ASSERT_TRUE(scalar != nullptr);
  ASSERT_TRUE(feed != nullptr);
  ASSERT_TRUE(neg != nullptr);

  // Check return outputs
  EXPECT_EQ(feed, return_outputs[0].oper);
  EXPECT_EQ(0, return_outputs[0].index);
  EXPECT_EQ(scalar, return_outputs[1].oper);
  EXPECT_EQ(0, return_outputs[1].index);

  TF_DeleteImportGraphDefOptions(opts);
  TF_DeleteBuffer(graph_def);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// Verifies that an input mapping naming a node absent from the GraphDef
// ("fake") is reported back via
// TF_ImportGraphDefResultsMissingUnusedInputMappings instead of failing.
TEST(CAPI, ImportGraphDef_MissingUnusedInputMappings) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Create a graph with two nodes: x and 3
  Placeholder(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "feed") != nullptr);
  TF_Operation* oper = ScalarConst(3, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "scalar") != nullptr);
  Neg(oper, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  ASSERT_TRUE(TF_GraphOperationByName(graph, "neg") != nullptr);

  // Export to a GraphDef.
  TF_Buffer* graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(graph, graph_def, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Import it in a fresh graph.
TF_DeleteGraph(graph);
  graph = TF_NewGraph();
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_GraphImportGraphDef(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  TF_Operation* scalar = TF_GraphOperationByName(graph, "scalar");

  // Import it in a fresh graph with an unused input mapping.
  TF_DeleteImportGraphDefOptions(opts);
  opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
  TF_ImportGraphDefOptionsAddInputMapping(opts, "scalar", 0, {scalar, 0});
  // "fake" does not exist in the GraphDef, so this mapping goes unused.
  TF_ImportGraphDefOptionsAddInputMapping(opts, "fake", 0, {scalar, 0});
  TF_ImportGraphDefResults* results =
      TF_GraphImportGraphDefWithResults(graph, graph_def, opts, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Check unused input mappings
  int num_unused_input_mappings;
  const char** src_names;
  int* src_indexes;
  TF_ImportGraphDefResultsMissingUnusedInputMappings(
      results, &num_unused_input_mappings, &src_names, &src_indexes);
  ASSERT_EQ(1, num_unused_input_mappings);
  EXPECT_EQ(string("fake"), string(src_names[0]));
  EXPECT_EQ(0, src_indexes[0]);

  TF_DeleteImportGraphDefResults(results);
  TF_DeleteImportGraphDefOptions(opts);
  TF_DeleteBuffer(graph_def);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// End-to-end session test: runs feed + 2, then extends the graph with a Neg
// and reruns on the same session.
TEST(CAPI, Session) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Make a placeholder operation.
  TF_Operation* feed = Placeholder(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Make a constant operation with the scalar "2".
  TF_Operation* two = ScalarConst(2, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Add operation.
  TF_Operation* add = Add(feed, two, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Create a session for this graph.
  CSession csession(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Run the graph.
csession.SetInputs({{feed, Int32Tensor(3)}});
  csession.SetOutputs({add});
  csession.Run(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Tensor* out = csession.output_tensor(0);
  ASSERT_TRUE(out != nullptr);
  EXPECT_EQ(TF_INT32, TF_TensorType(out));
  EXPECT_EQ(0, TF_NumDims(out));  // scalar
  ASSERT_EQ(sizeof(int32), TF_TensorByteSize(out));
  int32* output_contents = static_cast<int32*>(TF_TensorData(out));
  EXPECT_EQ(3 + 2, *output_contents);

  // Add another operation to the graph.
  TF_Operation* neg = Neg(add, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Run up to the new operation: -(feed + 2) with feed = 7.
  csession.SetInputs({{feed, Int32Tensor(7)}});
  csession.SetOutputs({neg});
  csession.Run(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  out = csession.output_tensor(0);
  ASSERT_TRUE(out != nullptr);
  EXPECT_EQ(TF_INT32, TF_TensorType(out));
  EXPECT_EQ(0, TF_NumDims(out));  // scalar
  ASSERT_EQ(sizeof(int32), TF_TensorByteSize(out));
  output_contents = static_cast<int32*>(TF_TensorData(out));
  EXPECT_EQ(-(7 + 2), *output_contents);

  // Clean up
  csession.CloseAndDelete(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// If `device` is non-empty, run Min op on that device.
// Otherwise run it on the default device (CPU).
void RunMinTest(const string& device, bool use_XLA) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Make a placeholder operation.
  TF_Operation* feed = Placeholder(graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Make a constant operation with the scalar "0", for axis.
  TF_Operation* one = ScalarConst(0, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Create a session for this graph.
CSession csession(graph, s, use_XLA);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  if (!device.empty()) {
    LOG(INFO) << "Setting op Min on device " << device;
  }
  TF_Operation* min = MinWithDevice(feed, one, graph, device, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Run the graph.
  csession.SetInputs({{feed, Int32Tensor({3, 2, 5})}});
  csession.SetOutputs({min});
  csession.Run(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Tensor* out = csession.output_tensor(0);
  ASSERT_TRUE(out != nullptr);
  EXPECT_EQ(TF_INT32, TF_TensorType(out));
  EXPECT_EQ(0, TF_NumDims(out));  // scalar
  ASSERT_EQ(sizeof(int32), TF_TensorByteSize(out));
  int32* output_contents = static_cast<int32*>(TF_TensorData(out));
  EXPECT_EQ(2, *output_contents);  // min({3, 2, 5})

  // Clean up
  csession.CloseAndDelete(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

TEST(CAPI, Session_Min_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/false); }

TEST(CAPI, Session_Min_XLA_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/true); }

TEST(CAPI, Session_Min_GPU) {
  const string gpu_device = GPUDeviceName();
  // Skip this test if no GPU is available.
  if (gpu_device.empty()) return;

  RunMinTest(gpu_device, /*use_XLA=*/false);
}

TEST(CAPI, Session_Min_XLA_GPU) {
  const string gpu_device = GPUDeviceName();
  // Skip this test if no GPU is available.
if (gpu_device.empty()) return;

  RunMinTest(gpu_device, /*use_XLA=*/true);
}

// Exercises partial runs: evaluates A + 2 + B in two phases via
// TF_SessionPRunSetup / TF_SessionPRun.
TEST(CAPI, SessionPRun) {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Construct the graph: A + 2 + B
  TF_Operation* a = Placeholder(graph, s, "A");
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Operation* b = Placeholder(graph, s, "B");
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Operation* two = ScalarConst(2, graph, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Operation* plus2 = Add(a, two, graph, s, "plus2");
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_Operation* plusB = Add(plus2, b, graph, s, "plusB");
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Setup a session and a partial run handle.  The partial run will allow
  // computation of A + 2 + B in two phases (calls to TF_SessionPRun):
  // 1. Feed A and get (A+2)
  // 2. Feed B and get (A+2)+B
  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_Session* sess = TF_NewSession(graph, opts, s);
  TF_DeleteSessionOptions(opts);

  TF_Output feeds[] = {TF_Output{a, 0}, TF_Output{b, 0}};
  TF_Output fetches[] = {TF_Output{plus2, 0}, TF_Output{plusB, 0}};

  const char* handle = nullptr;
  TF_SessionPRunSetup(sess, feeds, TF_ARRAYSIZE(feeds), fetches,
                      TF_ARRAYSIZE(fetches), nullptr, 0, &handle, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Feed A and fetch A + 2.
  TF_Output feeds1[] = {TF_Output{a, 0}};
  TF_Output fetches1[] = {TF_Output{plus2, 0}};
  TF_Tensor* feedValues1[] = {Int32Tensor(1)};
  TF_Tensor* fetchValues1[1];
  TF_SessionPRun(sess, handle, feeds1, feedValues1, 1, fetches1, fetchValues1,
                 1, nullptr, 0, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  EXPECT_EQ(3, *(static_cast<int32*>(TF_TensorData(fetchValues1[0]))));
  TF_DeleteTensor(feedValues1[0]);
  TF_DeleteTensor(fetchValues1[0]);

  // Feed B and fetch (A + 2) + B.
TF_Output feeds2[] = {TF_Output{b, 0}};
  TF_Output fetches2[] = {TF_Output{plusB, 0}};
  TF_Tensor* feedValues2[] = {Int32Tensor(4)};
  TF_Tensor* fetchValues2[1];
  TF_SessionPRun(sess, handle, feeds2, feedValues2, 1, fetches2, fetchValues2,
                 1, nullptr, 0, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  EXPECT_EQ(7, *(static_cast<int32*>(TF_TensorData(fetchValues2[0]))));
  TF_DeleteTensor(feedValues2[0]);
  TF_DeleteTensor(fetchValues2[0]);

  // Clean up.
  TF_DeletePRunHandle(handle);
  TF_DeleteSession(sess, s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

TEST(CAPI, ShapeInferenceError) {
  // TF_FinishOperation should fail if the shape of the added operation cannot
  // be inferred.
  TF_Status* status = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();

  // Create this failure by trying to add two nodes with incompatible shapes
  // (A tensor with shape [2] and a tensor with shape [3] cannot be added).
  const char data[] = {1, 2, 3};
  const int64_t vec2_dims[] = {2};
  unique_tensor_ptr vec2_tensor(
      Int8Tensor(vec2_dims, TF_ARRAYSIZE(vec2_dims), data), TF_DeleteTensor);
  TF_Operation* vec2 = Const(vec2_tensor.get(), graph, status, "vec2");
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  const int64_t vec3_dims[] = {3};
  unique_tensor_ptr vec3_tensor(
      Int8Tensor(vec3_dims, TF_ARRAYSIZE(vec3_dims), data), TF_DeleteTensor);
  TF_Operation* vec3 = Const(vec3_tensor.get(), graph, status, "vec3");
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  // The failing finish must report an error and return no operation.
  TF_Operation* add = AddNoCheck(vec2, vec3, graph, status);
  ASSERT_NE(TF_OK, TF_GetCode(status));
  ASSERT_TRUE(add == nullptr);

  TF_DeleteGraph(graph);
  TF_DeleteStatus(status);
}

// TF_GraphGetOpDef should return the registry's OpDef for a known op and
// TF_NOT_FOUND for an unknown one.
TEST(CAPI, GetOpDef) {
  TF_Status* status = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();
  TF_Buffer* buffer = TF_NewBuffer();

  TF_GraphGetOpDef(graph, "Add", buffer, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status));
  const OpDef* expected_op_def;
  TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef("Add",
&expected_op_def));
  string expected_serialized;
  expected_op_def->SerializeToString(&expected_serialized);
  string actual_string(reinterpret_cast<const char*>(buffer->data),
                       buffer->length);
  EXPECT_EQ(expected_serialized, actual_string);

  TF_GraphGetOpDef(graph, "MyFakeOp", buffer, status);
  EXPECT_EQ(TF_NOT_FOUND, TF_GetCode(status));
  ExpectHasSubstr(TF_Message(status),
                  "Op type not registered 'MyFakeOp' in binary");

  TF_DeleteBuffer(buffer);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(status);
}

// Flattens a vector of strings into parallel (pointer, length) arrays as
// expected by C-API list setters such as TF_SetAttrStringList. The output
// arrays alias the string data in `v`, so `v` must outlive them.
void StringVectorToArrays(const std::vector<string>& v,
                          std::unique_ptr<const void*[]>* ptrs,
                          std::unique_ptr<size_t[]>* lens) {
  ptrs->reset(new const void*[v.size()]);
  lens->reset(new size_t[v.size()]);
  for (size_t i = 0; i < v.size(); ++i) {
    (*ptrs)[i] = v[i].data();
    (*lens)[i] = v[i].size();
  }
}

// Fixture for testing colocation constraints (tensorflow::kColocationAttrName)
// on an in-progress AddN description: constraints are set via TF_ColocateWith,
// a raw string list, or a serialized AttrValue proto, then verified after
// TF_FinishOperation.
class CApiColocationTest : public ::testing::Test {
 protected:
  CApiColocationTest() : s_(TF_NewStatus()), graph_(TF_NewGraph()) {}

  void SetUp() override {
    feed1_ = Placeholder(graph_, s_, "feed1");
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

    feed2_ = Placeholder(graph_, s_, "feed2");
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

    constant_ = ScalarConst(10, graph_, s_);
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

    // Leave desc_ unfinished; individual tests add colocation info and then
    // call FinishAndVerify.
    desc_ = TF_NewOperation(graph_, "AddN", "add");
    TF_Output inputs[] = {{feed1_, 0}, {constant_, 0}};
    TF_AddInputList(desc_, inputs, TF_ARRAYSIZE(inputs));
  }

  ~CApiColocationTest() override {
    TF_DeleteGraph(graph_);
    TF_DeleteStatus(s_);
  }

  void SetViaStringList(TF_OperationDescription* desc,
                        const std::vector<string>& list) {
    std::unique_ptr<const void*[]> list_ptrs;
    std::unique_ptr<size_t[]> list_lens;
    StringVectorToArrays(list, &list_ptrs, &list_lens);
    TF_SetAttrStringList(desc, tensorflow::kColocationAttrName,
                         list_ptrs.get(), list_lens.get(), list.size());
  }

  void SetViaProto(TF_OperationDescription* desc,
                   const std::vector<string>& list) {
    tensorflow::AttrValue attr;
    for (const string& v : list) {
      attr.mutable_list()->add_s(v);
    }
    string bytes;
attr.SerializeToString(&bytes);
    TF_SetAttrValueProto(desc, tensorflow::kColocationAttrName, bytes.data(),
                         bytes.size(), s_);
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  }

  // Asserts that `op` carries exactly the `expected` colocation strings; an
  // empty `expected` means the attr must be absent entirely.
  void VerifyCollocation(TF_Operation* op,
                         const std::vector<string>& expected) {
    TF_AttrMetadata m =
        TF_OperationGetAttrMetadata(op, tensorflow::kColocationAttrName, s_);
    if (expected.empty()) {
      ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
      EXPECT_EQ("Operation 'add' has no attr named '_class'.",
                string(TF_Message(s_)));
      return;
    }
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    EXPECT_EQ(1, m.is_list);
    EXPECT_EQ(expected.size(), m.list_size);
    EXPECT_EQ(TF_ATTR_STRING, m.type);
    std::vector<void*> values(expected.size());
    std::vector<size_t> lens(expected.size());
    std::unique_ptr<char[]> storage(new char[m.total_size]);
    TF_OperationGetAttrStringList(op, tensorflow::kColocationAttrName,
                                  values.data(), lens.data(), expected.size(),
                                  storage.get(), m.total_size, s_);
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    for (int i = 0; i < expected.size(); ++i) {
      EXPECT_EQ(expected[i],
                string(static_cast<const char*>(values[i]), lens[i]));
    }
  }

  void FinishAndVerify(TF_OperationDescription* desc,
                       const std::vector<string>& expected) {
    // NOTE(review): finishes the member desc_, not the `desc` parameter.
    // Every caller passes desc_, so behavior matches, but the parameter is
    // effectively unused — confirm intent.
    TF_Operation* op = TF_FinishOperation(desc_, s_);
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    VerifyCollocation(op, expected);
  }

  TF_Status* s_;
  TF_Graph* graph_;
  TF_Operation* feed1_;
  TF_Operation* feed2_;
  TF_Operation* constant_;
  TF_OperationDescription* desc_;
};

TEST_F(CApiColocationTest, ColocateWith) {
  TF_ColocateWith(desc_, feed1_);
  FinishAndVerify(desc_, {"loc:@feed1"});
}

TEST_F(CApiColocationTest, StringList) {
  SetViaStringList(desc_, {"loc:@feed1"});
  FinishAndVerify(desc_, {"loc:@feed1"});
}

TEST_F(CApiColocationTest, Proto) {
  SetViaProto(desc_, {"loc:@feed1"});
  FinishAndVerify(desc_, {"loc:@feed1"});
}

TEST_F(CApiColocationTest, ColocateWith_StringList) {
  TF_ColocateWith(desc_, feed1_);
  SetViaStringList(desc_, {"loc:@feed2"});
FinishAndVerify(desc_, {"loc:@feed2"});
}

TEST_F(CApiColocationTest, ColocateWith_Proto) {
  TF_ColocateWith(desc_, feed1_);
  SetViaProto(desc_, {"loc:@feed2"});
  FinishAndVerify(desc_, {"loc:@feed2"});
}

TEST_F(CApiColocationTest, StringList_ColocateWith) {
  SetViaStringList(desc_, {"loc:@feed2"});
  TF_ColocateWith(desc_, feed1_);
  FinishAndVerify(desc_, {"loc:@feed1", "loc:@feed2"});
}

TEST_F(CApiColocationTest, Proto_ColocateWith) {
  SetViaProto(desc_, {"loc:@feed2"});
  TF_ColocateWith(desc_, feed1_);
  FinishAndVerify(desc_, {"loc:@feed1", "loc:@feed2"});
}

TEST_F(CApiColocationTest, ColocateWith_ColocateWith) {
  TF_ColocateWith(desc_, feed1_);
  TF_ColocateWith(desc_, feed2_);
  FinishAndVerify(desc_, {"loc:@feed1", "loc:@feed2"});
}

TEST_F(CApiColocationTest, Proto_StringList) {
  SetViaProto(desc_, {"loc:@feed1"});
  SetViaStringList(desc_, {"loc:@feed2"});
  FinishAndVerify(desc_, {"loc:@feed2"});
}

TEST_F(CApiColocationTest, StringList_Proto) {
  SetViaStringList(desc_, {"loc:@feed1"});
  SetViaProto(desc_, {"loc:@feed2"});
  FinishAndVerify(desc_, {"loc:@feed2"});
}

TEST_F(CApiColocationTest, ClearViaStringList) {
  TF_ColocateWith(desc_, feed1_);
  SetViaStringList(desc_, {});
  FinishAndVerify(desc_, {});
}

TEST_F(CApiColocationTest, ClearViaProto) {
  TF_ColocateWith(desc_, feed1_);
  SetViaProto(desc_, {});
  FinishAndVerify(desc_, {});
}

// Loads the half_plus_two SavedModel, runs its "regress_x_to_y" signature on
// four tensorflow::Example protos, and checks the (input / 2) + 2 outputs.
TEST(CAPI, SavedModel) {
  // Load the saved model.
  const char kSavedModel[] = "cc/saved_model/testdata/half_plus_two/00000123";
  const string saved_model_dir = tensorflow::io::JoinPath(
      tensorflow::testing::TensorFlowSrcRoot(), kSavedModel);
  TF_SessionOptions* opt = TF_NewSessionOptions();
  TF_Buffer* run_options = TF_NewBufferFromString("", 0);
  TF_Buffer* metagraph = TF_NewBuffer();
  TF_Status* s = TF_NewStatus();
  const char* tags[] = {tensorflow::kSavedModelTagServe};
  TF_Graph* graph = TF_NewGraph();
  // Loading fills `metagraph` with the serialized MetaGraphDef.
  TF_Session* session = TF_LoadSessionFromSavedModel(
      opt, run_options, saved_model_dir.c_str(), tags, 1, graph, metagraph, s);
  TF_DeleteBuffer(run_options);
  TF_DeleteSessionOptions(opt);
  tensorflow::MetaGraphDef metagraph_def;
  metagraph_def.ParseFromArray(metagraph->data, metagraph->length);
  TF_DeleteBuffer(metagraph);
  EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  CSession csession(session);

  // Retrieve the regression signature from meta graph def.
  const auto signature_def_map = metagraph_def.signature_def();
  const auto signature_def = signature_def_map.at("regress_x_to_y");

  const string input_name =
      signature_def.inputs().at(tensorflow::kRegressInputs).name();
  const string output_name =
      signature_def.outputs().at(tensorflow::kRegressOutputs).name();

  // Write {0, 1, 2, 3} as tensorflow::Example inputs.
  Tensor input(tensorflow::DT_STRING, TensorShape({4}));
  for (tensorflow::int64 i = 0; i < input.NumElements(); ++i) {
    // Each element is a serialized Example with a single float feature "x".
    tensorflow::Example example;
    auto* feature_map = example.mutable_features()->mutable_feature();
    (*feature_map)["x"].mutable_float_list()->add_value(i);
    input.flat<string>()(i) = example.SerializeAsString();
  }

  const tensorflow::string input_op_name(
      tensorflow::ParseTensorName(input_name).first);
  TF_Operation* input_op =
      TF_GraphOperationByName(graph, input_op_name.c_str());
  ASSERT_TRUE(input_op != nullptr);
  csession.SetInputs({{input_op, TF_TensorFromTensor(input, s)}});
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  const tensorflow::string output_op_name(
      tensorflow::ParseTensorName(output_name).first);
  TF_Operation* output_op =
      TF_GraphOperationByName(graph, output_op_name.c_str());
  ASSERT_TRUE(output_op != nullptr);
  csession.SetOutputs({output_op});
  csession.Run(s);
  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);

  // Expect a 4x1 float output.
  TF_Tensor* out = csession.output_tensor(0);
  ASSERT_TRUE(out != nullptr);
  EXPECT_EQ(TF_FLOAT, TF_TensorType(out));
  EXPECT_EQ(2, TF_NumDims(out));
  EXPECT_EQ(4, TF_Dim(out, 0));
  EXPECT_EQ(1, TF_Dim(out, 1));
  float* values = static_cast<float*>(TF_TensorData(out));
  // These values are defined to be (input / 2) + 2.
  EXPECT_EQ(2, values[0]);
  EXPECT_EQ(2.5, values[1]);
  EXPECT_EQ(3, values[2]);
  EXPECT_EQ(3.5, values[3]);

  csession.CloseAndDelete(s);
  EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// Loading a SavedModel with null run_options / meta_graph_def must succeed.
TEST(CAPI, SavedModelNullArgsAreValid) {
  const char kSavedModel[] = "cc/saved_model/testdata/half_plus_two/00000123";
  const string saved_model_dir = tensorflow::io::JoinPath(
      tensorflow::testing::TensorFlowSrcRoot(), kSavedModel);
  TF_SessionOptions* opt = TF_NewSessionOptions();
  TF_Status* s = TF_NewStatus();
  const char* tags[] = {tensorflow::kSavedModelTagServe};
  TF_Graph* graph = TF_NewGraph();
  // NULL run_options and meta_graph_def should work.
  TF_Session* session = TF_LoadSessionFromSavedModel(
      opt, nullptr, saved_model_dir.c_str(), tags, 1, graph, nullptr, s);
  EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteSessionOptions(opt);
  TF_CloseSession(session, s);
  EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteSession(session, s);
  EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
}

// Every TF_Delete* function must accept nullptr without crashing.
TEST(CAPI, DeletingNullPointerIsSafe) {
  TF_Status* status = TF_NewStatus();

  TF_DeleteStatus(nullptr);
  TF_DeleteBuffer(nullptr);
  TF_DeleteTensor(nullptr);
  TF_DeleteSessionOptions(nullptr);
  TF_DeleteGraph(nullptr);
  TF_DeleteImportGraphDefOptions(nullptr);
  TF_DeleteImportGraphDefResults(nullptr);
  TF_DeleteFunction(nullptr);
  TF_DeleteSession(nullptr, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeletePRunHandle(nullptr);
  TF_DeleteDeprecatedSession(nullptr, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteDeviceList(nullptr);
  TF_DeleteLibraryHandle(nullptr);
  TF_DeleteApiDefMap(nullptr);

  TF_DeleteStatus(status);
}

// TF_TensorBitcastFrom with a new shape: `b` becomes a reshaped view sharing
// `a`'s buffer.
TEST(CAPI, TestBitcastFrom_Reshape) {
  int64_t dims[] = {2, 3};
  TF_Tensor* a =
      TF_AllocateTensor(TF_UINT64, dims, 2, 6 * TF_DataTypeSize(TF_UINT64));
  TF_Tensor* b =
      TF_AllocateTensor(TF_UINT64, nullptr, 0, TF_DataTypeSize(TF_UINT64));
  EXPECT_NE(a, nullptr);
  EXPECT_NE(b, nullptr);

  EXPECT_EQ(6, TF_TensorElementCount(a));
  EXPECT_EQ(1, TF_TensorElementCount(b));
  EXPECT_EQ(6 * TF_DataTypeSize(TF_UINT64), TF_TensorByteSize(a));
  EXPECT_EQ(TF_DataTypeSize(TF_UINT64), TF_TensorByteSize(b));

  int64_t new_dims[] = {3, 2};
  TF_Status* status = TF_NewStatus();
  TF_TensorBitcastFrom(a, TF_UINT64, b, new_dims, 2, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status));
  TF_DeleteStatus(status);

  // After the bitcast both tensors report the full 6 elements.
  EXPECT_EQ(6, TF_TensorElementCount(a));
  EXPECT_EQ(6, TF_TensorElementCount(b));
  EXPECT_EQ(6 * TF_DataTypeSize(TF_UINT64), TF_TensorByteSize(a));
  EXPECT_EQ(6 * TF_DataTypeSize(TF_UINT64), TF_TensorByteSize(b));

  // Check that a write to one tensor shows up in the other.
  *(static_cast<int64_t*>(TF_TensorData(a))) = 4;
  EXPECT_EQ(4, *(static_cast<int64_t*>(TF_TensorData(b))));
  *(static_cast<int64_t*>(TF_TensorData(b))) = 6;
  EXPECT_EQ(6, *(static_cast<int64_t*>(TF_TensorData(a))));

  TF_DeleteTensor(a);
  TF_DeleteTensor(b);
}

// Op with no registered gradient, used to exercise the error path of
// TF_AddGradients below.
REGISTER_OP("TestOpWithNoGradient")
    .Input("x: T")
    .Output("y: T")
    .Attr("T: {float, double}")
    .Doc(R"doc( Test op with no grad registered. x: input y: output )doc")
    .SetShapeFn(tensorflow::shape_inference::UnknownShape);

// Fixture for TF_AddGradients / TF_AddGradientsWithPrefix tests. Builds a
// forward graph in `graph_`, adds gradients to it, and compares against a
// hand-built `expected_graph_`.
class CApiGradientsTest : public ::testing::Test {
 protected:
  CApiGradientsTest()
      : s_(TF_NewStatus()),
        graph_(TF_NewGraph()),
        expected_graph_(TF_NewGraph()) {}

  ~CApiGradientsTest() override {
    TF_DeleteGraph(graph_);
    TF_DeleteGraph(expected_graph_);
    TF_DeleteStatus(s_);
  }

  // Builds the MatMul graph, adds gradients, and checks both the resulting
  // GraphDef and the executed gradient values against the expected graph.
  void TestGradientsSuccess(bool grad_inputs_provided) {
    TF_Output inputs[2];
    TF_Output outputs[1];
    TF_Output grad_outputs[2];
    TF_Output expected_grad_outputs[2];

    BuildSuccessGraph(inputs, outputs);
    BuildExpectedGraph(grad_inputs_provided, expected_grad_outputs);

    AddGradients(grad_inputs_provided, nullptr, inputs, 2, outputs, 1,
                 grad_outputs);
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

    // Compare that the graphs match.
    GraphDef expected_gdef;
    GraphDef gdef;
    EXPECT_TRUE(GetGraphDef(expected_graph_, &expected_gdef));
    EXPECT_TRUE(GetGraphDef(graph_, &gdef));
    TF_EXPECT_GRAPH_EQ(expected_gdef, gdef);

    // Compare that the output of the gradients of both graphs match.
    RunGraphsAndCompareOutputs(grad_outputs, expected_grad_outputs);
  }

  // Adds gradients for an op without a registered gradient and checks the
  // resulting error message.
  void TestGradientsError(bool grad_inputs_provided) {
    TF_Output inputs[1];
    TF_Output outputs[1];
    TF_Output grad_outputs[1];

    BuildErrorGraph(inputs, outputs);

    AddGradients(grad_inputs_provided, nullptr, inputs, 1, outputs, 1,
                 grad_outputs);

    string expected_msg =
        "No gradient defined for op: TestOpWithNoGradient. Please see "
        "https://www.tensorflow.org/code/"
        "tensorflow/cc/gradients/README.md"
        " for instructions on how to add C++ gradients.";
    EXPECT_EQ(expected_msg, TF_Message(s_));
  }

  // Run the graph and ensure that the gradient values are as expected.
  void RunGraphsAndCompareOutputs(TF_Output* grad_outputs,
                                  TF_Output* expected_grad_outputs) {
    std::unique_ptr<CSession> csession(new CSession(graph_, s_));
    std::unique_ptr<CSession> expected_csession(
        new CSession(expected_graph_, s_));

    std::vector<TF_Output> grad_outputs_vec;
    grad_outputs_vec.assign(grad_outputs, grad_outputs + 2);
    csession->SetOutputs(grad_outputs_vec);
    csession->Run(s_);
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    TF_Tensor* out0 = csession->output_tensor(0);
    TF_Tensor* out1 = csession->output_tensor(1);

    std::vector<TF_Output> expected_grad_outputs_vec;
    expected_grad_outputs_vec.assign(expected_grad_outputs,
                                     expected_grad_outputs + 2);
    expected_csession->SetOutputs(expected_grad_outputs_vec);
    expected_csession->Run(s_);
    ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
    TF_Tensor* expected_out0 = expected_csession->output_tensor(0);
    TF_Tensor* expected_out1 = expected_csession->output_tensor(1);

    CompareTensors(out0, expected_out0);
    CompareTensors(out1, expected_out1);
  }

  // NOTE(review): only compares the first float element of each tensor.
  void CompareTensors(TF_Tensor* a, TF_Tensor* b) {
    float* a_data = static_cast<float*>(TF_TensorData(a));
    float* b_data = static_cast<float*>(TF_TensorData(b));
    EXPECT_EQ(*a_data, *b_data);
  }

  // Invokes TF_AddGradientsWithPrefix, optionally feeding explicit all-ones
  // gradient inputs.
  void AddGradients(bool grad_inputs_provided, const char* prefix,
                    TF_Output* inputs, int ninputs, TF_Output* outputs,
                    int noutputs, TF_Output* grad_outputs) {
    if (grad_inputs_provided) {
      TF_Output grad_inputs[1];
      const float grad_inputs_val[] = {1.0, 1.0, 1.0, 1.0};
      TF_Operation* grad_inputs_op =
          FloatConst2x2(graph_, s_, grad_inputs_val, "GradInputs");
      grad_inputs[0] = TF_Output{grad_inputs_op, 0};
      TF_AddGradientsWithPrefix(graph_, prefix, outputs, noutputs, inputs,
                                ninputs, grad_inputs, s_, grad_outputs);
    } else {
      TF_AddGradientsWithPrefix(graph_, prefix, outputs, noutputs, inputs,
                                ninputs, nullptr, s_, grad_outputs);
    }
  }

  // Graph with a single op that has no registered gradient.
  void BuildErrorGraph(TF_Output* inputs, TF_Output* outputs) {
    const float const0_val[] = {1.0, 2.0, 3.0, 4.0};
    TF_Operation* const0 = FloatConst2x2(graph_, s_, const0_val, "Const_0");
    TF_Operation* nograd = NoGradientOp(graph_, s_, const0, "NoGrad");
    inputs[0] = TF_Output{const0, 0};
    outputs[0] = TF_Output{nograd, 0};
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  }

  void BuildSuccessGraph(TF_Output* inputs, TF_Output* outputs) {
    // Construct the following graph:
    //      |
    //     z|
    //      |
    //    MatMul
    //   /       \
    //  ^         ^
    //  |         |
    // x|        y|
    //  |         |
    //  |         |
    // Const_0   Const_1
    //
    const float const0_val[] = {1.0, 2.0, 3.0, 4.0};
    const float const1_val[] = {1.0, 0.0, 0.0, 1.0};
    TF_Operation* const0 = FloatConst2x2(graph_, s_, const0_val, "Const_0");
    TF_Operation* const1 = FloatConst2x2(graph_, s_, const1_val, "Const_1");
    TF_Operation* matmul = MatMul(graph_, s_, const0, const1, "MatMul");
    inputs[0] = TF_Output{const0, 0};
    inputs[1] = TF_Output{const1, 0};
    outputs[0] = TF_Output{matmul, 0};
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  }

  void BuildExpectedGraph(bool grad_inputs_provided,
                          TF_Output* expected_grad_outputs) {
    // The expected graph looks like this if grad_inputs_provided.
    // If grad_inputs_provided is false, Const_0 will be a OnesLike op.
// ^ ^ // dy| dx| // MatMul Gradient Graph // | | // MatMul_2 MatMul_1 // ^ ^ ^ ^ // | |----------| | // | ^ | // | dz| | // | | | // | Const_3 | // | | // | ^ | // | z| | // MatMul Forward Graph // | | | // | MatMul | // | / \ | // | ^ ^ | // | | | | // |---x| y|----| // | | // | | // Const_0 Const_1 // const float const0_val[] = {1.0, 2.0, 3.0, 4.0}; const float const1_val[] = {1.0, 0.0, 0.0, 1.0}; TF_Operation* const0 = FloatConst2x2(expected_graph_, s_, const0_val, "Const_0"); TF_Operation* const1 = FloatConst2x2(expected_graph_, s_, const1_val, "Const_1"); TF_Operation* matmul = MatMul(expected_graph_, s_, const0, const1, "MatMul"); TF_Operation* const3; if (grad_inputs_provided) { const float const3_val[] = {1.0, 1.0, 1.0, 1.0}; const3 = FloatConst2x2(expected_graph_, s_, const3_val, "GradInputs"); } else { const3 = OnesLike(expected_graph_, s_, matmul, "gradients/OnesLike"); } TF_Operation* matmul1 = MatMul(expected_graph_, s_, const3, const1, "gradients/MatMul", false, true); TF_Operation* matmul2 = MatMul(expected_graph_, s_, const0, const3, "gradients/MatMul_1", true, false); expected_grad_outputs[0] = {matmul1, 0}; expected_grad_outputs[1] = {matmul2, 0}; } TF_Tensor* FloatTensor2x2(const float* values) { const int64_t dims[2] = {2, 2}; TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(float) * 4); memcpy(TF_TensorData(t), values, sizeof(float) * 4); return t; } TF_Operation* FloatConst2x2(TF_Graph* graph, TF_Status* s, const float* values, const char* name) { unique_tensor_ptr tensor(FloatTensor2x2(values), TF_DeleteTensor); TF_OperationDescription* desc = TF_NewOperation(graph, "Const", name); TF_SetAttrTensor(desc, "value", tensor.get(), s); if (TF_GetCode(s) != TF_OK) return nullptr; TF_SetAttrType(desc, "dtype", TF_FLOAT); TF_Operation* op = TF_FinishOperation(desc, s); EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s); return op; } TF_Operation* MatMul(TF_Graph* graph, TF_Status* s, TF_Operation* l, TF_Operation* r, const char* name, bool 
                       transpose_a = false, bool transpose_b = false) {
    TF_OperationDescription* desc = TF_NewOperation(graph, "MatMul", name);
    if (transpose_a) {
      TF_SetAttrBool(desc, "transpose_a", 1);
    }
    if (transpose_b) {
      TF_SetAttrBool(desc, "transpose_b", 1);
    }
    TF_AddInput(desc, {l, 0});
    TF_AddInput(desc, {r, 0});
    TF_Operation* op = TF_FinishOperation(desc, s);
    EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    return op;
  }

  // Adds an OnesLike op over `in` to `graph`.
  TF_Operation* OnesLike(TF_Graph* graph, TF_Status* s, TF_Operation* in,
                         const char* name) {
    TF_OperationDescription* desc = TF_NewOperation(graph, "OnesLike", name);
    TF_AddInput(desc, {in, 0});
    TF_Operation* op = TF_FinishOperation(desc, s);
    EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    return op;
  }

  // Adds a TestOpWithNoGradient op over `in` to `graph`.
  TF_Operation* NoGradientOp(TF_Graph* graph, TF_Status* s, TF_Operation* in,
                             const char* name) {
    TF_OperationDescription* desc =
        TF_NewOperation(graph, "TestOpWithNoGradient", name);
    TF_AddInput(desc, {in, 0});
    TF_Operation* op = TF_FinishOperation(desc, s);
    EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
    return op;
  }

  // Builds the success graph and adds gradients once (or twice, when
  // `prefix2` is non-null) with the given name prefixes; the status `s_`
  // carries the result for the test body to assert on.
  void BuildGraphAndAddGradientsWithPrefixes(const char* prefix1,
                                             const char* prefix2 = nullptr) {
    TF_Output inputs[2];
    TF_Output outputs[1];
    TF_Output grad_outputs[2];

    BuildSuccessGraph(inputs, outputs);

    AddGradients(false, prefix1, inputs, 2, outputs, 1, grad_outputs);
    if (prefix2 != nullptr) {
      AddGradients(false, prefix2, inputs, 2, outputs, 1, grad_outputs);
    }
  }

  TF_Status* s_;
  TF_Graph* graph_;
  TF_Graph* expected_graph_;
};

TEST_F(CApiGradientsTest, Gradients_GradInputs) { TestGradientsSuccess(true); }

TEST_F(CApiGradientsTest, Gradients_NoGradInputs) {
  TestGradientsSuccess(false);
}

TEST_F(CApiGradientsTest, OpWithNoGradientRegistered_GradInputs) {
  TestGradientsError(true);
}

TEST_F(CApiGradientsTest, OpWithNoGradientRegistered_NoGradInputs) {
  TestGradientsError(false);
}

TEST_F(CApiGradientsTest, GradientsPrefix_PrefixIsOk) {
  BuildGraphAndAddGradientsWithPrefixes("gradients");
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
// Distinct prefixes (or distinct scopes) are allowed.
TEST_F(CApiGradientsTest, GradientsPrefix_TwoGradientsWithDistinctPrefixes) {
  BuildGraphAndAddGradientsWithPrefixes("gradients", "gradients_1");
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_TwoGradientsInSameScope) {
  BuildGraphAndAddGradientsWithPrefixes("scope/gradients", "scope/gradients_1");
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_TwoGradientsInDifferentScopes) {
  BuildGraphAndAddGradientsWithPrefixes("scope/gradients", "scope_1/gradients");
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_2ndGradientsAsSubScopeOf1st) {
  BuildGraphAndAddGradientsWithPrefixes("gradients", "gradients/sub");
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}

// Prefixes that collide with existing node names must be rejected.
TEST_F(CApiGradientsTest, GradientsPrefix_PrefixMatchesExistingNodeName) {
  BuildGraphAndAddGradientsWithPrefixes("Const_0");
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_TwoGradientsWithIdenticalPrefixes) {
  BuildGraphAndAddGradientsWithPrefixes("gradients", "gradients");
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_2ndGradientsMatchingNodeOf1st) {
  BuildGraphAndAddGradientsWithPrefixes("gradients", "gradients/MatMul");
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_1stGradientsMatchingNodeOf2nd) {
  BuildGraphAndAddGradientsWithPrefixes("gradients/MatMul", "gradients");
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
}

TEST_F(CApiGradientsTest, GradientsPrefix_2ndGradientsAsParentScopeOf1st) {
  BuildGraphAndAddGradientsWithPrefixes("gradients/sub", "gradients");
  ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_);
}

// Extracts the single float value out of a scalar float tensor.
void ScalarFloatFromTensor(const TF_Tensor* t, float* f) {
  ASSERT_TRUE(t != nullptr);
  ASSERT_EQ(TF_FLOAT, TF_TensorType(t));
  ASSERT_EQ(0,
            TF_NumDims(t));
  ASSERT_EQ(4, TF_TensorByteSize(t));
  float* p = static_cast<float*>(TF_TensorData(t));
  *f = *p;
}

// d(x*y)/dx == y and d(x*y)/dy == x when gradients are added in two separate
// TF_AddGradients calls on the same graph.
TEST_F(CApiGradientsTest, MultipleCallsToAddGradients) {
  const float X = 3.0f, Y = 7.0f;
  TF_Operation* x = Placeholder(graph_, s_, "x", TF_FLOAT);
  TF_Operation* y = Placeholder(graph_, s_, "y", TF_FLOAT);
  TF_Operation* xy = Mul(x, y, graph_, s_, "xy");
  TF_Output dxy_dx, dxy_dy;

  TF_Output outputs[1] = {{xy, 0}};
  TF_Output inputs[1] = {{x, 0}};
  TF_AddGradients(graph_, outputs, 1, inputs, 1, nullptr, s_, &dxy_dx);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  inputs[0] = {y, 0};
  TF_AddGradients(graph_, outputs, 1, inputs, 1, nullptr, s_, &dxy_dy);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_Session* sess = TF_NewSession(graph_, opts, s_);
  TF_DeleteSessionOptions(opts);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  TF_Output feeds[] = {{x, 0}, {y, 0}};
  TF_Tensor* feedValues[] = {FloatTensor(X), FloatTensor(Y)};
  TF_Output fetches[] = {dxy_dx, dxy_dy};
  TF_Tensor* fetchValues[] = {nullptr, nullptr};

  TF_SessionRun(sess, nullptr /* run_options */, feeds, feedValues, 2, fetches,
                fetchValues, 2, nullptr /* target_opers */, 0,
                nullptr /* run_metadata */, s_);
  TF_DeleteTensor(feedValues[0]);
  TF_DeleteTensor(feedValues[1]);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  TF_DeleteSession(sess, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  float dxy_dxValue = 0.0f, dxy_dyValue = 0.0f;
  ScalarFloatFromTensor(fetchValues[0], &dxy_dxValue);
  EXPECT_EQ(Y, dxy_dxValue);

  ScalarFloatFromTensor(fetchValues[1], &dxy_dyValue);
  EXPECT_EQ(X, dxy_dyValue);

  TF_DeleteTensor(fetchValues[0]);
  TF_DeleteTensor(fetchValues[1]);
}

// REGISTER_OP for CApiAttributesTest test cases.
// Registers two ops, each with a single attribute called 'v'.
// The attribute in one op will have a type 'type', the other
// will have list(type).
#define ATTR_TEST_REGISTER_OP(type)                           \
  REGISTER_OP("CApiAttributesTestOp" #type)                   \
      .Attr("v: " #type)                                      \
      .SetShapeFn(tensorflow::shape_inference::UnknownShape); \
  REGISTER_OP("CApiAttributesTestOpList" #type)               \
      .Attr("v: list(" #type ")")                             \
      .SetShapeFn(tensorflow::shape_inference::UnknownShape)
ATTR_TEST_REGISTER_OP(string);
ATTR_TEST_REGISTER_OP(int);
ATTR_TEST_REGISTER_OP(float);
ATTR_TEST_REGISTER_OP(bool);
ATTR_TEST_REGISTER_OP(type);
ATTR_TEST_REGISTER_OP(shape);
ATTR_TEST_REGISTER_OP(tensor);
#undef ATTR_TEST_REGISTER_OP

// Fixture for the attribute get/set round-trip tests below. `init` creates an
// op description for the registered test op matching the attribute type.
class CApiAttributesTest : public ::testing::Test {
 protected:
  CApiAttributesTest()
      : s_(TF_NewStatus()), graph_(TF_NewGraph()), counter_(0) {}

  ~CApiAttributesTest() override {
    TF_DeleteGraph(graph_);
    TF_DeleteStatus(s_);
  }

  TF_OperationDescription* init(string type) {
    // Construct op_name to match the name used by REGISTER_OP in the
    // ATTR_TEST_REGISTER calls above.
    string op_name = "CApiAttributesTestOp";
    if (type.find("list(") == 0) {
      op_name += "List";
      // Strip the "list(" prefix and the trailing ")".
      type = type.replace(0, 5, "");
      type = type.replace(type.size() - 1, 1, "");
    }
    op_name += type;
    return TF_NewOperation(
        graph_, op_name.c_str(),
        ::tensorflow::strings::StrCat("name", counter_++).c_str());
  }

  TF_Status* s_;

 private:
  TF_Graph* graph_;
  int counter_;
};

// Helper macros for the TF_OperationGetAttr* tests.
// TODO(ashankar): Use gmock matchers instead?
// (https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#writing-new-parameterized-matchers-quickly)
// That will require setting up the tensorflow build with gmock.
#define EXPECT_TF_META(attr_name, expected_list_size, expected_type, \
                       expected_total_size)                          \
  do {                                                               \
    auto m = TF_OperationGetAttrMetadata(oper, attr_name, s_);       \
    EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);              \
    const unsigned char e = expected_list_size >= 0 ? 1 : 0;         \
    EXPECT_EQ(e, m.is_list);                                         \
    EXPECT_EQ(expected_list_size, m.list_size);                      \
    EXPECT_EQ(expected_type, m.type);                                \
    EXPECT_EQ(expected_total_size, m.total_size);                    \
  } while (0)

TEST_F(CApiAttributesTest, String) {
  auto desc = init("string");
  TF_SetAttrString(desc, "v", "bunny", 5);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_STRING, 5);
  std::unique_ptr<char[]> value(new char[5]);

  TF_OperationGetAttrString(oper, "v", value.get(), 5, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_EQ("bunny", string(static_cast<const char*>(value.get()), 5));
}

TEST_F(CApiAttributesTest, StringList) {
  std::vector<string> list = {"bugs", "bunny", "duck"};
  std::unique_ptr<const void*[]> list_ptrs;
  std::unique_ptr<size_t[]> list_lens;
  StringVectorToArrays(list, &list_ptrs, &list_lens);
  int list_total_size = 0;
  for (const auto& s : list) {
    list_total_size += s.size();
  }

  auto desc = init("list(string)");
  TF_SetAttrStringList(desc, "v", list_ptrs.get(), list_lens.get(),
                       list.size());

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", list.size(), TF_ATTR_STRING, list_total_size);
  std::unique_ptr<void*[]> values(new void*[list.size()]);
  std::unique_ptr<size_t[]> lens(new size_t[list.size()]);
  std::unique_ptr<char[]> storage(new char[list_total_size]);
  TF_OperationGetAttrStringList(oper, "v", values.get(), lens.get(),
                                list.size(), storage.get(), list_total_size,
                                s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  for (size_t i = 0; i < list.size(); ++i) {
    EXPECT_EQ(list[i].size(), lens[i]) << i;
    EXPECT_EQ(list[i], string(static_cast<const char*>(values[i]), lens[i]))
        << i;
  }
}

TEST_F(CApiAttributesTest, Int) {
  auto desc = init("int");
  TF_SetAttrInt(desc, "v", 31415);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_INT, -1);

  int64_t value;
  TF_OperationGetAttrInt(oper, "v", &value, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_EQ(31415, value);
}

TEST_F(CApiAttributesTest, IntList) {
  const int64_t list[] = {1, 2, 3, 4};
  const size_t list_size = TF_ARRAYSIZE(list);

  auto desc = init("list(int)");
  TF_SetAttrIntList(desc, "v", list, list_size);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  int64_t values[list_size];
  EXPECT_TF_META("v", list_size, TF_ATTR_INT, -1);
  TF_OperationGetAttrIntList(oper, "v", values, list_size, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TRUE(std::equal(std::begin(list), std::end(list), std::begin(values)));
}

TEST_F(CApiAttributesTest, Float) {
  auto desc = init("float");
  TF_SetAttrFloat(desc, "v", 2.718);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_FLOAT, -1);

  float value;
  TF_OperationGetAttrFloat(oper, "v", &value, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_FLOAT_EQ(2.718, value);
}

TEST_F(CApiAttributesTest, FloatList) {
  const float list[] = {1.414, 2.718, 3.1415};
  const size_t list_size = TF_ARRAYSIZE(list);

  auto desc = init("list(float)");
  TF_SetAttrFloatList(desc, "v", list, list_size);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  float values[list_size];
  EXPECT_TF_META("v", list_size, TF_ATTR_FLOAT, -1);
  TF_OperationGetAttrFloatList(oper, "v", values, list_size, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TRUE(std::equal(std::begin(list), std::end(list), std::begin(values)));
}

TEST_F(CApiAttributesTest, Bool) {
  auto desc = init("bool");
  TF_SetAttrBool(desc, "v", 1);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_BOOL, -1);

  unsigned char value;
  TF_OperationGetAttrBool(oper, "v", &value, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_EQ(1, value);
}

TEST_F(CApiAttributesTest, BoolList) {
  const unsigned char list[] = {0, 1, 1, 0, 0, 1, 1};
  const size_t list_size = TF_ARRAYSIZE(list);

  auto desc = init("list(bool)");
  TF_SetAttrBoolList(desc, "v", list, list_size);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  unsigned char values[list_size];
  EXPECT_TF_META("v", list_size, TF_ATTR_BOOL, -1);
  TF_OperationGetAttrBoolList(oper, "v", values, list_size, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TRUE(std::equal(std::begin(list), std::end(list), std::begin(values)));
}

TEST_F(CApiAttributesTest, Type) {
  auto desc = init("type");
  TF_SetAttrType(desc, "v", TF_COMPLEX128);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_TYPE, -1);

  TF_DataType value;
  TF_OperationGetAttrType(oper, "v", &value, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_EQ(TF_COMPLEX128, value);
}

TEST_F(CApiAttributesTest, TypeList) {
  const TF_DataType list[] = {TF_FLOAT, TF_DOUBLE, TF_HALF, TF_COMPLEX128};
  const size_t list_size = TF_ARRAYSIZE(list);

  auto desc = init("list(type)");
  TF_SetAttrTypeList(desc, "v", list, list_size);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  TF_DataType values[list_size];
  EXPECT_TF_META("v", list_size, TF_ATTR_TYPE, -1);
  TF_OperationGetAttrTypeList(oper, "v", values, list_size, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TRUE(std::equal(std::begin(list), std::end(list), std::begin(values)));
}

TEST_F(CApiAttributesTest, Shape) {
  // Unknown shape
  auto desc = init("shape");
  TF_SetAttrShape(desc, "v", nullptr, -1);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_SHAPE, -1);
  TF_OperationGetAttrShape(oper, "v", nullptr, 10, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  // Partially specified shape
  const int64_t partial_shape[] = {17, -1};
  const size_t sz = TF_ARRAYSIZE(partial_shape);
  desc = init("shape");
  TF_SetAttrShape(desc, "v", partial_shape, sz);

  oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", -1, TF_ATTR_SHAPE, sz);

  int64_t values[sz];
  TF_OperationGetAttrShape(oper, "v", values, sz, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TRUE(
      std::equal(std::begin(partial_shape), std::end(partial_shape), values));
}

TEST_F(CApiAttributesTest, ShapeList) {
  const int64_t shape_1[] = {1, 3};
  const int64_t shape_2[] = {2, 4, 6};
  const int64_t* list[] = {&shape_1[0], &shape_2[0]};
  const size_t list_size = TF_ARRAYSIZE(list);
  const int ndims[] = {TF_ARRAYSIZE(shape_1), TF_ARRAYSIZE(shape_2)};
  const int total_ndims = 5;  // ndims[0] + ndims[1]

  auto desc = init("list(shape)");
  TF_SetAttrShapeList(desc, "v", list, ndims, list_size);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", list_size, TF_ATTR_SHAPE, total_ndims);

  int64_t* values[list_size];
  int values_ndims[list_size];
  int64_t storage[total_ndims];
  TF_OperationGetAttrShapeList(oper, "v", values, values_ndims, list_size,
                               storage, total_ndims, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  for (size_t i = 0; i < list_size; ++i) {
    EXPECT_EQ(ndims[i], values_ndims[i]) << i;
    for (int j = 0; j < values_ndims[i]; ++j) {
      EXPECT_EQ(list[i][j], values[i][j]) << "(" << i << ", " << j << ")";
    }
  }
}

TEST_F(CApiAttributesTest, TensorShapeProto) {
  const tensorflow::int64 pts[] = {2, 4, -1, 8};
  tensorflow::TensorShapeProto proto;
  tensorflow::PartialTensorShape(pts).AsProto(&proto);
  string bytes;
  proto.SerializeToString(&bytes);

  auto desc = init("shape");
  TF_SetAttrTensorShapeProto(desc, "v", bytes.data(), bytes.length(), s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) <<
      TF_Message(s_);

  EXPECT_TF_META("v", -1, TF_ATTR_SHAPE, 4);
  TF_Buffer* value = TF_NewBuffer();
  TF_OperationGetAttrTensorShapeProto(oper, "v", value, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  // Round-trip: the serialized proto read back must match what was set.
  EXPECT_EQ(bytes.length(), value->length);
  EXPECT_EQ(0, memcmp(bytes.data(), value->data, value->length));

  TF_DeleteBuffer(value);
}

TEST_F(CApiAttributesTest, TensorShapeProtoList) {
  string bytes1, bytes2;
  tensorflow::TensorShapeProto proto;

  const tensorflow::int64 pts1[] = {2, 4, -1, 8};
  tensorflow::PartialTensorShape(pts1).AsProto(&proto);
  proto.SerializeToString(&bytes1);

  const tensorflow::int64 pts2[] = {1, 3, 5, 7};
  tensorflow::PartialTensorShape(pts2).AsProto(&proto);
  proto.SerializeToString(&bytes2);

  std::unique_ptr<const void*[]> list_ptrs;
  std::unique_ptr<size_t[]> list_lens;
  const std::vector<string> list = {bytes1, bytes2};
  StringVectorToArrays(list, &list_ptrs, &list_lens);

  auto desc = init("list(shape)");
  TF_SetAttrTensorShapeProtoList(desc, "v", list_ptrs.get(), list_lens.get(),
                                 list.size(), s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_TF_META("v", 2, TF_ATTR_SHAPE, 8);
  TF_Buffer* values[2];
  TF_OperationGetAttrTensorShapeProtoList(oper, "v", values, 2, s_);
  EXPECT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  for (int i = 0; i < 2; ++i) {
    int le = list_lens[i];
    int la = values[i]->length;
    const void* e = list_ptrs[i];
    const void* a = values[i]->data;
    EXPECT_EQ(le, la) << i;
    EXPECT_EQ(0, memcmp(e, a, std::min(le, la))) << i;
    TF_DeleteBuffer(values[i]);
  }
}

TEST_F(CApiAttributesTest, Tensor) {
  const char tensor[] = {5, 7};
  const int64_t dims[] = {1, 2};
  const size_t ndims = TF_ARRAYSIZE(dims);

  auto desc = init("tensor");
  unique_tensor_ptr v(Int8Tensor(dims, ndims, tensor), TF_DeleteTensor);
  TF_SetAttrTensor(desc, "v", v.get(), s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK,
            TF_GetCode(s_)) << TF_Message(s_);

  EXPECT_TF_META("v", -1, TF_ATTR_TENSOR, -1);
  TF_Tensor* value;
  TF_OperationGetAttrTensor(oper, "v", &value, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  ASSERT_NE(nullptr, value);
  // The round-tripped tensor must match the one that was set: dtype, dims,
  // and raw contents.
  EXPECT_EQ(TF_INT8, TF_TensorType(value));
  EXPECT_EQ(ndims, TF_NumDims(value));
  for (int i = 0; i < TF_NumDims(value); ++i) {
    EXPECT_EQ(dims[i], TF_Dim(value, i)) << i;
  }
  EXPECT_EQ(sizeof(char) * TF_ARRAYSIZE(tensor), TF_TensorByteSize(value));
  EXPECT_EQ(0, memcmp(tensor, TF_TensorData(value), TF_TensorByteSize(value)));
  TF_DeleteTensor(value);
}

TEST_F(CApiAttributesTest, StringTensor) {
  // Create the string-Tensor "attribute" value.
  char encoded[] = {
      0,   0, 0, 0, 0, 0, 0, 0,  // array[uint64] offsets
      1,                         // varint encoded string length
      'A',
  };
  // No-op deallocator: `encoded` lives on the stack.
  auto deallocator = [](void* data, size_t len, void* arg) {};
  unique_tensor_ptr t_in(TF_NewTensor(TF_STRING, nullptr, 0, &encoded[0],
                                      sizeof(encoded), deallocator, nullptr),
                         TF_DeleteTensor);

  // Create a TF_Operation with the attribute t_in
  auto desc = init("tensor");
  TF_SetAttrTensor(desc, "v", t_in.get(), s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  // Fetch the attribute back.
  EXPECT_TF_META("v", -1, TF_ATTR_TENSOR, -1);
  TF_Tensor* t_out = nullptr;
  TF_OperationGetAttrTensor(oper, "v", &t_out, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
  EXPECT_EQ(TF_STRING, TF_TensorType(t_out));
  EXPECT_EQ(0, TF_NumDims(t_out));
  ASSERT_EQ(TF_TensorByteSize(t_in.get()), TF_TensorByteSize(t_out));
  EXPECT_EQ(0, memcmp(TF_TensorData(t_in.get()), TF_TensorData(t_out),
                      TF_TensorByteSize(t_out)));
  TF_DeleteTensor(t_out);
}

TEST_F(CApiAttributesTest, TensorList) {
  const char tensor1[] = {5, 7};
  const int64_t dims1[] = {1, 2};
  const size_t ndims1 = TF_ARRAYSIZE(dims1);

  const char tensor2[] = {2, 4, 6, 8};
  const int64_t dims2[] = {2, 2};
  const size_t ndims2 = TF_ARRAYSIZE(dims2);

  auto desc = init("list(tensor)");
  TF_Tensor* tmp[] = {
      Int8Tensor(dims1, ndims1, tensor1),
      Int8Tensor(dims2, ndims2, tensor2),
  };
  TF_SetAttrTensorList(desc, "v", tmp, TF_ARRAYSIZE(tmp), s_);
  // The attribute setter copies the tensors, so the originals can be freed
  // immediately.
  for (int i = 0; i < TF_ARRAYSIZE(tmp); ++i) {
    TF_DeleteTensor(tmp[i]);
  }
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  auto oper = TF_FinishOperation(desc, s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  EXPECT_TF_META("v", 2, TF_ATTR_TENSOR, -1);
  TF_Tensor* values[2];
  TF_OperationGetAttrTensorList(oper, "v", &values[0], TF_ARRAYSIZE(values),
                                s_);
  ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);

  const char* tensor_data[] = {&tensor1[0], &tensor2[0]};
  const size_t tensor_size[] = {TF_ARRAYSIZE(tensor1), TF_ARRAYSIZE(tensor2)};
  const int64_t* tensor_dims[] = {&dims1[0], &dims2[0]};
  const size_t tensor_ndims[] = {ndims1, ndims2};
  for (int i = 0; i < 2; ++i) {
    TF_Tensor* v = values[i];
    ASSERT_NE(nullptr, v) << i;
    EXPECT_EQ(TF_INT8, TF_TensorType(v)) << i;
    EXPECT_EQ(tensor_ndims[i], TF_NumDims(v)) << i;
    for (int j = 0; j < TF_NumDims(v); ++j) {
      EXPECT_EQ(tensor_dims[i][j], TF_Dim(v, j))
          << "Tensor #" << i << ", dimension #" << j;
    }
    EXPECT_EQ(sizeof(char) * tensor_size[i], TF_TensorByteSize(v)) << i;
    EXPECT_EQ(0,
              memcmp(tensor_data[i], TF_TensorData(v), TF_TensorByteSize(v)));
TF_DeleteTensor(v); } } TEST_F(CApiAttributesTest, EmptyList) { auto desc = init("list(int)"); TF_SetAttrIntList(desc, "v", nullptr, 0); auto oper = TF_FinishOperation(desc, s_); ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_); EXPECT_TF_META("v", 0, TF_ATTR_INT, -1); } TEST_F(CApiAttributesTest, Errors) { auto desc = init("int"); TF_SetAttrInt(desc, "v", 3); auto oper = TF_FinishOperation(desc, s_); ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_); TF_OperationGetAttrString(oper, "v", nullptr, 0, s_); EXPECT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(s_)) << TF_Message(s_); } TEST(TestApiDef, TestCreateApiDef) { // TODO(b/73318067): Fix linking for the GPU test generated by the // tf_cuda_cc_test() bazel rule and remove the next line. if (!GPUDeviceName().empty()) return; TF_Buffer* op_list_buf = TF_GetAllOpList(); TF_Status* status = TF_NewStatus(); auto* api_def_map = TF_NewApiDefMap(op_list_buf, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TF_DeleteStatus(status); string op_name = "TestCApi"; status = TF_NewStatus(); auto* api_def_buf = TF_ApiDefMapGet(api_def_map, op_name.c_str(), op_name.size(), status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TF_DeleteStatus(status); tensorflow::ApiDef api_def; EXPECT_TRUE(api_def.ParseFromArray(api_def_buf->data, api_def_buf->length)); EXPECT_EQ(op_name, api_def.graph_op_name()); EXPECT_EQ(R"doc(Used to test C API)doc", api_def.summary()); TF_DeleteBuffer(api_def_buf); TF_DeleteApiDefMap(api_def_map); TF_DeleteBuffer(op_list_buf); } TEST(TestApiDef, TestCreateApiDefWithOverwrites) { // TODO(b/73318067): Fix linking for the GPU test generated by the // tf_cuda_cc_test() bazel rule and remove the next line. 
if (!GPUDeviceName().empty()) return; TF_Buffer* op_list_buf = TF_GetAllOpList(); TF_Status* status = TF_NewStatus(); auto* api_def_map = TF_NewApiDefMap(op_list_buf, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TF_DeleteStatus(status); string api_def_overwrites = R"(op: < graph_op_name: "TestCApi" summary: "New summary" > )"; status = TF_NewStatus(); TF_ApiDefMapPut(api_def_map, api_def_overwrites.c_str(), api_def_overwrites.size(), status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TF_DeleteStatus(status); string op_name = "TestCApi"; status = TF_NewStatus(); auto* api_def_buf = TF_ApiDefMapGet(api_def_map, op_name.c_str(), op_name.size(), status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TF_DeleteStatus(status); tensorflow::ApiDef api_def; EXPECT_TRUE(api_def.ParseFromArray(api_def_buf->data, api_def_buf->length)); EXPECT_EQ(op_name, api_def.graph_op_name()); EXPECT_EQ("New summary", api_def.summary()); TF_DeleteBuffer(api_def_buf); TF_DeleteApiDefMap(api_def_map); TF_DeleteBuffer(op_list_buf); } class DummyKernel : public tensorflow::OpKernel { public: explicit DummyKernel(tensorflow::OpKernelConstruction* context) : OpKernel(context) {} void Compute(tensorflow::OpKernelContext* context) override {} }; // Test we can query kernels REGISTER_OP("TestOpWithSingleKernel") .Input("a: float") .Input("b: float") .Output("o: float"); REGISTER_KERNEL_BUILDER( Name("TestOpWithSingleKernel").Device(tensorflow::DEVICE_CPU), DummyKernel); TEST(TestKernel, TestGetAllRegisteredKernels) { TF_Status* status = TF_NewStatus(); TF_Buffer* kernel_list_buf = TF_GetAllRegisteredKernels(status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); KernelList kernel_list; kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length); ASSERT_GT(kernel_list.kernel_size(), 0); TF_DeleteBuffer(kernel_list_buf); TF_DeleteStatus(status); } TEST(TestKernel, TestGetRegisteredKernelsForOp) { TF_Status* status = 
TF_NewStatus(); TF_Buffer* kernel_list_buf = TF_GetRegisteredKernelsForOp("TestOpWithSingleKernel", status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); KernelList kernel_list; kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length); ASSERT_EQ(kernel_list.kernel_size(), 1); EXPECT_EQ(kernel_list.kernel(0).op(), "TestOpWithSingleKernel"); EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU"); TF_DeleteBuffer(kernel_list_buf); TF_DeleteStatus(status); } TEST(TestKernel, TestGetRegisteredKernelsForOpNoKernels) { TF_Status* status = TF_NewStatus(); TF_Buffer* kernel_list_buf = TF_GetRegisteredKernelsForOp("Unknown", status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); KernelList kernel_list; kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length); ASSERT_EQ(kernel_list.kernel_size(), 0); TF_DeleteBuffer(kernel_list_buf); TF_DeleteStatus(status); } #undef EXPECT_TF_META } // namespace } // namespace tensorflow // TODO(josh11b): Test: // * TF_SetDevice(desc, "/job:worker"); // * control inputs / outputs // * targets // * TF_DeleteGraph() before TF_DeleteSession()
ghchinoy/tensorflow
tensorflow/c/c_api_test.cc
C++
apache-2.0
92,851
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.activiti.engine.identity;

import org.activiti.engine.query.Query;

/**
 * Allows to programmatically query for {@link Group}s.
 *
 * All filter methods are conjunctive (AND semantics) and return this query
 * for fluent chaining.
 *
 * @author Joram Barrez
 */
public interface GroupQuery extends Query<GroupQuery, Group> {

  /** Only select {@link Group}s with the given id. */
  GroupQuery groupId(String groupId);

  /** Only select {@link Group}s with the given name. */
  GroupQuery groupName(String groupName);

  /**
   * Only select {@link Group}s where the name matches the given parameter.
   * The syntax to use is that of SQL, eg. %activiti%.
   */
  GroupQuery groupNameLike(String groupNameLike);

  /** Only select {@link Group}s which have the given type. */
  GroupQuery groupType(String groupType);

  /** Only select {@link Group}s where the given user is a member of. */
  GroupQuery groupMember(String groupMemberUserId);

  /**
   * Only select {@link Group}s that are a potential starter for the given
   * process definition.
   */
  GroupQuery potentialStarter(String procDefId);

  // sorting ////////////////////////////////////////////////////////

  /**
   * Order by group id (needs to be followed by {@link #asc()} or
   * {@link #desc()}).
   */
  GroupQuery orderByGroupId();

  /**
   * Order by group name (needs to be followed by {@link #asc()} or
   * {@link #desc()}).
   */
  GroupQuery orderByGroupName();

  /**
   * Order by group type (needs to be followed by {@link #asc()} or
   * {@link #desc()}).
   */
  GroupQuery orderByGroupType();

}
stefan-ziel/Activiti
modules/activiti-engine/src/main/java/org/activiti/engine/identity/GroupQuery.java
Java
apache-2.0
2,035
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Generated code. DO NOT EDIT!
// NOTE(review): the [START]/[END] markers below are region tags consumed by
// documentation tooling; keep them intact.

namespace Google.Cloud.Vision.V1.Snippets
{
    // [START vision_v1_generated_ProductSearch_DeleteReferenceImage_async_flattened]
    using Google.Cloud.Vision.V1;
    using System.Threading.Tasks;

    public sealed partial class GeneratedProductSearchClientSnippets
    {
        /// <summary>Snippet for DeleteReferenceImageAsync</summary>
        /// <remarks>
        /// This snippet has been automatically generated for illustrative purposes only.
        /// It may require modifications to work in your environment.
        /// </remarks>
        public async Task DeleteReferenceImageAsync()
        {
            // Create client
            ProductSearchClient productSearchClient = await ProductSearchClient.CreateAsync();
            // Initialize request argument(s)
            // The resource name encodes project, location, product and image ids.
            string name = "projects/[PROJECT]/locations/[LOCATION]/products/[PRODUCT]/referenceImages/[REFERENCE_IMAGE]";
            // Make the request
            await productSearchClient.DeleteReferenceImageAsync(name);
        }
    }
    // [END vision_v1_generated_ProductSearch_DeleteReferenceImage_async_flattened]
}
googleapis/google-cloud-dotnet
apis/Google.Cloud.Vision.V1/Google.Cloud.Vision.V1.GeneratedSnippets/ProductSearchClient.DeleteReferenceImageAsyncSnippet.g.cs
C#
apache-2.0
1,731
from plow.gui.manifest import QtCore, QtGui
from plow.gui.util import formatDateTime, formatDuration

# BUG FIX: "Duration" is defined and used below but was missing from
# __all__, so "from fwidgets import *" silently dropped it.
__all__ = [
    "Text",
    "Number",
    "Decimal",
    "DateTime",
    "Duration",
    "PillWidget",
    "Checkbox"
]


class FormWidget(QtGui.QWidget):
    """
    The base class for all form widgets.

    Lays out a single editor widget (column 0), an optional suffix label
    (column 1) and a status icon (column 2) that shows a padlock while the
    widget is read-only.
    """
    # Class-wide cache for the scaled padlock pixmap; created lazily on
    # first instantiation (a QApplication must exist by then).
    __LOCKED_PIX = None

    def __init__(self, value, parent=None):
        # `value` is accepted for subclass symmetry; the base class itself
        # does not use it.
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QGridLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)

        self._widget = None

        self.__status = QtGui.QLabel(self)
        self.__status.setContentsMargins(5, 0, 0, 0)
        layout.addWidget(self.__status, 0, 2)

        if not FormWidget.__LOCKED_PIX:
            FormWidget.__LOCKED_PIX = QtGui.QPixmap(":/images/locked.png")
            FormWidget.__LOCKED_PIX = FormWidget.__LOCKED_PIX.scaled(
                QtCore.QSize(12, 12),
                QtCore.Qt.KeepAspectRatio,
                QtCore.Qt.SmoothTransformation)

    def setReadOnly(self, value):
        """Toggle read-only mode and show/hide the padlock icon."""
        self._setReadOnly(value)
        if value:
            self.__status.setPixmap(FormWidget.__LOCKED_PIX)
        else:
            self.__status.setText("")

    def setSuffix(self, value):
        """Display a suffix (e.g. a unit) next to the editor widget."""
        self._setSuffix(value)

    def _setSuffix(self, value):
        self.layout().addWidget(QtGui.QLabel(value), 0, 1)

    def _setReadOnly(self, value):
        # Subclass hook; the default widget has no read-only state.
        pass

    def setWidget(self, widget):
        """Install the main editor widget in column 0."""
        self._widget = widget
        self.layout().addWidget(widget, 0, 0)


class Text(FormWidget):
    """A single-line text field."""

    def __init__(self, text, parent=None):
        # BUG FIX: previously `FormWidget.__init__(self, parent)` bound the
        # parent to the unused `value` slot, so the widget was created
        # without a Qt parent. Same fix applied to every subclass below.
        FormWidget.__init__(self, text, parent)
        self.setWidget(QtGui.QLineEdit(text, self))
        self._widget.setFocusPolicy(QtCore.Qt.NoFocus)
        # NOTE(review): cursor placed at position 1, presumably to scroll
        # long values into view; 0 may have been intended -- confirm.
        self._widget.setCursorPosition(1)

    def _setReadOnly(self, value):
        self._widget.setReadOnly(value)


class Number(FormWidget):
    """An integer spin box in the range [0, 1000000]."""

    def __init__(self, value, parent=None):
        FormWidget.__init__(self, value, parent)
        widget = QtGui.QSpinBox(self)
        widget.setMinimum(0)
        widget.setMaximum(1000000)
        widget.setMinimumWidth(100)
        widget.setValue(value)
        self.setWidget(widget)
        self._widget.setFocusPolicy(QtCore.Qt.NoFocus)

    def _setReadOnly(self, value):
        self._widget.setReadOnly(value)
        # BUG FIX: the spin buttons were hidden unconditionally, so a widget
        # unlocked via setReadOnly(False) never got its buttons back.
        if value:
            self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        else:
            self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.UpDownArrows)

    def _setSuffix(self, value):
        self._widget.setSuffix(value)


class Decimal(FormWidget):
    """A floating-point spin box."""

    def __init__(self, value, parent=None):
        FormWidget.__init__(self, value, parent)
        widget = QtGui.QDoubleSpinBox(self)
        widget.setValue(value)
        self.setWidget(widget)
        widget.setMinimumWidth(100)
        self._widget.setFocusPolicy(QtCore.Qt.NoFocus)

    def _setReadOnly(self, value):
        self._widget.setReadOnly(value)
        # BUG FIX: same unconditional button hiding as Number (see above).
        if value:
            self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        else:
            self._widget.setButtonSymbols(QtGui.QAbstractSpinBox.UpDownArrows)

    def _setSuffix(self, value):
        self._widget.setSuffix(value)


class DateTime(FormWidget):
    """A read-only label showing a formatted timestamp."""

    def __init__(self, value, parent=None):
        FormWidget.__init__(self, value, parent)
        self.setWidget(QtGui.QLabel(formatDateTime(value), self))


class Duration(FormWidget):
    """A read-only label showing the duration between two times.

    `times` is a 2-item sequence (start, stop) passed to formatDuration.
    """

    def __init__(self, times, parent=None):
        FormWidget.__init__(self, times, parent)
        self.setWidget(QtGui.QLabel(formatDuration(times[0], times[1]), self))


class PillWidget(FormWidget):
    """A rounded, colored "pill" label.

    `value` is a (text, css_color) pair.
    """

    def __init__(self, value, parent):
        FormWidget.__init__(self, value, parent)
        data, color = value

        self.label = QtGui.QLabel(data, self)
        self.label.setStyleSheet("border: 1px solid #222222; background-color: %s; border-radius: 6px;" % color)
        self.label.setMinimumWidth(100)
        self.setWidget(self.label)


class Checkbox(FormWidget):
    """A boolean checkbox."""

    def __init__(self, bvalue, parent=None):
        FormWidget.__init__(self, bvalue, parent)
        self.setWidget(QtGui.QCheckBox(self))
        self._widget.setCheckState(
            QtCore.Qt.Checked if bvalue else QtCore.Qt.Unchecked)
        self._widget.setFocusPolicy(QtCore.Qt.NoFocus)

    def _setReadOnly(self, value):
        # BUG FIX: QCheckBox has no setReadOnly(); the original call raised
        # AttributeError. Approximate read-only by disabling interaction.
        self._widget.setEnabled(not value)
chadmv/plow
lib/python/plow/gui/form/fwidgets.py
Python
apache-2.0
4,081
package com.github.tomakehurst.wiremock.standalone;

import com.github.tomakehurst.wiremock.core.MappingsSaver;

/**
 * A bidirectional source of stub mappings: anything that mappings can both be
 * loaded from ({@link MappingsLoader}) and saved back to
 * ({@link MappingsSaver}). Marker interface only; it adds no methods of its
 * own.
 */
public interface MappingsSource extends MappingsLoader, MappingsSaver {
}
planetakshay/wiremock
src/main/java/com/github/tomakehurst/wiremock/standalone/MappingsSource.java
Java
apache-2.0
187
/*
 * Licensed to ElasticSearch and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. ElasticSearch licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.monitor.fs;

import com.google.common.collect.Iterators;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.Iterator;

/**
 * A point-in-time snapshot of filesystem statistics, one {@link Info} per
 * data path. Serializable over the transport layer ({@link Streamable}) and
 * renderable as XContent ({@link ToXContent}).
 */
public class FsStats implements Iterable<FsStats.Info>, Streamable, ToXContent {

    /**
     * Stats for a single filesystem path. All numeric fields use -1 as the
     * "unknown / not collected" sentinel; toXContent omits such fields.
     */
    public static class Info implements Streamable {

        String path;
        @Nullable
        String mount;
        @Nullable
        String dev;
        long total = -1;
        long free = -1;
        long available = -1;
        long diskReads = -1;
        long diskWrites = -1;
        long diskReadBytes = -1;
        long diskWriteBytes = -1;
        double diskQueue = -1;
        double diskServiceTime = -1;

        // NOTE: field order here is the wire format; readFrom and writeTo
        // must stay in lock-step.
        @Override
        public void readFrom(StreamInput in) throws IOException {
            path = in.readString();
            mount = in.readOptionalString();
            dev = in.readOptionalString();
            total = in.readLong();
            free = in.readLong();
            available = in.readLong();
            diskReads = in.readLong();
            diskWrites = in.readLong();
            diskReadBytes = in.readLong();
            diskWriteBytes = in.readLong();
            diskQueue = in.readDouble();
            diskServiceTime = in.readDouble();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(path);
            out.writeOptionalString(mount);
            out.writeOptionalString(dev);
            out.writeLong(total);
            out.writeLong(free);
            out.writeLong(available);
            out.writeLong(diskReads);
            out.writeLong(diskWrites);
            out.writeLong(diskReadBytes);
            out.writeLong(diskWriteBytes);
            out.writeDouble(diskQueue);
            out.writeDouble(diskServiceTime);
        }

        public String getPath() {
            return path;
        }

        /** May be null when the mount point could not be determined. */
        public String getMount() {
            return mount;
        }

        /** May be null when the device name could not be determined. */
        public String getDev() {
            return dev;
        }

        public ByteSizeValue getTotal() {
            return new ByteSizeValue(total);
        }

        public ByteSizeValue getFree() {
            return new ByteSizeValue(free);
        }

        public ByteSizeValue getAvailable() {
            return new ByteSizeValue(available);
        }

        public long getDiskReads() {
            return this.diskReads;
        }

        public long getDiskWrites() {
            return this.diskWrites;
        }

        public long getDiskReadSizeInBytes() {
            return diskReadBytes;
        }

        public ByteSizeValue getDiskReadSizeSize() {
            return new ByteSizeValue(diskReadBytes);
        }

        public long getDiskWriteSizeInBytes() {
            return diskWriteBytes;
        }

        public ByteSizeValue getDiskWriteSizeSize() {
            return new ByteSizeValue(diskWriteBytes);
        }

        public double getDiskQueue() {
            return diskQueue;
        }

        public double getDiskServiceTime() {
            return diskServiceTime;
        }
    }

    // Snapshot time and per-path stats; package-private for the monitor code.
    long timestamp;
    Info[] infos;

    // No-arg constructor used only by deserialization (readFsStats).
    FsStats() {

    }

    FsStats(long timestamp, Info[] infos) {
        this.timestamp = timestamp;
        this.infos = infos;
    }

    public long getTimestamp() {
        return timestamp;
    }

    @Override
    public Iterator<Info> iterator() {
        return Iterators.forArray(infos);
    }

    /** Deserializes an FsStats instance from the given stream. */
    public static FsStats readFsStats(StreamInput in) throws IOException {
        FsStats stats = new FsStats();
        stats.readFrom(in);
        return stats;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        timestamp = in.readVLong();
        infos = new Info[in.readVInt()];
        for (int i = 0; i < infos.length; i++) {
            infos[i] = new Info();
            infos[i].readFrom(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(timestamp);
        out.writeVInt(infos.length);
        for (Info info : infos) {
            info.writeTo(out);
        }
    }

    // XContent field-name constants, kept together so the JSON schema is
    // visible in one place.
    static final class Fields {
        static final XContentBuilderString FS = new XContentBuilderString("fs");
        static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
        static final XContentBuilderString DATA = new XContentBuilderString("data");
        static final XContentBuilderString PATH = new XContentBuilderString("path");
        static final XContentBuilderString MOUNT = new XContentBuilderString("mount");
        static final XContentBuilderString DEV = new XContentBuilderString("dev");
        static final XContentBuilderString TOTAL = new XContentBuilderString("total");
        static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
        static final XContentBuilderString FREE = new XContentBuilderString("free");
        static final XContentBuilderString FREE_IN_BYTES = new XContentBuilderString("free_in_bytes");
        static final XContentBuilderString AVAILABLE = new XContentBuilderString("available");
        static final XContentBuilderString AVAILABLE_IN_BYTES = new XContentBuilderString("available_in_bytes");
        static final XContentBuilderString DISK_READS = new XContentBuilderString("disk_reads");
        static final XContentBuilderString DISK_WRITES = new XContentBuilderString("disk_writes");
        static final XContentBuilderString DISK_READ_SIZE = new XContentBuilderString("disk_read_size");
        static final XContentBuilderString DISK_READ_SIZE_IN_BYTES = new XContentBuilderString("disk_read_size_in_bytes");
        static final XContentBuilderString DISK_WRITE_SIZE = new XContentBuilderString("disk_write_size");
        static final XContentBuilderString DISK_WRITE_SIZE_IN_BYTES = new XContentBuilderString("disk_write_size_in_bytes");
        static final XContentBuilderString DISK_QUEUE = new XContentBuilderString("disk_queue");
        static final XContentBuilderString DISK_SERVICE_TIME = new XContentBuilderString("disk_service_time");
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.FS);
        builder.field(Fields.TIMESTAMP, timestamp);
        builder.startArray(Fields.DATA);
        for (Info info : infos) {
            builder.startObject();
            builder.field(Fields.PATH, info.path, XContentBuilder.FieldCaseConversion.NONE);
            if (info.mount != null) {
                builder.field(Fields.MOUNT, info.mount, XContentBuilder.FieldCaseConversion.NONE);
            }
            if (info.dev != null) {
                builder.field(Fields.DEV, info.dev, XContentBuilder.FieldCaseConversion.NONE);
            }

            // -1 is the "unknown" sentinel: such fields are skipped entirely.
            if (info.total != -1) {
                builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, info.total);
            }
            if (info.free != -1) {
                builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, info.free);
            }
            if (info.available != -1) {
                builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, info.available);
            }

            if (info.diskReads != -1) {
                builder.field(Fields.DISK_READS, info.diskReads);
            }
            if (info.diskWrites != -1) {
                builder.field(Fields.DISK_WRITES, info.diskWrites);
            }
            if (info.diskReadBytes != -1) {
                builder.byteSizeField(Fields.DISK_READ_SIZE_IN_BYTES, Fields.DISK_READ_SIZE, info.getDiskReadSizeInBytes());
            }
            if (info.diskWriteBytes != -1) {
                builder.byteSizeField(Fields.DISK_WRITE_SIZE_IN_BYTES, Fields.DISK_WRITE_SIZE, info.getDiskWriteSizeInBytes());
            }
            if (info.diskQueue != -1) {
                builder.field(Fields.DISK_QUEUE, Strings.format1Decimals(info.diskQueue, ""));
            }
            if (info.diskServiceTime != -1) {
                builder.field(Fields.DISK_SERVICE_TIME, Strings.format1Decimals(info.diskServiceTime, ""));
            }

            builder.endObject();
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }
}
andrewvc/elasticsearch
src/main/java/org/elasticsearch/monitor/fs/FsStats.java
Java
apache-2.0
9,609
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.6.0_29) on Mon Nov 26 17:21:33 MSK 2012 --> <TITLE> org.apache.poi.hssf Class Hierarchy (POI API Documentation) </TITLE> <META NAME="date" CONTENT="2012-11-26"> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="org.apache.poi.hssf Class Hierarchy (POI API Documentation)"; } } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <HR> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT 
CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../org/apache/poi/hsmf/parsers/package-tree.html"><B>PREV</B></A>&nbsp; &nbsp;<A HREF="../../../../org/apache/poi/hssf/converter/package-tree.html"><B>NEXT</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../index.html?org/apache/poi/hssf/package-tree.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= END OF TOP NAVBAR ========= --> <HR> <CENTER> <H2> Hierarchy For Package org.apache.poi.hssf </H2> </CENTER> <DL> <DT><B>Package Hierarchies:</B><DD><A HREF="../../../../overview-tree.html">All Packages</A></DL> <HR> <H2> Class Hierarchy </H2> <UL> <LI TYPE="circle">java.lang.Object<UL> <LI TYPE="circle">java.lang.Throwable (implements java.io.Serializable) <UL> <LI TYPE="circle">java.lang.Exception<UL> <LI TYPE="circle">java.lang.RuntimeException<UL> <LI TYPE="circle">java.lang.IllegalArgumentException<UL> <LI TYPE="circle">org.apache.poi.<A HREF="../../../../org/apache/poi/OldFileFormatException.html" title="class in org.apache.poi"><B>OldFileFormatException</B></A><UL> <LI TYPE="circle">org.apache.poi.hssf.<A HREF="../../../../org/apache/poi/hssf/OldExcelFormatException.html" title="class in org.apache.poi.hssf"><B>OldExcelFormatException</B></A></UL> </UL> </UL> 
</UL> </UL> </UL> </UL> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- --></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../org/apache/poi/hsmf/parsers/package-tree.html"><B>PREV</B></A>&nbsp; &nbsp;<A HREF="../../../../org/apache/poi/hssf/converter/package-tree.html"><B>NEXT</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../index.html?org/apache/poi/hssf/package-tree.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A 
HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> <i>Copyright 2012 The Apache Software Foundation or its licensors, as applicable.</i> </BODY> </HTML>
brenthand/Panda
poi-3.9/docs/apidocs/org/apache/poi/hssf/package-tree.html
HTML
apache-2.0
6,831
/*
---------------------------------------------------------------------------
Open Asset Import Library - Java Binding (jassimp)
---------------------------------------------------------------------------

Copyright (c) 2006-2012, assimp team

All rights reserved.

Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the following
conditions are met:

* Redistributions of source code must retain the above
  copyright notice, this list of conditions and the
  following disclaimer.

* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the
  following disclaimer in the documentation and/or other
  materials provided with the distribution.

* Neither the name of the assimp team, nor the names of its
  contributors may be used to endorse or promote products
  derived from this software without specific prior
  written permission of the assimp team.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
*/
package jassimp;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.Set;


/**
 * Entry point to the jassimp library.<p>
 *
 * Use {@link #importFile(String, Set)} to load a file.
 *
 * <h3>General Notes and Pitfalls</h3>
 * Due to the loading via JNI, strings (for example as returned by the
 * <code>getName()</code> methods) are not interned. You should therefore
 * compare strings the way it should be done, i.e, via <code>equals()</code>.
 * Pointer comparison will fail.
 */
public final class Jassimp {

    /**
     * The native interface.
     *
     * @param filename the file to load
     * @param postProcessing post processing flags
     * @param ioSystem custom I/O system, or null for the assimp default
     * @param progressHandler progress callback, or null for none
     * @return the loaded scene, or null if an error occurred
     * @throws IOException if an error occurs
     */
    private static native AiScene aiImportFile(String filename,
            long postProcessing, AiIOSystem<?> ioSystem,
            AiProgressHandler progressHandler) throws IOException;


    /**
     * The active wrapper provider.
     */
    private static AiWrapperProvider<?, ?, ?, ?, ?> s_wrapperProvider =
            new AiBuiltInWrapperProvider();


    /**
     * The library loader to load the native library.
     */
    private static JassimpLibraryLoader s_libraryLoader =
            new JassimpLibraryLoader();

    /**
     * Status flag if the library is loaded.
     *
     * Volatile to avoid problems with double checked locking.
     *
     */
    private static volatile boolean s_libraryLoaded = false;

    /**
     * Lock for library loading.
     */
    private static final Object s_libraryLoadingLock = new Object();

    /**
     * The default wrapper provider using built in types.
     */
    public static final AiWrapperProvider<?, ?, ?, ?, ?> BUILTIN =
            new AiBuiltInWrapperProvider();


    /**
     * Imports a file via assimp without post processing.
     *
     * @param filename the file to import
     * @return the loaded scene
     * @throws IOException if an error occurs
     */
    public static AiScene importFile(String filename) throws IOException {

        return importFile(filename, EnumSet.noneOf(AiPostProcessSteps.class));
    }


    /**
     * Imports a file via assimp without post processing.
     *
     * @param filename the file to import
     * @param ioSystem ioSystem to load files, or null for default
     * @return the loaded scene
     * @throws IOException if an error occurs
     */
    public static AiScene importFile(String filename, AiIOSystem<?> ioSystem)
            throws IOException {

        return importFile(filename, EnumSet.noneOf(AiPostProcessSteps.class),
                ioSystem);
    }


    /**
     * Imports a file via assimp.
     *
     * @param filename the file to import
     * @param postProcessing post processing flags
     * @return the loaded scene, or null if an error occurred
     * @throws IOException if an error occurs
     */
    public static AiScene importFile(String filename,
            Set<AiPostProcessSteps> postProcessing) throws IOException {

        return importFile(filename, postProcessing, null);
    }


    /**
     * Imports a file via assimp.
     *
     * @param filename the file to import
     * @param postProcessing post processing flags
     * @param ioSystem ioSystem to load files, or null for default
     * @return the loaded scene, or null if an error occurred
     * @throws IOException if an error occurs
     */
    public static AiScene importFile(String filename,
            Set<AiPostProcessSteps> postProcessing, AiIOSystem<?> ioSystem)
            throws IOException {

        return importFile(filename, postProcessing, ioSystem, null);
    }


    /**
     * Imports a file via assimp.
     *
     * Loads the native library on first use (see {@link #loadLibrary()}).
     *
     * @param filename the file to import
     * @param postProcessing post processing flags
     * @param ioSystem ioSystem to load files, or null for default
     * @param progressHandler progress callback, or null for none
     * @return the loaded scene, or null if an error occurred
     * @throws IOException if an error occurs
     */
    public static AiScene importFile(String filename,
            Set<AiPostProcessSteps> postProcessing, AiIOSystem<?> ioSystem,
            AiProgressHandler progressHandler)
            throws IOException {

        loadLibrary();

        return aiImportFile(filename, AiPostProcessSteps.toRawValue(
                postProcessing), ioSystem, progressHandler);
    }


    /**
     * Returns the size of a struct or primitive.<p>
     *
     * @return the result of sizeof call
     */
    public static native int getVKeysize();

    /**
     * @see #getVKeysize
     */
    public static native int getQKeysize();

    /**
     * @see #getVKeysize
     */
    public static native int getV3Dsize();

    /**
     * @see #getVKeysize
     */
    public static native int getfloatsize();

    /**
     * @see #getVKeysize
     */
    public static native int getintsize();

    /**
     * @see #getVKeysize
     */
    public static native int getuintsize();

    /**
     * @see #getVKeysize
     */
    public static native int getdoublesize();

    /**
     * @see #getVKeysize
     */
    public static native int getlongsize();

    /**
     * Returns a human readable error description.<p>
     *
     * This method can be called when one of the import methods fails, i.e.,
     * throws an exception, to get a human readable error description.
     *
     * @return the error string
     */
    public static native String getErrorString();


    /**
     * Returns the active wrapper provider.<p>
     *
     * This method is part of the wrapped API (see {@link AiWrapperProvider}
     * for details on wrappers).
     *
     * @return the active wrapper provider
     */
    public static AiWrapperProvider<?, ?, ?, ?, ?> getWrapperProvider() {
        return s_wrapperProvider;
    }


    /**
     * Sets a new wrapper provider.<p>
     *
     * This method is part of the wrapped API (see {@link AiWrapperProvider}
     * for details on wrappers).
     *
     * @param wrapperProvider the new wrapper provider
     */
    public static void setWrapperProvider(AiWrapperProvider<?, ?, ?, ?, ?>
            wrapperProvider) {

        s_wrapperProvider = wrapperProvider;
    }


    // Replaces the loader used by loadLibrary(); call before the first import.
    public static void setLibraryLoader(JassimpLibraryLoader libraryLoader) {
        s_libraryLoader = libraryLoader;
    }


    /**
     * Helper method for wrapping a matrix.<p>
     *
     * Used by JNI, do not modify!
     *
     * @param data the matrix data
     * @return the wrapped matrix
     */
    static Object wrapMatrix(float[] data) {
        return s_wrapperProvider.wrapMatrix4f(data);
    }


    /**
     * Helper method for wrapping a color (rgb).<p>
     *
     * Used by JNI, do not modify!
     *
     * @param red red component
     * @param green green component
     * @param blue blue component
     * @return the wrapped color
     */
    static Object wrapColor3(float red, float green, float blue) {
        // RGB is wrapped as RGBA with full opacity.
        return wrapColor4(red, green, blue, 1.0f);
    }


    /**
     * Helper method for wrapping a color (rgba).<p>
     *
     * Used by JNI, do not modify!
     *
     * @param red red component
     * @param green green component
     * @param blue blue component
     * @param alpha alpha component
     * @return the wrapped color
     */
    static Object wrapColor4(float red, float green, float blue, float alpha) {
        ByteBuffer temp = ByteBuffer.allocate(4 * 4);
        temp.putFloat(red);
        temp.putFloat(green);
        temp.putFloat(blue);
        temp.putFloat(alpha);
        temp.flip();
        return s_wrapperProvider.wrapColor(temp, 0);
    }


    /**
     * Helper method for wrapping a vector.<p>
     *
     * Used by JNI, do not modify!
     *
     * @param x x component
     * @param y y component
     * @param z z component
     * @return the wrapped vector
     */
    static Object wrapVec3(float x, float y, float z) {
        ByteBuffer temp = ByteBuffer.allocate(3 * 4);
        temp.putFloat(x);
        temp.putFloat(y);
        temp.putFloat(z);
        temp.flip();
        return s_wrapperProvider.wrapVector3f(temp, 0, 3);
    }


    /**
     * Helper method for wrapping a scene graph node.<p>
     *
     * Used by JNI, do not modify!
     *
     * @param parent the parent node
     * @param matrix the transformation matrix
     * @param meshRefs array of mesh references
     * @param name the name of the node
     * @return the wrapped scene node
     */
    static Object wrapSceneNode(Object parent, Object matrix, int[] meshRefs,
            String name) {

        return s_wrapperProvider.wrapSceneNode(parent, matrix, meshRefs, name);
    }


    /**
     * Helper method to load the library using the provided
     * JassimpLibraryLoader.<p>
     *
     * Double-checked locking on the volatile s_libraryLoaded flag: the fast
     * path skips the lock once loading has completed. Also caches the native
     * sizeof values into the NATIVE_*_SIZE fields below.
     */
    private static void loadLibrary()
    {
        if(!s_libraryLoaded)
        {
            synchronized(s_libraryLoadingLock)
            {
                if(!s_libraryLoaded)
                {
                    s_libraryLoader.loadLibrary();

                    NATIVE_AIVEKTORKEY_SIZE = getVKeysize();
                    NATIVE_AIQUATKEY_SIZE = getQKeysize();
                    NATIVE_AIVEKTOR3D_SIZE = getV3Dsize();
                    NATIVE_FLOAT_SIZE = getfloatsize();
                    NATIVE_INT_SIZE = getintsize();
                    NATIVE_UINT_SIZE = getuintsize();
                    NATIVE_DOUBLE_SIZE = getdoublesize();
                    NATIVE_LONG_SIZE = getlongsize();

                    s_libraryLoaded = true;
                }
            }
        }
    }


    /**
     * Pure static class, no accessible constructor.
     */
    private Jassimp() {
        /* nothing to do */
    }

    // Native sizeof values, populated by loadLibrary() on first import.
    // Treat as read-only after initialization.
    public static int NATIVE_AIVEKTORKEY_SIZE;
    public static int NATIVE_AIQUATKEY_SIZE;
    public static int NATIVE_AIVEKTOR3D_SIZE;
    public static int NATIVE_FLOAT_SIZE;
    public static int NATIVE_INT_SIZE;
    public static int NATIVE_UINT_SIZE;
    public static int NATIVE_DOUBLE_SIZE;
    public static int NATIVE_LONG_SIZE;
}
google/filament
third_party/libassimp/port/jassimp/jassimp/src/jassimp/Jassimp.java
Java
apache-2.0
12,142
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.module."""


import httplib
import logging
import os
import re
import time
import unittest

import google
import mox

from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import module
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import wsgi_server


class ModuleConfigurationStub(object):
  """In-memory stand-in for a parsed module configuration.

  Exposes the attribute surface the Module classes read (scaling settings,
  handlers, runtime, version id, ...) without parsing an app.yaml.

  NOTE(review): the mutable default arguments (automatic_scaling,
  inbound_services, handlers) are evaluated once at function definition and
  shared by every instance built with the defaults; tests must *reassign*
  these attributes, never mutate them in place.
  """

  def __init__(self, application_root='/root', application='app',
               module_name='default',
               automatic_scaling=appinfo.AutomaticScaling(),
               version='version', runtime='python27', threadsafe=False,
               skip_files='', inbound_services=['warmup'],
               handlers=[appinfo.URLMap(url=r'/python-(.*)',
                                        script=r'\1.py')],
               normalized_libraries=None, env_variables=None,
               manual_scaling=None, basic_scaling=None):
    self.application_root = application_root
    self.application = application
    self.module_name = module_name
    self.automatic_scaling = automatic_scaling
    self.manual_scaling = manual_scaling
    self.basic_scaling = basic_scaling
    self.major_version = version
    self.runtime = runtime
    self.threadsafe = threadsafe
    self.skip_files = skip_files
    self.inbound_services = inbound_services
    self.handlers = handlers
    # Fall back to fresh empty lists so attribute reads never see None.
    self.normalized_libraries = normalized_libraries or []
    self.env_variables = env_variables or []
    # '12345' stands in for the minor (timestamp-like) version component.
    self.version_id = '%s:%s.%s' % (module_name, version, '12345')
    self.is_backend = False

  def check_for_updates(self):
    # Real configurations return the set of changed settings; the stub
    # reports "nothing changed".
    return set()


class ModuleFacade(module.Module):
  """module.Module with the constructor boilerplate filled in for tests.

  Allows injecting an instance factory and forcing the ready state.
  """

  def __init__(self,
               module_configuration=ModuleConfigurationStub(),
               instance_factory=None,
               ready=True,
               allow_skipped_files=False):
    # Positional/keyword order must match module.Module.__init__ exactly.
    super(ModuleFacade, self).__init__(
        module_configuration,
        host='fakehost',
        balanced_port=0,
        api_port=8080,
        auth_domain='gmail.com',
        runtime_stderr_loglevel=1,
        php_executable_path='/usr/bin/php-cgi',
        enable_php_remote_debugging=False,
        python_config=None,
        cloud_sql_config=None,
        default_version_port=8080,
        port_registry=dispatcher.PortRegistry(),
        request_data=None,
        dispatcher=None,
        max_instances=None,
        use_mtime_file_watcher=False,
        automatic_restarts=True,
        allow_skipped_files=allow_skipped_files)
    if instance_factory is not None:
      self._instance_factory = instance_factory
    self._ready = ready

  @property
  def ready(self):
    # Overridden so tests control readiness directly.
    return self._ready

  @property
  def balanced_port(self):
    return self._balanced_port


class AutoScalingModuleFacade(module.AutoScalingModule):
  """module.AutoScalingModule preconfigured for tests (see ModuleFacade)."""

  def __init__(self,
               module_configuration=ModuleConfigurationStub(),
               balanced_port=0,
               instance_factory=None,
               max_instances=None,
               ready=True):
    super(AutoScalingModuleFacade, self).__init__(
        module_configuration,
        host='fakehost',
        balanced_port=balanced_port,
        api_port=8080,
        auth_domain='gmail.com',
        runtime_stderr_loglevel=1,
        php_executable_path='/usr/bin/php-cgi',
        enable_php_remote_debugging=False,
        python_config=None,
        cloud_sql_config=None,
        default_version_port=8080,
        port_registry=dispatcher.PortRegistry(),
        request_data=None,
        dispatcher=None,
        max_instances=max_instances,
        use_mtime_file_watcher=False,
        automatic_restarts=True,
        allow_skipped_files=False)
    if instance_factory
is not None:
      self._instance_factory = instance_factory
    self._ready = ready

  @property
  def ready(self):
    # Overridden so tests control readiness directly.
    return self._ready

  @property
  def balanced_port(self):
    return self._balanced_port


class ManualScalingModuleFacade(module.ManualScalingModule):
  """module.ManualScalingModule preconfigured for tests."""

  def __init__(self,
               module_configuration=ModuleConfigurationStub(),
               balanced_port=0,
               instance_factory=None,
               ready=True):
    # Positional/keyword order must match the parent __init__ exactly.
    super(ManualScalingModuleFacade, self).__init__(
        module_configuration,
        host='fakehost',
        balanced_port=balanced_port,
        api_port=8080,
        auth_domain='gmail.com',
        runtime_stderr_loglevel=1,
        php_executable_path='/usr/bin/php-cgi',
        enable_php_remote_debugging=False,
        python_config=None,
        cloud_sql_config=None,
        default_version_port=8080,
        port_registry=dispatcher.PortRegistry(),
        request_data=None,
        dispatcher=None,
        max_instances=None,
        use_mtime_file_watcher=False,
        automatic_restarts=True,
        allow_skipped_files=False)
    if instance_factory is not None:
      self._instance_factory = instance_factory
    self._ready = ready

  @property
  def ready(self):
    return self._ready

  @property
  def balanced_port(self):
    return self._balanced_port


class BasicScalingModuleFacade(module.BasicScalingModule):
  """module.BasicScalingModule preconfigured for tests.

  Unlike the other facades, the host is injectable (first parameter).
  """

  def __init__(self,
               host='fakehost',
               module_configuration=ModuleConfigurationStub(),
               balanced_port=0,
               instance_factory=None,
               ready=True):
    super(BasicScalingModuleFacade, self).__init__(
        module_configuration,
        host,
        balanced_port=balanced_port,
        api_port=8080,
        auth_domain='gmail.com',
        runtime_stderr_loglevel=1,
        php_executable_path='/usr/bin/php-cgi',
        enable_php_remote_debugging=False,
        python_config=None,
        cloud_sql_config=None,
        default_version_port=8080,
        port_registry=dispatcher.PortRegistry(),
        request_data=None,
        dispatcher=None,
        max_instances=None,
        use_mtime_file_watcher=False,
        automatic_restarts=True,
        allow_skipped_files=False)
    if instance_factory is not None:
      self._instance_factory = instance_factory
    self._ready = ready

  @property
  def ready(self):
    return self._ready

  @property
  def balanced_port(self):
    return self._balanced_port


class
BuildRequestEnvironTest(unittest.TestCase): def setUp(self): api_server.test_setup_stubs() self.module = ModuleFacade() def test_build_request_environ(self): expected_environ = { constants.FAKE_IS_ADMIN_HEADER: '1', 'HTTP_HOST': 'fakehost:8080', 'HTTP_HEADER': 'Value', 'HTTP_OTHER': 'Values', 'CONTENT_LENGTH': '4', 'PATH_INFO': '/foo', 'QUERY_STRING': 'bar=baz', 'REQUEST_METHOD': 'PUT', 'REMOTE_ADDR': '1.2.3.4', 'SERVER_NAME': 'fakehost', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.multithread': True, 'wsgi.multiprocess': True} environ = self.module.build_request_environ( 'PUT', '/foo?bar=baz', [('Header', 'Value'), ('Other', 'Values')], 'body', '1.2.3.4', 8080) self.assertEqual('', environ.pop('wsgi.errors').getvalue()) self.assertEqual('body', environ.pop('wsgi.input').getvalue()) self.assertEqual(expected_environ, environ) def test_build_request_environ_fake_is_logged_in(self): expected_environ = { constants.FAKE_IS_ADMIN_HEADER: '1', constants.FAKE_LOGGED_IN_HEADER: '1', 'HTTP_HOST': 'fakehost:8080', 'HTTP_HEADER': 'Value', 'HTTP_OTHER': 'Values', 'CONTENT_LENGTH': '4', 'PATH_INFO': '/foo', 'QUERY_STRING': 'bar=baz', 'REQUEST_METHOD': 'PUT', 'REMOTE_ADDR': '1.2.3.4', 'SERVER_NAME': 'fakehost', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.multithread': True, 'wsgi.multiprocess': True} environ = self.module.build_request_environ( 'PUT', '/foo?bar=baz', [('Header', 'Value'), ('Other', 'Values')], 'body', '1.2.3.4', 8080, fake_login=True) self.assertEqual('', environ.pop('wsgi.errors').getvalue()) self.assertEqual('body', environ.pop('wsgi.input').getvalue()) self.assertEqual(expected_environ, environ) def test_build_request_environ_unicode_body(self): expected_environ = { constants.FAKE_IS_ADMIN_HEADER: '1', 'HTTP_HOST': 'fakehost', 'HTTP_HEADER': 'Value', 'HTTP_OTHER': 'Values', 'CONTENT_LENGTH': '4', 'PATH_INFO': '/foo', 
'QUERY_STRING': 'bar=baz', 'REQUEST_METHOD': 'PUT', 'REMOTE_ADDR': '1.2.3.4', 'SERVER_NAME': 'fakehost', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.multithread': True, 'wsgi.multiprocess': True} environ = self.module.build_request_environ( 'PUT', '/foo?bar=baz', [('Header', 'Value'), ('Other', 'Values')], u'body', '1.2.3.4', 80) self.assertEqual('', environ.pop('wsgi.errors').getvalue()) self.assertEqual('body', environ.pop('wsgi.input').getvalue()) self.assertEqual(expected_environ, environ) class TestModuleCreateUrlHandlers(unittest.TestCase): """Tests for module.Module._create_url_handlers.""" def setUp(self): self.module_configuration = ModuleConfigurationStub() self.instance_factory = instance.InstanceFactory(None, 1) self.servr = ModuleFacade(instance_factory=self.instance_factory, module_configuration=self.module_configuration) self.instance_factory.START_URL_MAP = appinfo.URLMap( url='/_ah/start', script='start_handler', login='admin') self.instance_factory.WARMUP_URL_MAP = appinfo.URLMap( url='/_ah/warmup', script='warmup_handler', login='admin') def test_match_all(self): self.module_configuration.handlers = [appinfo.URLMap(url=r'.*', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(6, len(handlers)) def test_match_start_only(self): self.module_configuration.handlers = [appinfo.URLMap(url=r'/_ah/start', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(7, len(handlers)) self.assertEqual(self.instance_factory.WARMUP_URL_MAP, handlers[0].url_map) def test_match_warmup_only(self): self.module_configuration.handlers = [appinfo.URLMap(url=r'/_ah/warmup', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(7, len(handlers)) self.assertEqual(self.instance_factory.START_URL_MAP, handlers[0].url_map) def test_match_neither_warmup_nor_start(self): self.module_configuration.handlers = [appinfo.URLMap(url=r'/', 
script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(8, len(handlers)) self.assertEqual(self.instance_factory.WARMUP_URL_MAP, handlers[0].url_map) self.assertEqual(self.instance_factory.START_URL_MAP, handlers[1].url_map) def test_match_static_only(self): self.module_configuration.handlers = [ appinfo.URLMap(url=r'/_ah/start', static_dir='foo'), appinfo.URLMap(url=r'/_ah/warmup', static_files='foo', upload='foo')] handlers = self.servr._create_url_handlers() self.assertEqual(9, len(handlers)) self.assertEqual(self.instance_factory.WARMUP_URL_MAP, handlers[0].url_map) self.assertEqual(self.instance_factory.START_URL_MAP, handlers[1].url_map) def test_match_start_only_no_inbound_warmup(self): self.module_configuration.inbound_services = None self.module_configuration.handlers = [appinfo.URLMap(url=r'/_ah/start', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(6, len(handlers)) def test_match_warmup_only_no_inbound_warmup(self): self.module_configuration.inbound_services = None self.module_configuration.handlers = [appinfo.URLMap(url=r'/_ah/warmup', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(7, len(handlers)) self.assertEqual(self.instance_factory.START_URL_MAP, handlers[0].url_map) def test_match_neither_warmup_nor_start_no_inbound_warmup(self): self.module_configuration.inbound_services = None self.module_configuration.handlers = [appinfo.URLMap(url=r'/', script=r'foo.py')] handlers = self.servr._create_url_handlers() self.assertEqual(7, len(handlers)) self.assertEqual(self.instance_factory.START_URL_MAP, handlers[0].url_map) class TestModuleGetRuntimeConfig(unittest.TestCase): """Tests for module.Module._get_runtime_config.""" def setUp(self): self.module_configuration = ModuleConfigurationStub(skip_files='foo') self.module_configuration.handlers = [ appinfo.URLMap(url=r'/static', static_dir='static'), appinfo.URLMap(url=r'/app_read_static', 
static_dir='app_read_static', application_readable=True), appinfo.URLMap(url=r'/static_images/*.png', static_files=r'static_images/\\1', upload=r'static_images/*.png'), appinfo.URLMap(url=r'/app_readable_static_images/*.png', static_files=r'app_readable_static_images/\\1', upload=r'app_readable_static_images/*.png', application_readable=True), ] self.instance_factory = instance.InstanceFactory(None, 1) def test_static_files_regex(self): servr = ModuleFacade(instance_factory=self.instance_factory, module_configuration=self.module_configuration) config = servr._get_runtime_config() self.assertEqual(r'^(static%s.*)|(static_images/*.png)$' % re.escape(os.path.sep), config.static_files) def test_allow_skipped_files(self): servr = ModuleFacade(instance_factory=self.instance_factory, module_configuration=self.module_configuration, allow_skipped_files=True) config = servr._get_runtime_config() self.assertFalse(config.HasField('skip_files')) self.assertFalse(config.HasField('static_files')) class TestModuleShutdownInstance(unittest.TestCase): """Tests for module.Module._shutdown_instance.""" def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.module_configuration = ModuleConfigurationStub() self.instance_factory = instance.InstanceFactory(None, 1) self.servr = ModuleFacade(instance_factory=self.instance_factory, module_configuration=self.module_configuration) self.mox.StubOutWithMock(logging, 'exception') self.mox.StubOutWithMock(self.servr, '_handle_request') self.mox.StubOutWithMock(self.servr._quit_event, 'wait') self.mox.StubOutWithMock(module.Module, 'build_request_environ') self.inst = self.mox.CreateMock(instance.Instance) self.time = 0 self.mox.stubs.Set(time, 'time', lambda: self.time) def tearDown(self): self.mox.UnsetStubs() def test_shutdown_instance(self): def advance_time(*unused_args, **unused_kwargs): self.time += 10 environ = object() self.servr.build_request_environ( 'GET', '/_ah/stop', [], '', '0.1.0.3', 9000, 
fake_login=True).AndReturn( environ) self.servr._handle_request( environ, start_response_utils.null_start_response, inst=self.inst, request_type=instance.SHUTDOWN_REQUEST).WithSideEffects(advance_time) self.servr._quit_event.wait(20) self.inst.quit(force=True) self.mox.ReplayAll() self.servr._shutdown_instance(self.inst, 9000) self.mox.VerifyAll() class TestAutoScalingModuleWarmup(unittest.TestCase): """Tests for module.AutoScalingModule._warmup.""" def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.mox.StubOutWithMock(module.Module, 'build_request_environ') def tearDown(self): self.mox.UnsetStubs() def test_warmup(self): s = AutoScalingModuleFacade(balanced_port=8080) self.mox.StubOutWithMock(s, '_handle_request') self.mox.StubOutWithMock(s._condition, 'notify') inst = self.mox.CreateMock(instance.Instance) environ = object() s.build_request_environ('GET', '/_ah/warmup', [], '', '0.1.0.3', 8080, fake_login=True).AndReturn(environ) s._handle_request(environ, mox.IgnoreArg(), inst=inst, request_type=instance.READY_REQUEST) s._condition.notify(1) self.mox.ReplayAll() s._warmup(inst) self.mox.VerifyAll() class TestAutoScalingModuleAddInstance(unittest.TestCase): """Tests for module.AutoScalingModule._add_instance.""" def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.factory = self.mox.CreateMock(instance.InstanceFactory) self.factory.max_concurrent_requests = 10 def tearDown(self): self.mox.UnsetStubs() def test_permit_warmup(self): s = AutoScalingModuleFacade(instance_factory=self.factory) self.mox.StubOutWithMock(s, '_async_warmup') self.mox.StubOutWithMock(s._condition, 'notify') inst = self.mox.CreateMock(instance.Instance) self.factory.new_instance(mox.Regex('[a-f0-9]{36}'), expect_ready_request=True).AndReturn(inst) inst.start().AndReturn(True) s._async_warmup(inst) self.mox.ReplayAll() self.assertEqual(inst, s._add_instance(permit_warmup=True)) self.mox.VerifyAll() self.assertEqual(1, len(s._instances)) def 
test_no_permit_warmup(self): s = AutoScalingModuleFacade(instance_factory=self.factory) self.mox.StubOutWithMock(s._condition, 'notify') inst = self.mox.CreateMock(instance.Instance) self.factory.new_instance(mox.Regex('[a-f0-9]{36}'), expect_ready_request=False).AndReturn(inst) inst.start().AndReturn(True) s._condition.notify(10) self.mox.ReplayAll() self.assertEqual(inst, s._add_instance(permit_warmup=False)) self.mox.VerifyAll() self.assertIn(inst, s._instances) def test_failed_to_start(self): s = AutoScalingModuleFacade(instance_factory=self.factory) self.mox.StubOutWithMock(s, '_async_warmup') self.mox.StubOutWithMock(s._condition, 'notify') inst = self.mox.CreateMock(instance.Instance) self.factory.new_instance(mox.Regex('[a-f0-9]{36}'), expect_ready_request=True).AndReturn(inst) inst.start().AndReturn(False) self.mox.ReplayAll() self.assertIsNone(s._add_instance(permit_warmup=True)) self.mox.VerifyAll() self.assertEqual(1, len(s._instances)) def test_max_instances(self): s = AutoScalingModuleFacade(instance_factory=self.factory, max_instances=1) self.mox.StubOutWithMock(s._condition, 'notify') inst = self.mox.CreateMock(instance.Instance) self.factory.new_instance(mox.Regex('[a-f0-9]{36}'), expect_ready_request=False).AndReturn(inst) inst.start().AndReturn(True) s._condition.notify(10) self.mox.ReplayAll() self.assertEqual(inst, s._add_instance(permit_warmup=False)) self.assertEqual(None, s._add_instance(permit_warmup=False)) self.mox.VerifyAll() self.assertEqual(1, len(s._instances)) class TestAutoScalingInstancePoolHandleScriptRequest(unittest.TestCase): """Tests for module.AutoScalingModule.handle.""" def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.inst = self.mox.CreateMock(instance.Instance) self.environ = {} self.start_response = object() self.response = [object()] self.url_map = object() self.match = object() self.request_id = object() self.auto_module = AutoScalingModuleFacade( 
instance_factory=instance.InstanceFactory(object(), 10)) self.mox.StubOutWithMock(self.auto_module, '_choose_instance') self.mox.StubOutWithMock(self.auto_module, '_add_instance') self.mox.stubs.Set(time, 'time', lambda: 0.0) def tearDown(self): self.mox.UnsetStubs() def test_handle_script_request(self): self.auto_module._choose_instance(0.1).AndReturn(self.inst) self.inst.handle(self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.NORMAL_REQUEST).AndReturn(self.response) self.mox.ReplayAll() self.assertEqual( self.response, self.auto_module._handle_script_request(self.environ, self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() self.assertEqual([(mox.IgnoreArg(), 1)], list(self.auto_module._outstanding_request_history)) def test_handle_cannot_accept_request(self): self.auto_module._choose_instance(0.1).AndReturn(self.inst) self.auto_module._choose_instance(0.1).AndReturn(self.inst) self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.NORMAL_REQUEST).AndRaise( instance.CannotAcceptRequests) self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.NORMAL_REQUEST).AndReturn( self.response) self.mox.ReplayAll() self.assertEqual( self.response, self.auto_module._handle_script_request(self.environ, self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() self.assertEqual([(mox.IgnoreArg(), 1)], list(self.auto_module._outstanding_request_history)) def test_handle_new_instance(self): self.auto_module._choose_instance(0.1).AndReturn(None) self.auto_module._add_instance(permit_warmup=False).AndReturn(self.inst) self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.NORMAL_REQUEST).AndReturn( self.response) self.mox.ReplayAll() self.assertEqual( self.response, self.auto_module._handle_script_request(self.environ, 
self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() def test_handle_new_instance_none_returned(self): self.auto_module._choose_instance(0.1).AndReturn(None) self.auto_module._add_instance(permit_warmup=False).AndReturn(None) self.auto_module._choose_instance(0.2).AndReturn(self.inst) self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.NORMAL_REQUEST).AndReturn( self.response) self.mox.ReplayAll() self.assertEqual( self.response, self.auto_module._handle_script_request(self.environ, self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() class TestAutoScalingInstancePoolTrimRequestTimesAndOutstanding( unittest.TestCase): """Tests for AutoScalingModule._trim_outstanding_request_history.""" def setUp(self): api_server.test_setup_stubs() def test_trim_outstanding_request_history(self): servr = AutoScalingModuleFacade( instance_factory=instance.InstanceFactory(object(), 10)) servr._outstanding_request_history.append((0, 100)) servr._outstanding_request_history.append((1.0, 101)) servr._outstanding_request_history.append((1.2, 102)) servr._outstanding_request_history.append((2.5, 103)) now = time.time() servr._outstanding_request_history.append((now, 42)) servr._outstanding_request_history.append((now + 1, 43)) servr._outstanding_request_history.append((now + 3, 44)) servr._outstanding_request_history.append((now + 4, 45)) servr._trim_outstanding_request_history() self.assertEqual([(now, 42), (now + 1, 43), (now + 3, 44), (now + 4, 45)], list(servr._outstanding_request_history)) class TestAutoScalingInstancePoolGetNumRequiredInstances(unittest.TestCase): """Tests for AutoScalingModule._outstanding_request_history.""" def setUp(self): api_server.test_setup_stubs() self.servr = AutoScalingModuleFacade( instance_factory=instance.InstanceFactory(object(), 5)) def test_get_num_required_instances(self): now = time.time() 
self.servr._outstanding_request_history.append((now, 42)) self.servr._outstanding_request_history.append((now + 1, 43)) self.servr._outstanding_request_history.append((now + 3, 44)) self.servr._outstanding_request_history.append((now + 4, 45)) self.assertEqual(9, self.servr._get_num_required_instances()) def test_no_requests(self): self.assertEqual(0, self.servr._get_num_required_instances()) class TestAutoScalingInstancePoolSplitInstances(unittest.TestCase): """Tests for module.AutoScalingModule._split_instances.""" class Instance(object): def __init__(self, num_outstanding_requests, can_accept_requests=True): self.num_outstanding_requests = num_outstanding_requests self.can_accept_requests = can_accept_requests def __repr__(self): return str(self.num_outstanding_requests) def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.servr = AutoScalingModuleFacade( instance_factory=instance.InstanceFactory(object(), 10)) self.mox.StubOutWithMock(self.servr, '_get_num_required_instances') def tearDown(self): self.mox.UnsetStubs() def test_split_instances(self): instance1 = self.Instance(1) instance2 = self.Instance(2, can_accept_requests=False) instance3 = self.Instance(3) instance4 = self.Instance(4) instance5 = self.Instance(5) instance6 = self.Instance(6) instance7 = self.Instance(7) instance8 = self.Instance(8, can_accept_requests=False) instance9 = self.Instance(9) instance10 = self.Instance(10) self.servr._get_num_required_instances().AndReturn(5) self.servr._instances = set([instance1, instance2, instance3, instance4, instance5, instance6, instance7, instance8, instance9, instance10]) self.mox.ReplayAll() self.assertEqual( (set([instance10, instance9, instance7, instance6, instance5]), set([instance1, instance2, instance3, instance4, instance8])), self.servr._split_instances()) self.mox.VerifyAll() def test_split_instances_no_instances(self): self.servr._get_num_required_instances().AndReturn(5) self.servr._instances = set([]) 
self.mox.ReplayAll() self.assertEqual((set([]), set([])), self.servr._split_instances()) self.mox.VerifyAll() def test_split_instances_no_instances_not_enough_accepting_requests(self): instance1 = self.Instance(1) instance2 = self.Instance(1, can_accept_requests=False) instance3 = self.Instance(2, can_accept_requests=False) self.servr._get_num_required_instances().AndReturn(5) self.servr._instances = set([instance1, instance2, instance3]) self.mox.ReplayAll() self.assertEqual((set([instance1]), set([instance2, instance3])), self.servr._split_instances()) self.mox.VerifyAll() def test_split_instances_no_required_instances(self): instance1 = self.Instance(1) instance2 = self.Instance(2, can_accept_requests=False) instance3 = self.Instance(3, can_accept_requests=False) instance4 = self.Instance(4) instance5 = self.Instance(5) instance6 = self.Instance(6) instance7 = self.Instance(7) instance8 = self.Instance(8) self.servr._get_num_required_instances().AndReturn(0) self.servr._instances = set([instance1, instance2, instance3, instance4, instance5, instance6, instance7, instance8]) self.mox.ReplayAll() self.assertEqual( (set(), set([instance8, instance7, instance6, instance5, instance4, instance3, instance2, instance1])), self.servr._split_instances()) self.mox.VerifyAll() class TestAutoScalingInstancePoolChooseInstances(unittest.TestCase): """Tests for module.AutoScalingModule._choose_instance.""" class Instance(object): def __init__(self, num_outstanding_requests, can_accept_requests=True): self.num_outstanding_requests = num_outstanding_requests self.remaining_request_capacity = 10 - num_outstanding_requests self.can_accept_requests = can_accept_requests def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.servr = AutoScalingModuleFacade( instance_factory=instance.InstanceFactory(object(), 10)) self.mox.StubOutWithMock(self.servr, '_split_instances') self.mox.StubOutWithMock(self.servr._condition, 'wait') self.time = 10 self.mox.stubs.Set(time, 
'time', lambda: self.time)  # continuation of self.mox.stubs.Set(...) begun on the previous line

  def advance_time(self, *unused_args):
    # Side effect for mocked waits: jump the fake clock forward 10 seconds.
    self.time += 10

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_choose_instance_required_available(self):
    instance1 = self.Instance(1)
    instance2 = self.Instance(2)
    instance3 = self.Instance(3)
    instance4 = self.Instance(4)
    # First set returned by _split_instances is "required" instances.
    self.servr._split_instances().AndReturn(
        (set([instance3, instance4]), set([instance1, instance2])))
    self.mox.ReplayAll()
    self.assertEqual(instance3,  # Least busy required instance.
                     self.servr._choose_instance(15))
    self.mox.VerifyAll()

  def test_choose_instance_no_instances(self):
    self.servr._split_instances().AndReturn((set([]), set([])))
    self.servr._condition.wait(5).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(15))
    self.mox.VerifyAll()

  def test_choose_instance_no_instance_that_can_accept_requests(self):
    instance1 = self.Instance(1, can_accept_requests=False)
    self.servr._split_instances().AndReturn((set([]), set([instance1])))
    self.servr._condition.wait(5).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(15))
    self.mox.VerifyAll()

  def test_choose_instance_required_full(self):
    instance1 = self.Instance(1)
    instance2 = self.Instance(2)
    instance3 = self.Instance(10)
    instance4 = self.Instance(10)
    self.servr._split_instances().AndReturn(
        (set([instance3, instance4]), set([instance1, instance2])))
    self.mox.ReplayAll()
    self.assertEqual(instance2,  # Busiest non-required instance.
                     self.servr._choose_instance(15))
    self.mox.VerifyAll()

  def test_choose_instance_must_wait(self):
    instance1 = self.Instance(10)
    instance2 = self.Instance(10)
    self.servr._split_instances().AndReturn(
        (set([instance1]), set([instance2])))
    self.servr._condition.wait(5).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertIsNone(self.servr._choose_instance(15))
    self.mox.VerifyAll()


class TestAutoScalingInstancePoolAdjustInstances(unittest.TestCase):
  """Tests for module.AutoScalingModule._adjust_instances."""

  class Instance(object):
    # Minimal stand-in exposing only the attributes _adjust_instances reads.

    def __init__(self, num_outstanding_requests):
      self.num_outstanding_requests = num_outstanding_requests

    def quit(self):
      pass

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.servr = AutoScalingModuleFacade(
        module_configuration=ModuleConfigurationStub(
            automatic_scaling=appinfo.AutomaticScaling(
                min_pending_latency='0.1s',
                max_pending_latency='1.0s',
                min_idle_instances=1,
                max_idle_instances=2)),
        instance_factory=instance.InstanceFactory(object(), 10))
    self.mox.StubOutWithMock(self.servr, '_split_instances')
    self.mox.StubOutWithMock(self.servr, '_add_instance')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_adjust_instances_create_new(self):
    # All instances are required and none idle -> a new instance is added.
    instance1 = self.Instance(0)
    instance2 = self.Instance(2)
    instance3 = self.Instance(3)
    instance4 = self.Instance(4)
    self.servr._instances = set([instance1, instance2, instance3, instance4])
    self.servr._split_instances().AndReturn(
        (set([instance1, instance2, instance3, instance4]), set([])))
    self.servr._add_instance(permit_warmup=True)
    self.mox.ReplayAll()
    self.servr._adjust_instances()
    self.mox.VerifyAll()

  def test_adjust_instances_quit_idle(self):
    # Excess idle instance (no outstanding requests) should be quit.
    instance1 = self.Instance(0)
    instance2 = self.Instance(2)
    instance3 = self.Instance(3)
    instance4 = self.Instance(4)
    self.mox.StubOutWithMock(instance1, 'quit')
    self.servr._instances = set([instance1, instance2, instance3, instance4])
    self.servr._split_instances().AndReturn(
        (set([]), set([instance1, instance2, instance3, instance4])))
    instance1.quit()
    self.mox.ReplayAll()
    self.servr._adjust_instances()
    self.mox.VerifyAll()

  def test_adjust_instances_quit_idle_with_race(self):
    # quit() racing with a new request raises CannotQuitServingInstance;
    # _adjust_instances must tolerate it.
    instance1 = self.Instance(0)
    instance2 = self.Instance(2)
    instance3 = self.Instance(3)
    instance4 = self.Instance(4)
    self.mox.StubOutWithMock(instance1, 'quit')
    self.servr._instances = set([instance1, instance2, instance3, instance4])
    self.servr._split_instances().AndReturn(
        (set([]), set([instance1, instance2, instance3, instance4])))
    instance1.quit().AndRaise(instance.CannotQuitServingInstance)
    self.mox.ReplayAll()
    self.servr._adjust_instances()
    self.mox.VerifyAll()


class TestAutoScalingInstancePoolHandleChanges(unittest.TestCase):
  """Tests for module.AutoScalingModule._handle_changes."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.instance_factory = instance.InstanceFactory(object(), 10)
    self.servr = AutoScalingModuleFacade(
        instance_factory=self.instance_factory)
    self.mox.StubOutWithMock(self.instance_factory, 'files_changed')
    self.mox.StubOutWithMock(self.instance_factory, 'configuration_changed')
    self.mox.StubOutWithMock(self.servr, '_maybe_restart_instances')
    self.mox.StubOutWithMock(self.servr, '_create_url_handlers')
    self.mox.StubOutWithMock(self.servr._module_configuration,
                             'check_for_updates')
    self.mox.StubOutWithMock(self.servr._watcher, 'has_changes')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_no_changes(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(False)
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_irrelevant_config_change(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(False)
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_restart_config_change(self):
    # A restart-worthy config change notifies the factory and restarts.
    conf_change = frozenset([application_configuration.ENV_VARIABLES_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.instance_factory.configuration_changed(conf_change)
    self.servr._maybe_restart_instances(config_changed=True,
                                        file_changed=False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_handler_change(self):
    # A handlers change additionally rebuilds the URL handlers.
    conf_change = frozenset([application_configuration.HANDLERS_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.servr._create_url_handlers()
    self.instance_factory.configuration_changed(conf_change)
    self.servr._maybe_restart_instances(config_changed=True,
                                        file_changed=False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_file_change(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(True)
    self.instance_factory.files_changed()
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=True)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()


class TestAutoScalingInstancePoolMaybeRestartInstances(unittest.TestCase):
  """Tests for module.AutoScalingModule._maybe_restart_instances."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.instance_factory = instance.InstanceFactory(object(), 10)
    self.instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.ALWAYS
    self.servr = AutoScalingModuleFacade(
        instance_factory=self.instance_factory)
    self.inst1 = self.mox.CreateMock(instance.Instance)
    self.inst2 = self.mox.CreateMock(instance.Instance)
    self.inst3 = self.mox.CreateMock(instance.Instance)
    # inst2 has served no requests; relevant for AFTER_FIRST_REQUEST policy.
    self.inst1.total_requests = 2
    self.inst2.total_requests = 0
    self.inst3.total_requests = 4
    self.servr._instances.add(self.inst1)
    self.servr._instances.add(self.inst2)
    self.servr._instances.add(self.inst3)

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_no_changes(self):
    self.mox.ReplayAll()
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=False)
    self.mox.VerifyAll()

  def test_config_change(self):
    # Config changes always quit every instance, regardless of policy.
    self.inst1.quit(allow_async=True).InAnyOrder()
    self.inst2.quit(allow_async=True).InAnyOrder()
    self.inst3.quit(allow_async=True).InAnyOrder()
    self.mox.ReplayAll()
    self.servr._maybe_restart_instances(config_changed=True,
                                        file_changed=False)
    self.mox.VerifyAll()

  def test_file_change_restart_always(self):
    self.instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.ALWAYS
    self.inst1.quit(allow_async=True).InAnyOrder()
    self.inst2.quit(allow_async=True).InAnyOrder()
    self.inst3.quit(allow_async=True).InAnyOrder()
    self.mox.ReplayAll()
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=True)
    self.mox.VerifyAll()
    self.assertSequenceEqual(set(), self.servr._instances)

  def test_file_change_restart_after_first_request(self):
    # Only instances that have served at least one request are restarted.
    self.instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY = (
        instance.AFTER_FIRST_REQUEST)
    self.inst1.quit(allow_async=True).InAnyOrder()
    self.inst3.quit(allow_async=True).InAnyOrder()
    self.mox.ReplayAll()
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=True)
    self.mox.VerifyAll()
    self.assertSequenceEqual(set([self.inst2]), self.servr._instances)

  def test_file_change_restart_never(self):
    self.instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY = instance.NEVER
    self.mox.ReplayAll()
    self.servr._maybe_restart_instances(config_changed=False,
                                        file_changed=True)
    self.mox.VerifyAll()
    self.assertSequenceEqual(set([self.inst1, self.inst2, self.inst3]),
                             self.servr._instances)


class TestAutoScalingInstancePoolLoopAdjustingInstances(unittest.TestCase):
  """Tests for module.AutoScalingModule._loop_adjusting_instances."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.servr = AutoScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_loop_and_quit(self):
    self.mox.StubOutWithMock(self.servr, '_adjust_instances')
    self.mox.StubOutWithMock(self.servr, '_handle_changes')
    inst1 = self.mox.CreateMock(instance.Instance)
    inst2 = self.mox.CreateMock(instance.Instance)
    inst3 = self.mox.CreateMock(instance.Instance)
    self.servr._instances.add(inst1)
    self.servr._instances.add(inst2)
    self.servr._instances.add(inst3)
    self.servr._handle_changes()

    def do_quit(*unused_args):
      # Stop the loop after one iteration.
      self.servr._quit_event.set()

    self.servr._adjust_instances().WithSideEffects(do_quit)
    self.mox.ReplayAll()
    self.servr._loop_adjusting_instances()
    self.mox.VerifyAll()


class TestAutoScalingInstancePoolAutomaticScaling(unittest.TestCase):
  # Tests that automatic_scaling settings are parsed into pool attributes.

  def setUp(self):
    api_server.test_setup_stubs()

  def _create_module(self, automatic_scaling):
    return AutoScalingModuleFacade(
        module_configuration=ModuleConfigurationStub(
            automatic_scaling=automatic_scaling),
        instance_factory=instance.InstanceFactory(object(), 10))

  def test_unset_automatic_settings(self):
    settings = appinfo.AutomaticScaling()
    pool = self._create_module(settings)
    self.assertEqual(0.1, pool._min_pending_latency)
    self.assertEqual(0.5, pool._max_pending_latency)
    self.assertEqual(1, pool._min_idle_instances)
    self.assertEqual(1000, pool._max_idle_instances)

  def test_automatic_automatic_settings(self):
    # 'automatic' must resolve to the same defaults as unset settings.
    settings = appinfo.AutomaticScaling(
        min_pending_latency='automatic',
        max_pending_latency='automatic',
        min_idle_instances='automatic',
        max_idle_instances='automatic')
    pool = self._create_module(settings)
    self.assertEqual(0.1, pool._min_pending_latency)
    self.assertEqual(0.5, pool._max_pending_latency)
    self.assertEqual(1, pool._min_idle_instances)
    self.assertEqual(1000, pool._max_idle_instances)

  def test_explicit_automatic_settings(self):
    settings = appinfo.AutomaticScaling(
        min_pending_latency='1234ms',
        max_pending_latency='5.67s',
        min_idle_instances='3',
        max_idle_instances='20')
    pool = self._create_module(settings)
    # 'ms' and 's' suffixes both normalize to float seconds.
    self.assertEqual(1.234, pool._min_pending_latency)
    self.assertEqual(5.67, pool._max_pending_latency)
    self.assertEqual(3, pool._min_idle_instances)
    self.assertEqual(20, pool._max_idle_instances)


class TestManualScalingModuleStart(unittest.TestCase):
  """Tests for module.ManualScalingModule._start_instance."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(module.Module, 'build_request_environ')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_instance_start_success(self):
    # A successful start sends the /_ah/start ready request to the instance.
    s = ManualScalingModuleFacade(balanced_port=8080)
    self.mox.StubOutWithMock(s, '_handle_request')
    self.mox.StubOutWithMock(s._condition, 'notify')
    wsgi_servr = self.mox.CreateMock(wsgi_server.WsgiServer)
    wsgi_servr.port = 12345
    inst = self.mox.CreateMock(instance.Instance)
    inst.instance_id = 0
    inst.start().AndReturn(True)
    environ = object()
    s.build_request_environ('GET', '/_ah/start', [], '', '0.1.0.3', 12345,
                            fake_login=True).AndReturn(environ)
    s._handle_request(environ, mox.IgnoreArg(), inst=inst,
                      request_type=instance.READY_REQUEST)
    s._condition.notify(1)
    self.mox.ReplayAll()
    s._start_instance(wsgi_servr, inst)
    self.mox.VerifyAll()

  def test_instance_start_failure(self):
    # If Instance.start() fails, no ready request is sent.
    s = ManualScalingModuleFacade(balanced_port=8080)
    self.mox.StubOutWithMock(s, '_handle_request')
    self.mox.StubOutWithMock(s._condition, 'notify')
    wsgi_servr = self.mox.CreateMock(wsgi_server.WsgiServer)
    wsgi_servr.port = 12345
    inst = self.mox.CreateMock(instance.Instance)
    inst.instance_id = 0
    inst.start().AndReturn(False)
    self.mox.ReplayAll()
    s._start_instance(wsgi_servr, inst)
    self.mox.VerifyAll()


class TestManualScalingModuleAddInstance(unittest.TestCase):
  """Tests for module.ManualScalingModule._add_instance."""

  class WsgiServer(object):
    # Trivial stand-in exposing only a port attribute.

    def __init__(self, port):
      self.port = port

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.factory = self.mox.CreateMock(instance.InstanceFactory)
    self.factory.max_concurrent_requests = 10

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_add_while_started(self):
    servr = ManualScalingModuleFacade(instance_factory=self.factory)
    inst = self.mox.CreateMock(instance.Instance)
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.mox.StubOutWithMock(wsgi_server.WsgiServer, 'start')
    self.mox.StubOutWithMock(wsgi_server.WsgiServer, 'port')
    wsgi_server.WsgiServer.port = 12345
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(inst)
    wsgi_server.WsgiServer.start()
    # While running, the new instance is started asynchronously.
    module._THREAD_POOL.submit(servr._start_instance,
                               mox.IsA(wsgi_server.WsgiServer), inst)
    self.mox.ReplayAll()
    servr._add_instance()
    self.mox.VerifyAll()
    self.assertIn(inst, servr._instances)
    self.assertEqual((servr, inst), servr._port_registry.get(12345))

  def test_add_while_stopped(self):
    # While suspended, the instance is registered but not started.
    servr = ManualScalingModuleFacade(instance_factory=self.factory)
    servr._suspended = True
    inst = self.mox.CreateMock(instance.Instance)
    self.mox.StubOutWithMock(wsgi_server.WsgiServer, 'start')
    self.mox.StubOutWithMock(wsgi_server.WsgiServer, 'port')
    wsgi_server.WsgiServer.port = 12345
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(inst)
    wsgi_server.WsgiServer.start()
    self.mox.ReplayAll()
    servr._add_instance()
    self.mox.VerifyAll()
    self.assertIn(inst, servr._instances)
    self.assertEqual((servr, inst), servr._port_registry.get(12345))


class TestManualScalingInstancePoolHandleScriptRequest(unittest.TestCase):
  """Tests for module.ManualScalingModule.handle."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.inst = self.mox.CreateMock(instance.Instance)
    self.inst.instance_id = 0
    self.environ = {}
    self.start_response = object()
    self.response = [object()]
    self.url_map = object()
    self.match = object()
    self.request_id = object()
    self.manual_module = ManualScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))
    self.mox.StubOutWithMock(self.manual_module, '_choose_instance')
    self.mox.StubOutWithMock(self.manual_module, '_add_instance')
    self.mox.StubOutWithMock(self.manual_module._condition, 'notify')
    # Freeze time so the 10-second deadline passed to _choose_instance
    # is deterministic.
    self.mox.stubs.Set(time, 'time', lambda: 0.0)

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_handle_script_request(self):
    self.manual_module._choose_instance(10.0).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.manual_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.manual_module._handle_script_request(self.environ,
                                                  self.start_response,
                                                  self.url_map, self.match,
                                                  self.request_id))
    self.mox.VerifyAll()

  def test_handle_cannot_accept_request(self):
    # First handle() raises CannotAcceptRequests; the module retries.
    self.manual_module._choose_instance(10.0).AndReturn(self.inst)
    self.manual_module._choose_instance(10.0).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndRaise(
                         instance.CannotAcceptRequests)
    self.manual_module._condition.notify()
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.manual_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.manual_module._handle_script_request(self.environ,
                                                  self.start_response,
                                                  self.url_map, self.match,
                                                  self.request_id))
    self.mox.VerifyAll()

  def test_handle_must_wait(self):
    # _choose_instance returning None means "no instance yet" -> retry.
    self.manual_module._choose_instance(10.0).AndReturn(None)
    self.manual_module._choose_instance(10.0).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.manual_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.manual_module._handle_script_request(self.environ,
                                                  self.start_response,
                                                  self.url_map, self.match,
                                                  self.request_id))
    self.mox.VerifyAll()

  def test_handle_timeout(self):
    self.time = 0.0

    def advance_time(*unused_args):
      # Push the clock past the 10-second deadline.
      self.time += 11

    self.mox.stubs.Set(time, 'time', lambda: self.time)
    self.mox.StubOutWithMock(self.manual_module, '_error_response')
    self.manual_module._choose_instance(10.0).WithSideEffects(advance_time)
    self.manual_module._error_response(self.environ, self.start_response,
                                       503).AndReturn(self.response)
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.manual_module._handle_script_request(self.environ,
                                                  self.start_response,
                                                  self.url_map, self.match,
                                                  self.request_id))
    self.mox.VerifyAll()


class TestManualScalingInstancePoolChooseInstances(unittest.TestCase):
  """Tests for module.ManualScalingModule._choose_instance."""

  class Instance(object):
    # Minimal stand-in exposing only can_accept_requests.

    def __init__(self, can_accept_requests):
      self.can_accept_requests = can_accept_requests

  def setUp(self):
    self.mox = mox.Mox()
    api_server.test_setup_stubs()
    self.servr = ManualScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))
    self.mox.StubOutWithMock(self.servr._condition, 'wait')
    self.time = 0
    self.mox.stubs.Set(time, 'time', lambda: self.time)

  def advance_time(self, *unused_args):
    # Side effect for mocked waits: jump the fake clock forward 10 seconds.
    self.time += 10

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_choose_instance_first_can_accept(self):
    instance1 = self.Instance(True)
    instance2 = self.Instance(True)
    self.servr._instances = [instance1, instance2]
    self.mox.ReplayAll()
    self.assertEqual(instance1, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_first_cannot_accept(self):
    instance1 = self.Instance(False)
    instance2 = self.Instance(True)
    self.servr._instances = [instance1, instance2]
    self.mox.ReplayAll()
    self.assertEqual(instance2, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_none_can_accept(self):
    instance1 = self.Instance(False)
    instance2 = self.Instance(False)
    self.servr._instances = [instance1, instance2]
    self.servr._condition.wait(5).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(5))
    self.mox.VerifyAll()

  def test_choose_instance_no_instances(self):
    self.servr._condition.wait(5).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(5))
    self.mox.VerifyAll()


class TestManualScalingInstancePoolSetNumInstances(unittest.TestCase):
  """Tests for module.ManualScalingModule.set_num_instances."""

  def setUp(self):
    self.mox = mox.Mox()
    api_server.test_setup_stubs()
    self.module = ManualScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))
    self._instance = self.mox.CreateMock(instance.Instance)
    self._wsgi_server = self.mox.CreateMock(wsgi_server.WsgiServer)
    self._wsgi_server.port = 8080
    self.module._instances = [self._instance]
    self.module._wsgi_servers = [self._wsgi_server]
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.mox.StubOutWithMock(self.module, '_add_instance')
    self.mox.StubOutWithMock(self.module, '_shutdown_instance')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_no_op(self):
    self.mox.ReplayAll()
    self.assertEqual(1, self.module.get_num_instances())
    self.module.set_num_instances(1)
    self.mox.VerifyAll()

  def test_add_an_instance(self):
    self.module._add_instance()
    self.mox.ReplayAll()
    self.assertEqual(1, self.module.get_num_instances())
    self.module.set_num_instances(2)
    self.mox.VerifyAll()

  def test_remove_an_instance(self):
    module._THREAD_POOL.submit(self.module._quit_instance,
                               self._instance, self._wsgi_server)
    self._instance.quit(expect_shutdown=True)
    self._wsgi_server.quit()
    self.module._shutdown_instance(self._instance, 8080)
    self.mox.ReplayAll()
    self.assertEqual(1, self.module.get_num_instances())
    self.module.set_num_instances(0)
    # Run the work that set_num_instances submitted to the thread pool.
    self.module._quit_instance(self._instance, self._wsgi_server)
    self.mox.VerifyAll()


class TestManualScalingInstancePoolSuspendAndResume(unittest.TestCase):
  """Tests for module.ManualScalingModule.suspend and resume."""

  def setUp(self):
    self.mox = mox.Mox()
    api_server.test_setup_stubs()
    self.factory = self.mox.CreateMock(instance.InstanceFactory)
    self.module = ManualScalingModuleFacade(instance_factory=self.factory)
    self._instance = self.mox.CreateMock(instance.Instance)
    self._wsgi_server = wsgi_server.WsgiServer(('localhost', 0), None)
    self.module._instances = [self._instance]
    self.module._wsgi_servers = [self._wsgi_server]
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.mox.StubOutWithMock(self.module, '_shutdown_instance')
    self._wsgi_server.start()

  def tearDown(self):
    self._wsgi_server.quit()
    self.mox.UnsetStubs()

  def test_already_suspended(self):
    self.module._suspended = True
    self.assertRaises(request_info.ModuleAlreadyStoppedError,
                      self.module.suspend)

  def test_already_resumed(self):
    self.assertRaises(request_info.ModuleAlreadyStartedError,
                      self.module.resume)

  def test_suspend_instance(self):
    module._THREAD_POOL.submit(self.module._suspend_instance, self._instance,
                               self._wsgi_server.port)
    self._instance.quit(expect_shutdown=True)
    port = object()
    self.module._shutdown_instance(self._instance, port)
    self.mox.ReplayAll()
    self.module.suspend()
    # Run the work that suspend() submitted to the thread pool.
    self.module._suspend_instance(self._instance, port)
    self.mox.VerifyAll()
    # A suspended server serves 404s and has no app installed.
    self.assertEqual(404, self._wsgi_server._error)
    self.assertEqual(None, self._wsgi_server._app)
    self.assertTrue(self.module._suspended)

  def test_resume(self):
    self.module._suspended = True
    self.module._instances = [object()]
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(
        self._instance)
    module._THREAD_POOL.submit(self.module._start_instance, self._wsgi_server,
                               self._instance)
    self.mox.ReplayAll()
    self.module.resume()
    self.mox.VerifyAll()
    self.assertEqual(self.module._handle_request, self._wsgi_server._app.func)
    self.assertEqual({'inst': self._instance},
                     self._wsgi_server._app.keywords)
    self.assertFalse(self.module._suspended)

  def test_restart(self):
    self._new_instance = (
        self.mox.CreateMock(instance.Instance))
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(
        self._new_instance)
    module._THREAD_POOL.submit(self.module._suspend_instance, self._instance,
                               self._wsgi_server.port)
    module._THREAD_POOL.submit(self.module._start_instance, self._wsgi_server,
                               self._new_instance)
    self._instance.quit(expect_shutdown=True)
    port = object()
    self.module._shutdown_instance(self._instance, port)
    self.mox.ReplayAll()
    self.module.restart()
    # Run the work that restart() submitted to the thread pool.
    self.module._suspend_instance(self._instance, port)
    self.mox.VerifyAll()
    # The new instance replaces the old one behind the same wsgi server.
    self.assertEqual(self.module._handle_request, self._wsgi_server._app.func)
    self.assertEqual({'inst': self._new_instance},
                     self._wsgi_server._app.keywords)
    self.assertFalse(self.module._suspended)


class TestManualScalingInstancePoolHandleChanges(unittest.TestCase):
  """Tests for module.ManualScalingModule._handle_changes."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.instance_factory = instance.InstanceFactory(object(), 10)
    self.servr = ManualScalingModuleFacade(
        instance_factory=self.instance_factory)
    self.mox.StubOutWithMock(self.instance_factory, 'files_changed')
    self.mox.StubOutWithMock(self.instance_factory, 'configuration_changed')
    self.mox.StubOutWithMock(self.servr, 'restart')
    self.mox.StubOutWithMock(self.servr, '_create_url_handlers')
    self.mox.StubOutWithMock(self.servr._module_configuration,
                             'check_for_updates')
    self.mox.StubOutWithMock(self.servr._watcher, 'has_changes')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_no_changes(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_irrelevant_config_change(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_restart_config_change(self):
    conf_change = frozenset([application_configuration.ENV_VARIABLES_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.instance_factory.configuration_changed(conf_change)
    self.servr.restart()
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_handler_change(self):
    conf_change = frozenset([application_configuration.HANDLERS_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.servr._create_url_handlers()
    self.instance_factory.configuration_changed(conf_change)
    self.servr.restart()
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_file_change(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(True)
    self.instance_factory.files_changed()
    self.servr.restart()
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_restart_config_change_suspended(self):
    # While suspended, changes are propagated but no restart happens.
    self.servr._suspended = True
    conf_change = frozenset([application_configuration.ENV_VARIABLES_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.instance_factory.configuration_changed(conf_change)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_handler_change_suspended(self):
    self.servr._suspended = True
    conf_change = frozenset([application_configuration.HANDLERS_CHANGED])
    self.servr._module_configuration.check_for_updates().AndReturn(conf_change)
    self.servr._watcher.has_changes().AndReturn(False)
    self.servr._create_url_handlers()
    self.instance_factory.configuration_changed(conf_change)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()

  def test_file_change_suspended(self):
    self.servr._suspended = True
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(True)
    self.instance_factory.files_changed()
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()


class TestBasicScalingModuleStart(unittest.TestCase):
  """Tests for module.BasicScalingModule._start_instance."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(module.Module, 'build_request_environ')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_instance_start_success(self):
    s = BasicScalingModuleFacade(balanced_port=8080)
    self.mox.StubOutWithMock(s, '_handle_request')
    self.mox.StubOutWithMock(s._condition, 'notify')
    wsgi_servr = self.mox.CreateMock(wsgi_server.WsgiServer)
    wsgi_servr.port = 12345
    s._wsgi_servers[0] = wsgi_servr
    inst = self.mox.CreateMock(instance.Instance)
    inst.instance_id = 0
    s._instances[0] = inst
    inst.start().AndReturn(True)
    environ = object()
    s.build_request_environ('GET', '/_ah/start', [], '', '0.1.0.3', 12345,
                            fake_login=True).AndReturn(environ)
    s._handle_request(environ, mox.IgnoreArg(), inst=inst,
                      request_type=instance.READY_REQUEST)
    s._condition.notify(1)
    self.mox.ReplayAll()
    s._start_instance(0)
    self.mox.VerifyAll()

  def test_instance_start_failure(self):
    # If Instance.start() fails, no ready request is sent.
    s = BasicScalingModuleFacade(balanced_port=8080)
    self.mox.StubOutWithMock(s, '_handle_request')
    self.mox.StubOutWithMock(s._condition, 'notify')
    wsgi_servr = self.mox.CreateMock(wsgi_server.WsgiServer)
    wsgi_servr.port = 12345
    s._wsgi_servers[0] = wsgi_servr
    inst = self.mox.CreateMock(instance.Instance)
    inst.instance_id = 0
    s._instances[0] = inst
    inst.start().AndReturn(False)
    self.mox.ReplayAll()
    s._start_instance(0)
    self.mox.VerifyAll()

  def test_start_any_instance_success(self):
    s = BasicScalingModuleFacade(balanced_port=8080)
    s._instance_running = [True, False, False, True]
    inst = object()
    s._instances = [None, inst, None, None]
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    # The first non-running slot (index 1) is chosen and started async.
    module._THREAD_POOL.submit(s._start_instance, 1)
    self.mox.ReplayAll()
    self.assertEqual(inst, s._start_any_instance())
    self.mox.VerifyAll()
    self.assertEqual([True, True, False, True], s._instance_running)

  def test_start_any_instance_all_already_running(self):
    s = BasicScalingModuleFacade(balanced_port=8080)
    s._instance_running = [True, True, True, True]
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.mox.ReplayAll()
    self.assertIsNone(s._start_any_instance())
    self.mox.VerifyAll()
    self.assertEqual([True, True, True, True], s._instance_running)


class TestBasicScalingInstancePoolHandleScriptRequest(unittest.TestCase):
  """Tests for module.BasicScalingModule.handle."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.inst = self.mox.CreateMock(instance.Instance)
    self.inst.instance_id = 0
    self.environ = {}
    self.start_response = object()
    self.response = [object()]
    self.url_map = object()
    self.match = object()
    self.request_id = object()
    self.basic_module = BasicScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))
    self.mox.StubOutWithMock(self.basic_module, '_choose_instance')
    self.mox.StubOutWithMock(self.basic_module, '_start_any_instance')
    self.mox.StubOutWithMock(self.basic_module, '_start_instance')
    self.mox.StubOutWithMock(self.basic_module._condition, 'wait')
    self.mox.StubOutWithMock(self.basic_module._condition, 'notify')
    self.time = 10
    self.mox.stubs.Set(time, 'time', lambda: self.time)

  def advance_time(self, *unused_args):
    # Jump past the 10-second deadline (start time is 10, deadline 20).
    self.time += 11

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_handle_script_request(self):
    self.basic_module._choose_instance(20).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id))
    self.mox.VerifyAll()

  def test_handle_cannot_accept_request(self):
    # First handle() raises CannotAcceptRequests; the module retries.
    self.basic_module._choose_instance(20).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndRaise(
                         instance.CannotAcceptRequests)
    self.basic_module._condition.notify()
    self.basic_module._choose_instance(20).AndReturn(self.inst)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id))
    self.mox.VerifyAll()

  def test_handle_timeout(self):
    self.mox.StubOutWithMock(self.basic_module, '_error_response')
    self.basic_module._choose_instance(20).WithSideEffects(self.advance_time)
    self.basic_module._error_response(self.environ, self.start_response,
                                      503).AndReturn(self.response)
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id))
    self.mox.VerifyAll()

  def test_handle_instance(self):
    # Targeted request: the caller specifies the instance explicitly.
    self.inst.instance_id = 0
    self.inst.has_quit = False
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id,
                                                 inst=self.inst))
    self.mox.VerifyAll()

  def test_handle_instance_start_the_instance(self):
    # Targeted instance not running yet: the module starts it and retries.
    self.inst.instance_id = 0
    self.inst.has_quit = False
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndRaise(
                         instance.CannotAcceptRequests)
    self.basic_module._start_instance(0).AndReturn(True)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id,
                                                 inst=self.inst))
    self.mox.VerifyAll()

  def test_handle_instance_already_running(self):
    # Targeted instance is starting up: wait for it, then retry.
    self.inst.instance_id = 0
    self.inst.has_quit = False
    self.basic_module._instance_running[0] = True
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndRaise(
                         instance.CannotAcceptRequests)
    self.inst.wait(20)
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id,
                                                 inst=self.inst))
    self.mox.VerifyAll()

  def test_handle_instance_timeout(self):
    # The wait blows the deadline -> 503 error response.
    self.mox.StubOutWithMock(self.basic_module, '_error_response')
    self.inst.instance_id = 0
    self.inst.has_quit = False
    self.basic_module._instance_running[0] = True
    self.inst.handle(self.environ, self.start_response, self.url_map,
                     self.match, self.request_id,
                     instance.NORMAL_REQUEST).AndRaise(
                         instance.CannotAcceptRequests)
    self.inst.wait(20).WithSideEffects(self.advance_time)
    self.basic_module._error_response(self.environ, self.start_response,
                                      503).AndReturn(self.response)
    self.basic_module._condition.notify()
    self.mox.ReplayAll()
    self.assertEqual(
        self.response,
        self.basic_module._handle_script_request(self.environ,
                                                 self.start_response,
                                                 self.url_map, self.match,
                                                 self.request_id,
                                                 inst=self.inst))
    self.mox.VerifyAll()
class TestBasicScalingInstancePoolChooseInstances(unittest.TestCase):
  """Tests for module.BasicScalingModule._choose_instance."""

  class Instance(object):
    # Minimal stand-in exposing only can_accept_requests.

    def __init__(self, can_accept_requests):
      self.can_accept_requests = can_accept_requests

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.servr = BasicScalingModuleFacade(
        instance_factory=instance.InstanceFactory(object(), 10))
    # The lambda reads self.time lazily, so assigning it below is fine.
    self.mox.stubs.Set(time, 'time', lambda: self.time)
    self.mox.StubOutWithMock(self.servr._condition, 'wait')
    self.mox.StubOutWithMock(self.servr, '_start_any_instance')
    self.time = 0

  def tearDown(self):
    self.mox.UnsetStubs()

  def advance_time(self, *unused_args):
    # Side effect for mocked waits: jump the fake clock past the deadline.
    self.time += 10

  def test_choose_instance_first_can_accept(self):
    instance1 = self.Instance(True)
    instance2 = self.Instance(True)
    self.servr._instances = [instance1, instance2]
    self.mox.ReplayAll()
    self.assertEqual(instance1, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_first_cannot_accept(self):
    instance1 = self.Instance(False)
    instance2 = self.Instance(True)
    self.servr._instances = [instance1, instance2]
    self.mox.ReplayAll()
    self.assertEqual(instance2, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_none_can_accept(self):
    instance1 = self.Instance(False)
    instance2 = self.Instance(False)
    self.servr._instance_running = [True, True]
    self.servr._instances = [instance1, instance2]
    self.servr._start_any_instance().AndReturn(None)
    self.servr._condition.wait(1).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_start_an_instance(self):
    instance1 = self.Instance(False)
    instance2 = self.Instance(False)
    mock_instance = self.mox.CreateMock(instance.Instance)
    self.servr._instances = [instance1, instance2]
    self.servr._instance_running = [True, False]
    self.servr._start_any_instance().AndReturn(mock_instance)
    mock_instance.wait(1)
    self.mox.ReplayAll()
    self.assertEqual(mock_instance, self.servr._choose_instance(1))
    self.mox.VerifyAll()

  def test_choose_instance_no_instances(self):
    self.servr._start_any_instance().AndReturn(None)
    self.servr._condition.wait(1).WithSideEffects(self.advance_time)
    self.mox.ReplayAll()
    self.assertEqual(None, self.servr._choose_instance(1))
    self.mox.VerifyAll()


class TestBasicScalingInstancePoolInstanceManagement(unittest.TestCase):
  # Tests for BasicScalingModule restart and idle-instance shutdown.

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.factory = self.mox.CreateMock(instance.InstanceFactory)
    self.factory.max_concurrent_requests = 10
    self.mox.StubOutWithMock(module._THREAD_POOL, 'submit')
    self.module = BasicScalingModuleFacade(instance_factory=self.factory,
                                           host='localhost')
    self.wsgi_server = self.module._wsgi_servers[0]
    self.wsgi_server.start()

  def tearDown(self):
    self.wsgi_server.quit()
    self.mox.UnsetStubs()

  def test_restart(self):
    old_instances = [self.mox.CreateMock(instance.Instance),
                     self.mox.CreateMock(instance.Instance)]
    self.module._instances = old_instances[:]
    self.module._instance_running = [True, False]
    new_instance = self.mox.CreateMock(instance.Instance)
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(
        new_instance)
    module._THREAD_POOL.submit(self.module._start_instance, 0)
    # Only the running instance is quit and shut down.
    old_instances[0].quit(expect_shutdown=True)
    module._THREAD_POOL.submit(self.module._shutdown_instance,
                               old_instances[0], self.wsgi_server.port)
    self.mox.ReplayAll()
    self.module.restart()
    self.mox.VerifyAll()
    self.assertEqual([True, False], self.module._instance_running)
    self.assertEqual(new_instance, self.module._instances[0])
    self.assertEqual(self.module._handle_request,
                     self.module._wsgi_servers[0]._app.func)
    self.assertEqual({'inst': new_instance},
                     self.module._wsgi_servers[0]._app.keywords)

  def test_shutdown_idle_instances(self):
    # NOTE(review): this local appears unused (the test operates on
    # self.module below) — verify whether it is needed.
    s = BasicScalingModuleFacade(instance_factory=self.factory)
    old_instances = [self.mox.CreateMock(instance.Instance),
                     self.mox.CreateMock(instance.Instance),
                     self.mox.CreateMock(instance.Instance)]
    self.module._instances = old_instances[:]
    old_instances[0].idle_seconds = (self.module._instance_idle_timeout + 1)
    old_instances[1].idle_seconds = 0
    old_instances[2].idle_seconds = (self.module._instance_idle_timeout + 1)
    # Instance 2 is idle past the timeout but not running, so only
    # instance 0 is shut down.
    self.module._instance_running = [True, True, False]
    new_instance = self.mox.CreateMock(instance.Instance)
    self.factory.new_instance(0, expect_ready_request=True).AndReturn(
        new_instance)
    old_instances[0].quit(expect_shutdown=True)
    module._THREAD_POOL.submit(self.module._shutdown_instance,
                               old_instances[0], self.wsgi_server.port)
    self.mox.ReplayAll()
    self.module._shutdown_idle_instances()
    self.mox.VerifyAll()
    self.assertEqual([False, True, False], self.module._instance_running)
    self.assertEqual(new_instance, self.module._instances[0])
    self.assertEqual(self.module._handle_request,
                     self.module._wsgi_servers[0]._app.func)
    self.assertEqual({'inst': new_instance},
                     self.module._wsgi_servers[0]._app.keywords)


class TestBasicScalingInstancePoolHandleChanges(unittest.TestCase):
  """Tests for module.BasicScalingModule._handle_changes."""

  def setUp(self):
    api_server.test_setup_stubs()
    self.mox = mox.Mox()
    self.instance_factory = instance.InstanceFactory(object(), 10)
    self.servr = BasicScalingModuleFacade(
        instance_factory=self.instance_factory)
    self.mox.StubOutWithMock(self.instance_factory, 'files_changed')
    self.mox.StubOutWithMock(self.instance_factory, 'configuration_changed')
    self.mox.StubOutWithMock(self.servr, 'restart')
    self.mox.StubOutWithMock(self.servr, '_create_url_handlers')
    self.mox.StubOutWithMock(self.servr._module_configuration,
                             'check_for_updates')
    self.mox.StubOutWithMock(self.servr._watcher, 'has_changes')

  def tearDown(self):
    self.mox.UnsetStubs()

  def test_no_changes(self):
    self.servr._module_configuration.check_for_updates().AndReturn(frozenset())
    self.servr._watcher.has_changes().AndReturn(False)
    self.mox.ReplayAll()
    self.servr._handle_changes()
    self.mox.VerifyAll()
test_irrelevant_config_change(self): self.servr._module_configuration.check_for_updates().AndReturn(frozenset()) self.servr._watcher.has_changes().AndReturn(False) self.mox.ReplayAll() self.servr._handle_changes() self.mox.VerifyAll() def test_restart_config_change(self): conf_change = frozenset([application_configuration.ENV_VARIABLES_CHANGED]) self.servr._module_configuration.check_for_updates().AndReturn(conf_change) self.servr._watcher.has_changes().AndReturn(False) self.instance_factory.configuration_changed(conf_change) self.servr.restart() self.mox.ReplayAll() self.servr._handle_changes() self.mox.VerifyAll() def test_handler_change(self): conf_change = frozenset([application_configuration.HANDLERS_CHANGED]) self.servr._module_configuration.check_for_updates().AndReturn(conf_change) self.servr._watcher.has_changes().AndReturn(False) self.servr._create_url_handlers() self.instance_factory.configuration_changed(conf_change) self.servr.restart() self.mox.ReplayAll() self.servr._handle_changes() self.mox.VerifyAll() def test_file_change(self): self.servr._module_configuration.check_for_updates().AndReturn(frozenset()) self.servr._watcher.has_changes().AndReturn(True) self.instance_factory.files_changed().AndReturn(True) self.servr.restart() self.mox.ReplayAll() self.servr._handle_changes() self.mox.VerifyAll() class TestInteractiveCommandModule(unittest.TestCase): def setUp(self): api_server.test_setup_stubs() self.mox = mox.Mox() self.inst = self.mox.CreateMock(instance.Instance) self.inst.instance_id = 0 self.environ = object() self.start_response = object() self.response = [object()] self.url_map = object() self.match = object() self.request_id = object() self.servr = module.InteractiveCommandModule( ModuleConfigurationStub(), 'fakehost', balanced_port=8000, api_port=9000, auth_domain='gmail.com', runtime_stderr_loglevel=1, php_executable_path='/usr/bin/php-cgi', enable_php_remote_debugging=False, python_config=None, cloud_sql_config=None, 
default_version_port=8080, port_registry=dispatcher.PortRegistry(), request_data=None, dispatcher=None, use_mtime_file_watcher=False, allow_skipped_files=False) self.mox.StubOutWithMock(self.servr._instance_factory, 'new_instance') self.mox.StubOutWithMock(self.servr, '_handle_request') self.mox.StubOutWithMock(self.servr, 'build_request_environ') def test_send_interactive_command(self): def good_response(unused_environ, start_response, request_type): start_response('200 OK', []) return ['10\n'] environ = object() self.servr.build_request_environ( 'POST', '/', [], 'print 5+5', '192.0.2.0', 8000).AndReturn(environ) self.servr._handle_request( environ, mox.IgnoreArg(), request_type=instance.INTERACTIVE_REQUEST).WithSideEffects( good_response) self.mox.ReplayAll() self.assertEqual('10\n', self.servr.send_interactive_command('print 5+5')) self.mox.VerifyAll() def test_send_interactive_command_handle_request_exception(self): environ = object() self.servr.build_request_environ( 'POST', '/', [], 'print 5+5', '192.0.2.0', 8000).AndReturn(environ) self.servr._handle_request( environ, mox.IgnoreArg(), request_type=instance.INTERACTIVE_REQUEST).AndRaise(Exception('error')) self.mox.ReplayAll() self.assertRaisesRegexp(module.InteractiveCommandError, 'error', self.servr.send_interactive_command, 'print 5+5') self.mox.VerifyAll() def test_send_interactive_command_handle_request_failure(self): def good_response(unused_environ, start_response, request_type): start_response('503 Service Unavailable', []) return ['Instance was restarted while executing command'] environ = object() self.servr.build_request_environ( 'POST', '/', [], 'print 5+5', '192.0.2.0', 8000).AndReturn(environ) self.servr._handle_request( environ, mox.IgnoreArg(), request_type=instance.INTERACTIVE_REQUEST).WithSideEffects( good_response) self.mox.ReplayAll() self.assertRaisesRegexp(module.InteractiveCommandError, 'Instance was restarted while executing command', self.servr.send_interactive_command, 'print 5+5') 
self.mox.VerifyAll() def test_handle_script_request(self): self.servr._instance_factory.new_instance( mox.IgnoreArg(), expect_ready_request=False).AndReturn(self.inst) self.inst.start() self.inst.handle(self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.INTERACTIVE_REQUEST).AndReturn(['10\n']) self.mox.ReplayAll() self.assertEqual( ['10\n'], self.servr._handle_script_request(self.environ, self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() def test_handle_script_request_busy(self): self.servr._instance_factory.new_instance( mox.IgnoreArg(), expect_ready_request=False).AndReturn(self.inst) self.inst.start() self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.INTERACTIVE_REQUEST).AndRaise(instance.CannotAcceptRequests()) self.inst.wait(mox.IgnoreArg()) self.inst.handle(self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.INTERACTIVE_REQUEST).AndReturn(['10\n']) self.mox.ReplayAll() self.assertEqual( ['10\n'], self.servr._handle_script_request(self.environ, self.start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() def test_handle_script_request_timeout(self): self.servr._MAX_REQUEST_WAIT_TIME = 0 start_response = start_response_utils.CapturingStartResponse() self.mox.ReplayAll() self.assertEqual( ['The command timed-out while waiting for another one to complete'], self.servr._handle_script_request(self.environ, start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() self.assertEqual('503 Service Unavailable', start_response.status) def test_handle_script_request_restart(self): def restart_and_raise(*args): self.servr._inst = None raise httplib.BadStatusLine('line') start_response = start_response_utils.CapturingStartResponse() self.servr._instance_factory.new_instance( mox.IgnoreArg(), expect_ready_request=False).AndReturn(self.inst) self.inst.start() 
self.inst.handle( self.environ, start_response, self.url_map, self.match, self.request_id, instance.INTERACTIVE_REQUEST).WithSideEffects(restart_and_raise) self.mox.ReplayAll() self.assertEqual( ['Instance was restarted while executing command'], self.servr._handle_script_request(self.environ, start_response, self.url_map, self.match, self.request_id)) self.mox.VerifyAll() self.assertEqual('503 Service Unavailable', start_response.status) def test_handle_script_request_unexpected_instance_exception(self): self.servr._instance_factory.new_instance( mox.IgnoreArg(), expect_ready_request=False).AndReturn(self.inst) self.inst.start() self.inst.handle( self.environ, self.start_response, self.url_map, self.match, self.request_id, instance.INTERACTIVE_REQUEST).AndRaise(httplib.BadStatusLine('line')) self.mox.ReplayAll() self.assertRaises( httplib.BadStatusLine, self.servr._handle_script_request, self.environ, self.start_response, self.url_map, self.match, self.request_id) self.mox.VerifyAll() if __name__ == '__main__': unittest.main()
dvliman/jaikuengine
.google_appengine/google/appengine/tools/devappserver2/module_test.py
Python
apache-2.0
90,007
/* * Copyright 2013 Palantir Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import * as Lint from "../lint"; describe("<whitespace>", () => { const fileName = "rules/whitespace.test.ts"; const WhitespaceRule = Lint.Test.getRule("whitespace"); const createFailure = Lint.Test.createFailuresOnFile(fileName, WhitespaceRule.FAILURE_STRING); let actualFailures: Lint.RuleFailure[]; before(() => { const options = [true, "check-branch", "check-decl", "check-operator", "check-module", "check-separator", "check-type", "check-typecast" ]; actualFailures = Lint.Test.applyRuleOnFile(fileName, WhitespaceRule, options); assert.lengthOf(actualFailures, 39); }); it("enforces rules only when enabled", () => { const failures = Lint.Test.applyRuleOnFile(fileName, WhitespaceRule); assert.equal(failures.length, 0); }); it("enforces whitespace in import statements", () => { const expectedFailures = [ createFailure([1, 11], [1, 12]), createFailure([1, 12], [1, 13]), createFailure([57, 19], [57, 20]), createFailure([58, 7], [58, 8]), createFailure([58, 16], [58, 17]), createFailure([58, 20], [58, 21]), createFailure([59, 26], [59, 27]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in export statements", () => { const expectedFailures = [ createFailure([3, 19], [3, 20]), createFailure([3, 20], [3, 21]), createFailure([42, 7], [42, 8]), createFailure([42, 8], [42, 9]), createFailure([64, 7], [64, 8]) 
]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in type declarations", () => { const expectedFailure = createFailure([5, 11], [5, 12]); Lint.Test.assertContainsFailure(actualFailures, expectedFailure); }); it("enforces whitespace in conditional statements", () => { const expectedFailures = [ createFailure([7, 23], [7, 24]), createFailure([7, 24], [7, 25]), createFailure([7, 25], [7, 26]), createFailure([7, 26], [7, 27]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in binary expressions", () => { const expectedFailures = [ createFailure([9, 16], [9, 17]), createFailure([9, 19], [9, 20]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in variable definitions", () => { const expectedFailures = [ createFailure([11, 10], [11, 11]), createFailure([11, 11], [11, 12]), createFailure([13, 11], [13, 12]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in switch statements", () => { const expectedFailures = [ createFailure([15, 11], [15, 12]), createFailure([16, 16], [16, 17]), createFailure([17, 17], [17, 18]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in for statements", () => { const expectedFailures = [ createFailure([20, 8], [20, 9]), createFailure([20, 15], [20, 16]), createFailure([20, 18], [20, 19]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace in while statements", () => { const expectedFailure = createFailure([24, 10], [24, 11]); Lint.Test.assertContainsFailure(actualFailures, expectedFailure); }); it("enforces whitespace in label definitions", () => { 
const expectedFailure = createFailure([21, 14], [21, 15]); Lint.Test.assertContainsFailure(actualFailures, expectedFailure); }); it("enforces whitespace around the => token", () => { const expectedFailures = [ createFailure([29, 17], [29, 18]), createFailure([29, 19], [29, 20]), createFailure([30, 17], [30, 18]), createFailure([30, 19], [30, 20]), createFailure([34, 14], [34, 15]), createFailure([34, 16], [34, 17]), createFailure([35, 18], [35, 19]), createFailure([35, 20], [35, 21]) ]; expectedFailures.forEach((failure) => { Lint.Test.assertContainsFailure(actualFailures, failure); }); }); it("enforces whitespace around typecasts", () => { Lint.Test.assertContainsFailure(actualFailures, createFailure([36, 21], [36, 22])); }); });
nikklassen/tslint
test/rules/whitespaceRuleTests.ts
TypeScript
apache-2.0
5,867
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com> # License: http://snmplabs.com/pyasn1/license.html # from pyasn1 import debug from pyasn1 import error from pyasn1.codec.ber import eoo from pyasn1.compat.integer import from_bytes from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null from pyasn1.type import base from pyasn1.type import char from pyasn1.type import tag from pyasn1.type import tagmap from pyasn1.type import univ from pyasn1.type import useful __all__ = ['decode'] LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER) noValue = base.noValue class AbstractDecoder(object): protoComponent = None def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,)) def indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,)) class AbstractSimpleDecoder(AbstractDecoder): @staticmethod def substrateCollector(asn1Object, substrate, length): return substrate[:length], substrate[length:] def _createComponent(self, asn1Spec, tagSet, value, **options): if options.get('native'): return value elif asn1Spec is None: return self.protoComponent.clone(value, tagSet=tagSet) elif value is noValue: return asn1Spec else: return asn1Spec.clone(value) class ExplicitTagDecoder(AbstractSimpleDecoder): protoComponent = univ.Any('') def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if substrateFun: return substrateFun( self._createComponent(asn1Spec, tagSet, '', **options), substrate, length ) head, tail = substrate[:length], substrate[length:] value, _ = decodeFun(head, asn1Spec, tagSet, length, **options) if LOG: LOG('explicit 
tag container carries %d octets of trailing payload ' '(will be lost!): %s' % (len(_), debug.hexdump(_))) return value, tail def indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if substrateFun: return substrateFun( self._createComponent(asn1Spec, tagSet, '', **options), substrate, length ) value, substrate = decodeFun(substrate, asn1Spec, tagSet, length, **options) eooMarker, substrate = decodeFun(substrate, allowEoo=True, **options) if eooMarker is eoo.endOfOctets: return value, substrate else: raise error.PyAsn1Error('Missing end-of-octets terminator') explicitTagDecoder = ExplicitTagDecoder() class IntegerDecoder(AbstractSimpleDecoder): protoComponent = univ.Integer(0) def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if tagSet[0].tagFormat != tag.tagFormatSimple: raise error.PyAsn1Error('Simple tag format expected') head, tail = substrate[:length], substrate[length:] if not head: return self._createComponent(asn1Spec, tagSet, 0, **options), tail value = from_bytes(head, signed=True) return self._createComponent(asn1Spec, tagSet, value, **options), tail class BooleanDecoder(IntegerDecoder): protoComponent = univ.Boolean(0) def _createComponent(self, asn1Spec, tagSet, value, **options): return IntegerDecoder._createComponent( self, asn1Spec, tagSet, value and 1 or 0, **options) class BitStringDecoder(AbstractSimpleDecoder): protoComponent = univ.BitString(()) supportConstructedForm = True def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): head, tail = substrate[:length], substrate[length:] if substrateFun: return substrateFun(self._createComponent( asn1Spec, tagSet, noValue, **options), substrate, length) if not head: raise error.PyAsn1Error('Empty BIT STRING substrate') if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX 
what tag to check? trailingBits = oct2int(head[0]) if trailingBits > 7: raise error.PyAsn1Error( 'Trailing bits overflow %s' % trailingBits ) value = self.protoComponent.fromOctetString( head[1:], internalFormat=True, padding=trailingBits) return self._createComponent(asn1Spec, tagSet, value, **options), tail if not self.supportConstructedForm: raise error.PyAsn1Error('Constructed encoding form prohibited ' 'at %s' % self.__class__.__name__) if LOG: LOG('assembling constructed serialization') # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector bitString = self.protoComponent.fromOctetString(null, internalFormat=True) while head: component, head = decodeFun(head, self.protoComponent, substrateFun=substrateFun, **options) trailingBits = oct2int(component[0]) if trailingBits > 7: raise error.PyAsn1Error( 'Trailing bits overflow %s' % trailingBits ) bitString = self.protoComponent.fromOctetString( component[1:], internalFormat=True, prepend=bitString, padding=trailingBits ) return self._createComponent(asn1Spec, tagSet, bitString, **options), tail def indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if substrateFun: return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length) # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector bitString = self.protoComponent.fromOctetString(null, internalFormat=True) while substrate: component, substrate = decodeFun(substrate, self.protoComponent, substrateFun=substrateFun, allowEoo=True, **options) if component is eoo.endOfOctets: break trailingBits = oct2int(component[0]) if trailingBits > 7: raise error.PyAsn1Error( 'Trailing bits overflow %s' % trailingBits ) bitString = self.protoComponent.fromOctetString( component[1:], internalFormat=True, prepend=bitString, padding=trailingBits ) else: raise 
error.SubstrateUnderrunError('No EOO seen before substrate ends') return self._createComponent(asn1Spec, tagSet, bitString, **options), substrate class OctetStringDecoder(AbstractSimpleDecoder): protoComponent = univ.OctetString('') supportConstructedForm = True def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): head, tail = substrate[:length], substrate[length:] if substrateFun: return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length) if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check? return self._createComponent(asn1Spec, tagSet, head, **options), tail if not self.supportConstructedForm: raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__) if LOG: LOG('assembling constructed serialization') # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector header = null while head: component, head = decodeFun(head, self.protoComponent, substrateFun=substrateFun, **options) header += component return self._createComponent(asn1Spec, tagSet, header, **options), tail def indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if substrateFun and substrateFun is not self.substrateCollector: asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options) return substrateFun(asn1Object, substrate, length) # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector header = null while substrate: component, substrate = decodeFun(substrate, self.protoComponent, substrateFun=substrateFun, allowEoo=True, **options) if component is eoo.endOfOctets: break header += component else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' ) return self._createComponent(asn1Spec, tagSet, header, **options), 
substrate class NullDecoder(AbstractSimpleDecoder): protoComponent = univ.Null('') def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if tagSet[0].tagFormat != tag.tagFormatSimple: raise error.PyAsn1Error('Simple tag format expected') head, tail = substrate[:length], substrate[length:] component = self._createComponent(asn1Spec, tagSet, '', **options) if head: raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length) return component, tail class ObjectIdentifierDecoder(AbstractSimpleDecoder): protoComponent = univ.ObjectIdentifier(()) def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if tagSet[0].tagFormat != tag.tagFormatSimple: raise error.PyAsn1Error('Simple tag format expected') head, tail = substrate[:length], substrate[length:] if not head: raise error.PyAsn1Error('Empty substrate') head = octs2ints(head) oid = () index = 0 substrateLen = len(head) while index < substrateLen: subId = head[index] index += 1 if subId < 128: oid += (subId,) elif subId > 128: # Construct subid from a number of octets nextSubId = subId subId = 0 while nextSubId >= 128: subId = (subId << 7) + (nextSubId & 0x7F) if index >= substrateLen: raise error.SubstrateUnderrunError( 'Short substrate for sub-OID past %s' % (oid,) ) nextSubId = head[index] index += 1 oid += ((subId << 7) + nextSubId,) elif subId == 128: # ASN.1 spec forbids leading zeros (0x80) in OID # encoding, tolerating it opens a vulnerability. 
See # https://www.esat.kuleuven.be/cosic/publications/article-1432.pdf # page 7 raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding') # Decode two leading arcs if 0 <= oid[0] <= 39: oid = (0,) + oid elif 40 <= oid[0] <= 79: oid = (1, oid[0] - 40) + oid[1:] elif oid[0] >= 80: oid = (2, oid[0] - 80) + oid[1:] else: raise error.PyAsn1Error('Malformed first OID octet: %s' % head[0]) return self._createComponent(asn1Spec, tagSet, oid, **options), tail class RealDecoder(AbstractSimpleDecoder): protoComponent = univ.Real() def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if tagSet[0].tagFormat != tag.tagFormatSimple: raise error.PyAsn1Error('Simple tag format expected') head, tail = substrate[:length], substrate[length:] if not head: return self._createComponent(asn1Spec, tagSet, 0.0, **options), tail fo = oct2int(head[0]) head = head[1:] if fo & 0x80: # binary encoding if not head: raise error.PyAsn1Error("Incomplete floating-point value") if LOG: LOG('decoding binary encoded REAL') n = (fo & 0x03) + 1 if n == 4: n = oct2int(head[0]) head = head[1:] eo, head = head[:n], head[n:] if not eo or not head: raise error.PyAsn1Error('Real exponent screwed') e = oct2int(eo[0]) & 0x80 and -1 or 0 while eo: # exponent e <<= 8 e |= oct2int(eo[0]) eo = eo[1:] b = fo >> 4 & 0x03 # base bits if b > 2: raise error.PyAsn1Error('Illegal Real base') if b == 1: # encbase = 8 e *= 3 elif b == 2: # encbase = 16 e *= 4 p = 0 while head: # value p <<= 8 p |= oct2int(head[0]) head = head[1:] if fo & 0x40: # sign bit p = -p sf = fo >> 2 & 0x03 # scale bits p *= 2 ** sf value = (p, 2, e) elif fo & 0x40: # infinite value if LOG: LOG('decoding infinite REAL') value = fo & 0x01 and '-inf' or 'inf' elif fo & 0xc0 == 0: # character encoding if not head: raise error.PyAsn1Error("Incomplete floating-point value") if LOG: LOG('decoding character encoded REAL') try: if fo & 0x3 == 0x1: # NR1 value = (int(head), 10, 0) 
elif fo & 0x3 == 0x2: # NR2 value = float(head) elif fo & 0x3 == 0x3: # NR3 value = float(head) else: raise error.SubstrateUnderrunError( 'Unknown NR (tag %s)' % fo ) except ValueError: raise error.SubstrateUnderrunError( 'Bad character Real syntax' ) else: raise error.SubstrateUnderrunError( 'Unknown encoding (tag %s)' % fo ) return self._createComponent(asn1Spec, tagSet, value, **options), tail class AbstractConstructedDecoder(AbstractDecoder): protoComponent = None class UniversalConstructedTypeDecoder(AbstractConstructedDecoder): protoRecordComponent = None protoSequenceComponent = None def _getComponentTagMap(self, asn1Object, idx): raise NotImplementedError() def _getComponentPositionByType(self, asn1Object, tagSet, idx): raise NotImplementedError() def _decodeComponents(self, substrate, tagSet=None, decodeFun=None, **options): components = [] componentTypes = set() while substrate: component, substrate = decodeFun(substrate, **options) if component is eoo.endOfOctets: break components.append(component) componentTypes.add(component.tagSet) # Now we have to guess is it SEQUENCE/SET or SEQUENCE OF/SET OF # The heuristics is: # * 1+ components of different types -> likely SEQUENCE/SET # * otherwise -> likely SEQUENCE OF/SET OF if len(componentTypes) > 1: protoComponent = self.protoRecordComponent else: protoComponent = self.protoSequenceComponent asn1Object = protoComponent.clone( # construct tagSet from base tag from prototype ASN.1 object # and additional tags recovered from the substrate tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags) ) if LOG: LOG('guessed %r container type (pass `asn1Spec` to guide the ' 'decoder)' % asn1Object) for idx, component in enumerate(components): asn1Object.setComponentByPosition( idx, component, verifyConstraints=False, matchTags=False, matchConstraints=False ) return asn1Object, substrate def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, 
**options): if tagSet[0].tagFormat != tag.tagFormatConstructed: raise error.PyAsn1Error('Constructed tag format expected') head, tail = substrate[:length], substrate[length:] if substrateFun is not None: if asn1Spec is not None: asn1Object = asn1Spec.clone() elif self.protoComponent is not None: asn1Object = self.protoComponent.clone(tagSet=tagSet) else: asn1Object = self.protoRecordComponent, self.protoSequenceComponent return substrateFun(asn1Object, substrate, length) if asn1Spec is None: asn1Object, trailing = self._decodeComponents( head, tagSet=tagSet, decodeFun=decodeFun, **options ) if trailing: if LOG: LOG('Unused trailing %d octets encountered: %s' % ( len(trailing), debug.hexdump(trailing))) return asn1Object, tail asn1Object = asn1Spec.clone() asn1Object.clear() if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId): namedTypes = asn1Spec.componentType isSetType = asn1Spec.typeId == univ.Set.typeId isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault if LOG: LOG('decoding %sdeterministic %s type %r chosen by type ID' % ( not isDeterministic and 'non-' or '', isSetType and 'SET' or '', asn1Spec)) seenIndices = set() idx = 0 while head: if not namedTypes: componentType = None elif isSetType: componentType = namedTypes.tagMapUnique else: try: if isDeterministic: componentType = namedTypes[idx].asn1Object elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: componentType = namedTypes.getTagMapNearPosition(idx) else: componentType = namedTypes[idx].asn1Object except IndexError: raise error.PyAsn1Error( 'Excessive components decoded at %r' % (asn1Spec,) ) component, head = decodeFun(head, componentType, **options) if not isDeterministic and namedTypes: if isSetType: idx = namedTypes.getPositionByType(component.effectiveTagSet) elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx) asn1Object.setComponentByPosition( idx, component, 
verifyConstraints=False, matchTags=False, matchConstraints=False ) seenIndices.add(idx) idx += 1 if LOG: LOG('seen component indices %s' % seenIndices) if namedTypes: if not namedTypes.requiredComponents.issubset(seenIndices): raise error.PyAsn1Error( 'ASN.1 object %s has uninitialized ' 'components' % asn1Object.__class__.__name__) if namedTypes.hasOpenTypes: openTypes = options.get('openTypes', {}) if LOG: LOG('using open types map: %r' % openTypes) if openTypes or options.get('decodeOpenTypes', False): for idx, namedType in enumerate(namedTypes.namedTypes): if not namedType.openType: continue if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue: continue governingValue = asn1Object.getComponentByName( namedType.openType.name ) try: openType = openTypes[governingValue] except KeyError: try: openType = namedType.openType[governingValue] except KeyError: if LOG: LOG('failed to resolve open type by governing ' 'value %r' % (governingValue,)) continue if LOG: LOG('resolved open type %r by governing ' 'value %r' % (openType, governingValue)) containerValue = asn1Object.getComponentByPosition(idx) if containerValue.typeId in ( univ.SetOf.typeId, univ.SequenceOf.typeId): for pos, containerElement in enumerate( containerValue): component, rest = decodeFun( containerValue[pos].asOctets(), asn1Spec=openType, **options ) containerValue[pos] = component else: component, rest = decodeFun( asn1Object.getComponentByPosition(idx).asOctets(), asn1Spec=openType, **options ) asn1Object.setComponentByPosition(idx, component) else: asn1Object.verifySizeSpec() else: asn1Object = asn1Spec.clone() asn1Object.clear() componentType = asn1Spec.componentType if LOG: LOG('decoding type %r chosen by given `asn1Spec`' % componentType) idx = 0 while head: component, head = decodeFun(head, componentType, **options) asn1Object.setComponentByPosition( idx, component, verifyConstraints=False, matchTags=False, matchConstraints=False ) idx += 1 return asn1Object, tail def 
indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if tagSet[0].tagFormat != tag.tagFormatConstructed: raise error.PyAsn1Error('Constructed tag format expected') if substrateFun is not None: if asn1Spec is not None: asn1Object = asn1Spec.clone() elif self.protoComponent is not None: asn1Object = self.protoComponent.clone(tagSet=tagSet) else: asn1Object = self.protoRecordComponent, self.protoSequenceComponent return substrateFun(asn1Object, substrate, length) if asn1Spec is None: return self._decodeComponents( substrate, tagSet=tagSet, decodeFun=decodeFun, **dict(options, allowEoo=True) ) asn1Object = asn1Spec.clone() asn1Object.clear() if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId): namedTypes = asn1Object.componentType isSetType = asn1Object.typeId == univ.Set.typeId isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault if LOG: LOG('decoding %sdeterministic %s type %r chosen by type ID' % ( not isDeterministic and 'non-' or '', isSetType and 'SET' or '', asn1Spec)) seenIndices = set() idx = 0 while substrate: if len(namedTypes) <= idx: asn1Spec = None elif isSetType: asn1Spec = namedTypes.tagMapUnique else: try: if isDeterministic: asn1Spec = namedTypes[idx].asn1Object elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: asn1Spec = namedTypes.getTagMapNearPosition(idx) else: asn1Spec = namedTypes[idx].asn1Object except IndexError: raise error.PyAsn1Error( 'Excessive components decoded at %r' % (asn1Object,) ) component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True, **options) if component is eoo.endOfOctets: break if not isDeterministic and namedTypes: if isSetType: idx = namedTypes.getPositionByType(component.effectiveTagSet) elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted: idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx) asn1Object.setComponentByPosition( idx, component, 
verifyConstraints=False, matchTags=False, matchConstraints=False ) seenIndices.add(idx) idx += 1 else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' ) if LOG: LOG('seen component indices %s' % seenIndices) if namedTypes: if not namedTypes.requiredComponents.issubset(seenIndices): raise error.PyAsn1Error('ASN.1 object %s has uninitialized components' % asn1Object.__class__.__name__) if namedTypes.hasOpenTypes: openTypes = options.get('openTypes', {}) if LOG: LOG('using open types map: %r' % openTypes) if openTypes or options.get('decodeOpenTypes', False): for idx, namedType in enumerate(namedTypes.namedTypes): if not namedType.openType: continue if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue: continue governingValue = asn1Object.getComponentByName( namedType.openType.name ) try: openType = openTypes[governingValue] except KeyError: try: openType = namedType.openType[governingValue] except KeyError: if LOG: LOG('failed to resolve open type by governing ' 'value %r' % (governingValue,)) continue if LOG: LOG('resolved open type %r by governing ' 'value %r' % (openType, governingValue)) containerValue = asn1Object.getComponentByPosition(idx) if containerValue.typeId in ( univ.SetOf.typeId, univ.SequenceOf.typeId): for pos, containerElement in enumerate( containerValue): component, rest = decodeFun( containerValue[pos].asOctets(), asn1Spec=openType, **dict(options, allowEoo=True) ) containerValue[pos] = component else: component, rest = decodeFun( asn1Object.getComponentByPosition(idx).asOctets(), asn1Spec=openType, **dict(options, allowEoo=True) ) if component is not eoo.endOfOctets: asn1Object.setComponentByPosition(idx, component) else: asn1Object.verifySizeSpec() else: asn1Object = asn1Spec.clone() asn1Object.clear() componentType = asn1Spec.componentType if LOG: LOG('decoding type %r chosen by given `asn1Spec`' % componentType) idx = 0 while substrate: component, substrate = decodeFun(substrate, 
componentType, allowEoo=True, **options) if component is eoo.endOfOctets: break asn1Object.setComponentByPosition( idx, component, verifyConstraints=False, matchTags=False, matchConstraints=False ) idx += 1 else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' ) return asn1Object, substrate class SequenceOrSequenceOfDecoder(UniversalConstructedTypeDecoder): protoRecordComponent = univ.Sequence() protoSequenceComponent = univ.SequenceOf() class SequenceDecoder(SequenceOrSequenceOfDecoder): protoComponent = univ.Sequence() class SequenceOfDecoder(SequenceOrSequenceOfDecoder): protoComponent = univ.SequenceOf() class SetOrSetOfDecoder(UniversalConstructedTypeDecoder): protoRecordComponent = univ.Set() protoSequenceComponent = univ.SetOf() class SetDecoder(SetOrSetOfDecoder): protoComponent = univ.Set() class SetOfDecoder(SetOrSetOfDecoder): protoComponent = univ.SetOf() class ChoiceDecoder(AbstractConstructedDecoder): protoComponent = univ.Choice() def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): head, tail = substrate[:length], substrate[length:] if asn1Spec is None: asn1Object = self.protoComponent.clone(tagSet=tagSet) else: asn1Object = asn1Spec.clone() if substrateFun: return substrateFun(asn1Object, substrate, length) if asn1Object.tagSet == tagSet: if LOG: LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,)) component, head = decodeFun( head, asn1Object.componentTagMap, **options ) else: if LOG: LOG('decoding %s as untagged CHOICE' % (tagSet,)) component, head = decodeFun( head, asn1Object.componentTagMap, tagSet, length, state, **options ) effectiveTagSet = component.effectiveTagSet if LOG: LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet)) asn1Object.setComponentByType( effectiveTagSet, component, verifyConstraints=False, matchTags=False, matchConstraints=False, innerFlag=False ) return asn1Object, tail def 
indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if asn1Spec is None: asn1Object = self.protoComponent.clone(tagSet=tagSet) else: asn1Object = asn1Spec.clone() if substrateFun: return substrateFun(asn1Object, substrate, length) if asn1Object.tagSet == tagSet: if LOG: LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,)) component, substrate = decodeFun( substrate, asn1Object.componentType.tagMapUnique, **options ) # eat up EOO marker eooMarker, substrate = decodeFun( substrate, allowEoo=True, **options ) if eooMarker is not eoo.endOfOctets: raise error.PyAsn1Error('No EOO seen before substrate ends') else: if LOG: LOG('decoding %s as untagged CHOICE' % (tagSet,)) component, substrate = decodeFun( substrate, asn1Object.componentType.tagMapUnique, tagSet, length, state, **options ) effectiveTagSet = component.effectiveTagSet if LOG: LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet)) asn1Object.setComponentByType( effectiveTagSet, component, verifyConstraints=False, matchTags=False, matchConstraints=False, innerFlag=False ) return asn1Object, substrate class AnyDecoder(AbstractSimpleDecoder): protoComponent = univ.Any() def valueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if asn1Spec is None: isUntagged = True elif asn1Spec.__class__ is tagmap.TagMap: isUntagged = tagSet not in asn1Spec.tagMap else: isUntagged = tagSet != asn1Spec.tagSet if isUntagged: fullSubstrate = options['fullSubstrate'] # untagged Any container, recover inner header substrate length += len(fullSubstrate) - len(substrate) substrate = fullSubstrate if LOG: LOG('decoding as untagged ANY, substrate %s' % debug.hexdump(substrate)) if substrateFun: return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length) head, tail = substrate[:length], substrate[length:] return 
self._createComponent(asn1Spec, tagSet, head, **options), tail def indefLenValueDecoder(self, substrate, asn1Spec, tagSet=None, length=None, state=None, decodeFun=None, substrateFun=None, **options): if asn1Spec is None: isTagged = False elif asn1Spec.__class__ is tagmap.TagMap: isTagged = tagSet in asn1Spec.tagMap else: isTagged = tagSet == asn1Spec.tagSet if isTagged: # tagged Any type -- consume header substrate header = null if LOG: LOG('decoding as tagged ANY') else: fullSubstrate = options['fullSubstrate'] # untagged Any, recover header substrate header = fullSubstrate[:-len(substrate)] if LOG: LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(header)) # Any components do not inherit initial tag asn1Spec = self.protoComponent if substrateFun and substrateFun is not self.substrateCollector: asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options) return substrateFun(asn1Object, header + substrate, length + len(header)) if LOG: LOG('assembling constructed serialization') # All inner fragments are of the same type, treat them as octet string substrateFun = self.substrateCollector while substrate: component, substrate = decodeFun(substrate, asn1Spec, substrateFun=substrateFun, allowEoo=True, **options) if component is eoo.endOfOctets: break header += component else: raise error.SubstrateUnderrunError( 'No EOO seen before substrate ends' ) if substrateFun: return header, substrate else: return self._createComponent(asn1Spec, tagSet, header, **options), substrate # character string types class UTF8StringDecoder(OctetStringDecoder): protoComponent = char.UTF8String() class NumericStringDecoder(OctetStringDecoder): protoComponent = char.NumericString() class PrintableStringDecoder(OctetStringDecoder): protoComponent = char.PrintableString() class TeletexStringDecoder(OctetStringDecoder): protoComponent = char.TeletexString() class VideotexStringDecoder(OctetStringDecoder): protoComponent = char.VideotexString() class 
IA5StringDecoder(OctetStringDecoder): protoComponent = char.IA5String() class GraphicStringDecoder(OctetStringDecoder): protoComponent = char.GraphicString() class VisibleStringDecoder(OctetStringDecoder): protoComponent = char.VisibleString() class GeneralStringDecoder(OctetStringDecoder): protoComponent = char.GeneralString() class UniversalStringDecoder(OctetStringDecoder): protoComponent = char.UniversalString() class BMPStringDecoder(OctetStringDecoder): protoComponent = char.BMPString() # "useful" types class ObjectDescriptorDecoder(OctetStringDecoder): protoComponent = useful.ObjectDescriptor() class GeneralizedTimeDecoder(OctetStringDecoder): protoComponent = useful.GeneralizedTime() class UTCTimeDecoder(OctetStringDecoder): protoComponent = useful.UTCTime() tagMap = { univ.Integer.tagSet: IntegerDecoder(), univ.Boolean.tagSet: BooleanDecoder(), univ.BitString.tagSet: BitStringDecoder(), univ.OctetString.tagSet: OctetStringDecoder(), univ.Null.tagSet: NullDecoder(), univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(), univ.Enumerated.tagSet: IntegerDecoder(), univ.Real.tagSet: RealDecoder(), univ.Sequence.tagSet: SequenceOrSequenceOfDecoder(), # conflicts with SequenceOf univ.Set.tagSet: SetOrSetOfDecoder(), # conflicts with SetOf univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any # character string types char.UTF8String.tagSet: UTF8StringDecoder(), char.NumericString.tagSet: NumericStringDecoder(), char.PrintableString.tagSet: PrintableStringDecoder(), char.TeletexString.tagSet: TeletexStringDecoder(), char.VideotexString.tagSet: VideotexStringDecoder(), char.IA5String.tagSet: IA5StringDecoder(), char.GraphicString.tagSet: GraphicStringDecoder(), char.VisibleString.tagSet: VisibleStringDecoder(), char.GeneralString.tagSet: GeneralStringDecoder(), char.UniversalString.tagSet: UniversalStringDecoder(), char.BMPString.tagSet: BMPStringDecoder(), # useful types useful.ObjectDescriptor.tagSet: ObjectDescriptorDecoder(), useful.GeneralizedTime.tagSet: 
GeneralizedTimeDecoder(), useful.UTCTime.tagSet: UTCTimeDecoder() } # Type-to-codec map for ambiguous ASN.1 types typeMap = { univ.Set.typeId: SetDecoder(), univ.SetOf.typeId: SetOfDecoder(), univ.Sequence.typeId: SequenceDecoder(), univ.SequenceOf.typeId: SequenceOfDecoder(), univ.Choice.typeId: ChoiceDecoder(), univ.Any.typeId: AnyDecoder() } # Put in non-ambiguous types for faster codec lookup for typeDecoder in tagMap.values(): if typeDecoder.protoComponent is not None: typeId = typeDecoder.protoComponent.__class__.typeId if typeId is not None and typeId not in typeMap: typeMap[typeId] = typeDecoder (stDecodeTag, stDecodeLength, stGetValueDecoder, stGetValueDecoderByAsn1Spec, stGetValueDecoderByTag, stTryAsExplicitTag, stDecodeValue, stDumpRawValue, stErrorCondition, stStop) = [x for x in range(10)] class Decoder(object): defaultErrorState = stErrorCondition #defaultErrorState = stDumpRawValue defaultRawDecoder = AnyDecoder() supportIndefLength = True # noinspection PyDefaultArgument def __init__(self, tagMap, typeMap={}): self.__tagMap = tagMap self.__typeMap = typeMap # Tag & TagSet objects caches self.__tagCache = {} self.__tagSetCache = {} self.__eooSentinel = ints2octs((0, 0)) def __call__(self, substrate, asn1Spec=None, tagSet=None, length=None, state=stDecodeTag, decodeFun=None, substrateFun=None, **options): if LOG: LOG('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate))) allowEoo = options.pop('allowEoo', False) # Look for end-of-octets sentinel if allowEoo and self.supportIndefLength: if substrate[:2] == self.__eooSentinel: if LOG: LOG('end-of-octets sentinel found') return eoo.endOfOctets, substrate[2:] value = noValue tagMap = self.__tagMap typeMap = self.__typeMap tagCache = self.__tagCache tagSetCache = self.__tagSetCache fullSubstrate = substrate while state is not stStop: if state is stDecodeTag: if not substrate: raise 
error.SubstrateUnderrunError( 'Short octet stream on tag decoding' ) # Decode tag isShortTag = True firstOctet = substrate[0] substrate = substrate[1:] try: lastTag = tagCache[firstOctet] except KeyError: integerTag = oct2int(firstOctet) tagClass = integerTag & 0xC0 tagFormat = integerTag & 0x20 tagId = integerTag & 0x1F if tagId == 0x1F: isShortTag = False lengthOctetIdx = 0 tagId = 0 try: while True: integerTag = oct2int(substrate[lengthOctetIdx]) lengthOctetIdx += 1 tagId <<= 7 tagId |= (integerTag & 0x7F) if not integerTag & 0x80: break substrate = substrate[lengthOctetIdx:] except IndexError: raise error.SubstrateUnderrunError( 'Short octet stream on long tag decoding' ) lastTag = tag.Tag( tagClass=tagClass, tagFormat=tagFormat, tagId=tagId ) if isShortTag: # cache short tags tagCache[firstOctet] = lastTag if tagSet is None: if isShortTag: try: tagSet = tagSetCache[firstOctet] except KeyError: # base tag not recovered tagSet = tag.TagSet((), lastTag) tagSetCache[firstOctet] = tagSet else: tagSet = tag.TagSet((), lastTag) else: tagSet = lastTag + tagSet state = stDecodeLength if LOG: LOG('tag decoded into %s, decoding length' % tagSet) if state is stDecodeLength: # Decode length if not substrate: raise error.SubstrateUnderrunError( 'Short octet stream on length decoding' ) firstOctet = oct2int(substrate[0]) if firstOctet < 128: size = 1 length = firstOctet elif firstOctet > 128: size = firstOctet & 0x7F # encoded in size bytes encodedLength = octs2ints(substrate[1:size + 1]) # missing check on maximum size, which shouldn't be a # problem, we can handle more than is possible if len(encodedLength) != size: raise error.SubstrateUnderrunError( '%s<%s at %s' % (size, len(encodedLength), tagSet) ) length = 0 for lengthOctet in encodedLength: length <<= 8 length |= lengthOctet size += 1 else: size = 1 length = -1 substrate = substrate[size:] if length == -1: if not self.supportIndefLength: raise error.PyAsn1Error('Indefinite length encoding not supported by this 
codec') else: if len(substrate) < length: raise error.SubstrateUnderrunError('%d-octet short' % (length - len(substrate))) state = stGetValueDecoder if LOG: LOG('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length]))) if state is stGetValueDecoder: if asn1Spec is None: state = stGetValueDecoderByTag else: state = stGetValueDecoderByAsn1Spec # # There're two ways of creating subtypes in ASN.1 what influences # decoder operation. These methods are: # 1) Either base types used in or no IMPLICIT tagging has been # applied on subtyping. # 2) Subtype syntax drops base type information (by means of # IMPLICIT tagging. # The first case allows for complete tag recovery from substrate # while the second one requires original ASN.1 type spec for # decoding. # # In either case a set of tags (tagSet) is coming from substrate # in an incremental, tag-by-tag fashion (this is the case of # EXPLICIT tag which is most basic). Outermost tag comes first # from the wire. # if state is stGetValueDecoderByTag: try: concreteDecoder = tagMap[tagSet] except KeyError: concreteDecoder = None if concreteDecoder: state = stDecodeValue else: try: concreteDecoder = tagMap[tagSet[:1]] except KeyError: concreteDecoder = None if concreteDecoder: state = stDecodeValue else: state = stTryAsExplicitTag if LOG: LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag')) debug.scope.push(concreteDecoder is None and '?' 
or concreteDecoder.protoComponent.__class__.__name__) if state is stGetValueDecoderByAsn1Spec: if asn1Spec.__class__ is tagmap.TagMap: try: chosenSpec = asn1Spec[tagSet] except KeyError: chosenSpec = None if LOG: LOG('candidate ASN.1 spec is a map of:') for firstOctet, v in asn1Spec.presentTypes.items(): LOG(' %s -> %s' % (firstOctet, v.__class__.__name__)) if asn1Spec.skipTypes: LOG('but neither of: ') for firstOctet, v in asn1Spec.skipTypes.items(): LOG(' %s -> %s' % (firstOctet, v.__class__.__name__)) LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '<none>' or chosenSpec.prettyPrintType(), tagSet)) elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap: chosenSpec = asn1Spec if LOG: LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__) else: chosenSpec = None if chosenSpec is not None: try: # ambiguous type or just faster codec lookup concreteDecoder = typeMap[chosenSpec.typeId] if LOG: LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,)) except KeyError: # use base type for codec lookup to recover untagged types baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag) try: # base type or tagged subtype concreteDecoder = tagMap[baseTagSet] if LOG: LOG('value decoder chosen by base %s' % (baseTagSet,)) except KeyError: concreteDecoder = None if concreteDecoder: asn1Spec = chosenSpec state = stDecodeValue else: state = stTryAsExplicitTag else: concreteDecoder = None state = stTryAsExplicitTag if LOG: LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag')) debug.scope.push(chosenSpec is None and '?' 
or chosenSpec.__class__.__name__) if state is stDecodeValue: if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this substrateFun = lambda a, b, c: (a, b[:c]) options.update(fullSubstrate=fullSubstrate) if length == -1: # indef length value, substrate = concreteDecoder.indefLenValueDecoder( substrate, asn1Spec, tagSet, length, stGetValueDecoder, self, substrateFun, **options ) else: value, substrate = concreteDecoder.valueDecoder( substrate, asn1Spec, tagSet, length, stGetValueDecoder, self, substrateFun, **options ) if LOG: LOG('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, isinstance(value, base.Asn1Item) and value.prettyPrint() or value, substrate and debug.hexdump(substrate) or '<none>')) state = stStop break if state is stTryAsExplicitTag: if (tagSet and tagSet[0].tagFormat == tag.tagFormatConstructed and tagSet[0].tagClass != tag.tagClassUniversal): # Assume explicit tagging concreteDecoder = explicitTagDecoder state = stDecodeValue else: concreteDecoder = None state = self.defaultErrorState if LOG: LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as failure')) if state is stDumpRawValue: concreteDecoder = self.defaultRawDecoder if LOG: LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__) state = stDecodeValue if state is stErrorCondition: raise error.PyAsn1Error( '%s not in asn1Spec: %r' % (tagSet, asn1Spec) ) if LOG: debug.scope.pop() LOG('decoder left scope %s, call completed' % debug.scope) return value, substrate #: Turns BER octet stream into an ASN.1 object. #: #: Takes BER octet-stream and decode it into an ASN.1 object #: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which #: may be a scalar or an arbitrary nested structure. 
#: #: Parameters #: ---------- #: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2) #: BER octet-stream #: #: Keyword Args #: ------------ #: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative #: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure #: being decoded, *asn1Spec* may or may not be required. Most common reason for #: it to require is that ASN.1 structure is encoded in *IMPLICIT* tagging mode. #: #: Returns #: ------- #: : :py:class:`tuple` #: A tuple of pyasn1 object recovered from BER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative) #: and the unprocessed trailing portion of the *substrate* (may be empty) #: #: Raises #: ------ #: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError #: On decoding errors #: #: Examples #: -------- #: Decode BER serialisation without ASN.1 schema #: #: .. code-block:: pycon #: #: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03') #: >>> str(s) #: SequenceOf: #: 1 2 3 #: #: Decode BER serialisation with ASN.1 schema #: #: .. code-block:: pycon #: #: >>> seq = SequenceOf(componentType=Integer()) #: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq) #: >>> str(s) #: SequenceOf: #: 1 2 3 #: decode = Decoder(tagMap, typeMap) # XXX # non-recursive decoding; return position rather than substrate
kawamon/hue
desktop/core/ext-py/pyasn1-0.4.6/pyasn1/codec/ber/decoder.py
Python
apache-2.0
58,050
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN"> <html> <head> <title>ILoggerProvider Members Public Methods</title> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta name="generator" content="Doc-O-Matic" /> <meta http-equiv="Content-Style-Type" content="text/css" /> <link rel="STYLESHEET" href="default.css" type="text/css" /> <script type="text/javascript" src="scripts.js"></script> </head> <body class="Element700" onload="onBodyLoadEx('frames.html', 'topic', '_!!MEMBEROVERVIEW_Lightstreamer_DotNet_Client_Log_ILoggerProvider_14_Public_Methods.html');" onmousedown="onBodyMouseDown();"> <!-- Begin Popups --> <!-- End Popups --> <!-- Begin Page Header --> <div class="Element710" id="areafixed"> <div class="Element92"> <table width="100%" cellspacing="0" cellpadding="0"> <tr><td width="33%"> <div class="Element1"> Lightstreamer Windows Phone Client 1.0</div> </td><td width="34%"> <div class="Element2"> <a href="contents.html" target="tocidx"><img src="btn_globals_contents_black.gif" border="0" alt="Contents" title="Contents" onmouseover="switchImage(this, 'btn_globals_contents_black_hover.gif');" onmouseout="switchImage(this, 'btn_globals_contents_black.gif');"></a><a href="idx.html" target="tocidx"><img src="btn_globals_index_black.gif" border="0" alt="Index" title="Index" onmouseover="switchImage(this, 'btn_globals_index_black_hover.gif');" onmouseout="switchImage(this, 'btn_globals_index_black.gif');"></a><a href="index.html" target="topic"><img src="btn_globals_home_black.gif" border="0" alt="Home" title="Home" onmouseover="switchImage(this, 'btn_globals_home_black_hover.gif');" onmouseout="switchImage(this, 'btn_globals_home_black.gif');"></a></div> </td><td width="33%"> </td></tr></table><div class="Element27"> ILoggerProvider Members Public Methods</div> <div class="Element28"> <a href="!!MEMBEROVERVIEW_Lightstreamer_DotNet_Client_Log_ILoggerProvider.html" target="topic">ILoggerProvider Members</a>&nbsp; <a 
href="_!!MEMBEROVERVIEW_Lightstreamer_DotNet_Client_Log_ILoggerProvider_14_Public_Methods_Public_Methods.html" target="topic">Legend</a></div> </div> </div> <!-- End Page Header --> <!-- Begin Client Area --> <div class="Element720" id="areascroll"> <div class="Element721"> <!-- Begin Page Content --> <div class="Element58"> <div class="Element14"> Public Methods</div> <div class="Element11"> <div class="Element10"> <div class="Element212"> <div class="TableDiv"> <table cellspacing="0" class="Table0"> <tr> <td class="Element200" valign="top" width="10%" style="white-space: nowrap;"> <div class="Element201"> &nbsp;</div></td><td class="Element200" valign="top" width="30%"> <div class="Element201"> Name&nbsp;</div></td><td class="Element204" valign="top" width="60%"> <div class="Element205"> Description&nbsp;</div></td></tr><tr> <td class="Element202" valign="top" width="10%" style="white-space: nowrap;"> <div class="Element203"> <img src="indicator_method.gif" border="0" alt="" title="">&nbsp;</div></td><td class="Element202" valign="top" width="30%"> <div class="Element203"> <a href="Lightstreamer_DotNet_Client_Log_ILoggerProvider_GetLogger@string.html" target="topic">GetLogger</a>&nbsp;</div></td><td class="Element206" valign="top" width="60%"> <div class="Element207"> Request for an <a href="Lightstreamer_DotNet_Client_Log_ILogger.html" target="topic">ILogger</a> instance that will be used for logging occuring on the given category. 
It is suggested, but not mandatory, that subsequent calls to this method related to the same category return the same <a href="Lightstreamer_DotNet_Client_Log_ILogger.html" target="topic">ILogger</a> instance.<br>&nbsp;</div></td></tr></table></div></div> </div> </div> </div> <!-- End Page Content --> <!-- Begin Page Footer --> <div class="Element93"> <table width="100%" cellspacing="0" cellpadding="0"> <tr><td width="100%"> <div class="Element3"> <a href="http://www.lightstreamer.com/" target="_blank">Copyright (C) 2004-2011 Weswit s.r.l.</a></div> </td></tr><tr><td width="100%"> <div class="Element4"> <a href="contents.html" target="tocidx">Contents</a> | <a href="idx.html" target="tocidx">Index</a> | <a href="index.html" target="topic">Home</a></div> </td></tr></table></div> <!-- End Page Footer --> </div> </div> <!-- End Client Area --> </body></html>
cityindex-attic/CIAPI.CS
src/packages/Lightstreamer 4.2 Client/sdk_client_windows_phone/doc/API-reference/_!!MEMBEROVERVIEW_Lightstreamer_DotNet_Client_Log_ILoggerProvider_14_Public_Methods.html
HTML
apache-2.0
4,336
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.sysml.runtime.matrix.data; public abstract class SparseBlockFactory { public static SparseBlock createSparseBlock(int rlen) { return createSparseBlock(MatrixBlock.DEFAULT_SPARSEBLOCK, rlen); } public static SparseBlock createSparseBlock( SparseBlock.Type type, int rlen ) { switch( type ) { case MCSR: return new SparseBlockMCSR(rlen, -1); case CSR: return new SparseBlockCSR(rlen); case COO: return new SparseBlockCOO(rlen); default: throw new RuntimeException("Unexpected sparse block type: "+type.toString()); } } public static SparseBlock copySparseBlock( SparseBlock.Type type, SparseBlock sblock, boolean forceCopy ) { //sanity check for empty inputs if( sblock == null ) return null; //check for existing target type if( !forceCopy && isSparseBlockType(sblock, type) ){ return sblock; } //create target sparse block switch( type ) { case MCSR: return new SparseBlockMCSR(sblock); case CSR: return new SparseBlockCSR(sblock); case COO: return new SparseBlockCOO(sblock); default: throw new RuntimeException("Unexpected sparse block type: "+type.toString()); } } public static boolean isSparseBlockType(SparseBlock sblock, SparseBlock.Type type) { return (getSparseBlockType(sblock) == 
type); } public static SparseBlock.Type getSparseBlockType(SparseBlock sblock) { return (sblock instanceof SparseBlockMCSR) ? SparseBlock.Type.MCSR : (sblock instanceof SparseBlockCSR) ? SparseBlock.Type.CSR : (sblock instanceof SparseBlockCOO) ? SparseBlock.Type.COO : null; } public static long estimateSizeSparseInMemory(SparseBlock.Type type, long nrows, long ncols, double sparsity) { switch( type ) { case MCSR: return SparseBlockMCSR.estimateMemory(nrows, ncols, sparsity); case CSR: return SparseBlockCSR.estimateMemory(nrows, ncols, sparsity); case COO: return SparseBlockCOO.estimateMemory(nrows, ncols, sparsity); default: throw new RuntimeException("Unexpected sparse block type: "+type.toString()); } } }
sandeep-n/incubator-systemml
src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockFactory.java
Java
apache-2.0
2,859
/* $NetBSD: bcm2835_spireg.h,v 1.1 2013/01/05 20:31:23 jakllsch Exp $ */ /* * Copyright (c) 2012 Jonathan A. Kollasch * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _BROADCOM_BCM2835_SPIREG_H_ #define _BROADCOM_BCM2835_SPIREG_H_ #include <sys/cdefs.h> #define __BIT32(x) ((uint32_t)__BIT(x)) #define __BITS32(x, y) ((uint32_t)__BITS((x), (y))) #define SPI_CS 0x000 #define SPI_CS_CS __BITS32(1,0) #define SPI_CS_CPHA __BIT32(2) #define SPI_CS_CPOL __BIT32(3) #define SPI_CS_CLEAR_TX __BIT32(4) #define SPI_CS_CLEAR_RX __BIT32(5) #define SPI_CS_CSPOL __BIT32(6) #define SPI_CS_TA __BIT32(7) #define SPI_CS_DMAEN __BIT32(8) #define SPI_CS_INTD __BIT32(9) #define SPI_CS_INTR __BIT32(10) #define SPI_CS_ADCS __BIT32(11) #define SPI_CS_REN __BIT32(12) #define SPI_CS_LEN __BIT32(13) #define SPI_CS_LMONO __BIT32(14) #define SPI_CS_TE_EN __BIT32(15) #define SPI_CS_DONE __BIT32(16) #define SPI_CS_RXD __BIT32(17) #define SPI_CS_TXD __BIT32(18) #define SPI_CS_RXR __BIT32(19) #define SPI_CS_RXF __BIT32(20) #define SPI_CS_CSPOL0 __BIT32(21) #define SPI_CS_CSPOL1 __BIT32(22) #define SPI_CS_CSPOL2 __BIT32(23) #define SPI_CS_DMA_LEN __BIT32(24) #define SPI_CS_LEN_LONG __BIT32(25) #define SPI_FIFO 0x004 #define SPI_CLK 0x008 #define SPI_CLK_CDIV __BITS32(15,0) #define SPI_DLEN 0x00c #define SPI_LTOH 0x010 #define SPI_DC 0x014 #endif /* _BROADCOM_BCM2835_SPIREG_H_ */
execunix/vinos
sys/arch/arm/broadcom/bcm2835_spireg.h
C
apache-2.0
2,699
## Vision API Web Demo

* Navigate to the [Cloud Vision API demo](https://cloud.google.com/vision/#vision-api-demo)
* Find an image on the web, such as this one showing [Purdue University](http://www.purdue.edu/purdue/images/audience/about-banner.jpg)
* Upload the image to the web demo
* Analyze the results that are returned (object identification, web search, etc.)
GoogleCloudPlatform/training-data-analyst
courses/data-to-insights/demos/vision-api.md
Markdown
apache-2.0
358
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid as fluid
import os


def data_generator():
    """Yield a small fixed sequence of samples, one per trainer slot."""
    data = [0, 1, 2, 3]
    for val in data:
        yield val


class TestDistributedReader(unittest.TestCase):
    def test_distributed_reader(self):
        """distributed_batch_reader must hand trainer i the i-th sample.

        With 4 trainers and trainer id 1, the first value produced by the
        sharded reader must be the element at index 1 of the generator.
        """
        trainer_num = 4
        os.environ['PADDLE_TRAINER_ID'] = str(1)
        os.environ['PADDLE_TRAINERS_NUM'] = str(trainer_num)
        try:
            reader = fluid.contrib.reader.distributed_batch_reader(
                data_generator)
            data = next(reader())
            self.assertEqual(data, 1)
        finally:
            # The original cleanup used os.unsetenv(), which does not update
            # os.environ (the values above were set through os.environ) and is
            # not available on every platform.  Remove the keys directly, and
            # do it in a finally block so a failed assertion cannot leak the
            # variables into other tests in the same process.
            os.environ.pop('PADDLE_TRAINER_ID', None)
            os.environ.pop('PADDLE_TRAINERS_NUM', None)


if __name__ == '__main__':
    unittest.main()
tensor-tang/Paddle
python/paddle/fluid/contrib/tests/test_distributed_reader.py
Python
apache-2.0
1,305
@echo off rem rem rem Licensed to the Apache Software Foundation (ASF) under one or more rem contributor license agreements. See the NOTICE file distributed with rem this work for additional information regarding copyright ownership. rem The ASF licenses this file to You under the Apache License, Version 2.0 rem (the "License"); you may not use this file except in compliance with rem the License. You may obtain a copy of the License at rem rem http://www.apache.org/licenses/LICENSE-2.0 rem rem Unless required by applicable law or agreed to in writing, software rem distributed under the License is distributed on an "AS IS" BASIS, rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. rem See the License for the specific language governing permissions and rem limitations under the License. rem if not "%ECHO%" == "" echo %ECHO% setlocal set DIRNAME=%~dp0% set PROGNAME=%~nx0% set ARGS=%* rem Sourcing environment settings for karaf similar to tomcats setenv SET KARAF_SCRIPT="shell.bat" if exist "%DIRNAME%setenv.bat" ( call "%DIRNAME%setenv.bat" ) rem Check console window title. Set to Karaf by default if not "%KARAF_TITLE%" == "" ( title %KARAF_TITLE% ) else ( title Karaf ) rem Check/Set up some easily accessible MIN/MAX params for JVM mem usage if "%JAVA_MIN_MEM%" == "" ( set JAVA_MIN_MEM=128M ) if "%JAVA_MAX_MEM%" == "" ( set JAVA_MAX_MEM=512M ) goto BEGIN :warn echo %PROGNAME%: %* goto :EOF :BEGIN rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # if not "%KARAF_HOME%" == "" ( call :warn Ignoring predefined value for KARAF_HOME ) set KARAF_HOME=%DIRNAME%.. 
if not exist "%KARAF_HOME%" ( call :warn KARAF_HOME is not valid: "%KARAF_HOME%" goto END ) if not "%KARAF_BASE%" == "" ( if not exist "%KARAF_BASE%" ( call :warn KARAF_BASE is not valid: "%KARAF_BASE%" goto END ) ) if "%KARAF_BASE%" == "" ( set "KARAF_BASE=%KARAF_HOME%" ) if not "%KARAF_DATA%" == "" ( if not exist "%KARAF_DATA%" ( call :warn KARAF_DATA is not valid: "%KARAF_DATA%" goto END ) ) if "%KARAF_DATA%" == "" ( set "KARAF_DATA=%KARAF_BASE%\data" ) if not "%KARAF_ETC%" == "" ( if not exist "%KARAF_ETC%" ( call :warn KARAF_ETC is not valid: "%KARAF_ETC%" goto END ) ) if "%KARAF_ETC%" == "" ( set "KARAF_ETC=%KARAF_BASE%\etc" ) set DEFAULT_JAVA_OPTS= set DEFAULT_JAVA_DEBUG_OPTS=-Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005 rem Support for loading native libraries set PATH=%PATH%;%KARAF_BASE%\lib;%KARAF_HOME%\lib rem Setup the Java Virtual Machine if not "%JAVA%" == "" goto :Check_JAVA_END set JAVA=java if "%JAVA_HOME%" == "" call :warn JAVA_HOME not set; results may vary if not "%JAVA_HOME%" == "" set JAVA=%JAVA_HOME%\bin\java if not exist "%JAVA_HOME%" ( call :warn JAVA_HOME is not valid: "%JAVA_HOME%" goto END ) :Check_JAVA_END if "%JAVA_OPTS%" == "" set JAVA_OPTS=%DEFAULT_JAVA_OPTS% if "%EXTRA_JAVA_OPTS%" == "" goto :KARAF_EXTRA_JAVA_OPTS_END set JAVA_OPTS="%JAVA_OPTS% %EXTRA_JAVA_OPTS%" :KARAF_EXTRA_JAVA_OPTS_END if "%KARAF_DEBUG%" == "" goto :KARAF_DEBUG_END rem Use the defaults if JAVA_DEBUG_OPTS was not set if "%JAVA_DEBUG_OPTS%" == "" set JAVA_DEBUG_OPTS=%DEFAULT_JAVA_DEBUG_OPTS% set JAVA_OPTS="%JAVA_DEBUG_OPTS% %JAVA_OPTS%" call :warn Enabling Java debug options: %JAVA_DEBUG_OPTS% :KARAF_DEBUG_END set CLASSPATH=%KARAF_HOME%\system\org\apache\karaf\shell\org.apache.karaf.shell.console\2.4.0\org.apache.karaf.shell.console-2.4.0.jar set CLASSPATH=%CLASSPATH%;%KARAF_HOME%\system\org\ops4j\pax\logging\pax-logging-api\1.7.4\pax-logging-api-1.7.4.jar :EXECUTE if "%SHIFT%" == "true" SET ARGS=%2 %3 %4 %5 
%6 %7 %8 if not "%SHIFT%" == "true" SET ARGS=%1 %2 %3 %4 %5 %6 %7 %8 rem Execute the Java Virtual Machine "%JAVA%" %JAVA_OPTS% %OPTS% -classpath "%CLASSPATH%" -Dkaraf.instances="%KARAF_HOME%\instances" -Dkaraf.home="%KARAF_HOME%" -Dkaraf.base="%KARAF_BASE%" -Dkaraf.data="%KARAF_DATA%" -Dkaraf.etc="%KARAF_ETC%" -Djava.io.tmpdir="%KARAF_DATA%\tmp" -Djava.util.logging.config.file="%KARAF_ETC%\java.util.logging.properties" %KARAF_OPTS% org.apache.karaf.shell.console.Main --classpath="%KARAF_HOME%\system" %ARGS% rem # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # :END endlocal
davsclaus/jube
images/fabric8/karaf-2.4/src/main/overlay/bin/shell.bat
Batchfile
apache-2.0
4,493
package org.carlspring.strongbox.providers.io;

import org.carlspring.strongbox.storage.repository.Repository;

/**
 * Factory abstraction for creating {@link LayoutFileSystem} instances bound
 * to a specific {@link Repository}.
 * <p>
 * Declared as a {@link FunctionalInterface} so implementations can be
 * supplied as lambdas or method references.
 */
@FunctionalInterface
public interface LayoutFileSystemFactory
{

    /**
     * Creates a {@link LayoutFileSystem} for the given repository.
     *
     * @param repository the repository the file system should operate on
     * @return a file system instance for the repository
     */
    LayoutFileSystem create(Repository repository);

}
sbespalov/strongbox
strongbox-storage/strongbox-storage-api/src/main/java/org/carlspring/strongbox/providers/io/LayoutFileSystemFactory.java
Java
apache-2.0
236
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>XML::setName() \ Language (API) \ Processing 2+</title> <link rel="icon" href="/favicon.ico" type="image/x-icon" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta name="Author" content="Processing Foundation" /> <meta name="Publisher" content="Processing Foundation" /> <meta name="Keywords" content="Processing, Sketchbook, Programming, Coding, Code, Art, Design" /> <meta name="Description" content="Processing is a flexible software sketchbook and a language for learning how to code within the context of the visual arts. Since 2001, Processing has promoted software literacy within the visual arts and visual literacy within technology." /> <meta name="Copyright" content="All contents copyright the Processing Foundation, Ben Fry, Casey Reas, and the MIT Media Laboratory" /> <script src="javascript/modernizr-2.6.2.touch.js" type="text/javascript"></script> <link href="css/style.css" rel="stylesheet" type="text/css" /> </head> <body id="Langauge-en" onload="" > <!-- ==================================== PAGE ============================ --> <div id="container"> <!-- ==================================== HEADER ============================ --> <div id="ribbon"> <ul class="left"> <li class="highlight"><a href="http://processing.org/">Processing</a></li> <li><a href="http://p5js.org/">p5.js</a></li> <li><a href="http://py.processing.org/">Processing.py</a></li> </ul> <ul class="right"> <li><a href="https://processingfoundation.org/">Processing Foundation</a></li> </ul> <div class="clear"></div> </div> <div id="header"> <a href="/" title="Back to the Processing cover."><div class="processing-logo no-cover" alt="Processing cover"></div></a> <form name="search" method="get" action="//www.google.com/search"> <p><input type="hidden" name="as_sitesearch" 
value="processing.org" /> <input type="text" name="as_q" value="" size="20" class="text" /> <input type="submit" value=" " /></p> </form> </div> <a id="TOP" name="TOP"></a> <div id="navigation"> <div class="navBar" id="mainnav"> <a href="index.html" class='active'>Language</a><br> <a href="libraries/index.html" >Libraries</a><br> <a href="tools/index.html">Tools</a><br> <a href="environment/index.html">Environment</a><br> </div> <script> document.querySelectorAll(".processing-logo")[0].className = "processing-logo"; </script> </div> <!-- ==================================== CONTENT - Headers ============================ --> <div class="content"> <p class="ref-notice">This reference is for Processing 3.0+. If you have a previous version, use the reference included with your software in the Help menu. If you see any errors or have suggestions, <a href="https://github.com/processing/processing-docs/issues?state=open">please let us know</a>. If you prefer a more technical reference, visit the <a href="http://processing.github.io/processing-javadocs/core/">Processing Core Javadoc</a> and <a href="http://processing.github.io/processing-javadocs/libraries/">Libraries Javadoc</a>.</p> <table cellpadding="0" cellspacing="0" border="0" class="ref-item"> <tr class=""><th scope="row">Class</th><td><p><a href="XML.html">XML</a></p></td></tr> <tr class="name-row"> <th scope="row">Name</th> <td><h3>setName()</h3></td> </tr> <tr class=""> <tr class=""><th scope="row">Examples</th><td><div class="example"><pre > // The following short XML file called "mammals.xml" is parsed // in the code below. It must be in the project's "data" folder. 
// // &lt;?xml version=&quot;1.0&quot;?&gt; // &lt;mammals&gt; // &lt;animal id=&quot;0&quot; species=&quot;Capra hircus&quot;&gt;Goat&lt;/animal&gt; // &lt;animal id=&quot;1&quot; species=&quot;Panthera pardus&quot;&gt;Leopard&lt;/animal&gt; // &lt;animal id=&quot;2&quot; species=&quot;Equus zebra&quot;&gt;Zebra&lt;/animal&gt; // &lt;/mammals&gt; XML xml; void setup() { xml = loadXML("mammals.xml"); println(xml.getName()); xml.setName("fish"); println(xml.getName()); } // Sketch prints: // mammals // fish </pre></div> </td></tr> <tr class=""> <th scope="row">Description</th> <td> Sets the element's name, which is specified as a String. </td> </tr> <tr class=""><th scope="row">Syntax</th><td><pre><kbd></kbd>.setName(<kbd>newName</kbd>)</pre></td></tr> <tr class=""><th scope="row">Returns</th><td class="code">void</td></tr> </table> Updated on February 10, 2016 00:49:45am EST<br /><br /> <!-- Creative Commons License --> <div class="license"> <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border: none" src="http://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a> </div> <!-- <?xpacket begin='' id=''?> <x:xmpmeta xmlns:x='adobe:ns:meta/'> <rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'> <rdf:Description rdf:about='' xmlns:xapRights='http://ns.adobe.com/xap/1.0/rights/'> <xapRights:Marked>True</xapRights:Marked> </rdf:Description> <rdf:Description rdf:about='' xmlns:xapRights='http://ns.adobe.com/xap/1.0/rights/' > <xapRights:UsageTerms> <rdf:Alt> <rdf:li xml:lang='x-default' >This work is licensed under a &lt;a rel=&#34;license&#34; href=&#34;http://creativecommons.org/licenses/by-nc-sa/4.0/&#34;&gt;Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License&lt;/a&gt;.</rdf:li> <rdf:li xml:lang='en' >This work is licensed under a &lt;a rel=&#34;license&#34; href=&#34;http://creativecommons.org/licenses/by-nc-sa/4.0/&#34;&gt;Creative Commons 
Attribution-NonCommercial-ShareAlike 4.0 International License&lt;/a&gt;.</rdf:li> </rdf:Alt> </xapRights:UsageTerms> </rdf:Description> <rdf:Description rdf:about='' xmlns:cc='http://creativecommons.org/ns#'> <cc:license rdf:resource='http://creativecommons.org/licenses/by-nc-sa/4.0/'/> </rdf:Description> </rdf:RDF> </x:xmpmeta> <?xpacket end='r'?> --> </div> <!-- ==================================== FOOTER ============================ --> <div id="footer"> <div id="copyright">Processing is an open project intiated by <a href="http://benfry.com/">Ben Fry</a> and <a href="http://reas.com">Casey Reas</a>. It is developed by a <a href="http://processing.org/about/people/">team of volunteers</a>.</div> <div id="colophon"> <a href="copyright.html">&copy; Info</a> </div> </div> </div> <script src="javascript/jquery-1.9.1.min.js"></script> <script src="javascript/site.js" type="text/javascript"></script> </body> </html>
evgeniy-storozhenko/parking-finder
3rdParty/processing-3.0.2-windows64/modes/java/reference/XML_setName_.html
HTML
apache-2.0
6,937
# frozen_string_literal: true # Cloud Foundry Java Buildpack # Copyright 2013-2019 the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'fileutils' require 'pathname' require 'java_buildpack/component/base_component' require 'java_buildpack/framework' require 'java_buildpack/logging/logger_factory' require 'java_buildpack/util/filtering_pathname' require 'yaml' module JavaBuildpack module Framework # Encapsulates the functionality for multi buildpack support. class MultiBuildpack < JavaBuildpack::Component::BaseComponent include JavaBuildpack::Util # (see JavaBuildpack::Component::BaseComponent#initialize) def initialize(context) super(context) @logger = JavaBuildpack::Logging::LoggerFactory.instance.get_logger MultiBuildpack @logger.debug { "Dependencies Directory: #{ARGV[3]}" } end # (see JavaBuildpack::Component::BaseComponent#detect) def detect !dep_directories.empty? ? 
"multi-buildpack=#{names(dep_directories).join(',')}" : nil end # (see JavaBuildpack::Component::BaseComponent#compile) def compile puts "#{'----->'.red.bold} #{'Multiple Buildpacks'.blue.bold} detected" dep_directories.each do |dep_directory| config = config(config_file(dep_directory)) name = name(config) log_configuration config log_dep_contents dep_directory contributions = [ add_bin(dep_directory), add_lib(dep_directory), add_additional_libraries(config), add_environment_variables(config), add_extension_directories(config), add_java_opts(config), add_security_providers(config) ] puts " #{name}#{contributions_message(contributions)}" end end # (see JavaBuildpack::Component::BaseComponent#release) def release dep_directories.each do |dep_directory| config = config(config_file(dep_directory)) add_bin(dep_directory) add_lib(dep_directory) add_additional_libraries(config) add_environment_variables(config) add_extension_directories(config) add_java_opts(config) add_security_providers(config) end end private def add_additional_libraries(config) additional_libraries = config['config']['additional_libraries'] return unless additional_libraries additional_libraries.each do |additional_library| @droplet.additional_libraries << filtering_pathname(additional_library) end 'Additional Libraries' end def add_agentpaths(java_opts) agentpaths = java_opts['agentpaths'] return unless agentpaths agentpaths.each do |agentpath| @droplet.java_opts.add_agentpath filtering_pathname(agentpath) end 'Agents' end def add_agentpaths_with_props(java_opts) agentpaths = java_opts['agentpaths_with_props'] return unless agentpaths agentpaths.each do |agentpath, props| @droplet.java_opts.add_agentpath_with_props filtering_pathname(agentpath), props end 'Agent with Properties' end def add_bin(dep_directory) bin_directory = dep_directory + 'bin' return unless bin_directory.exist? 
@droplet.environment_variables .add_environment_variable('PATH', "$PATH:#{qualify_dep(bin_directory)}") '$PATH' end def filtering_pathname(path) JavaBuildpack::Util::FilteringPathname.new(Pathname.new(path), ->(_) { true }, false) end def qualify_dep(dep_dir) ret = dep_dir.to_s.gsub(%r{.+(/deps/[0-9]+/\w+)$}, '\1') "$PWD/..#{ret}" end def add_bootclasspath_ps(java_opts) bootclasspath_ps = java_opts['bootclasspath_ps'] return unless bootclasspath_ps bootclasspath_ps.each do |bootclasspath_p| @droplet.java_opts.add_bootclasspath_p filtering_pathname(bootclasspath_p) end 'Boot Classpaths' end def add_environment_variables(config) environment_variables = config['config']['environment_variables'] return unless environment_variables environment_variables.each do |key, value| path = Pathname.new(value) if path.exist? @droplet.environment_variables.add_environment_variable key, filtering_pathname(value) else @droplet.environment_variables.add_environment_variable key, value end end 'Environment Variables' end def add_extension_directories(config) extension_directories = config['config']['extension_directories'] return unless extension_directories extension_directories.each do |extension_directory| @droplet.extension_directories << filtering_pathname(extension_directory) end 'Extension Directories' end def add_javaagent(java_opts) javaagents = java_opts['javaagents'] return unless javaagents javaagents.each do |javaagent| @droplet.java_opts.add_javaagent filtering_pathname(javaagent) end 'Java Agents' end def add_java_opts(config) java_opts = config['config']['java_opts'] return unless java_opts [ add_agentpaths(java_opts), add_agentpaths_with_props(java_opts), add_bootclasspath_ps(java_opts), add_javaagent(java_opts), add_options(java_opts), add_preformatted_options(java_opts), add_system_properties(java_opts) ] end def add_lib(dep_directory) lib_directory = dep_directory + 'lib' return unless lib_directory.exist? 
@droplet.environment_variables .add_environment_variable('LD_LIBRARY_PATH', "$LD_LIBRARY_PATH:#{qualify_dep(lib_directory)}") '$LD_LIBRARY_PATH' end def add_options(java_opts) options = java_opts['options'] return unless options options.each do |key, value| path = Pathname.new(value) if path.exist? @droplet.java_opts.add_option key, filtering_pathname(value) else @droplet.java_opts.add_option key, value end end 'Options' end def add_preformatted_options(java_opts) preformatted_options = java_opts['preformatted_options'] return unless preformatted_options preformatted_options.each do |preformatted_option| @droplet.java_opts.add_preformatted_options preformatted_option end 'Preformatted Options' end def add_security_providers(config) security_providers = config['config']['security_providers'] return unless security_providers security_providers.each do |security_provider| @droplet.security_providers << security_provider end 'Security Providers' end def add_system_properties(java_opts) system_properties = java_opts['system_properties'] return unless system_properties system_properties.each do |key, value| path = Pathname.new(value) if path.exist? @droplet.java_opts.add_system_property key, filtering_pathname(value) else @droplet.java_opts.add_system_property key, value end end 'System Properties' end def config(config_file) YAML.load_file(config_file) end def config_file(dep_directory) dep_directory + 'config.yml' end def contributions_message(contributions) return if contributions.compact.empty? " contributed to: #{contributions.flatten.compact.sort.join(', ')}" end def dep_directories deps = Pathname.glob('/tmp/*/deps').map(&:children).flatten return [] unless deps deps .select { |dep_directory| config_file(dep_directory).exist? 
} .sort_by(&:basename) end def log_configuration(config) @logger.debug { "Configuration: #{config}" } end def log_dep_contents(dep_directory) @logger.debug do paths = [] dep_directory.find { |f| paths << f.relative_path_from(dep_directory).to_s } "Application Contents (#{dep_directory}): #{paths}" end end def name(config) config['name'] end def names(dep_directories) dep_directories.map { |dep_directory| name(config(config_file(dep_directory))) } end end end end
rakutentech/java-buildpack
lib/java_buildpack/framework/multi_buildpack.rb
Ruby
apache-2.0
9,317
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file declares DFG_GraphvizDrawPass, which helps to draw a data flow
 * graph's structure as a Graphviz dot file for debugging.
 */

#pragma once

#include <fstream>
#include <string>

#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/dot.h"

namespace paddle {
namespace inference {
namespace analysis {

/*
 * Debugging pass: outputs a dot file describing a DataFlowGraph and writes
 * it to the configured directory.
 */
class DFG_GraphvizDrawPass : public DataFlowGraphPass {
 public:
  // Immutable settings for one drawer instance.
  struct Config {
    Config(const std::string &dir, const std::string &id,
           bool display_deleted_node = false)
        : dir(dir), id(id), display_deleted_node(display_deleted_node) {}

    // The directory to store the .dot or .png files.
    const std::string dir;
    // The identifier for this dot file.
    const std::string id;
    // Whether to display deleted nodes, default false.
    const bool display_deleted_node;
  };

  explicit DFG_GraphvizDrawPass(const Config &config) : config_(config) {}

  // No per-run setup/teardown is needed; these overrides always succeed.
  bool Initialize(Argument *argument) override { return true; }
  // Emits the dot file for `graph` (implemented out of line).
  void Run(DataFlowGraph *graph) override;
  bool Finalize() override { return true; }

  std::string repr() const override { return "DFG graphviz drawer"; }
  std::string description() const override {
    return "Debug a DFG by draw with graphviz";
  }

 protected:
  // A counter to add a number prefix to the debugger image output so that they
  // will sort in the triggered order.  Being static, it is shared by all
  // instances of this pass within the process.
  static int counter_;

  // Path of the dot file to output.
  // NOTE: increments the static counter_ as a side effect; this is legal in a
  // const member function because counter_ is static, but it means each call
  // yields a different path.
  std::string GenDotPath() const {
    return config_.dir + "/" + std::to_string(counter_++) + "-graph_" +
           config_.id + ".dot";
  }

  // Renders `graph` to dot syntax; virtual so subclasses can customize.
  virtual std::string Draw(DataFlowGraph *graph);

  Config config_;
};

}  // namespace analysis
}  // namespace inference
}  // namespace paddle
reyoung/Paddle
paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h
C
apache-2.0
2,375
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using T4TS.Example.Models;
using T4TS.Tests.Fixtures.Basic;

namespace T4TS.Tests.Fixtures.Partial
{
    /// <summary>
    /// Test fixture: a class split across two partial declarations, used to
    /// verify that T4TS merges members from every part of a partial class.
    /// </summary>
    [TypeScriptInterface]
    public partial class PartialModel
    {
        public BasicModel OnPartialModel { get; set; }
    }

    // Second part of the same class; C# allows the base type to be specified
    // on any one part, so this also makes PartialModel derive from
    // ModelFromDifferentProject.
    public partial class PartialModel : ModelFromDifferentProject
    {
        public BasicModel OnOtherPartialModel { get; set; }
    }
}
dolly22/t4ts
T4TS.Tests/Fixtures/Partial/PartialModel.cs
C#
apache-2.0
492
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.cache.query.cq.dunit; import static org.apache.geode.internal.Assert.assertTrue; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; import java.util.List; import java.util.concurrent.TimeUnit; import junitparams.Parameters; import org.awaitility.Awaitility; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.apache.geode.cache.query.CqAttributesFactory; import org.apache.geode.cache.query.CqEvent; import org.apache.geode.cache.query.CqException; import org.apache.geode.cache.query.CqListener; import org.apache.geode.cache.query.CqQuery; import org.apache.geode.cache.query.QueryService; import org.apache.geode.cache.query.RegionNotFoundException; import org.apache.geode.security.query.QuerySecurityBase; import org.apache.geode.security.query.data.QueryTestObject; import org.apache.geode.test.junit.categories.DistributedTest; import org.apache.geode.test.junit.categories.SecurityTest; import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory; 
@Category({DistributedTest.class, SecurityTest.class}) @RunWith(Parameterized.class) @Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class) public class CqSecurityAuthorizedUserDUnitTest extends QuerySecurityBase { @Parameterized.Parameters public static Object[] usersAllowed() { return new Object[] {"dataReader", "dataReaderRegion", "clusterManagerDataReader", "clusterManagerDataReaderRegion", "super-user"}; } @Parameterized.Parameter public String user; @Before public void configureSpecificClientAndKeyAndValues() { createClientCache(specificUserClient, user, userPerms.getUserPassword(user)); createProxyRegion(specificUserClient, regionName); keys = new Object[] {"key-0"}; values = new Object[] {new QueryTestObject(0, "John")}; } // Variables that need to be shared across invoke calls. protected static CqSecurityTestCqListener cqListener = null; private String regexForExpectedExceptions = ".*Unauthorized access.*"; @Test public void cqExecuteNoMethodInvocationWithUsersWithCqPermissionsWithPrepopulatedRegionShouldBeAllowed() throws Exception { putIntoRegion(superUserClient, keys, values, regionName); String query = "select * from /" + regionName + " r where r.id = 0"; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); cq.execute(); }); putIntoRegion(superUserClient, keys, new Object[] {new QueryTestObject(0, "Bethany")}, regionName); specificUserClient.invoke(() -> { Awaitility.await().atMost(30, TimeUnit.SECONDS) .until(() -> assertEquals(1, cqListener.getNumEvent())); }); } @Test public void cqExecuteWithMethodInvocationWithUsersWithCqPermissionsWithPrepopulatedRegionIsGettingExceptionInReplicatedRegion() throws Exception { putIntoRegion(superUserClient, keys, values, regionName); String query = "select 
* from /" + regionName + " r where r.name = 'Beth'"; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); executeCqButExpectException(cq, user, regexForExpectedExceptions); }); } @Test public void cqExecuteWithInitialResultsWithMethodInvocationWithUsersWithCqPermissionsWithPrepopulatedRegionShouldBeDeniedBecauseOfInvocation() throws Exception { putIntoRegion(superUserClient, keys, values, regionName); String query = "select * from /" + regionName + " r where r.name = 'Beth'"; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); executeCqWithInitialResultsButExpectException(cq, user, regexForExpectedExceptions); }); } @Test public void cqExecuteWithInitialResultsWithMethodInvocationWithUnpopulatedRegionAndFollowedByAPutShouldTriggerCqError() throws Exception { String query = "select * from /" + regionName + " r where r.name = 'Beth'"; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); cq.executeWithInitialResults(); }); Object[] keys = {"key-0"}; Object[] values = {new QueryTestObject(1, "Mary")}; putIntoRegion(superUserClient, keys, values, regionName); specificUserClient.invoke(() -> { Awaitility.await().atMost(30, TimeUnit.SECONDS) .until(() -> assertEquals(1, cqListener.getNumErrors())); }); } @Test public void 
cqExecuteWithMethodInvocationWithUnpopulatedRegionAndFollowedByAPutShouldTriggerCqError() throws Exception { String query = "select * from /" + regionName + " r where r.name = 'Beth'";; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); cq.execute(); }); Object[] keys = {"key-0"}; Object[] values = {new QueryTestObject(1, "Mary")}; putIntoRegion(superUserClient, keys, values, regionName); specificUserClient.invoke(() -> { Awaitility.await().atMost(30, TimeUnit.SECONDS) .until(() -> assertEquals(1, cqListener.getNumErrors())); }); } @Test public void cqCanBeClosedByTheCreator() throws Exception { String query = "select * from /" + regionName + " r where r.id = 0"; specificUserClient.invoke(() -> { QueryService queryService = getClientCache().getQueryService(); CqSecurityTestCqListener cqListener = new CqSecurityTestCqListener(); CqSecurityAuthorizedUserDUnitTest.cqListener = cqListener; CqQuery cq = createCq(queryService, query, cqListener); cq.execute(); cq.close(); assertTrue(cq.isClosed()); }); assertEquals(0, server.getCache().getCqService().getAllCqs().size()); } protected CqQuery createCq(QueryService queryService, String query, CqListener cqListener) throws CqException { CqAttributesFactory cqaf = new CqAttributesFactory(); cqaf.addCqListener(cqListener); CqQuery cq = queryService.newCq(query, cqaf.create()); return cq; } protected void executeCqButExpectException(CqQuery cq, String user, String regexForExpectedException) { try { cq.execute(); fail("Expected an exception when executing cq:" + cq.getQueryString() + " with user:" + user); } catch (RegionNotFoundException | CqException e) { if (!e.getMessage().matches(regexForExpectedException)) { Throwable cause = e.getCause(); while (cause != null) { if 
(cause.getMessage().matches(regexForExpectedException)) { return; } cause = cause.getCause(); } e.printStackTrace(); fail("Exception thrown did not match:" + regexForExpectedException + ". Instead was:" + e); } } } private void executeCqWithInitialResultsButExpectException(CqQuery cq, String user, String regexForExpectedException) { try { cq.executeWithInitialResults(); fail("Expected an exception when executing cq:" + cq + " with user:" + user); } catch (RegionNotFoundException | CqException e) { e.printStackTrace(); if (!e.getMessage().matches(regexForExpectedException)) { Throwable cause = e.getCause(); while (cause != null) { if (cause.getMessage() != null && cause.getMessage().matches(regexForExpectedException)) { return; } cause = cause.getCause(); } e.printStackTrace(); fail("Exception thrown did not match:" + regexForExpectedException + ". Instead was:" + e); } } } public class CqSecurityTestCqListener implements CqListener { private int numEvents = 0; private int numErrors = 0; @Override public void onEvent(CqEvent aCqEvent) { numEvents++; } @Override public void onError(CqEvent aCqEvent) { numErrors++; } public int getNumEvent() { return numEvents; } public int getNumErrors() { return numErrors; } @Override public void close() { } } }
pivotal-amurmann/geode
geode-cq/src/test/java/org/apache/geode/cache/query/cq/dunit/CqSecurityAuthorizedUserDUnitTest.java
Java
apache-2.0
10,083
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.addthis.hydra.task.output;

import javax.annotation.Nonnull;

import java.io.IOException;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import com.addthis.basis.util.JitterClock;

import com.addthis.bundle.core.Bundle;
import com.addthis.codec.annotations.FieldConfig;
import com.addthis.hydra.data.filter.bundle.BundleFilter;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The OutputWriter has a single concurrent queue onto which bundles
 * are placed in preparation for moving these bundles to disk. One
 * or more disk flushing threads are responsible for moving bundles
 * from the central queue to the disk.
 * <p/>
 * If the disk flushing threads fall behind the threads producing bundles,
 * then the central concurrent queue can grow too large. There are two
 * strategies for shrinking the concurrent queue when it grows too large.
 * In one strategy a thread that is producing bundles will assist in
 * moving bundles from the central queue to the disk. In the second strategy
 * the thread that is producing bundles will wait for the disk flushing
 * threads to shrink the queue.
 * <p/>
 * The default behavior is the first strategy. The second strategy is desirable
 * when you want to ensure that bundles are emitted to the destination sink
 * in the order in which the bundles are produced. The second strategy is enabled
 * by setting {@link #waitForDiskFlushThread} to true.
 */
public abstract class AbstractOutputWriter implements WritableRootPaths {

    private static final Logger log = LoggerFactory.getLogger(AbstractOutputWriter.class);

    // Hard upper bound (10^8) on the central buffer capacity, used to sanity-check the
    // maxBundles * bufferSizeRatio product in open().
    private static final int maxBufferSize = (int) Math.pow(10.0, 8.0);

    /**
     * Options for data layout within the output files.
     * The default is type "channel".
     */
    @FieldConfig(codable = true)
    protected OutputStreamFormatter format = new OutputStreamChannel();

    /**
     * Maximum number of bundles that can be stored
     * in the bundle cache before the asynchronous
     * flush is invoked. Default is 100.
     */
    @FieldConfig(codable = true)
    private int maxBundles = 100;

    /**
     * The total cache size is equal to
     * maxBundles * bufferSizeRatio. bufferSizeRatio
     * must be greater than 1. Default value is 100.
     */
    @FieldConfig(codable = true)
    private int bufferSizeRatio = 100;

    /**
     * If true then wait until the disk flush
     * threads have caught up when the buffer is full.
     * If false then perform a synchronous flush
     * when the buffer is full. Default is false.
     */
    @FieldConfig(codable = true)
    private boolean waitForDiskFlushThread = false;

    /**
     * Number of threads that flush data from
     * bundle cache to disk. Default is one.
     */
    @FieldConfig(codable = true)
    private int diskFlushThreads = 1;

    /**
     * Optional bundle filter; when set, bundles rejected by the filter are
     * silently dropped in {@link #writeLine} instead of being queued.
     */
    @FieldConfig(codable = true)
    private BundleFilter filter;

    /**
     * Throw error if shutdown thread takes
     * longer than this many seconds
     */
    @FieldConfig(codable = true)
    private int maxShutDownSeconds = 240;

    @FieldConfig(codable = true)
    private boolean errorOnMaintenanceShutdownExceeded = true;

    // Starts with zero permits; addBundle() releases permits to wake the flush threads,
    // and shutdownDiskFlushThreads() releases them once more so each thread can exit.
    private final Semaphore diskFlushThreadSemaphore = new Semaphore(0);

    // Lifecycle flags. 'exiting' tells the flush threads to return; 'stopped' rejects
    // further writeLine() calls; 'errored' surfaces the first stored IOException.
    private volatile boolean stopped = false;
    private volatile boolean exiting = false;
    private volatile boolean errored = false;

    private DiskFlushThread[] diskFlushThreadArray;

    protected ScheduledExecutorService writerMaintenanceThread;

    private QueueWriter queueWriter;

    // Holds the first IOException encountered; see setErrorCause().
    private final AtomicReference<IOException> errorCause = new AtomicReference<>();

    /**
     * Queues one bundle for the given file.
     *
     * @throws IOException      if a previous flush failed (rethrows the stored cause)
     * @throws RuntimeException if called after {@link #closeOpenOutputs()} completed
     */
    public final void writeLine(String file, Bundle nextLine) throws IOException {
        if (errored) {
            throw new IOException(errorCause.get());
        } else if (stopped) {
            log.warn("Tried to write a line after the writer has been stopped, line was: " + nextLine);
            throw new RuntimeException("Tried to write a line after the writer has been stopped");
        } else if (filter == null || filter.filter(nextLine)) {
            queueWriter.addBundle(file, nextLine);
        }
    }

    /** Subclass hook: close whatever output sinks the subclass holds open. */
    protected abstract void doCloseOpenOutputs();

    /**
     * Orderly shutdown: stop maintenance and flush threads, drain any remaining
     * queued bundles, then close the subclass's outputs. Always marks the writer
     * stopped, even if a step throws.
     */
    public final void closeOpenOutputs() {
        try {
            exiting = true;
            // first stop the async flush threads
            shutdownMaintenanceThreads();
            shutdownDiskFlushThreads();
            queueWriter.drain(true);
            doCloseOpenOutputs();
        } finally {
            stopped = true;
        }
    }

    // True when the candidate buffer size is strictly between maxBundles and maxBufferSize.
    private boolean bufferSizeInRange(int bufferSize) {
        return bufferSize > maxBundles && bufferSize < maxBufferSize;
    }

    /**
     * Sets the volatile boolean error variable and stores
     * the first exception that is encountered.
     *
     * @param cause the error to store if it is the first exception
     */
    private void setErrorCause(@Nonnull IOException cause) {
        errorCause.compareAndSet(null, cause);
        errored = true;
    }

    /**
     * Initializes the writer: computes a sane buffer size, creates the central
     * queue, schedules the periodic force-drain task (unless waitForDiskFlushThread
     * is set), and starts the disk flush threads.
     */
    public void open() {
        if (format != null) {
            format.open();
        }
        /**
         * The next several lines of logic are to handle
         * ridiculous input values for maxBundles and bufferSizeRatio.
         */
        int bufferSize = maxBundles * bufferSizeRatio;
        if (!bufferSizeInRange(bufferSize)) {
            bufferSize = maxBundles * 10;
        }
        if (!bufferSizeInRange(bufferSize)) {
            bufferSize = maxBundles * 2;
        }
        if (!bufferSizeInRange(bufferSize)) {
            bufferSize = maxBundles + 1;
        }
        queueWriter = new QueueWriter(bufferSize);
        writerMaintenanceThread = new ScheduledThreadPoolExecutor(1,
                new ThreadFactoryBuilder().setDaemon(true)
                        .setNameFormat("AbstractOutputWriterCleanUpThread-%d")
                        .build());
        // thread to force drain queues that have data but haven't reached their drain threshold
        if (!waitForDiskFlushThread) {
            writerMaintenanceThread.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    // Drain any bundles that have sat in the queue for over a minute.
                    long deltaThreshold = (JitterClock.globalTime() - 60000);
                    if (queueWriter.size() > 0 && queueWriter.lastUpdatedTime < deltaThreshold) {
                        queueWriter.drain(false);
                    }
                }
            }, 10000, 10000, TimeUnit.MILLISECONDS);
        }
        diskFlushThreadArray = new DiskFlushThread[diskFlushThreads];
        for (int i = 0; i < diskFlushThreads; i++) {
            diskFlushThreadArray[i] = new DiskFlushThread(i);
            diskFlushThreadArray[i].setDaemon(true);
            diskFlushThreadArray[i].start();
        }
    }

    /**
     * Wraps the central bounded queue of pending writes and the logic for
     * inserting into it under both back-pressure strategies.
     */
    private class QueueWriter {

        final BlockingQueue<WriteTuple> buffer;

        // Last time a bundle was enqueued; read by the maintenance task to decide
        // whether a force-drain is due.
        volatile long lastUpdatedTime = JitterClock.globalTime();

        private QueueWriter(int bufferSize) {
            buffer = new ArrayBlockingQueue<>(bufferSize);
        }

        public int size() {
            return buffer.size();
        }

        // Drains up to maxElements tuples into the caller-supplied list (reused by
        // the flush threads to avoid reallocating).
        public List<WriteTuple> drainOutputBundles(List<WriteTuple> outputList, int maxElements) {
            buffer.drainTo(outputList, maxElements);
            return outputList;
        }

        // Drains up to maxElements tuples into a freshly allocated list.
        public List<WriteTuple> drainOutputBundles(int maxElements) {
            List<WriteTuple> outputList = new ArrayList<>(maxElements);
            buffer.drainTo(outputList, maxElements);
            return outputList;
        }

        /**
         * Helper function to {@link #addBundle} method. Returns
         * when no further processing is needed on the input
         * tuple. Method returns when the bundle is successfully
         * inserted into the buffer or when an exception is thrown.
         */
        private void addBundleHelper(WriteTuple tuple) {
            boolean tupleProcessed = false;
            while (!tupleProcessed) {
                try {
                    if (waitForDiskFlushThread) {
                        // Strategy two: block until the flush threads make room.
                        buffer.put(tuple);
                        tupleProcessed = true;
                    } else {
                        // Strategy one: non-blocking insert; on failure the producer
                        // helps drain the queue below.
                        tupleProcessed = buffer.offer(tuple);
                    }
                } catch (InterruptedException e) {
                    log.error("error writing to buffer: ", e);
                    tupleProcessed = true;
                    setErrorCause(new IOException(e));
                }
                if (!tupleProcessed) {
                    try {
                        List<WriteTuple> outputList = drainOutputBundles(maxBundles);
                        dequeueWrite(outputList);
                    } catch (IOException e) {
                        log.error("error dequeuing write: ", e);
                        tupleProcessed = true;
                        setErrorCause(e);
                    }
                }
            }
        }

        public void addBundle(String file, Bundle nextLine) {
            WriteTuple tuple = new WriteTuple(file, nextLine);
            addBundleHelper(tuple);
            lastUpdatedTime = JitterClock.globalTime();
            /**
             * Avoid sending an avalanche of flush messages
             * to the disk flushing threads by sending a
             * message when the disk flush threads are waiting.
             */
            if (buffer.size() > maxBundles && diskFlushThreadSemaphore.availablePermits() == 0) {
                diskFlushThreadSemaphore.release(diskFlushThreads);
            }
        }

        /**
         * Drains the queue to disk. When {@code iterate} is true, loops until the
         * queue is empty (used at shutdown); otherwise performs a single pass.
         */
        public void drain(boolean iterate) {
            do {
                try {
                    List<WriteTuple> outputList = drainOutputBundles(size());
                    dequeueWrite(outputList);
                } catch (IOException e) {
                    log.error("error draining queue: ", e);
                    setErrorCause(e);
                }
            } while (iterate && size() > 0);
        }
    }

    /** Immutable (file name, bundle) pair queued for writing. */
    protected static final class WriteTuple {

        public final String fileName;
        public final Bundle bundle;

        WriteTuple(String fileName, Bundle bundle) {
            this.fileName = fileName;
            this.bundle = bundle;
        }
    }

    /**
     * Background thread that waits on the semaphore and moves bundles from the
     * central queue to disk in batches of at most maxBundles.
     */
    protected final class DiskFlushThread extends Thread {

        final List<WriteTuple> outputList;

        DiskFlushThread(int id) {
            super("OutputWriterDiskFlushThread-" + id);
            outputList = new ArrayList<>(maxBundles);
        }

        @Override
        public void run() {
            while (true) {
                try {
                    diskFlushThreadSemaphore.acquireUninterruptibly();
                    int outstandingBundles;
                    do {
                        if (exiting) {
                            return;
                        }
                        /**
                         * Do not drain the entire output queue in order
                         * to allow other DiskFlushThreads to concurrently
                         * transfer bundles from the queue to the disk.
                         */
                        outputList.clear();
                        queueWriter.drainOutputBundles(outputList, maxBundles);
                        dequeueWrite(outputList);
                        outstandingBundles = queueWriter.size();
                    } while (outstandingBundles > maxBundles);
                } catch (Exception ex) {
                    log.error("output writer disk flush error : ", ex);
                    setErrorCause(new IOException(ex));
                }
            }
        }
    }

    /**
     * called by multiple thread consumers of the input queue. must be thread
     * safe.
     */
    protected abstract void dequeueWrite(List<WriteTuple> outputTuples) throws IOException;

    /**
     * Shuts down the maintenance executor and waits up to maxShutDownSeconds for
     * it to terminate; optionally throws if the deadline is exceeded.
     */
    private void shutdownMaintenanceThreads() {
        writerMaintenanceThread.shutdown();
        try {
            if (!writerMaintenanceThread.awaitTermination(maxShutDownSeconds, TimeUnit.SECONDS)) {
                log.error("Waited {} seconds for write maintenance termination but it did not finish",
                          maxShutDownSeconds);
                if (errorOnMaintenanceShutdownExceeded) {
                    throw new RuntimeException("Exceeded maximum allowable write maintenance shutdown time, with errorOnMaintenanceShutdownExceeded=true");
                }
            }
        } catch (InterruptedException ie) {
            log.error("Thread interrupted while waiting for write maintenance termination");
        }
    }

    /**
     * Wakes every flush thread (exiting is already true by the time this is
     * called from closeOpenOutputs()) and joins them.
     */
    private void shutdownDiskFlushThreads() {
        diskFlushThreadSemaphore.release(diskFlushThreads);
        for (int i = 0; i < diskFlushThreads; i++) {
            try {
                diskFlushThreadArray[i].join();
            } catch (InterruptedException ex) {
                log.error("shutdown disk flush threads error : ", ex);
            }
        }
    }

    /** Overrides the output formatter; returns this for chaining. */
    public final AbstractOutputWriter setFormat(OutputStreamFormatter format) {
        this.format = format;
        return this;
    }
}
mythguided/hydra
hydra-task/src/main/java/com/addthis/hydra/task/output/AbstractOutputWriter.java
Java
apache-2.0
14,218
package fr.galaisen.groomreader; import android.app.Application; import android.test.ApplicationTestCase; /** * <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a> */ public class ApplicationTest extends ApplicationTestCase<Application> { public ApplicationTest() { super(Application.class); } }
Fifoxy/groom-android
app/src/androidTest/java/com/hufi/taxmanreader/ApplicationTest.java
Java
apache-2.0
354
'use strict'; angular.module("ngLocale", [], ["$provide", function($provide) { var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"}; function getDecimals(n) { n = n + ''; var i = n.indexOf('.'); return (i == -1) ? 0 : n.length - i - 1; } function getVF(n, opt_precision) { var v = opt_precision; if (undefined === v) { v = Math.min(getDecimals(n), 3); } var base = Math.pow(10, v); var f = ((n * base) | 0) % base; return {v: v, f: f}; } $provide.value("$locale", { "DATETIME_FORMATS": { "AMPMS": [ "AM", "PM" ], "DAY": [ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ], "ERANAMES": [ "Before Christ", "Anno Domini" ], "ERAS": [ "BC", "AD" ], "FIRSTDAYOFWEEK": 6, "MONTH": [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" ], "SHORTDAY": [ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" ], "SHORTMONTH": [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ], "STANDALONEMONTH": [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" ], "WEEKENDRANGE": [ 5, 6 ], "fullDate": "EEEE, d MMMM y", "longDate": "dd MMMM y", "medium": "dd MMM y HH:mm:ss", "mediumDate": "dd MMM y", "mediumTime": "HH:mm:ss", "short": "dd/MM/y HH:mm", "shortDate": "dd/MM/y", "shortTime": "HH:mm" }, "NUMBER_FORMATS": { "CURRENCY_SYM": "\u20ac", "DECIMAL_SEP": ".", "GROUP_SEP": ",", "PATTERNS": [ { "gSize": 3, "lgSize": 3, "maxFrac": 3, "minFrac": 0, "minInt": 1, "negPre": "-", "negSuf": "", "posPre": "", "posSuf": "" }, { "gSize": 3, "lgSize": 3, "maxFrac": 2, "minFrac": 2, "minInt": 1, "negPre": "-\u00a4", "negSuf": "", "posPre": "\u00a4", "posSuf": "" } ] }, "id": "en-mt", "localeID": "en_MT", "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return 
PLURAL_CATEGORY.OTHER;} }); }]);
yoyocms/YoYoCms.AbpProjectTemplate
src/YoYoCms.AbpProjectTemplate.Web/Scripts/i18n/angular-locale_en-mt.js
JavaScript
apache-2.0
2,717
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (1.8.0_45) on Sat Oct 15 09:06:18 PDT 2016 --> <title>org.apache.derby.security (Apache Derby 10.13 API Documentation)</title> <meta name="date" content="2016-10-15"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> <script type="text/javascript" src="../../../../script.js"></script> </head> <body> <h1 class="bar"><a href="../../../../org/apache/derby/security/package-summary.html" target="classFrame">org.apache.derby.security</a></h1> <div class="indexContainer"> <h2 title="Classes">Classes</h2> <ul title="Classes"> <li><a href="SystemPermission.html" title="class in org.apache.derby.security" target="classFrame">SystemPermission</a></li> </ul> </div> </body> </html>
sadrayan/topic_web
db/db-derby-10.13.1.1-bin/javadoc/org/apache/derby/security/package-frame.html
HTML
apache-2.0
886
/*
 * #%L
 * Wildfly Camel :: Testsuite
 * %%
 * Copyright (C) 2013 - 2016 RedHat
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package org.wildfly.camel.test.nats;

import io.nats.client.Connection;
import io.nats.client.Message;
import io.nats.client.Nats;
import io.nats.client.Options;

import java.nio.charset.StandardCharsets;

import org.apache.camel.CamelContext;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.component.nats.NatsConsumer;
import org.apache.camel.impl.DefaultCamelContext;
import org.arquillian.cube.CubeController;
import org.arquillian.cube.docker.impl.requirement.RequiresDocker;
import org.arquillian.cube.requirement.ArquillianConditionalRunner;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.test.api.ArquillianResource;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.wildfly.camel.test.common.utils.TestUtils;
import org.wildfly.extension.camel.CamelAware;

/**
 * Verifies the camel-nats consumer against a dockerized NATS broker: a message published
 * on topic "test" must arrive at the mock endpoint with subject and payload intact.
 */
@CamelAware
@RunWith(ArquillianConditionalRunner.class)
@RequiresDocker
public class NatsIntegrationTest {

    private static final String CONTAINER_NATS = "nats";

    @ArquillianResource
    private CubeController cubeController;

    @Deployment
    public static JavaArchive createDeployment() {
        return ShrinkWrap.create(JavaArchive.class, "camel-nats-tests.jar")
            .addClasses(TestUtils.class);
    }

    @Before
    public void setUp() {
        cubeController.create(CONTAINER_NATS);
        cubeController.start(CONTAINER_NATS);
    }

    @After
    public void tearDown() {
        cubeController.stop(CONTAINER_NATS);
        cubeController.destroy(CONTAINER_NATS);
    }

    @Test
    public void testNatsRoutes() throws Exception {
        CamelContext camelctx = new DefaultCamelContext();
        try {
            camelctx.addRoutes(new RouteBuilder() {
                @Override
                public void configure() throws Exception {
                    from("nats://" + TestUtils.getDockerHost() + ":4222?topic=test").id("nats-route")
                        .to("mock:result");
                }
            });

            MockEndpoint to = camelctx.getEndpoint("mock:result", MockEndpoint.class);
            to.expectedMessageCount(1);

            camelctx.start();

            // Make sure the consumer has subscribed to the topic before sending messages
            NatsConsumer consumer = (NatsConsumer) camelctx.getRoute("nats-route").getConsumer();
            int count = 0;
            while (!consumer.isActive()) {
                Thread.sleep(500);
                count += 1;
                if (count > 10) {
                    throw new IllegalStateException("Gave up waiting for nats consumer to subscribe to topic");
                }
            }

            Options options = new Options.Builder().server("nats://" + TestUtils.getDockerHost() + ":4222").build();
            Connection connection = Nats.connect(options);
            try {
                final byte[] payload = "test-message".getBytes(StandardCharsets.UTF_8);
                connection.publish("test", payload);

                to.assertIsSatisfied(5000);

                Message natsMessage = to.getExchanges().get(0).getIn().getBody(Message.class);
                Assert.assertEquals("test", natsMessage.getSubject());
                Assert.assertNull(natsMessage.getReplyTo());
                Assert.assertArrayEquals(payload, natsMessage.getData());
            } finally {
                // Fix: the NATS connection was previously never closed, leaking the
                // client socket/threads for the remainder of the test run.
                connection.close();
            }
        } finally {
            camelctx.stop();
        }
    }
}
tadayosi/wildfly-camel
itests/standalone/docker/src/test/java/org/wildfly/camel/test/nats/NatsIntegrationTest.java
Java
apache-2.0
4,235
# Encoding: utf-8 # # This is auto-generated code, changes will be overwritten. # # Copyright:: Copyright 2015, Google Inc. All Rights Reserved. # License:: Licensed under the Apache License, Version 2.0. # # Code generated by AdsCommon library 0.11.0 on 2015-10-08 10:50:18. require 'ads_common/savon_service' require 'adwords_api/v201509/campaign_criterion_service_registry' module AdwordsApi; module V201509; module CampaignCriterionService class CampaignCriterionService < AdsCommon::SavonService def initialize(config, endpoint) namespace = 'https://adwords.google.com/api/adwords/cm/v201509' super(config, endpoint, namespace, :v201509) end def get(*args, &block) return execute_action('get', args, &block) end def get_to_xml(*args) return get_soap_xml('get', args) end def mutate(*args, &block) return execute_action('mutate', args, &block) end def mutate_to_xml(*args) return get_soap_xml('mutate', args) end def query(*args, &block) return execute_action('query', args, &block) end def query_to_xml(*args) return get_soap_xml('query', args) end private def get_service_registry() return CampaignCriterionServiceRegistry end def get_module() return AdwordsApi::V201509::CampaignCriterionService end end end; end; end
Tei1988/google-api-ads-ruby
adwords_api/lib/adwords_api/v201509/campaign_criterion_service.rb
Ruby
apache-2.0
1,376
/** * <copyright> * </copyright> * * $Id$ */ package org.wso2.developerstudio.eclipse.gmf.esb; /** * <!-- begin-user-doc --> * A representation of the model object '<em><b>Cache Mediator</b></em>'. * <!-- end-user-doc --> * * <p> * The following features are supported: * </p> * <ul> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheProtocolType <em>Cache Protocol Type</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheType <em>Cache Type</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHashGenerator <em>Hash Generator</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheTimeout <em>Cache Timeout</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMaxMessageSize <em>Max Message Size</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheProtocolMethods <em>Cache Protocol Methods</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMaxEntryCount <em>Max Entry Count</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getSequenceType <em>Sequence Type</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getSequenceKey <em>Sequence Key</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getInputConnector <em>Input Connector</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getOutputConnector <em>Output Connector</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getOnHitOutputConnector <em>On Hit Output Connector</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMediatorFlow <em>Mediator Flow</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHeadersToExcludeInHash <em>Headers To Exclude In Hash</em>}</li> * <li>{@link 
org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getResponseCodes <em>Response Codes</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#isEnableCacheControl <em>Enable Cache Control</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#isIncludeAgeHeader <em>Include Age Header</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getId <em>Id</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHashGeneratorAttribute <em>Hash Generator Attribute</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getScope <em>Scope</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getImplementationType <em>Implementation Type</em>}</li> * <li>{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheMediatorImplementation <em>Cache Mediator Implementation</em>}</li> * </ul> * * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator() * @model * @generated */ public interface CacheMediator extends Mediator { /** * Returns the value of the '<em><b>Cache Protocol Type</b></em>' attribute. * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheProtocolType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Cache Protocol Type</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Cache Protocol Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheProtocolType * @see #setCacheProtocolType(CacheProtocolType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_CacheProtocolType() * @model * @generated */ CacheProtocolType getCacheProtocolType(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheProtocolType <em>Cache Protocol Type</em>}' attribute. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Cache Protocol Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheProtocolType * @see #getCacheProtocolType() * @generated */ void setCacheProtocolType(CacheProtocolType value); /** * Returns the value of the '<em><b>Cache Type</b></em>' attribute. * The default value is <code>"FINDER"</code>. * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Cache Type</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Cache Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheType * @see #setCacheType(CacheType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_CacheType() * @model default="FINDER" * @generated */ CacheType getCacheType(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheType <em>Cache Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Cache Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheType * @see #getCacheType() * @generated */ void setCacheType(CacheType value); /** * Returns the value of the '<em><b>Hash Generator</b></em>' attribute. * The default value is <code>"org.wso2.carbon.mediator.cache.digest.HttpRequestHashGenerator"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Hash Generator</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Hash Generator</em>' attribute. 
* @see #setHashGenerator(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_HashGenerator() * @model default="org.wso2.carbon.mediator.cache.digest.HttpRequestHashGenerator" * @generated */ String getHashGenerator(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHashGenerator <em>Hash Generator</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Hash Generator</em>' attribute. * @see #getHashGenerator() * @generated */ void setHashGenerator(String value); /** * Returns the value of the '<em><b>Cache Timeout</b></em>' attribute. * The default value is <code>"120"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Cache Timeout</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Cache Timeout</em>' attribute. * @see #setCacheTimeout(int) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_CacheTimeout() * @model default="120" * @generated */ int getCacheTimeout(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheTimeout <em>Cache Timeout</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Cache Timeout</em>' attribute. * @see #getCacheTimeout() * @generated */ void setCacheTimeout(int value); /** * Returns the value of the '<em><b>Max Message Size</b></em>' attribute. * The default value is <code>"2000"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Max Message Size</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Max Message Size</em>' attribute. 
* @see #setMaxMessageSize(int) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_MaxMessageSize() * @model default="2000" * @generated */ int getMaxMessageSize(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMaxMessageSize <em>Max Message Size</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Max Message Size</em>' attribute. * @see #getMaxMessageSize() * @generated */ void setMaxMessageSize(int value); /** * Returns the value of the '<em><b>Cache Protocol Methods</b></em>' attribute. * The default value is <code>"*"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Cache Protocol Methods</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Cache Protocol Methods</em>' attribute. * @see #setCacheProtocolMethods(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_CacheProtocolMethods() * @model default="*" * @generated */ String getCacheProtocolMethods(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheProtocolMethods <em>Cache Protocol Methods</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Cache Protocol Methods</em>' attribute. * @see #getCacheProtocolMethods() * @generated */ void setCacheProtocolMethods(String value); /** * Returns the value of the '<em><b>Max Entry Count</b></em>' attribute. * The default value is <code>"1000"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Max Entry Count</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Max Entry Count</em>' attribute. 
* @see #setMaxEntryCount(int) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_MaxEntryCount() * @model default="1000" * @generated */ int getMaxEntryCount(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMaxEntryCount <em>Max Entry Count</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Max Entry Count</em>' attribute. * @see #getMaxEntryCount() * @generated */ void setMaxEntryCount(int value); /** * Returns the value of the '<em><b>Sequence Type</b></em>' attribute. * The default value is <code>"REGISTRY_REFERENCE"</code>. * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheSequenceType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Sequence Type</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Sequence Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheSequenceType * @see #setSequenceType(CacheSequenceType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_SequenceType() * @model default="REGISTRY_REFERENCE" * @generated */ CacheSequenceType getSequenceType(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getSequenceType <em>Sequence Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Sequence Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheSequenceType * @see #getSequenceType() * @generated */ void setSequenceType(CacheSequenceType value); /** * Returns the value of the '<em><b>Sequence Key</b></em>' containment reference. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Sequence Key</em>' reference isn't clear, * there really should be more of a description here... 
* </p> * <!-- end-user-doc --> * @return the value of the '<em>Sequence Key</em>' containment reference. * @see #setSequenceKey(RegistryKeyProperty) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_SequenceKey() * @model containment="true" * @generated */ RegistryKeyProperty getSequenceKey(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getSequenceKey <em>Sequence Key</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Sequence Key</em>' containment reference. * @see #getSequenceKey() * @generated */ void setSequenceKey(RegistryKeyProperty value); /** * Returns the value of the '<em><b>Input Connector</b></em>' containment reference. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Input Connector</em>' containment reference isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Input Connector</em>' containment reference. * @see #setInputConnector(CacheMediatorInputConnector) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_InputConnector() * @model containment="true" * @generated */ CacheMediatorInputConnector getInputConnector(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getInputConnector <em>Input Connector</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Input Connector</em>' containment reference. * @see #getInputConnector() * @generated */ void setInputConnector(CacheMediatorInputConnector value); /** * Returns the value of the '<em><b>Output Connector</b></em>' containment reference. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Output Connector</em>' containment reference isn't clear, * there really should be more of a description here... 
* </p> * <!-- end-user-doc --> * @return the value of the '<em>Output Connector</em>' containment reference. * @see #setOutputConnector(CacheMediatorOutputConnector) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_OutputConnector() * @model containment="true" * @generated */ CacheMediatorOutputConnector getOutputConnector(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getOutputConnector <em>Output Connector</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Output Connector</em>' containment reference. * @see #getOutputConnector() * @generated */ void setOutputConnector(CacheMediatorOutputConnector value); /** * Returns the value of the '<em><b>On Hit Output Connector</b></em>' containment reference. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>On Hit Output Connector</em>' containment reference isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>On Hit Output Connector</em>' containment reference. * @see #setOnHitOutputConnector(CacheMediatorOnHitOutputConnector) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_OnHitOutputConnector() * @model containment="true" * @generated */ CacheMediatorOnHitOutputConnector getOnHitOutputConnector(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getOnHitOutputConnector <em>On Hit Output Connector</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>On Hit Output Connector</em>' containment reference. * @see #getOnHitOutputConnector() * @generated */ void setOnHitOutputConnector(CacheMediatorOnHitOutputConnector value); /** * Returns the value of the '<em><b>Mediator Flow</b></em>' containment reference. 
* <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Mediator Flow</em>' containment reference isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Mediator Flow</em>' containment reference. * @see #setMediatorFlow(MediatorFlow) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_MediatorFlow() * @model containment="true" * @generated */ MediatorFlow getMediatorFlow(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getMediatorFlow <em>Mediator Flow</em>}' containment reference. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Mediator Flow</em>' containment reference. * @see #getMediatorFlow() * @generated */ void setMediatorFlow(MediatorFlow value); /** * Returns the value of the '<em><b>Headers To Exclude In Hash</b></em>' attribute. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Headers To Exclude In Hash</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Headers To Exclude In Hash</em>' attribute. * @see #setHeadersToExcludeInHash(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_HeadersToExcludeInHash() * @model * @generated */ String getHeadersToExcludeInHash(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHeadersToExcludeInHash <em>Headers To Exclude In Hash</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Headers To Exclude In Hash</em>' attribute. * @see #getHeadersToExcludeInHash() * @generated */ void setHeadersToExcludeInHash(String value); /** * Returns the value of the '<em><b>Response Codes</b></em>' attribute. * The default value is <code>".*"</code>. 
* <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Response Codes</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Response Codes</em>' attribute. * @see #setResponseCodes(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_ResponseCodes() * @model default=".*" * @generated */ String getResponseCodes(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getResponseCodes <em>Response Codes</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Response Codes</em>' attribute. * @see #getResponseCodes() * @generated */ void setResponseCodes(String value); /** * Returns the value of the '<em><b>Enable Cache Control</b></em>' attribute. * The default value is <code>"false"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Enable Cache Control</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Enable Cache Control</em>' attribute. * @see #setEnableCacheControl(boolean) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_EnableCacheControl() * @model default="false" * @generated */ boolean isEnableCacheControl(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#isEnableCacheControl <em>Enable Cache Control</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Enable Cache Control</em>' attribute. * @see #isEnableCacheControl() * @generated */ void setEnableCacheControl(boolean value); /** * Returns the value of the '<em><b>Include Age Header</b></em>' attribute. * The default value is <code>"false"</code>. 
* <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Include Age Header</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Include Age Header</em>' attribute. * @see #setIncludeAgeHeader(boolean) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_IncludeAgeHeader() * @model default="false" * @generated */ boolean isIncludeAgeHeader(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#isIncludeAgeHeader <em>Include Age Header</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Include Age Header</em>' attribute. * @see #isIncludeAgeHeader() * @generated */ void setIncludeAgeHeader(boolean value); /** * Returns the value of the '<em><b>Id</b></em>' attribute. * The default value is <code>""</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Id</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Id</em>' attribute. * @see #setId(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_Id() * @model default="" * @generated */ String getId(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getId <em>Id</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Id</em>' attribute. * @see #getId() * @generated */ void setId(String value); /** * Returns the value of the '<em><b>Hash Generator Attribute</b></em>' attribute. * The default value is <code>"org.wso2.carbon.mediator.cache.digest.DOMHASHGenerator"</code>. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Hash Generator Attribute</em>' attribute isn't clear, * there really should be more of a description here... 
* </p> * <!-- end-user-doc --> * @return the value of the '<em>Hash Generator Attribute</em>' attribute. * @see #setHashGeneratorAttribute(String) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_HashGeneratorAttribute() * @model default="org.wso2.carbon.mediator.cache.digest.DOMHASHGenerator" * @generated */ String getHashGeneratorAttribute(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getHashGeneratorAttribute <em>Hash Generator Attribute</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Hash Generator Attribute</em>' attribute. * @see #getHashGeneratorAttribute() * @generated */ void setHashGeneratorAttribute(String value); /** * Returns the value of the '<em><b>Scope</b></em>' attribute. * The default value is <code>"Per_Host"</code>. * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheScopeType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Scope</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Scope</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheScopeType * @see #setScope(CacheScopeType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_Scope() * @model default="Per_Host" * @generated */ CacheScopeType getScope(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getScope <em>Scope</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Scope</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheScopeType * @see #getScope() * @generated */ void setScope(CacheScopeType value); /** * Returns the value of the '<em><b>Implementation Type</b></em>' attribute. * The default value is <code>"memory"</code>. 
* The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheImplementationType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Implementation Type</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Implementation Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheImplementationType * @see #setImplementationType(CacheImplementationType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_ImplementationType() * @model default="memory" * @generated */ CacheImplementationType getImplementationType(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getImplementationType <em>Implementation Type</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Implementation Type</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheImplementationType * @see #getImplementationType() * @generated */ void setImplementationType(CacheImplementationType value); /** * Returns the value of the '<em><b>Cache Mediator Implementation</b></em>' attribute. * The default value is <code>"Default"</code>. * The literals are from the enumeration {@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorType}. * <!-- begin-user-doc --> * <p> * If the meaning of the '<em>Cache Mediator Implementation</em>' attribute isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @return the value of the '<em>Cache Mediator Implementation</em>' attribute. 
* @see org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorType * @see #setCacheMediatorImplementation(CacheMediatorType) * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getCacheMediator_CacheMediatorImplementation() * @model default="Default" * @generated */ CacheMediatorType getCacheMediatorImplementation(); /** * Sets the value of the '{@link org.wso2.developerstudio.eclipse.gmf.esb.CacheMediator#getCacheMediatorImplementation <em>Cache Mediator Implementation</em>}' attribute. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the new value of the '<em>Cache Mediator Implementation</em>' attribute. * @see org.wso2.developerstudio.eclipse.gmf.esb.CacheMediatorType * @see #getCacheMediatorImplementation() * @generated */ void setCacheMediatorImplementation(CacheMediatorType value); } // CacheMediator
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/CacheMediator.java
Java
apache-2.0
28,590
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.streams.kstream;

import org.apache.kafka.streams.kstream.internals.TimeWindow;
import org.junit.Test;

import java.util.Map;

import static org.apache.kafka.streams.EqualityCheck.verifyEquality;
import static org.apache.kafka.streams.EqualityCheck.verifyInEquality;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.fail;

/**
 * Unit tests for {@link TimeWindows}: construction invariants (size, advance,
 * retention, grace), window computation for hopping/tumbling windows, and the
 * equals/hashCode contract.
 *
 * NOTE(review): two tests were previously annotated {@code @Deprecated}, which
 * marks the test method itself as deprecated API instead of suppressing the
 * deprecation warning; they now use {@code @SuppressWarnings("deprecation")},
 * matching the convention already used elsewhere in this file. Two typo'd test
 * method names ("Rentition", "MustNoBe") were also corrected.
 */
public class TimeWindowsTest {

    private static final long ANY_SIZE = 123L;

    @Test
    public void shouldSetWindowSize() {
        assertEquals(ANY_SIZE, TimeWindows.of(ANY_SIZE).sizeMs);
    }

    @Test
    public void shouldSetWindowAdvance() {
        final long anyAdvance = 4;
        assertEquals(anyAdvance, TimeWindows.of(ANY_SIZE).advanceBy(anyAdvance).advanceMs);
    }

    @SuppressWarnings("deprecation") // specifically testing deprecated APIs
    @Test
    public void shouldSetWindowRetentionTime() {
        assertEquals(ANY_SIZE, TimeWindows.of(ANY_SIZE).until(ANY_SIZE).maintainMs());
    }

    @SuppressWarnings("deprecation") // specifically testing deprecated APIs
    @Test
    public void shouldUseWindowSizeAsRetentionTimeIfWindowSizeIsLargerThanDefaultRetentionTime() {
        final long windowSize = 2 * TimeWindows.of(1).maintainMs();
        assertEquals(windowSize, TimeWindows.of(windowSize).maintainMs());
    }

    @Test(expected = IllegalArgumentException.class)
    public void windowSizeMustNotBeZero() {
        TimeWindows.of(0);
    }

    @Test(expected = IllegalArgumentException.class)
    public void windowSizeMustNotBeNegative() {
        TimeWindows.of(-1);
    }

    @Test
    public void advanceIntervalMustNotBeZero() {
        final TimeWindows windowSpec = TimeWindows.of(ANY_SIZE);
        try {
            windowSpec.advanceBy(0);
            fail("should not accept zero advance parameter");
        } catch (final IllegalArgumentException e) {
            // expected
        }
    }

    @Test
    public void advanceIntervalMustNotBeNegative() {
        final TimeWindows windowSpec = TimeWindows.of(ANY_SIZE);
        try {
            windowSpec.advanceBy(-1);
            fail("should not accept negative advance parameter");
        } catch (final IllegalArgumentException e) {
            // expected
        }
    }

    // was @Deprecated, which would mark the test itself as deprecated API;
    // @SuppressWarnings matches the convention used by the other tests here
    @SuppressWarnings("deprecation")
    @Test
    public void advanceIntervalMustNotBeLargerThanWindowSize() {
        final TimeWindows windowSpec = TimeWindows.of(ANY_SIZE);
        try {
            windowSpec.advanceBy(ANY_SIZE + 1);
            fail("should not accept advance greater than window size");
        } catch (final IllegalArgumentException e) {
            // expected
        }
    }

    // was @Deprecated (see note above); also fixes the "MustNoBe" typo in the name
    @SuppressWarnings("deprecation")
    @Test
    public void retentionTimeMustNotBeSmallerThanWindowSize() {
        final TimeWindows windowSpec = TimeWindows.of(ANY_SIZE);
        try {
            windowSpec.until(ANY_SIZE - 1);
            fail("should not accept retention time smaller than window size");
        } catch (final IllegalArgumentException e) {
            // expected
        }
    }

    @Test
    public void gracePeriodShouldEnforceBoundaries() {
        TimeWindows.of(3L).grace(0L); // zero grace is the lower boundary and must be accepted
        try {
            TimeWindows.of(3L).grace(-1L);
            fail("should not accept negatives");
        } catch (final IllegalArgumentException e) {
            // expected
        }
    }

    @Test
    public void shouldComputeWindowsForHoppingWindows() {
        final TimeWindows windows = TimeWindows.of(12L).advanceBy(5L);
        final Map<Long, TimeWindow> matched = windows.windowsFor(21L);
        // a timestamp falls into size/advance + 1 overlapping hopping windows
        assertEquals(12L / 5L + 1, matched.size());
        assertEquals(new TimeWindow(10L, 22L), matched.get(10L));
        assertEquals(new TimeWindow(15L, 27L), matched.get(15L));
        assertEquals(new TimeWindow(20L, 32L), matched.get(20L));
    }

    @Test
    public void shouldComputeWindowsForBarelyOverlappingHoppingWindows() {
        final TimeWindows windows = TimeWindows.of(6L).advanceBy(5L);
        final Map<Long, TimeWindow> matched = windows.windowsFor(7L);
        assertEquals(1, matched.size());
        assertEquals(new TimeWindow(5L, 11L), matched.get(5L));
    }

    @Test
    public void shouldComputeWindowsForTumblingWindows() {
        final TimeWindows windows = TimeWindows.of(12L);
        final Map<Long, TimeWindow> matched = windows.windowsFor(21L);
        assertEquals(1, matched.size());
        assertEquals(new TimeWindow(12L, 24L), matched.get(12L));
    }

    @SuppressWarnings("deprecation") // until() is deprecated but part of the equality contract
    @Test
    public void equalsAndHashcodeShouldBeValidForPositiveCases() {
        verifyEquality(TimeWindows.of(3), TimeWindows.of(3));

        verifyEquality(TimeWindows.of(3).advanceBy(1), TimeWindows.of(3).advanceBy(1));

        verifyEquality(TimeWindows.of(3).grace(1), TimeWindows.of(3).grace(1));

        verifyEquality(TimeWindows.of(3).until(4), TimeWindows.of(3).until(4));

        verifyEquality(
            TimeWindows.of(3).advanceBy(1).grace(1).until(4),
            TimeWindows.of(3).advanceBy(1).grace(1).until(4)
        );
    }

    @SuppressWarnings("deprecation") // until() is deprecated but part of the equality contract
    @Test
    public void equalsAndHashcodeShouldBeValidForNegativeCases() {
        verifyInEquality(TimeWindows.of(9), TimeWindows.of(3));

        verifyInEquality(TimeWindows.of(3).advanceBy(2), TimeWindows.of(3).advanceBy(1));

        verifyInEquality(TimeWindows.of(3).grace(2), TimeWindows.of(3).grace(1));

        verifyInEquality(TimeWindows.of(3).until(9), TimeWindows.of(3).until(4));

        verifyInEquality(
            TimeWindows.of(4).advanceBy(2).grace(2).until(4),
            TimeWindows.of(3).advanceBy(2).grace(2).until(4)
        );

        verifyInEquality(
            TimeWindows.of(3).advanceBy(1).grace(2).until(4),
            TimeWindows.of(3).advanceBy(2).grace(2).until(4)
        );

        assertNotEquals(
            TimeWindows.of(3).advanceBy(2).grace(1).until(4),
            TimeWindows.of(3).advanceBy(2).grace(2).until(4)
        );

        assertNotEquals(
            TimeWindows.of(3).advanceBy(2).grace(2).until(9),
            TimeWindows.of(3).advanceBy(2).grace(2).until(4)
        );
    }

}
mihbor/kafka
streams/src/test/java/org/apache/kafka/streams/kstream/TimeWindowsTest.java
Java
apache-2.0
6,907
""" Plots a scatter plot of 2 metrics provided. Data could be given from postgres or a csv file. """ from matplotlib.colors import LogNorm from mpl_toolkits.mplot3d import Axes3D import sys import numpy as np import argparse import matplotlib import matplotlib.pyplot as plt import pandas as pd from common import add_db_args from common import add_plot_limit_args from common import set_db_connection from common import set_plot_limits def parse_args(*argument_list): parser = argparse.ArgumentParser() source_group = parser.add_mutually_exclusive_group(required=True) source_group.add_argument('--csv') source_group.add_argument('--table') source_group.add_argument('--query') plot_type_group = parser.add_mutually_exclusive_group(required=True) plot_type_group.add_argument('--scatter', nargs=2) plot_type_group.add_argument('--histogram') plot_type_group.add_argument('--hist2d', nargs=2) plot_type_group.add_argument('--scatter3d', nargs=3) parser.add_argument('--histogram-bins', type=int, default=100) parser.add_argument('--filter-num-rtus', type=int) parser.add_argument('--filter-controller', type=int) parser.add_argument('--labels', help='Labels for labeled data (different colors on the ' 'plot)') parser.add_argument('--miscellaneous-cutoff', type=float, default=0.001, help='Part of the data, that should a label have in ' 'order to be show in the plot') parser.add_argument('--do-not-scale-down', action='store_false', dest='scale_down') parser.add_argument('--scale-down', action='store_true') parser.add_argument('--savefig') add_plot_limit_args(parser) add_db_args(parser) args = parser.parse_args(*argument_list) if args.csv is None: set_db_connection(args) return args def plot_scatter3d(data, args): data = data[data[args.scatter3d[0]].notnull()][data[args.scatter3d[1]].notnull()][data[args.scatter3d[2]].notnull()] data = data[:100000] print len(data) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(data[args.scatter3d[0]], data[args.scatter3d[1]], 
data[args.scatter3d[2]]) def _plot_hist2d(data, args): data = data[data[args.hist2d[0]].notnull()][data[args.hist2d[1]].notnull()] if data.shape[0] < 1000: sys.exit(1) df = data.replace([np.inf, -np.inf], np.nan).dropna(subset=args.hist2d) plt.hist2d(df[args.hist2d[0]].astype(float), df[args.hist2d[1]].astype(float), bins=args.histogram_bins, norm=LogNorm()) plt.colorbar() set_plot_limits(plt, args) plt.xlabel(args.hist2d[0]) plt.ylabel(args.hist2d[1]) set_plot_limits(plt, args) plt.title("N = {}".format(data.shape[0])) def plot_distribution(args): if args.csv is not None: data = pd.read_csv(args.csv) print ' '.join(list(data.columns.values)) if args.filter_num_rtus: print 'before filtering size =', data.shape[0] data = data[data['num_rtus'] == args.filter_num_rtus] print 'after filtering size =', data.shape[0] if args.filter_controller: print 'before filtering size =', data.shape[0] data = data[data['controller_id'] == args.filter_controller] print 'after filtering size =', data.shape[0] if 'controller_id' in data: print 'total controller_ids included =', len(set(data['controller_id'])) if 'num_rtus' in data: print 'distinct num_rtus =', len(set(data['num_rtus'])), set(data['num_rtus']) else: cursor = args.db_connection.cursor() cursor.execute("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';") # noqa if args.query: with open(args.query, 'r') as infile: sql = ''.join(list(infile)) else: sql = """ SELECT {select} FROM {table}; """.format(select='*', table=args.table) print sql cursor.execute(sql) colnames = [desc[0] for desc in cursor.description] data = pd.DataFrame(cursor.fetchall(), columns=colnames) # Set args.data, so we can pass only args to functions args.data = data data_size = data.shape[0] if args.scatter is not None: if args.labels: interesting_data = data[[args.scatter[0], args.scatter[1], args.labels]] different_labels = set(data[args.labels]) for label, color in zip(different_labels, matplotlib.colors.cnames.keys()): df = 
interesting_data.query('{column} == "{label}"'.format( column=args.labels, label=label)) plt.scatter(df[args.scatter[0]], df[args.scatter[1]], c=color, label=label) else: plt.scatter(data[args.scatter[0]], data[args.scatter[1]], c=color) plt.xlabel(args.scatter[0]) plt.ylabel(args.scatter[1]) elif args.histogram is not None: if args.labels: interesting_data = data[[args.histogram, args.labels]] different_labels = set(data[args.labels]) data_to_plot, colors_to_use, labels_to_show = [], [], [] miscellaneous_labels = set() misc_frame, misc_color = pd.DataFrame(), None for label, color in zip(different_labels, matplotlib.colors.cnames.keys()): df = interesting_data.query('{column} == "{label}"'.format( column=args.labels, label=label)) if df.shape[0] < args.miscellaneous_cutoff * data_size: miscellaneous_labels.add(label) misc_frame = pd.concat([misc_frame, df[args.histogram]]) misc_color = color continue labels_to_show.append('{label} ({count})'.format(label=label, count=df.shape[0])) data_to_plot.append(df[args.histogram]) colors_to_use.append(color) if misc_color is not None: labels_to_show.append('miscellaneous ({count})'.format( count=misc_frame.shape[0])) data_to_plot.append(misc_frame) # colors_to_use.append(misc_color) colors_to_use.append('cyan') plt.hist(data_to_plot, args.histogram_bins, histtype='bar', color=colors_to_use, label=labels_to_show) else: df = data.replace([np.inf, -np.inf], np.nan).dropna(subset=[args.histogram]) plt.hist(df[args.histogram].astype(float), bins=args.histogram_bins, label=args.histogram) plt.yscale('log') plt.xlabel(args.histogram) if args.scale_down: plt.ylim(ymax=int(data_size * args.miscellaneous_cutoff)) elif args.hist2d is not None: _plot_hist2d(data, args) elif args.scatter3d is not None: plot_scatter3d(data, args) plt.legend() if not args.scatter3d and not args.histogram: set_plot_limits(plt, args) if args.savefig is not None: plt.savefig(args.savefig, dpi=320) plt.clf() else: plt.show() if __name__ == '__main__': args = 
parse_args() plot_distribution(args)
kexinrong/macrobase
tools/py_analysis/plot_distribution.py
Python
apache-2.0
7,071
/* 
 * Copyright 2012-2016 bambooCORE, greenstep of copyright Chen Xin Nien
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *   http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * 
 * -----------------------------------------------------------------------
 * 
 * author: 	Chen Xin Nien
 * contact: chen.xin.nien@gmail.com
 * 
 */
package com.netsteadfast.greenstep.bsc.dao.impl;

import java.util.List;

import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Repository;

import com.netsteadfast.greenstep.base.dao.BaseDAO;
import com.netsteadfast.greenstep.bsc.dao.IDegreeFeedbackProjectDAO;
import com.netsteadfast.greenstep.po.hbm.BbDegreeFeedbackProject;

/**
 * Hibernate-backed DAO for {@link BbDegreeFeedbackProject} lookups.
 */
@Repository("bsc.dao.DegreeFeedbackProjectDAO")
@Scope("prototype")
public class DegreeFeedbackProjectDAOImpl extends BaseDAO<BbDegreeFeedbackProject, String>
		implements IDegreeFeedbackProjectDAO<BbDegreeFeedbackProject, String> {

	public DegreeFeedbackProjectDAOImpl() {
		super();
	}

	/**
	 * Finds projects with the given publish flag that the given rater has been
	 * assigned to, ordered by year (descending) then name (ascending).
	 * At most 100 rows are returned.
	 *
	 * @param publishFlag publish-flag value the projects must carry
	 * @param raterId     identifier of the rater whose assignments are matched
	 * @return matching projects, newest year first
	 */
	@SuppressWarnings("unchecked")
	@Override
	public List<BbDegreeFeedbackProject> findByPublishFlag(String publishFlag, String raterId) throws Exception {
		// Restrict to projects that appear in the rater's assignment table.
		final String hql = "FROM BbDegreeFeedbackProject m WHERE m.publishFlag = :publishFlag AND m.oid IN "
				+ "( SELECT DISTINCT b.projectOid FROM BbDegreeFeedbackAssign b WHERE b.raterId = :raterId ) "
				+ "ORDER BY m.year DESC, m.name ASC";
		return this.getCurrentSession()
				.createQuery(hql)
				.setString("publishFlag", publishFlag)
				.setString("raterId", raterId)
				.setMaxResults(100)
				.list();
	}

}
quangnguyen9x/bamboobsc_quangnv
gsbsc-standard/src/com/netsteadfast/greenstep/bsc/dao/impl/DegreeFeedbackProjectDAOImpl.java
Java
apache-2.0
1,978
package countword; import backtype.storm.Config; import backtype.storm.LocalCluster; import backtype.storm.topology.TopologyBuilder; import backtype.storm.tuple.Fields; import countword.bolts.WordCounter; import countword.bolts.WordNormalizer; import countword.spouts.SignalsSpout; import countword.spouts.WordReader; public class TopologyMain { public static void main(String[] args) throws InterruptedException { //Topology definition TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("word-reader", new WordReader()); builder.setSpout("signals-spout", new SignalsSpout()); builder.setBolt("word-normalizer", new WordNormalizer()) .shuffleGrouping("word-reader"); builder.setBolt("word-counter", new WordCounter(), 2) .fieldsGrouping("word-normalizer", new Fields("word")) .allGrouping("signals-spout", "signals"); //Configuration Config conf = new Config(); conf.put("wordsFile", args[0]); conf.setDebug(true); //Topology run conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("Count-Word-Toplogy-With-Refresh-Cache", conf, builder.createTopology()); Thread.sleep(5000); cluster.shutdown(); } }
c3p0hz/microscope
microscope-storm/src/test/java/countword/TopologyMain.java
Java
apache-2.0
1,368
/**
 * Copyright (c) 2009 - 2013 AppWork UG(haftungsbeschränkt) <e-mail@appwork.org>
 * 
 * This file is part of org.appwork.utils.swing.dialog.dimensor
 * 
 * This software is licensed under the Artistic License 2.0,
 * see the LICENSE file or http://www.opensource.org/licenses/artistic-license-2.0.php
 * for details
 */
package org.appwork.utils.swing.dialog.dimensor;

import java.awt.Dimension;

import org.appwork.utils.swing.dialog.AbstractDialog;
import org.appwork.utils.swing.dimensor.RememberLastDimensor;

/**
 * A {@link DialogDimensor} that remembers a dialog's size under the given id
 * and restores it the next time the dialog is shown. All storage work is
 * delegated to a {@link RememberLastDimensor} operating on the dialog's
 * underlying window.
 *
 * @author Thomas
 */
public class RememberLastDialogDimension implements DialogDimensor {

    /** Delegate that actually loads and stores the remembered dimension. */
    private final RememberLastDimensor delegate;

    /**
     * @param id storage key under which the dimension is remembered
     */
    public RememberLastDialogDimension(final String id) {
        delegate = new RememberLastDimensor(id);
    }

    /**
     * Returns the dimension the delegate has recorded for the dialog's window.
     */
    @Override
    public Dimension getDimension(final AbstractDialog<?> dialog) {
        return delegate.getDimension(dialog.getDialog());
    }

    /**
     * Notifies the delegate so the dialog's current size is persisted on close.
     */
    @Override
    public void onClose(final AbstractDialog<?> dialog) {
        delegate.onClose(dialog.getDialog());
    }
}
friedlwo/AppWoksUtils
src/org/appwork/utils/swing/dialog/dimensor/RememberLastDialogDimension.java
Java
artistic-2.0
1,600
import { RouterConfig } from '@angular/router'; import { HeroesComponent } from '../components/hero/heroes.component'; import { HeroDetailComponent } from "../components/hero/./hero-detail.component"; import { NewHeroDetailComponent } from "../components/hero/./new-hero-detail.component"; export const HeroesRoutes: RouterConfig = [ {path: 'heroes', component: HeroesComponent}, {path: 'hero/:id', component: HeroDetailComponent}, {path: 'newHero', component: NewHeroDetailComponent} ];
automotify/de.bht.ng2.DieInventarListe
app/routes/heroes.routes.ts
TypeScript
artistic-2.0
563
/**
 * Returns a promise that resolves after `duration` milliseconds.
 *
 * @param {number} [duration=0] delay in milliseconds
 * @returns {Promise<void>} resolves (with no value) once the timer fires
 */
export function timeout(duration = 0) {
  // A timer cannot fail, so the executor's `reject` callback was unused.
  return new Promise(resolve => setTimeout(resolve, duration));
}
priyatam/es6fp
lib/promises.js
JavaScript
artistic-2.0
134
--- title: "_ChemmineR_: Cheminformatics Toolkit for R" author: "Authors: Kevin Horan, Yiqun Cao, Tyler Backman, [Thomas Girke](mailto:thomas.girke@ucr.edu)" date: "Last update: 06 March, 2016" package: "ChemmineR 2.23.1" output: BiocStyle::html_document: toc: true toc_depth: 3 fig_caption: yes fontsize: 14pt bibliography: references.bib --- <!-- %% \VignetteEngine{knitr::rmarkdown} %% \VignetteEncoding{UTF-8} %\VignetteIndexEntry{ChemmineR} %% \VignetteDepends{ChemmineOB, fmcsR, ChemmineDrugs} %% \VignetteKeywords{cheminformatics, structural searching, clustering} %% \VignettePackage{ChemmineR} --> <!--- - Compile from command-line echo "rmarkdown::render('ChemmineR.Rmd'), clean=F" | R -slave; R CMD Stangle ChemmineR.Rmd; Rscript ../md2jekyll.R ChemmineR.knit.md 7 - Commit to github git commit -am "some edits"; git push -u origin master - To customize font size and other style features, add this line to output section in preamble: css: style.css --> <script type="text/javascript"> document.addEventListener("DOMContentLoaded", function() { document.querySelector("h1").className = "title"; }); </script> <script type="text/javascript"> document.addEventListener("DOMContentLoaded", function() { var links = document.links; for (var i = 0, linksLength = links.length; i < linksLength; i++) if (links[i].hostname != window.location.hostname) links[i].target = '_blank'; }); </script> Note: the most recent version of this tutorial can be found <a href="https://htmlpreview.github.io/?https://github.com/girke-lab/ChemmineR/blob/master/vignettes/ChemmineR.html">here</a> and a short overview slide show [here](http://faculty.ucr.edu/~tgirke/HTML_Presentations/Manuals/Workshop_Dec_5_8_2014/Rcheminfo/Cheminfo.pdf). Introduction ============ `ChemmineR` is a cheminformatics package for analyzing drug-like small molecule data in R. 
Its latest version contains functions for efficient processing of large
numbers of small molecules, physicochemical/structural property
predictions, structural similarity searching, classification and
clustering of compound libraries with a wide spectrum of algorithms.

![Figure: `ChemmineR` environment with its add-on packages and selected functionalities](overview.png)

In addition, `ChemmineR` offers visualization functions for compound
clustering results and chemical structures. The integration of
chemoinformatic tools with the R programming environment has many
advantages, such as easy access to a wide spectrum of statistical
methods, machine learning algorithms and graphic utilities. The first
version of this package was published in Cao et al. [-@Cao_2008]. Since
then many additional utilities and add-on packages have been added to
the environment (Figure 2) and many more are under development for
future releases [@Backman_2011; @Wang_2013].

<br/>

__Recently Added Features__

-   Improved SMILES support via new `SMIset` object class and SMILES import/export functions
-   Integration of a subset of OpenBabel functionalities via new `ChemmineOB` add-on package [@Cao_2008]
-   Streaming functionality for processing millions of molecules on a laptop
-   Mismatch tolerant maximum common substructure (MCS) search algorithm
-   Fast and memory efficient fingerprint search support using atom pair or PubChem fingerprints

<div align="right">[Back to Table of Contents]()</div>

Getting Started
===============

Installation
------------

The R software for running ChemmineR can be downloaded from CRAN
(<http://cran.at.r-project.org/>). The ChemmineR package can be
installed from R using the `biocLite` install command.


```r
source("http://bioconductor.org/biocLite.R") # Sources the biocLite.R installation script. 
biocLite("ChemmineR") # Installs the package. 
``` <div align="right">[Back to Table of Contents]()</div> Loading the Package and Documentation ------------------------------------- ```r library("ChemmineR") # Loads the package ``` ```r library(help="ChemmineR") # Lists all functions and classes vignette("ChemmineR") # Opens this PDF manual from R ``` <div align="right">[Back to Table of Contents]()</div> Five Minute Tutorial -------------------- The following code gives an overview of the most important functionalities provided by `ChemmineR`. Copy and paste of the commands into the R console will demonstrate their utilities. Create Instances of `SDFset` class: ```r data(sdfsample) sdfset <- sdfsample sdfset # Returns summary of SDFset ``` ``` ## An instance of "SDFset" with 100 molecules ``` ```r sdfset[1:4] # Subsetting of object ``` ``` ## An instance of "SDFset" with 4 molecules ``` ```r sdfset[[1]] # Returns summarized content of one SDF ``` ``` ## An instance of "SDF" ## ## <<header>> ## Molecule_Name Source ## "650001" " -OEChem-07071010512D" ## Comment Counts_Line ## "" " 61 64 0 0 0 0 0 0 0999 V2000" ## ## <<atomblock>> ## C1 C2 C3 C5 C6 C7 C8 C9 C10 C11 C12 C13 C14 C15 C16 ## O_1 7.0468 0.0839 0 0 0 0 0 0 0 0 0 0 0 0 0 ## O_2 12.2708 1.0492 0 0 0 0 0 0 0 0 0 0 0 0 0 ## ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ## H_60 1.8411 -1.5985 0 0 0 0 0 0 0 0 0 0 0 0 0 ## H_61 2.6597 -1.2843 0 0 0 0 0 0 0 0 0 0 0 0 0 ## ## <<bondblock>> ## C1 C2 C3 C4 C5 C6 C7 ## 1 1 16 2 0 0 0 0 ## 2 2 23 1 0 0 0 0 ## ... ... ... ... ... ... ... ... ## 63 33 60 1 0 0 0 0 ## 64 33 61 1 0 0 0 0 ## ## <<datablock>> (33 data items) ## PUBCHEM_COMPOUND_CID PUBCHEM_COMPOUND_CANONICALIZED PUBCHEM_CACTVS_COMPLEXITY ## "650001" "1" "700" ## PUBCHEM_CACTVS_HBOND_ACCEPTOR ## "7" "..." 
``` ```r view(sdfset[1:4]) # Returns summarized content of many SDFs, not printed here as(sdfset[1:4], "list") # Returns complete content of many SDFs, not printed here ``` An `SDFset` is created during the import of an SD file: ```r sdfset <- read.SDFset("http://faculty.ucr.edu/ tgirke/Documents/R_BioCond/Samples/sdfsample.sdf") ``` Miscellaneous accessor methods for `SDFset` container: ```r header(sdfset[1:4]) # Not printed here ``` ```r header(sdfset[[1]]) ``` ``` ## Molecule_Name Source ## "650001" " -OEChem-07071010512D" ## Comment Counts_Line ## "" " 61 64 0 0 0 0 0 0 0999 V2000" ``` ```r atomblock(sdfset[1:4]) # Not printed here ``` ```r atomblock(sdfset[[1]])[1:4,] ``` ``` ## C1 C2 C3 C5 C6 C7 C8 C9 C10 C11 C12 C13 C14 C15 C16 ## O_1 7.0468 0.0839 0 0 0 0 0 0 0 0 0 0 0 0 0 ## O_2 12.2708 1.0492 0 0 0 0 0 0 0 0 0 0 0 0 0 ## O_3 12.2708 3.1186 0 0 0 0 0 0 0 0 0 0 0 0 0 ## O_4 7.9128 2.5839 0 0 0 0 0 0 0 0 0 0 0 0 0 ``` ```r bondblock(sdfset[1:4]) # Not printed here ``` ```r bondblock(sdfset[[1]])[1:4,] ``` ``` ## C1 C2 C3 C4 C5 C6 C7 ## 1 1 16 2 0 0 0 0 ## 2 2 23 1 0 0 0 0 ## 3 2 27 1 0 0 0 0 ## 4 3 25 1 0 0 0 0 ``` ```r datablock(sdfset[1:4]) # Not printed here ``` ```r datablock(sdfset[[1]])[1:4] ``` ``` ## PUBCHEM_COMPOUND_CID PUBCHEM_COMPOUND_CANONICALIZED PUBCHEM_CACTVS_COMPLEXITY ## "650001" "1" "700" ## PUBCHEM_CACTVS_HBOND_ACCEPTOR ## "7" ``` Assigning compound IDs and keeping them unique: ```r cid(sdfset)[1:4] # Returns IDs from SDFset object ``` ``` ## [1] "CMP1" "CMP2" "CMP3" "CMP4" ``` ```r sdfid(sdfset)[1:4] # Returns IDs from SD file header block ``` ``` ## [1] "650001" "650002" "650003" "650004" ``` ```r unique_ids <- makeUnique(sdfid(sdfset)) ``` ``` ## [1] "No duplicates detected!" 
``` ```r cid(sdfset) <- unique_ids ``` Converting the data blocks in an `SDFset` to a matrix: ```r blockmatrix <- datablock2ma(datablocklist=datablock(sdfset)) # Converts data block to matrix numchar <- splitNumChar(blockmatrix=blockmatrix) # Splits to numeric and character matrix numchar[[1]][1:2,1:2] # Slice of numeric matrix ``` ``` ## PUBCHEM_COMPOUND_CID PUBCHEM_COMPOUND_CANONICALIZED ## 650001 650001 1 ## 650002 650002 1 ``` ```r numchar[[2]][1:2,10:11] # Slice of character matrix ``` ``` ## PUBCHEM_MOLECULAR_FORMULA PUBCHEM_OPENEYE_CAN_SMILES ## 650001 "C23H28N4O6" "CC1=CC(=NO1)NC(=O)CCC(=O)N(CC(=O)NC2CCCC2)C3=CC4=C(C=C3)OCCO4" ## 650002 "C18H23N5O3" "CN1C2=C(C(=O)NC1=O)N(C(=N2)NCCCO)CCCC3=CC=CC=C3" ``` Compute atom frequency matrix, molecular weight and formula: ```r propma <- data.frame(MF=MF(sdfset), MW=MW(sdfset), atomcountMA(sdfset)) propma[1:4, ] ``` ``` ## MF MW C H N O S F Cl ## 650001 C23H28N4O6 456.4916 23 28 4 6 0 0 0 ## 650002 C18H23N5O3 357.4069 18 23 5 3 0 0 0 ## 650003 C18H18N4O3S 370.4255 18 18 4 3 1 0 0 ## 650004 C21H27N5O5S 461.5346 21 27 5 5 1 0 0 ``` Assign matrix data to data block: ```r datablock(sdfset) <- propma datablock(sdfset[1]) ``` ``` ## $`650001` ## MF MW C H N O S ## "C23H28N4O6" "456.4916" "23" "28" "4" "6" "0" ## F Cl ## "0" "0" ``` String searching in `SDFset`: ```r grepSDFset("650001", sdfset, field="datablock", mode="subset") # Returns summary view of matches. Not printed here. 
``` ```r grepSDFset("650001", sdfset, field="datablock", mode="index") ``` ``` ## 1 1 1 1 1 1 1 1 1 ## 1 2 3 4 5 6 7 8 9 ``` Export SDFset to SD file: ```r write.SDF(sdfset[1:4], file="sub.sdf", sig=TRUE) ``` Plot molecule structure of one or many SDFs: ```r plot(sdfset[1:4], print=FALSE) # Plots structures to R graphics device ``` ![](ChemmineR_files/figure-html/plotstruct-1.png)\ ```r sdf.visualize(sdfset[1:4]) # Compound viewing in web browser ``` ![Figure: Visualization webpage created by calling`sdf.visualize`.](visualizescreenshot-small.png) Structure similarity searching and clustering: ```r apset <- sdf2ap(sdfset) # Generate atom pair descriptor database for searching ``` ```r data(apset) # Load sample apset data provided by library. cmp.search(apset, apset[1], type=3, cutoff = 0.3, quiet=TRUE) # Search apset database with single compound. ``` ``` ## index cid scores ## 1 1 650001 1.0000000 ## 2 96 650102 0.3516643 ## 3 67 650072 0.3117569 ## 4 88 650094 0.3094629 ## 5 15 650015 0.3010753 ``` ```r cmp.cluster(db=apset, cutoff = c(0.65, 0.5), quiet=TRUE)[1:4,] # Binning clustering using variable similarity cutoffs. ``` ``` ## ## sorting result... ``` ``` ## ids CLSZ_0.65 CLID_0.65 CLSZ_0.5 CLID_0.5 ## 48 650049 2 48 2 48 ## 49 650050 2 48 2 48 ## 54 650059 2 54 2 54 ## 55 650060 2 54 2 54 ``` <div align="right">[Back to Table of Contents]()</div> OpenBabel Functions =================== `ChemmineR` integrates now a subset of cheminformatics functionalities implemented in the OpenBabel C++ library [@greycite13432; @Cao_2008]. These utilities can be accessed by installing the `ChemmineOB` package and the OpenBabel software itself. `ChemmineR` will automatically detect the availability of `ChemmineOB` and make use of the additional utilities. The following lists the functions and methods that make use of OpenBabel. References are included to locate the sections in the manual where the utility and usage of these functions is described. 
*Structure format interconversions* (see Section [Format Inter-Conversions](#format-interconversions)) - `smiles2sdf`: converts from SMILES to SDF object - `sdf2smiles`: converts from SDF to SMILES object - `convertFormat`: converts strings between two formats - `convertFormatFile`: converts files between two formats. This function can be used to enable ChemmineR to read in any format supported by Open Babel. For example, if you had an SML file you could do: ```r convertFormatFile("SML","SDF","mycompound.sml","mycompound.sdf") sdfset=read.SDFset("mycompound.sdf") ``` `propOB`: generates several compound properties. See the man page for a current list of properties computed. ```r propOB(sdfset[1]) ``` ``` ## cansmi ## 650001 O=C(Nc1noc(c1)C)CCC(=O)N(c1ccc2c(c1)OCCO2)CC(=O)NC1CCCC1 ## cansmiNS formula title ## 650001 O=C(Nc1noc(c1)C)CCC(=O)N(c1ccc2c(c1)OCCO2)CC(=O)NC1CCCC1 C23H28N4O6 650001 ## InChI ## 650001 InChI=1S/C23H28N4O6/c1-15-12-20(26-33-15)25-21(28)8-9-23(30)27(14-22(29)24-16-4-2-3-5-16)17-6-7-18-19(13-17)32-11-10-31-18/h6-7,12-13,16H,2-5,8-11,14H2,1H3,(H,24,29)(H,25,26,28) ## HBA1 HBA2 HBD logP MR MW nF TPSA ## 650001 37 10 2 3.0288 119.9234 456.4916 0 123 ``` `fingerprintOB`: generates fingerprints for compounds. The fingerprint name can be anything supported by OpenBabel. See the man page for a current list. ```r fingerprintOB(sdfset,"FP2") ``` ``` ## An instance of a 1024 bit "FPset" of type "FP2" with 100 molecules ``` `smartsSearchOB`: find matches of SMARTS patterns in compounds ```r #count rotable bonds smartsSearchOB(sdfset[1:5],"[!$(*#*)&!D1]-!@[!$(*#*)&!D1]",uniqueMatches=FALSE) ``` ``` ## 650001 650002 650003 650004 650005 ## 24 20 14 30 10 ``` `exactMassOB`: Compute the monoisotopic (exact) mass of a set of compounds ```r exactMassOB(sdfset[1:5]) ``` ``` ## 650001 650002 650003 650004 650005 ## 456.2009 357.1801 370.1100 461.1733 318.1943 ``` `regenerateCoords`: Re-compute the 2D coordinates of a compound using Open Babel. 
This can sometimes improve the quality of the compounds plot. See also the `regenCoords` option of the plot function. ```r sdfset2 = regenerateCoords(sdfset[1:5]) plot(sdfset[1], regenCoords=TRUE,print=FALSE) ``` ![](ChemmineR_files/figure-html/unnamed-chunk-30-1.png)\ `generate3DCoords`: Generate 3D coordinates for compounds with only 2D coordinates. ```r sdf3D = generate3DCoords(sdfset[1]) ``` `canonicalize`: Compute a canonicalized atom numbering. This allows compounds with the same molecular structure but different atom numberings to be compared properly. ```r canonicalSdf= canonicalize(sdfset[1]) ``` `canonicalNumbering`: Return a mapping from the original atom numbering to the canonical atom number. ```r mapping = canonicalNumbering(sdfset[1]) ``` <div align="right">[Back to Table of Contents]()</div> Overview of Classes and Functions ================================= The following list gives an overview of the most important S4 classes, methods and functions available in the ChemmineR package. The help documents of the package provide much more detailed information on each utility. The standard R help documents for these utilities can be accessed with this syntax: `?function\_name` (*e.g.* `?cid`) and `?class\_name-class` (*e.g*. `?"SDFset-class"`). 
<div align="right">[Back to Table of Contents]()</div> Molecular Structure Data ------------------------ Classes - `SDFstr`: intermediate string class to facilitate SD file import; not important for end user - `SDF`: container for single molecule imported from an SD file - `SDFset`: container for many SDF objects; most important structure container for end user - `SMI`: container for a single SMILES string - `SMIset`: container for many SMILES strings Functions/Methods (mainly for `SDFset` container, `SMIset` should be coerced with `smiles2sd` to `SDFset`) - Accessor methods for `SDF/SDFset` - Object slots: `cid`, `header`, `atomblock`, `bondblock`, `datablock` (`sdfid`, `datablocktag`) - Summary of `SDFset`: `view` - Matrix conversion of data block: `datablock2ma`, `splitNumChar` - String search in SDFset: `grepSDFset` - Coerce one class to another - Standard syntax `as(..., "...")` works in most cases. For details see R help with `?"SDFset-class"`. - Utilities - Atom frequencies: `atomcountMA`, `atomcount` - Molecular weight: `MW` - Molecular formula: `MF` - ... - Compound structure depictions - R graphics device: `plot`, `plotStruc` - Online: `cmp.visualize` <div align="right">[Back to Table of Contents]()</div> Structure Descriptor Data ------------------------- Classes - `AP`: container for atom pair descriptors of a single molecule - `APset`: container for many AP objects; most important structure descriptor container for end user - `FP`: container for fingerprint of a single molecule - `FPset`: container for fingerprints of many molecules, most important structure descriptor container for end user Functions/Methods - Create `AP/APset` instances - From `SDFset`: `sdf2ap` - From SD file: `cmp.parse` - Summary of `AP/APset`: `view`, `db.explain` - Accessor methods for AP/APset - Object slots: `ap`, `cid` - Coerce one class to another - Standard syntax `as(..., "...")` works in most cases. For details see R help with `?"APset-class"`. 
- Structure Similarity comparisons and Searching - Compute pairwise similarities : `cmp.similarity`, `fpSim` - Search APset database: `cmp.search`, `fpSim` - AP-based Structure Similarity Clustering - Single-linkage binning clustering: `cmp.cluster` - Visualize clustering result with MDS: `cluster.visualize` - Size distribution of clusters: `cluster.sizestat` - Folding - Fold a descriptor with `fold` - Query the number of times a descriptor has been folded: `foldCount` - Query the number of bits in a descriptor: `numBits` <div align="right">[Back to Table of Contents]()</div> Import of Compounds =================== SDF Import ---------- The following gives an overview of the most important import/export functionalities for small molecules provided by `ChemmineR`. The given example creates an instance of the `SDFset` class using as sample data set the first 100 compounds from this PubChem SD file (SDF): Compound\_00650001\_00675000.sdf.gz (<ftp://ftp.ncbi.nih.gov/pubchem/Compound/CURRENT-Full/SDF/>). SDFs can be imported with the `read.SDFset` function: ```r sdfset <- read.SDFset("http://faculty.ucr.edu/ tgirke/Documents/R_BioCond/Samples/sdfsample.sdf") ``` ```r data(sdfsample) # Loads the same SDFset provided by the library sdfset <- sdfsample valid <- validSDF(sdfset) # Identifies invalid SDFs in SDFset objects sdfset <- sdfset[valid] # Removes invalid SDFs, if there are any ``` Import SD file into `SDFstr` container: ```r sdfstr <- read.SDFstr("http://faculty.ucr.edu/ tgirke/Documents/R_BioCond/Samples/sdfsample.sdf") ``` Create `SDFset` from `SDFstr` class: ```r sdfstr <- as(sdfset, "SDFstr") sdfstr ``` ``` ## An instance of "SDFstr" with 100 molecules ``` ```r as(sdfstr, "SDFset") ``` ``` ## An instance of "SDFset" with 100 molecules ``` <div align="right">[Back to Table of Contents]()</div> SMILES Import ------------- The `read.SMIset` function imports one or many molecules from a SMILES file and stores them in a `SMIset` container. 
The input file is expected to contain one SMILES string per row with tab-separated compound identifiers at the end of each line. The compound identifiers are optional. Create sample SMILES file and then import it: ```r data(smisample); smiset <- smisample write.SMI(smiset[1:4], file="sub.smi") smiset <- read.SMIset("sub.smi") ``` Inspect content of `SMIset`: ```r data(smisample) # Loads the same SMIset provided by the library smiset <- smisample smiset ``` ``` ## An instance of "SMIset" with 100 molecules ``` ```r view(smiset[1:2]) ``` ``` ## $`650001` ## An instance of "SMI" ## [1] "O=C(NC1CCCC1)CN(c1cc2OCCOc2cc1)C(=O)CCC(=O)Nc1noc(c1)C" ## ## $`650002` ## An instance of "SMI" ## [1] "O=c1[nH]c(=O)n(c2nc(n(CCCc3ccccc3)c12)NCCCO)C" ``` Accessor functions: ```r cid(smiset[1:4]) ``` ``` ## [1] "650001" "650002" "650003" "650004" ``` ```r smi <- as.character(smiset[1:2]) ``` Create `SMIset` from named character vector: ```r as(smi, "SMIset") ``` ``` ## An instance of "SMIset" with 2 molecules ``` <div align="right">[Back to Table of Contents]()</div> Export of Compounds =================== SDF Export ---------- Write objects of classes `SDFset/SDFstr/SDF` to SD file: ```r write.SDF(sdfset[1:4], file="sub.sdf") ``` Writing customized `SDFset` to file containing `ChemmineR` signature, IDs from `SDFset` and no data block: ```r write.SDF(sdfset[1:4], file="sub.sdf", sig=TRUE, cid=TRUE, db=NULL) ``` Example for injecting a custom matrix/data frame into the data block of an `SDFset` and then writing it to an SD file: ```r props <- data.frame(MF=MF(sdfset), MW=MW(sdfset), atomcountMA(sdfset)) datablock(sdfset) <- props write.SDF(sdfset[1:4], file="sub.sdf", sig=TRUE, cid=TRUE) ``` Indirect export via `SDFstr` object: ```r sdf2str(sdf=sdfset[[1]], sig=TRUE, cid=TRUE) # Uses default components sdf2str(sdf=sdfset[[1]], head=letters[1:4], db=NULL) # Uses custom components for header and data block ``` Write `SDF`, `SDFset` or `SDFstr` classes to file: ```r write.SDF(sdfset[1:4], 
file="sub.sdf", sig=TRUE, cid=TRUE, db=NULL) write.SDF(sdfstr[1:4], file="sub.sdf") cat(unlist(as(sdfstr[1:4], "list")), file="sub.sdf", sep="") ``` <div align="right">[Back to Table of Contents]()</div> SMILES Export ------------- Write objects of class `SMIset` to SMILES file with and without compound identifiers: ```r data(smisample); smiset <- smisample # Sample data set write.SMI(smiset[1:4], file="sub.smi", cid=TRUE) write.SMI(smiset[1:4], file="sub.smi", cid=FALSE) ``` <div align="right">[Back to Table of Contents]()</div> Format Interconversions ======================= The `sdf2smiles` and `smiles2sdf` functions provide format interconversion between SMILES strings (Simplified Molecular Input Line Entry Specification) and `SDFset` containers. Convert an `SDFset` container to a SMILES `character` string: ```r data(sdfsample); sdfset <- sdfsample[1] smiles <- sdf2smiles(sdfset) smiles ``` Convert a SMILES `character` string to an `SDFset` container: ```r sdf <- smiles2sdf("CC(=O)OC1=CC=CC=C1C(=O)O") view(sdf) ``` When the `ChemineOB` package is installed these conversions are performed with the OpenBabel Open Source Chemistry Toolbox. Otherwise the functions will fall back to using the ChemMine Tools web service for this operation. The latter will require internet connectivity and is limited to only the first compound given. `ChemmineOB` provides access to the compound format conversion functions of OpenBabel. Currently, over 160 formats are supported by OpenBabel. The functions `convertFormat` and `convertFormatFile` can be used to convert files or strings between any two formats supported by OpenBabel. For example, to convert a SMILES string to an SDF string, one can use the `convertFormat` function. ```r sdfStr <- convertFormat("SMI","SDF","CC(=O)OC1=CC=CC=C1C(=O)O_name") ``` This will return the given compound as an SDF formatted string. 2D coordinates are also computed and included in the resulting SDF string. 
To convert a file with compounds encoded in one format to another format, the `convertFormatFile` function can be used instead. ```r convertFormatFile("SMI","SDF","test.smiles","test.sdf") ``` To see the whole list of file formats supported by OpenBabel, one can run from the command-line "obabel -L formats". <div align="right">[Back to Table of Contents]()</div> Splitting SD Files ================== The following `write.SDFsplit` function allows to split SD Files into any number of smaller SD Files. This can become important when working with very big SD Files. Users should note that this function can output many files, thus one should run it in a dedicated directory! Create sample SD File with 100 molecules: ```r write.SDF(sdfset, "test.sdf") ``` Read in sample SD File. Note: reading file into SDFstr is much faster than into SDFset: ```r sdfstr <- read.SDFstr("test.sdf") ``` Run export on `SDFstr` object: ```r write.SDFsplit(x=sdfstr, filetag="myfile", nmol=10) # 'nmol' defines the number of molecules to write to each file ``` Run export on `SDFset` object: ```r write.SDFsplit(x=sdfset, filetag="myfile", nmol=10) ``` <div align="right">[Back to Table of Contents]()</div> Streaming Through Large SD Files ================================ The `sdfStream` function allows to stream through SD Files with millions of molecules without consuming much memory. During this process any set of descriptors, supported by `ChemmineR`, can be computed (*e.g.* atom pairs, molecular properties, etc.), as long as they can be returned in tabular format. In addition to descriptor values, the function returns a line index that gives the start and end positions of each molecule in the source SD File. This line index can be used by the downstream `read.SDFindex` function to retrieve specific molecules of interest from the source SD File without reading the entire file into R. The following outlines the typical workflow of this streaming functionality in `ChemmineR`. 
Create sample SD File with 100 molecules: ```r write.SDF(sdfset, "test.sdf") ``` Define descriptor set in a simple function: ```r desc <- function(sdfset) cbind(SDFID=sdfid(sdfset), # datablock2ma(datablocklist=datablock(sdfset)), MW=MW(sdfset), groups(sdfset), APFP=desc2fp(x=sdf2ap(sdfset), descnames=1024, type="character"), AP=sdf2ap(sdfset, type="character"), rings(sdfset, type="count", upper=6, arom=TRUE) ) ``` Run `sdfStream` with `desc` function and write results to a file called `matrix.xls`: ```r sdfStream(input="test.sdf", output="matrix.xls", fct=desc, Nlines=1000) # 'Nlines': number of lines to read from input SD File at a time ``` One can also start reading from a specific line number in the SD file. The following example starts at line number 950. This is useful for restarting and debugging the process. With `append=TRUE` the result can be appended to an existing file. ```r sdfStream(input="test.sdf", output="matrix2.xls", append=FALSE, fct=desc, Nlines=1000, startline=950) ``` Select molecules meeting certain property criteria from SD File using line index generated by previous `sdfStream` step: ```r indexDF <- read.delim("matrix.xls", row.names=1)[,1:4] indexDFsub <- indexDF[indexDF$MW < 400, ] # Selects molecules with MW < 400 sdfset <- read.SDFindex(file="test.sdf", index=indexDFsub, type="SDFset") # Collects results in 'SDFset' container ``` Write results directly to SD file without storing larger numbers of molecules in memory: ```r read.SDFindex(file="test.sdf", index=indexDFsub, type="file", outfile="sub.sdf") ``` Read AP/APFP strings from file into `APset` or `FP` object: ```r apset <- read.AP(x="matrix.xls", type="ap", colid="AP") apfp <- read.AP(x="matrix.xls", type="fp", colid="APFP") ``` Alternatively, one can provide the AP/APFP strings in a named character vector: ```r apset <- read.AP(x=sdf2ap(sdfset[1:20], type="character"), type="ap") fpchar <- desc2fp(sdf2ap(sdfset[1:20]), descnames=1024, type="character") fpset <- as(fpchar, 
"FPset") ``` <div align="right">[Back to Table of Contents]()</div> Storing Compounds in an SQL Database ==================================== As an alternative to sdfStream, there is now also an option to store data in an SQL database, which then allows for fast queries and compound retrieval. The default database is SQLite, but any other SQL database should work with some minor modifications to the table definitions, which are stored in schema/compounds.SQLite under the ChemmineR package directory. Compounds are stored in their entirety in the databases so there is no need to keep any original data files. Users can define their own set of compound features to compute and store when loading new compounds. Each of these features will be stored in its own, indexed table. Searches can then be performed using these features to quickly find specific compounds. Compounds can always be retrieved quickly because of the database index, no need to scan a large compound file. In addition to user defined features, descriptors can also be computed and stored for each compound. A new database can be created with the `initDb` function. This takes either an existing database connection, or a filename. If a filename is given then an SQLite database connection is created. It then ensures that the required tables exist and creates them if not. The connection object is then returned. This function can be called safely on the same connection or database many times and will not delete any data. <div align="right">[Back to Table of Contents]()</div> Loading Data ------------ The functions `loadSdf` and `loadSmiles` can be used to load compound data from either a file (both) or an `SDFset` (`loadSdf` only). The `fct` parameter should be a function to extract features from the data. It will be handed an `SDFset` generated from the data being loaded. This may be done in batches, so there is no guarantee that the given SDFSset will contain the whole dataset. 
This function should return a data frame with a column for each feature and a row for each compound given. The order of the final data frame should be the same as that of the `SDFset`. The column names will become the feature names. Each of these features will become a new, indexed, table in the database which can be used later to search for compounds. The `descriptors` parameter can be a function which computes descriptors. This function will also be given an `SDFset` object, which may be done in batches. It should return a data frame with the following two columns: "descriptor" and "descriptor\_type". The "descriptor" column should contain a string representation of the descriptor, and "descriptor\_type" is the type of the descriptor. Our convention for atom pair is "ap" and "fp" for finger print. The order should also be maintained. When the data has been loaded, `loadSdf` will return the compound id numbers of each compound loaded. These compound id numbers are computed by the database and are not extracted from the compound data itself. They can be used to quickly retrieve compounds later. New features can also be added using this function. However, all compounds must have all features so if new features are added to a new set of compounds, all existing features must be computable by the `fct` function given. If new features are detected, all existing compounds will be run through `fct` in order to compute the new features for them as well. For example, if dataset X is loaded with features F1 and F2, and then at a later time we load dataset Y with new feature F3, the `fct` function used to load dataset Y must compute and return features F1, F2, and F3. `loadSdf` will call `fct` with both datasets X and Y so that all features are available for all compounds. If any features are missing an error will be raised. If just new features are being added, but no new compounds, use the `addNewFeatures` function. 
In this example, we create a new database called "test.db" and load it with data from an `SDFset`. We also define `fct` to compute the molecular weight, "MW", and the number of rings and aromatic rings. The rings function actually returns a data frame with columns "RINGS" and "AROMATIC", which will be merged into the data frame being created which will also contain the "MW" column. These will be the names used for these features and must be used when searching with them. Finally, the new compound ids are returned and stored in the "ids" variable. ```r data(sdfsample) #create and initialize a new SQLite database conn <- initDb("test.db") ``` ``` ## Loading required package: RSQLite ``` ``` ## Loading required package: DBI ``` ```r # load data and compute 3 features: molecular weight, with the MW function, # and counts for RINGS and AROMATIC, as computed by rings, which # returns a data frame itself. ids<-loadSdf(conn,sdfsample, function(sdfset) data.frame(rings(sdfset,type="count",upper=6, arom=TRUE),propOB(sdfset)) ) #list features in the database: print(listFeatures(conn)) ``` ``` ## [1] "aromatic" "cansmi" "cansmins" "formula" "hba1" "hba2" "hbd" "inchi" ## [9] "logp" "mr" "mw" "nf" "rings" "title" "tpsa" ``` <div align="right">[Back to Table of Contents]()</div> Updates ------- By default the `loadSdf` / `loadSmiles` functions will detect duplicate compound entries and only insert one of them. This means it is safe to run these functions on the same data set several times and you won't end up with duplicates. This allows the functions to be re-run in the event that a previous run on a dataset does not complete. Duplicate compounds are detected by computing the MD5 checksum on the textual representation of it. It can also update existing compounds with new versions of the same compound. To enable this, set `updateByName` to true. It will then consider two compounds with the same name to be the same, even if the definition is different. 
Then, if the name of a compound exists in the database and it is trying to insert another compound with the same name, it will overwrite the existing compound. It will also drop and re-compute all associated descriptors and features for the new compound (assuming the required functions for descriptor and feature computation are available at the time the update is performed). <div align="right">[Back to Table of Contents]()</div> Duplicate Descriptors --------------------- It is often the case when loading a large set of compounds that several compounds will produce the same descriptor. `ChemmineR` detects this case and only stores one copy of the descriptor for every compound it is for. This feature saves some space and some time for processes that need to be applied to every descriptor. It also highlights a new problem. If you have a descriptor in hand and you want to find a single compound to represent it, which compound should be used if the descriptor was produced from multiple compounds? To address this problem, `ChemmineR` allows you to set priority values for each compound-descriptor mapping. Then, in contexts where a single compound is required, the highest priority compound will be chosen. Highest priority corresponds to the lowest numerical value. So mapping with priority 0 would be used first. To set these priorities there is the function `setPriorities`. It takes a function, `priorityFn`, for computing these priority values. The `setPriorities` function should be run after loading a complete set of data. It will find each group of compounds which share the same descriptor and call the given function, `priorityFn`, with the compound_id numbers of the group. This function should then assign priorities to each compound-descriptor pair, however it wishes. One built-in priority function is `forestSizePriorities`. This simply prefers compounds with fewer disconnected components over compounds with more disconnected components. 
```r setPriorities(conn,forestSizePriorities) ``` <div align="right">[Back to Table of Contents]()</div> Searching --------- Compounds can be searched for using the `findCompounds` function. This function takes a connection object, a vector of feature names used in the tests, and finally, a vector of tests that must all pass for a compound to be included in the result set. Each test should be a boolean expression. For example: `c("MW <= 400","RINGS > 3")` would return all compounds with a molecular weight of 400 or less and more than 3 rings, assuming these features exist in the database. The syntax for each test is `'<feature name> <SQL operator> <value>'`. If you know SQL you can go beyond this basic syntax. These tests will simply be concatenated together with "AND" in-between them and tacked on the end of a WHERE clause of an SQL statement. So any SQL that will work in that context is fine. The function will return a list of compound ids, the actual compounds can be fetched with `getCompounds`. If just the names are needed, the `getCompoundNames` function can be used. Compounds can also be fetched by name using the `findCompoundsByName` function. In this example we search for compounds with molecular weight less than 300. ```r results = findCompounds(conn,"mw",c("mw < 300")) message("found ",length(results)) ``` ``` ## found 23 ``` If more than one test is given, only compounds which satisfy all tests are found. So if we wanted to further restrict our search to compounds with 2 or more aromatic rings we could do: ```r results = findCompounds(conn,c("mw","aromatic"),c("mw < 300","aromatic >= 2")) message("found ",length(results)) ``` ``` ## found 9 ``` Remember that any feature used in some test must be listed in the second argument. String patterns can also be used. 
So if we wanted to match a substring of the molecular formula, say to find compounds with 21 carbon atoms, we could do: ```r results = findCompounds(conn,"formula",c("formula like '%C21%'")) message("found ",length(results)) ``` ``` ## found 12 ``` The "like" operator does a pattern match. There are two wildcard operators that can be used with this operator. The "%" will match any stretch of characters while the "_" will match any single character. So the above expression would match a formula like "C21H28N4O6". Valid comparison operators are: - <, <=, > , >= - =, ==, !=, <>, IS, IS NOT, IN, LIKE The boolean operators "AND" and "OR" can also be used to create more complex expressions within a single test. If you just want to fetch every compound in the database you can use the `getAllCompoundIds` function: ```r allIds = getAllCompoundIds(conn) message("found ",length(allIds)) ``` ``` ## found 100 ``` <div align="right">[Back to Table of Contents]()</div> Using Search Results ----------------------- Once you have a list of compound ids from the `findCompounds` function, you can either fetch the compound names, or the whole set of compounds as an SDFset. 
```r #get the names of the compounds: names = getCompoundNames(conn,results) #if the name order is important set keepOrder=TRUE #It will take a little longer though names = getCompoundNames(conn,results,keepOrder=TRUE) # get the whole set of compounds compounds = getCompounds(conn,results) #in order: compounds = getCompounds(conn,results,keepOrder=TRUE) #write results directly to a file: compounds = getCompounds(conn,results,filename=file.path(tempdir(),"results.sdf")) ``` Using the `getCompoundFeatures` function, you can get a set of feature values as a data frame: ```r getCompoundFeatures(conn,results[1:5],c("mw","logp","formula")) ``` ``` ## compound_id mw logp formula ## 1 204 461.5346 1.3127 C21H27N5O5S ## 2 208 438.4780 3.5492 C21H19FN6O2S ## 3 221 340.4592 3.1325 C21H28N2O2 ## 4 238 447.9351 5.2940 C21H22ClN3O4S ## 5 243 456.5181 3.0020 C21H24N6O4S ``` ```r #write results directly to a CSV file (reduces memory usage): getCompoundFeatures(conn,results[1:5],c("mw","logp","formula"),filename="features.csv") #maintain input order in output: print(results[1:5]) ``` ``` ## [1] 204 208 221 238 243 ``` ```r getCompoundFeatures(conn,results[1:5],c("mw","logp","formula"),keepOrder=TRUE) ``` ``` ## compound_id mw logp formula ## 204 204 461.5346 1.3127 C21H27N5O5S ## 208 208 438.4780 3.5492 C21H19FN6O2S ## 221 221 340.4592 3.1325 C21H28N2O2 ## 238 238 447.9351 5.2940 C21H22ClN3O4S ## 243 243 456.5181 3.0020 C21H24N6O4S ``` <div align="right">[Back to Table of Contents]()</div> Pre-Built Databases -------------------- We have pre-built SQLite databases for the Drug Bank and DUD datasets. They can be found in the ChemmineDrugs annotation package. Connections to these databases can be fetched from the functions `DrugBank` and `DUD` to get the corresponding database. Any of the above functions can then be used to query the database. The DUD dataset was downloaded from [here](http://dude.docking.org/db/subsets/all/all.tar.gz). 
A description can be found [here](http://dude.docking.org/). The Drug Bank data set is version 4.1. It can be downloaded [here](http://www.drugbank.ca/system/downloads/current/structures/all.sdf.zip). The following features are included: - **aromatic**: Number of aromatic rings - **cansmi**: Canonical SMILES string - **cansmins**: - **formula**: Molecular formula - **hba1**: - **hba2**: - **hbd**: - **inchi**: INCHI string - **logp**: - **mr**: - **mw**: Molecular weight - **ncharges**: - **nf**: - **r2nh**: - **r3n**: - **rcch**: - **rcho**: - **rcn**: - **rcooh**: - **rcoor**: - **rcor**: - **rings**: - **rnh2**: - **roh**: - **ropo3**: - **ror**: - **title**: - **tpsa**: The DUD database additionally includes: - **target_name**: Name of the target - **type**: either "active" or "decoy" <div align="right">[Back to Table of Contents]()</div> Working with SDF/SDFset Classes =============================== Several methods are available to return the different data components of `SDF/SDFset` containers in batches. The following examples list the most important ones. To save space their content is not printed in the manual. ```r view(sdfset[1:4]) # Summary view of several molecules length(sdfset) # Returns number of molecules sdfset[[1]] # Returns single molecule from SDFset as SDF object sdfset[[1]][[2]] # Returns atom block from first compound as matrix sdfset[[1]][[2]][1:4,] c(sdfset[1:4], sdfset[5:8]) # Concatenation of several SDFsets ``` The `grepSDFset` function allows string matching/searching on the different data components in `SDFset`. By default the function returns a SDF summary of the matching entries. Alternatively, an index of the matches can be returned with the setting `mode="index"`. ```r grepSDFset("650001", sdfset, field="datablock", mode="subset") # To return index, set mode="index") ``` Utilities to maintain unique compound IDs: ```r sdfid(sdfset[1:4]) # Retrieves CMP IDs from Molecule Name field in header block. 
cid(sdfset[1:4]) # Retrieves CMP IDs from ID slot in SDFset. unique_ids <- makeUnique(sdfid(sdfset)) # Creates unique IDs by appending a counter to duplicates. cid(sdfset) <- unique_ids # Assigns uniquified IDs to ID slot ``` Subsetting by character, index and logical vectors: ```r view(sdfset[c("650001", "650012")]) view(sdfset[4:1]) mylog <- cid(sdfset) %in% c("650001", "650012") view(sdfset[mylog]) ``` Accessing `SDF/SDFset` components: header, atom, bond and data blocks: ```r atomblock(sdf); sdf[[2]]; sdf[["atomblock"]] # All three methods return the same component header(sdfset[1:4]) atomblock(sdfset[1:4]) bondblock(sdfset[1:4]) datablock(sdfset[1:4]) header(sdfset[[1]]) atomblock(sdfset[[1]]) bondblock(sdfset[[1]]) datablock(sdfset[[1]]) ``` Replacement Methods: ```r sdfset[[1]][[2]][1,1] <- 999 atomblock(sdfset)[1] <- atomblock(sdfset)[2] datablock(sdfset)[1] <- datablock(sdfset)[2] ``` Assign matrix data to data block: ```r datablock(sdfset) <- as.matrix(iris[1:100,]) view(sdfset[1:4]) ``` Class coercions from `SDFstr` to `list`, `SDF` and `SDFset`: ```r as(sdfstr[1:2], "list") as(sdfstr[[1]], "SDF") as(sdfstr[1:2], "SDFset") ``` Class coercions from `SDF` to `SDFstr`, `SDFset`, list with SDF sub-components: ```r sdfcomplist <- as(sdf, "list") sdfcomplist <- as(sdfset[1:4], "list"); as(sdfcomplist[[1]], "SDF") sdflist <- as(sdfset[1:4], "SDF"); as(sdflist, "SDFset") as(sdfset[[1]], "SDFstr") as(sdfset[[1]], "SDFset") ``` Class coercions from `SDFset` to lists with components consisting of SDF or sub-components: ```r as(sdfset[1:4], "SDF") as(sdfset[1:4], "list") as(sdfset[1:4], "SDFstr") ``` <div align="right">[Back to Table of Contents]()</div> Molecular Property Functions (Physicochemical Descriptors) ========================================================== Several methods and functions are available to compute basic compound descriptors, such as molecular formula (MF), molecular weight (MW), and frequencies of atoms and functional groups. 
In many of these functions, it is important to set `addH=TRUE` in order to include/add hydrogens that are often not specified in an SD file. ```r propma <- atomcountMA(sdfset, addH=FALSE) boxplot(propma, col="blue", main="Atom Frequency") ``` ![](ChemmineR_files/figure-html/boxplot-1.png)\ ```r boxplot(rowSums(propma), main="All Atom Frequency") ``` Data frame provided by library containing atom names, atom symbols, standard atomic weights, group and period numbers: ```r data(atomprop) atomprop[1:4,] ``` ``` ## Number Name Symbol Atomic_weight Group Period ## 1 1 hydrogen H 1.007940 1 1 ## 2 2 helium He 4.002602 18 1 ## 3 3 lithium Li 6.941000 1 2 ## 4 4 beryllium Be 9.012182 2 2 ``` Compute MW and formula: ```r MW(sdfset[1:4], addH=FALSE) ``` ``` ## CMP1 CMP2 CMP3 CMP4 ## 456.4916 357.4069 370.4255 461.5346 ``` ```r MF(sdfset[1:4], addH=FALSE) ``` ``` ## CMP1 CMP2 CMP3 CMP4 ## "C23H28N4O6" "C18H23N5O3" "C18H18N4O3S" "C21H27N5O5S" ``` Enumerate functional groups: ```r groups(sdfset[1:4], groups="fctgroup", type="countMA") ``` ``` ## RNH2 R2NH R3N ROPO3 ROH RCHO RCOR RCOOH RCOOR ROR RCCH RCN ## CMP1 0 2 1 0 0 0 0 0 0 2 0 0 ## CMP2 0 2 2 0 1 0 0 0 0 0 0 0 ## CMP3 0 1 1 0 1 0 1 0 0 0 0 0 ## CMP4 0 1 3 0 0 0 0 0 0 2 0 0 ``` Combine MW, MF, charges, atom counts, functional group counts and ring counts in one data frame: ```r propma <- data.frame(MF=MF(sdfset, addH=FALSE), MW=MW(sdfset, addH=FALSE), Ncharges=sapply(bonds(sdfset, type="charge"), length), atomcountMA(sdfset, addH=FALSE), groups(sdfset, type="countMA"), rings(sdfset, upper=6, type="count", arom=TRUE)) propma[1:4,] ``` ``` ## MF MW Ncharges C H N O S F Cl RNH2 R2NH R3N ROPO3 ROH RCHO RCOR RCOOH RCOOR ## CMP1 C23H28N4O6 456.4916 0 23 28 4 6 0 0 0 0 2 1 0 0 0 0 0 0 ## CMP2 C18H23N5O3 357.4069 0 18 23 5 3 0 0 0 0 2 2 0 1 0 0 0 0 ## CMP3 C18H18N4O3S 370.4255 0 18 18 4 3 1 0 0 0 1 1 0 1 0 1 0 0 ## CMP4 C21H27N5O5S 461.5346 0 21 27 5 5 1 0 0 0 1 3 0 0 0 0 0 0 ## ROR RCCH RCN RINGS AROMATIC ## CMP1 2 0 0 4 2 ## 
CMP2 0 0 0 3 3 ## CMP3 0 0 0 4 2 ## CMP4 2 0 0 3 3 ``` The following shows an example for assigning the values stored in a matrix (*e.g.* property descriptors) to the data block components in an `SDFset`. Each matrix row will be assigned to the corresponding slot position in the `SDFset`. ```r datablock(sdfset) <- propma # Works with all SDF components datablock(sdfset)[1:4] test <- apply(propma[1:4,], 1, function(x) data.frame(col=colnames(propma), value=x)) ``` The data blocks in SDFs contain often important annotation information about compounds. The `datablock2ma` function returns this information as matrix for all compounds stored in an `SDFset` container. The `splitNumChar` function can then be used to organize all numeric columns in a `numeric matrix` and the character columns in a `character matrix` as components of a `list` object. ```r datablocktag(sdfset, tag="PUBCHEM_NIST_INCHI") datablocktag(sdfset, tag="PUBCHEM_OPENEYE_CAN_SMILES") ``` Convert entire data block to matrix: ```r blockmatrix <- datablock2ma(datablocklist=datablock(sdfset)) # Converts data block to matrix numchar <- splitNumChar(blockmatrix=blockmatrix) # Splits matrix to numeric matrix and character matrix numchar[[1]][1:4,]; numchar[[2]][1:4,] # Splits matrix to numeric matrix and character matrix ``` <div align="right">[Back to Table of Contents]()</div> Bond Matrices ============= Bond matrices provide an efficient data structure for many basic computations on small molecules. The function `conMA` creates this data structure from `SDF` and `SDFset` objects. The resulting bond matrix contains the atom labels in the row/column titles and the bond types in the data part. The labels are defined as follows: 0 is no connection, 1 is a single bond, 2 is a double bond and 3 is a triple bond. 
```r conMA(sdfset[1:2], exclude=c("H")) # Create bond matrix for first two molecules in sdfset conMA(sdfset[[1]], exclude=c("H")) # Return bond matrix for first molecule plot(sdfset[1], atomnum = TRUE, noHbonds=FALSE , no_print_atoms = "", atomcex=0.8) # Plot its structure with atom numbering rowSums(conMA(sdfset[[1]], exclude=c("H"))) # Return number of non-H bonds for each atom ``` <div align="right">[Back to Table of Contents]()</div> Charges and Missing Hydrogens ============================= The function `bonds` returns information about the number of bonds, charges and missing hydrogens in `SDF` and `SDFset` objects. It is used by many other functions (*e.g.* `MW`, `MF`, `atomcount`, `atomcountMA` and `plot`) to correct for missing hydrogens that are often not specified in SD files. ```r bonds(sdfset[[1]], type="bonds")[1:4,] ``` ``` ## atom Nbondcount Nbondrule charge ## 1 O 2 2 0 ## 2 O 2 2 0 ## 3 O 2 2 0 ## 4 O 2 2 0 ``` ```r bonds(sdfset[1:2], type="charge") ``` ``` ## $CMP1 ## NULL ## ## $CMP2 ## NULL ``` ```r bonds(sdfset[1:2], type="addNH") ``` ``` ## CMP1 CMP2 ## 0 0 ``` <div align="right">[Back to Table of Contents]()</div> Ring Perception and Aromaticity Assignment ========================================== The function `rings` identifies all possible rings in one or many molecules (here `sdfset[1]`) using the exhaustive ring perception algorithm from Hanser et al. [-@Hanser_1996]. In addition, the function can return all smallest possible rings as well as aromaticity information. The following example returns all possible rings in a `list`. The argument `upper` allows one to specify an upper length limit for rings. Choosing smaller length limits will reduce the search space resulting in shortened compute times. Note: each ring is represented by a character vector of atom symbols that are numbered by their position in the atom block of the corresponding `SDF/SDFset` object. 
```r ringatoms <- rings(sdfset[1], upper=Inf, type="all", arom=FALSE, inner=FALSE) ``` For visual inspection, the corresponding compound structure can be plotted with the ring bonds highlighted in color: ```r atomindex <- as.numeric(gsub(".*_", "", unique(unlist(ringatoms)))) plot(sdfset[1], print=FALSE, colbonds=atomindex) ``` ![](ChemmineR_files/figure-html/unnamed-chunk-90-1.png)\ Alternatively, one can include the atom numbers in the plot: ```r plot(sdfset[1], print=FALSE, atomnum=TRUE, no_print_atoms="H") ``` ![](ChemmineR_files/figure-html/unnamed-chunk-91-1.png)\ Aromaticity information of the rings can be returned in a logical vector by setting `arom=TRUE`: ```r rings(sdfset[1], upper=Inf, type="all", arom=TRUE, inner=FALSE) ``` ``` ## $RINGS ## $RINGS$ring1 ## [1] "N_10" "O_6" "C_32" "C_31" "C_30" ## ## $RINGS$ring2 ## [1] "C_12" "C_14" "C_15" "C_13" "C_11" ## ## $RINGS$ring3 ## [1] "C_23" "O_2" "C_27" "C_28" "O_3" "C_25" ## ## $RINGS$ring4 ## [1] "C_23" "C_21" "C_18" "C_22" "C_26" "C_25" ## ## $RINGS$ring5 ## [1] "O_3" "C_28" "C_27" "O_2" "C_23" "C_21" "C_18" "C_22" "C_26" "C_25" ## ## ## $AROMATIC ## ring1 ring2 ring3 ring4 ring5 ## TRUE FALSE FALSE TRUE FALSE ``` Return rings with no more than 6 atoms that are also aromatic: ```r rings(sdfset[1], upper=6, type="arom", arom=TRUE, inner=FALSE) ``` ``` ## $AROMATIC_RINGS ## $AROMATIC_RINGS$ring1 ## [1] "N_10" "O_6" "C_32" "C_31" "C_30" ## ## $AROMATIC_RINGS$ring4 ## [1] "C_23" "C_21" "C_18" "C_22" "C_26" "C_25" ``` Count shortest possible rings and their aromaticity assignments by setting `type=count` and `inner=TRUE`. The inner (smallest possible) rings are identified by first computing all possible rings and then selecting only the inner rings. For more details, consult the help documentation with `?rings`. 
```r rings(sdfset[1:4], upper=Inf, type="count", arom=TRUE, inner=TRUE) ``` ``` ## RINGS AROMATIC ## CMP1 4 2 ## CMP2 3 3 ## CMP3 4 2 ## CMP4 3 3 ``` <div align="right">[Back to Table of Contents]()</div> Rendering Chemical Structure Images =================================== R Graphics Device ----------------- A new plotting function for compound structures has been added to the package recently. This function uses the native R graphics device for generating compound depictions. At this point this function is still in an experimental developmental stage but should become stable soon. If you have `ChemmineOB` available you can use the `regenCoords` option to have OpenBabel regenerate the coordinates for the compound. This can sometimes produce better looking plots. Plot compound Structures with R's graphics device: ```r data(sdfsample) sdfset <- sdfsample plot(sdfset[1:4], regenCoords=TRUE,print=FALSE) # 'print=TRUE' returns SDF summaries ``` ![](ChemmineR_files/figure-html/plotstruct2-1.png)\ Customized plots: ```r plot(sdfset[1:4], griddim=c(2,2), print_cid=letters[1:4], print=FALSE, noHbonds=FALSE) ``` In the following plot, the atom block position numbers in the SDF are printed next to the atom symbols (`atomnum = TRUE`). For more details, consult help documentation with `?plotStruc` or `?plot`. ```r plot(sdfset["CMP1"], atomnum = TRUE, noHbonds=F , no_print_atoms = "", atomcex=0.8, sub=paste("MW:", MW(sdfsample["CMP1"])), print=FALSE) ``` ![](ChemmineR_files/figure-html/plotstruct3-1.png)\ Substructure highlighting by atom numbers: ```r plot(sdfset[1], print=FALSE, colbonds=c(22,26,25,3,28,27,2,23,21,18,8,19,20,24)) ``` ![](ChemmineR_files/figure-html/plotstruct4-1.png)\ <div align="right">[Back to Table of Contents]()</div> Online with ChemMine Tools -------------------------- Alternatively, one can visualize compound structures with a standard web browser using the online ChemMine Tools service. 
Plot structures using web service ChemMine Tools: ```r sdf.visualize(sdfset[1:4]) ``` ![Figure: Visualization webpage created by calling `sdf.visualize`.](visualizescreenshot-small.png) <div align="right">[Back to Table of Contents]()</div> Similarity Comparisons and Searching ==================================== Maximum Common Substructure (MCS) Searching ------------------------------------------- The `ChemmineR` add-on package [`fmcsR`](http://www.bioconductor.org/packages/devel/bioc/html/fmcsR.html) provides support for identifying maximum common substructures (MCSs) and flexible MCSs among compounds. The algorithm can be used for pairwise compound comparisons, structure similarity searching and clustering. The manual describing this functionality is available [here](http://www.bioconductor.org/packages/devel/bioc/vignettes/fmcsR/inst/doc/fmcsR.html) and the associated publication is Wang et al. [-@Wang_2013]. The following gives a short preview of some functionalities provided by the `fmcsR` package. ```r library(fmcsR) data(fmcstest) # Loads test sdfset object test <- fmcs(fmcstest[1], fmcstest[2], au=2, bu=1) # Searches for MCS with mismatches plotMCS(test) # Plots both query compounds with MCS in color ``` ![](ChemmineR_files/figure-html/plotmcs-1.png)\ <div align="right">[Back to Table of Contents]()</div> AP/APset Classes for Storing Atom Pair Descriptors -------------------------------------------------- The function `sdf2ap` computes atom pair descriptors for one or many compounds [@Carhart_1985; @Chen_2002]. It returns a searchable atom pair database stored in a container of class `APset`, which can be used for structural similarity searching and clustering. As similarity measure, the Tanimoto coefficient or related coefficients can be used. An `APset` object consists of one or many `AP` entries each storing the atom pairs of a single compound. 
Note: the deprecated `cmp.parse` function is still available which also generates atom pair descriptor databases, but directly from an SD file. Since the latter function is less flexible it may be discontinued in the future. Generate atom pair descriptor database for searching: ```r ap <- sdf2ap(sdfset[[1]]) # For single compound ap ``` ``` ## An instance of "AP" ## <<atom pairs>> ## 52614450304 52615497856 52615514112 52616547456 52616554624 ... length: 528 ``` ```r apset <- sdf2ap(sdfset) # For many compounds. ``` ```r view(apset[1:4]) ``` ``` ## $`650001` ## An instance of "AP" ## <<atom pairs>> ## 53688190976 53688190977 53688190978 53688190979 53688190980 ... length: 528 ## ## $`650002` ## An instance of "AP" ## <<atom pairs>> ## 53688190976 53688190977 53688190978 53688190979 53689239552 ... length: 325 ## ## $`650003` ## An instance of "AP" ## <<atom pairs>> ## 52615496704 53688190976 53688190977 53689239552 53697627136 ... length: 325 ## ## $`650004` ## An instance of "AP" ## <<atom pairs>> ## 52617593856 52618642432 52619691008 52619691009 52628079616 ... length: 496 ``` Return main components of APset objects: ```r cid(apset[1:4]) # Compound IDs ap(apset[1:4]) # Atom pair descriptors db.explain(apset[1]) # Return atom pairs in human readable format ``` Coerce APset to other objects: ```r apset2descdb(apset) # Returns old list-style AP database tmp <- as(apset, "list") # Returns list as(tmp, "APset") # Converts list back to APset ``` <div align="right">[Back to Table of Contents]()</div> Large SDF and Atom Pair Databases --------------------------------- When working with large data sets it is often desirable to save the `SDFset` and `APset` containers as binary R objects to files for later use. This way they can be loaded very quickly into a new R session without recreating them every time from scratch. 
Save and load of `SDFset` and `APset` containers: ```r save(sdfset, file = "sdfset.rda", compress = TRUE) load("sdfset.rda") save(apset, file = "apset.rda", compress = TRUE) load("apset.rda") ``` <div align="right">[Back to Table of Contents]()</div> Pairwise Compound Comparisons with Atom Pairs --------------------------------------------- The `cmp.similarity` function computes the atom pair similarity between two compounds using the Tanimoto coefficient as similarity measure. The coefficient is defined as *c/(a+b+c)*, which is the proportion of the atom pairs shared among two compounds divided by their union. The variable *c* is the number of atom pairs common in both compounds, while *a* and *b* are the numbers of their unique atom pairs. ```r cmp.similarity(apset[1], apset[2]) ``` ``` ## [1] 0.2637037 ``` ```r cmp.similarity(apset[1], apset[1]) ``` ``` ## [1] 1 ``` <div align="right">[Back to Table of Contents]()</div> Similarity Searching with Atom Pairs ------------------------------------ The `cmp.search` function searches an atom pair database for compounds that are similar to a query compound. The following example returns a data frame where the rows are sorted by the Tanimoto similarity score (best to worst). The first column contains the indices of the matching compounds in the database. The argument cutoff can be a similarity cutoff, meaning only compounds with a similarity value larger than this cutoff will be returned; or it can be an integer value restricting how many compounds will be returned. When supplying a cutoff of 0, the function will return the similarity values for every compound in the database. 
```r cmp.search(apset, apset["650065"], type=3, cutoff = 0.3, quiet=TRUE) ``` ``` ## index cid scores ## 1 61 650066 1.0000000 ## 2 60 650065 1.0000000 ## 3 67 650072 0.3389831 ## 4 11 650011 0.3190608 ## 5 15 650015 0.3184524 ## 6 86 650092 0.3154270 ## 7 64 650069 0.3010279 ``` Alternatively, the function can return the matches in form of an index or a named vector if the `type` argument is set to `1` or `2`, respectively. ```r cmp.search(apset, apset["650065"], type=1, cutoff = 0.3, quiet=TRUE) ``` ``` ## [1] 61 60 67 11 15 86 64 ``` ```r cmp.search(apset, apset["650065"], type=2, cutoff = 0.3, quiet=TRUE) ``` ``` ## 650066 650065 650072 650011 650015 650092 650069 ## 1.0000000 1.0000000 0.3389831 0.3190608 0.3184524 0.3154270 0.3010279 ``` <div align="right">[Back to Table of Contents]()</div> FP/FPset Classes for Storing Fingerprints ----------------------------------------- The `FPset` class stores fingerprints of small molecules in a matrix-like representation where every molecule is encoded as a fingerprint of the same type and length. The `FPset` container acts as a searchable database that contains the fingerprints of many molecules. The `FP` container holds only one fingerprint. Several constructor and coerce methods are provided to populate `FP/FPset` containers with fingerprints, while supporting any type and length of fingerprints. For instance, the function `desc2fp` generates fingerprints from an atom pair database stored in an `APset`, and `as(matrix, "FPset")` and `as(character, "FPset")` construct an `FPset` database from objects where the fingerprints are represented as `matrix` or `character` objects, respectively. 
Show slots of `FPset` class: ```r showClass("FPset") ``` ``` ## Class "FPset" [package "ChemmineR"] ## ## Slots: ## ## Name: fpma type foldCount ## Class: matrix character numeric ``` Instance of `FPset` class: ```r data(apset) fpset <- desc2fp(apset) view(fpset[1:2]) ``` ``` ## $`650001` ## An instance of "FP" of type "unknown-4721" ## <<fingerprint>> ## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 ... length: 1024 ## ## $`650002` ## An instance of "FP" of type "unknown-4173" ## <<fingerprint>> ## 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 1 ... length: 1024 ``` `FPset` class usage: ```r fpset[1:4] # behaves like a list ``` ``` ## An instance of a 1024 bit "FPset" of type "apfp" with 4 molecules ``` ```r fpset[[1]] # returns FP object ``` ``` ## An instance of "FP" of type "unknown-2986" ## <<fingerprint>> ## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 ... length: 1024 ``` ```r length(fpset) # number of compounds ``` ``` ## [1] 100 ``` ```r cid(fpset) # returns compound ids ``` ``` ## [1] "650001" "650002" "650003" "650004" "650005" "650006" "650007" "650008" "650009" "650010" ## [11] "650011" "650012" "650013" "650014" "650015" "650016" "650017" "650019" "650020" "650021" ## [21] "650022" "650023" "650024" "650025" "650026" "650027" "650028" "650029" "650030" "650031" ## [31] "650032" "650033" "650034" "650035" "650036" "650037" "650038" "650039" "650040" "650041" ## [41] "650042" "650043" "650044" "650045" "650046" "650047" "650048" "650049" "650050" "650052" ## [51] "650054" "650056" "650058" "650059" "650060" "650061" "650062" "650063" "650064" "650065" ## [61] "650066" "650067" "650068" "650069" "650070" "650071" "650072" "650073" "650074" "650075" ## [71] "650076" "650077" "650078" "650079" "650080" "650081" "650082" "650083" "650085" "650086" ## [81] "650087" "650088" "650089" "650090" "650091" "650092" "650093" "650094" "650095" "650096" ## [91] "650097" "650098" "650099" "650100" "650101" "650102" "650103" "650104" "650105" "650106" ``` ```r fpset[10] <- 0 # 
replacement of 10th fingerprint to all zeros cid(fpset) <- 1:length(fpset) # replaces compound ids c(fpset[1:4], fpset[11:14]) # concatenation of several FPset objects ``` ``` ## An instance of a 1024 bit "FPset" of type "apfp" with 8 molecules ``` Construct `FPset` class form `matrix`: ```r fpma <- as.matrix(fpset) # coerces FPset to matrix as(fpma, "FPset") ``` ``` ## An instance of a 1024 bit "FPset" of type "unknown-6312" with 100 molecules ``` Construct `FPset` class form `character vector`: ```r fpchar <- as.character(fpset) # coerces FPset to character strings as(fpchar, "FPset") # construction of FPset class from character vector ``` ``` ## An instance of a 1024 bit "FPset" of type "apfp" with 100 molecules ``` Compound similarity searching with `FPset`: ```r fpSim(fpset[1], fpset, method="Tanimoto", cutoff=0.4, top=4) ``` ``` ## 1 96 67 15 ## 1.0000000 0.4719101 0.4288499 0.4275229 ``` Folding fingerprints: ```r fold(fpset) # fold each FP once ``` ``` ## An instance of a 512 bit "FPset" of type "apfp" with 100 molecules ``` ```r fold(fpset, count=2) #fold each FP twice ``` ``` ## An instance of a 256 bit "FPset" of type "apfp" with 100 molecules ``` ```r fold(fpset, bits=128) #fold each FP down to 128 bits ``` ``` ## An instance of a 128 bit "FPset" of type "apfp" with 100 molecules ``` ```r fold(fpset[[1]]) # fold an individual FP ``` ``` ## An instance of "FP" of type "unknown-2996" ## <<fingerprint>> ## 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 ... length: 512 ``` ```r fptype(fpset) # get type of FPs ``` ``` ## [1] "apfp" ``` ```r numBits(fpset) # get the number of bits of each FP ``` ``` ## [1] 1024 ``` ```r foldCount(fold(fpset)) # the number of times an FP or FPset has been folded ``` ``` ## [1] 1 ``` <div align="right">[Back to Table of Contents]()</div> Atom Pair Fingerprints ---------------------- Atom pairs can be converted into binary atom pair fingerprints of fixed length. 
Computations on this compact data structure are more time and memory efficient than on their relatively complex atom pair counterparts. The function `desc2fp` generates fingerprints from descriptor vectors of variable length such as atom pairs stored in `APset` or `list` containers. The obtained fingerprints can be used for structure similarity comparisons, searching and clustering. Create atom pair sample data set: ```r data(sdfsample) sdfset <- sdfsample[1:10] apset <- sdf2ap(sdfset) ``` Compute atom pair fingerprint database using internal atom pair selection containing the 4096 most common atom pairs identified in DrugBank's compound collection. For details see `?apfp`. The following example uses from this set the 1024 most frequent atom pairs: ```r fpset <- desc2fp(apset, descnames=1024, type="FPset") ``` Alternatively, one can provide any custom atom pair selection. Here, the 1024 most common ones in `apset`: ```r fpset1024 <- names(rev(sort(table(unlist(as(apset, "list")))))[1:1024]) fpset <- desc2fp(apset, descnames=fpset1024, type="FPset") ``` A more compact way of storing fingerprints is as character values: ```r fpchar <- desc2fp(x=apset, descnames=1024, type="character") fpchar <- as.character(fpset) ``` Converting a fingerprint database to a matrix and vice versa: ```r fpma <- as.matrix(fpset) fpset <- as(fpma, "FPset") ``` Similarity searching and returning Tanimoto similarity coefficients: ```r fpSim(fpset[1], fpset, method="Tanimoto") ``` Under `method` one can choose from several predefined similarity measures including *Tanimoto* (default), *Euclidean*, *Tversky* or *Dice*. Alternatively, one can pass on custom similarity functions. 
```r fpSim(fpset[1], fpset, method="Tversky", cutoff=0.4, top=4, alpha=0.5, beta=1) ```
Example for using a custom similarity function:
```r myfct <- function(a, b, c, d) c/(a+b+c+d) fpSim(fpset[1], fpset, method=myfct) ```
Clustering example:
```r simMAap <- sapply(cid(fpset), function(x) fpSim(x=fpset[x], fpset, sorted=FALSE)) hc <- hclust(as.dist(1-simMAap), method="single") plot(as.dendrogram(hc), edgePar=list(col=4, lwd=2), horiz=TRUE) ```
<div align="right">[Back to Table of Contents]()</div>
Fingerprint E-values
---------------------
The `fpSim` function can also return Z-scores, E-values, and p-values if given a set of score distribution parameters. These parameters can be computed over an `FPset` with the `genParameters` function.
```r params <- genParameters(fpset) ```
This function will compute all pairwise distances between the given fingerprints and then fit a Beta distribution to the resulting Tanimoto scores, conditioned on the number of set bits in each fingerprint. For large data sets where you would not want to compute all pairwise distances, you can set what fraction to sample with the `sampleFraction` argument. This step only needs to be done once for each database of `FPset` objects. Alternatively, if you have a large database of fingerprints, or you believe that the parameters computed on a single database are more generally applicable, you can use the resulting parameters for other databases as well. Once you have a set of parameters, you can pass them to `fpSim` with the `parameters` argument.
```r fpSim(fpset[[1]], fpset, top=10, parameters=params) ``` ``` ## similarity zscore evalue pvalue ## 1 1.0000000 6.2418215 0.000000 0.0000000 ## 96 0.4719101 1.6075792 6.748413 0.9988273 ## 67 0.4288499 1.2297052 12.012285 0.9999939 ## 15 0.4275229 1.2180604 12.211967 0.9999950 ## 88 0.4247423 1.1936587 12.638193 0.9999968 ## 64 0.4187380 1.1409688 13.594938 0.9999988 ## 4 0.4166667 1.1227914 13.936692 0.9999991 ## 86 0.3978686 0.9578290 17.319191 1.0000000 ## 77 0.3970588 0.9507232 17.476453 1.0000000 ## 69 0.3940000 0.9238806 18.079243 1.0000000 ``` This will then return a data frame with the similarity, Z-score, E-value, and p-value. You can change which value will be used as a cutoff and to sort by by setting the argument `scoreType` to one of these scores. In this way you could set an E-value cutoff of 0.04 for example. ```r fpSim(fpset[[1]], fpset, cutoff=0.04, scoreType="evalue", parameters=params) ``` ``` ## similarity zscore evalue pvalue ## 1 1 6.241822 0 0 ``` <div align="right">[Back to Table of Contents]()</div> Pairwise Compound Comparisons with PubChem Fingerprints ------------------------------------------------------- The `fpSim` function computes the similarity coefficients (*e.g.* Tanimoto) for pairwise comparisons of binary fingerprints. For this data type, *c* is the number of "on-bits" common in both compounds, and *a* and *b* are the numbers of their unique "on-bits". Currently, the PubChem fingerprints need to be provided (here PubChem's SD files) and cannot be computed from scratch in `ChemmineR`. The PubChem fingerprint specifications can be loaded with `data(pubchemFPencoding)`. 
Convert base 64 encoded PubChem fingerprints to `character` vector, `matrix` or `FPset` object: ```r cid(sdfset) <- sdfid(sdfset) fpset <- fp2bit(sdfset, type=1) fpset <- fp2bit(sdfset, type=2) fpset <- fp2bit(sdfset, type=3) fpset ``` ``` ## An instance of a 881 bit "FPset" of type "pubchem" with 100 molecules ``` Pairwise compound structure comparisons: ```r fpSim(fpset[1], fpset[2]) ``` ``` ## 650002 ## 0.5364807 ``` <div align="right">[Back to Table of Contents]()</div> Similarity Searching with PubChem Fingerprints ---------------------------------------------- Similarly, the `fpSim` function provides search functionality for PubChem fingerprints: ```r fpSim(fpset["650065"], fpset, method="Tanimoto", cutoff=0.6, top=6) ``` ``` ## 650065 650066 650035 650019 650012 650046 ## 1.0000000 0.9944751 0.7435897 0.7432432 0.7230047 0.7142857 ``` <div align="right">[Back to Table of Contents]()</div> Visualize Similarity Search Results ----------------------------------- The `cmp.search` function allows to visualize the chemical structures for the search results. Similar but more flexible chemical structure rendering functions are `plot` and `sdf.visualize` described above. By setting the visualize argument in `cmp.search` to `TRUE`, the matching compounds and their scores can be visualized with a standard web browser. Depending on the `visualize.browse` argument, an URL will be printed or a webpage will be opened showing the structures of the matching compounds. View similarity search results in R's graphics device: ```r cid(sdfset) <- cid(apset) # Assure compound name consistency among objects. 
plot(sdfset[names(cmp.search(apset, apset["650065"], type=2, cutoff=4, quiet=TRUE))], print=FALSE) ``` ![](ChemmineR_files/figure-html/search_result-1.png)\ View results online with Chemmine Tools: ```r similarities <- cmp.search(apset, apset[1], type=3, cutoff = 10) sdf.visualize(sdfset[similarities[,1]]) ``` <div align="right">[Back to Table of Contents]()</div> Clustering ========== Clustering Identical or Very Similar Compounds ---------------------------------------------- Often it is of interest to identify very similar or identical compounds in a compound set. The `cmp.duplicated` function can be used to quickly identify very similar compounds in atom pair sets, which will be frequently, but not necessarily, identical compounds. Identify compounds with identical AP sets: ```r cmp.duplicated(apset, type=1)[1:4] # Returns AP duplicates as logical vector ``` ``` ## [1] FALSE FALSE FALSE FALSE ``` ```r cmp.duplicated(apset, type=2)[1:4,] # Returns AP duplicates as data frame ``` ``` ## ids CLSZ_100 CLID_100 ## 1 650082 1 1 ## 2 650059 2 2 ## 3 650060 2 2 ## 4 650010 1 3 ``` Plot the structure of two pairs of duplicates: ```r plot(sdfset[c("650059","650060", "650065", "650066")], print=FALSE) ``` ![](ChemmineR_files/figure-html/duplicates-1.png)\ Remove AP duplicates from SDFset and APset objects: ```r apdups <- cmp.duplicated(apset, type=1) sdfset[which(!apdups)]; apset[which(!apdups)] ``` ``` ## An instance of "SDFset" with 96 molecules ``` ``` ## An instance of "APset" with 96 molecules ``` Alternatively, one can identify duplicates via other descriptor types if they are provided in the data block of an imported SD file. For instance, one can use here fingerprints, InChI, SMILES or other molecular representations. The following examples show how to enumerate by identical InChI strings, SMILES strings and molecular formula, respectively. 
```r count <- table(datablocktag(sdfset, tag="PUBCHEM_NIST_INCHI")) count <- table(datablocktag(sdfset, tag="PUBCHEM_OPENEYE_CAN_SMILES")) count <- table(datablocktag(sdfset, tag="PUBCHEM_MOLECULAR_FORMULA")) count[1:4] ``` ``` ## ## C10H9FN2O2S C11H12N4OS C11H13NO4 C12H11ClN2OS ## 1 1 1 1 ``` <div align="right">[Back to Table of Contents]()</div> Binning Clustering ------------------ Compound libraries can be clustered into discrete similarity groups with the binning clustering function `cmp.cluster`. The function accepts as input an atom pair (`APset`) or a fingerprint (`FPset`) descriptor database as well as a similarity threshold. The binning clustering result is returned in form of a data frame. Single linkage is used for cluster joining. The function calculates the required compound-to-compound distance information on the fly, while a memory-intensive distance matrix is only created upon user request via the `save.distances` argument (see below). Because an optimum similarity threshold is often not known, the `cmp.cluster` function can calculate cluster results for multiple cutoffs in one step with almost the same speed as for a single cutoff. This can be achieved by providing several cutoffs under the cutoff argument. The clustering results for the different cutoffs will be stored in one data frame. One may force the `cmp.cluster` function to calculate and store the distance matrix by supplying a file name to the `save.distances` argument. The generated distance matrix can be loaded and passed on to many other clustering methods available in R, such as the hierarchical clustering function `hclust` (see below). If a distance matrix is available, it may also be supplied to `cmp.cluster` via the `use.distances` argument. This is useful when one has a pre-computed distance matrix either from a previous call to `cmp.cluster` or from other distance calculation subroutines. 
Single-linkage binning clustering with one or multiple cutoffs: ```r clusters <- cmp.cluster(db=apset, cutoff = c(0.7, 0.8, 0.9), quiet = TRUE) ``` ``` ## ## sorting result... ``` ```r clusters[1:12,] ``` ``` ## ids CLSZ_0.7 CLID_0.7 CLSZ_0.8 CLID_0.8 CLSZ_0.9 CLID_0.9 ## 48 650049 2 48 2 48 2 48 ## 49 650050 2 48 2 48 2 48 ## 54 650059 2 54 2 54 2 54 ## 55 650060 2 54 2 54 2 54 ## 56 650061 2 56 2 56 2 56 ## 57 650062 2 56 2 56 2 56 ## 58 650063 2 58 2 58 2 58 ## 59 650064 2 58 2 58 2 58 ## 60 650065 2 60 2 60 2 60 ## 61 650066 2 60 2 60 2 60 ## 1 650001 1 1 1 1 1 1 ## 2 650002 1 2 1 2 1 2 ``` Clustering of `FPset` objects with multiple cutoffs. This method allows to call various similarity methods provided by the `fpSim` function. For details consult `?fpSim`. ```r fpset <- desc2fp(apset) clusters2 <- cmp.cluster(fpset, cutoff=c(0.5, 0.7, 0.9), method="Tanimoto", quiet=TRUE) ``` ``` ## ## sorting result... ``` ```r clusters2[1:12,] ``` ``` ## ids CLSZ_0.5 CLID_0.5 CLSZ_0.7 CLID_0.7 CLSZ_0.9 CLID_0.9 ## 69 650074 14 11 2 69 1 69 ## 79 650085 14 11 2 69 1 79 ## 11 650011 14 11 1 11 1 11 ## 15 650015 14 11 1 15 1 15 ## 45 650046 14 11 1 45 1 45 ## 47 650048 14 11 1 47 1 47 ## 51 650054 14 11 1 51 1 51 ## 53 650058 14 11 1 53 1 53 ## 64 650069 14 11 1 64 1 64 ## 65 650070 14 11 1 65 1 65 ## 67 650072 14 11 1 67 1 67 ## 86 650092 14 11 1 86 1 86 ``` Sames as above, but using Tversky similarity measure: ```r clusters3 <- cmp.cluster(fpset, cutoff=c(0.5, 0.7, 0.9), method="Tversky", alpha=0.3, beta=0.7, quiet=TRUE) ``` ``` ## ## sorting result... 
``` Return cluster size distributions for each cutoff: ```r cluster.sizestat(clusters, cluster.result=1) ``` ``` ## cluster size count ## 1 1 90 ## 2 2 5 ``` ```r cluster.sizestat(clusters, cluster.result=2) ``` ``` ## cluster size count ## 1 1 90 ## 2 2 5 ``` ```r cluster.sizestat(clusters, cluster.result=3) ``` ``` ## cluster size count ## 1 1 90 ## 2 2 5 ``` Enforce calculation of distance matrix: ```r clusters <- cmp.cluster(db=apset, cutoff = c(0.65, 0.5, 0.3), save.distances="distmat.rda") # Saves distance matrix to file "distmat.rda" in current working directory. load("distmat.rda") # Loads distance matrix. ``` <div align="right">[Back to Table of Contents]()</div> Jarvis-Patrick Clustering ------------------------- The Jarvis-Patrick clustering algorithm is widely used in cheminformatics [@greycite13371]. It requires a nearest neighbor table, which consists of *j* nearest neighbors for each item (*e.g.* compound). The nearest neighbor table is then used to join items into clusters when they meet the following requirements: (a) they are contained in each other's neighbor list and (b) they share at least *k* nearest neighbors. The values for *j* and *k* are user-defined parameters. The `jarvisPatrick` function implemented in `ChemmineR` takes a nearest neighbor table generated by `nearestNeighbors`, which works for `APset` and `FPset` objects. This function takes either the standard Jarvis-Patrick *j* parameter (as the `numNbrs` parameter), or else a `cutoff` value, which is an extension to the basic algorithm that we have added. Given a cutoff value, the nearest neighbor table returned contains every neighbor with a similarity greater than the cutoff value, for each item. This allows one to generate tighter clusters and to minimize certain limitations of this method, such as false joins of completely unrelated items when operating on small data sets. 
The `trimNeighbors` function can also be used to take an existing nearest neighbor table and remove all neighbors whose similarity value is below a given cutoff value. This allows one to compute a very relaxed nearest neighbor table initially, and then quickly try different refinements later. In case an existing nearest neighbor matrix needs to be used, the `fromNNMatrix` function can be used to transform it into the list structure that `jarvisPatrick` requires. The input matrix must have a row for each compound, and each row should be the index values of the neighbors of compound represented by that row. The names of each compound can also be given through the `names` argument. If not given, it will attempt to use the `rownames` of the given matrix. The `jarvisPatrick` function also allows one to relax some of the requirements of the algorithm through the `mode` parameter. When set to "a1a2b", then all requirements are used. If set to "a1b", then (a) is relaxed to a unidirectional requirement. Lastly, if `mode` is set to "b", then only requirement (b) is used, which means that all pairs of items will be checked to see if (b) is satisfied between them. The size of the clusters generated by the different methods increases in this order: "a1a2b" < "a1b" < "b". The run time of method "a1a2b" follows a close to linear relationship, while it is nearly quadratic for the much more exhaustive method "b". Only methods "a1a2b" and "a1b" are suitable for clustering very large data sets (e.g. \>50,000 items) in a reasonable amount of time. An additional extension to the algorithm is the ability to set the linkage mode. The `linkage` parameter can be one of "single", "average", or "complete", for single linkage, average linkage and complete linkage merge requirements, respectively. In the context of Jarvis-Patrick, average linkage means that at least half of the pairs between the clusters under consideration must meet requirement (b). 
Similarly, for complete linkage, all pairs must meet requirement (b). Single linkage is the normal case for Jarvis-Patrick and just means that at least one pair must meet requirement (b). The output is a cluster `vector` with the item labels in the name slot and the cluster IDs in the data slot. There is a utility function called `byCluster`, which takes the cluster vector output by `jarvisPatrick` and transforms it into a list of vectors. Each slot of the list is named with a cluster id and the vector contains the cluster members. By default the function excludes singletons from the output, but they can be included by setting `excludeSingletons=FALSE`. Load/create sample `APset` and `FPset`:
```r data(apset) fpset <- desc2fp(apset) ```
Standard Jarvis-Patrick clustering on `APset` and `FPset` objects:
```r jarvisPatrick(nearestNeighbors(apset,numNbrs=6), k=5, mode="a1a2b") ```
``` ## 650001 650002 650003 650004 650005 650006 650007 650008 650009 650010 650011 650012 650013 650014 ## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ## 650015 650016 650017 650019 650020 650021 650022 650023 650024 650025 650026 650027 650028 650029 ## 11 15 16 17 18 19 20 21 22 23 24 25 26 27 ## 650030 650031 650032 650033 650034 650035 650036 650037 650038 650039 650040 650041 650042 650043 ## 28 29 30 31 32 33 34 35 36 37 38 39 40 41 ## 650044 650045 650046 650047 650048 650049 650050 650052 650054 650056 650058 650059 650060 650061 ## 42 43 44 45 46 47 48 49 50 51 52 53 54 55 ## 650062 650063 650064 650065 650066 650067 650068 650069 650070 650071 650072 650073 650074 650075 ## 56 57 58 59 60 61 62 63 64 65 66 67 68 69 ## 650076 650077 650078 650079 650080 650081 650082 650083 650085 650086 650087 650088 650089 650090 ## 70 71 72 73 74 75 76 77 78 79 80 81 82 83 ## 650091 650092 650093 650094 650095 650096 650097 650098 650099 650100 650101 650102 650103 650104 ## 84 85 86 87 88 89 90 91 92 93 94 95 96 97 ## 650105 650106 ## 98 99 ```
```r #Using "APset"
jarvisPatrick(nearestNeighbors(fpset,numNbrs=6), k=5, mode="a1a2b") ``` ``` ## 650001 650002 650003 650004 650005 650006 650007 650008 650009 650010 650011 650012 650013 650014 ## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ## 650015 650016 650017 650019 650020 650021 650022 650023 650024 650025 650026 650027 650028 650029 ## 11 15 16 17 18 19 20 21 22 23 24 25 26 27 ## 650030 650031 650032 650033 650034 650035 650036 650037 650038 650039 650040 650041 650042 650043 ## 28 29 30 31 32 33 34 35 36 37 38 39 40 41 ## 650044 650045 650046 650047 650048 650049 650050 650052 650054 650056 650058 650059 650060 650061 ## 42 43 44 45 46 47 48 49 50 51 52 53 54 55 ## 650062 650063 650064 650065 650066 650067 650068 650069 650070 650071 650072 650073 650074 650075 ## 56 57 58 59 60 61 62 63 64 65 66 67 68 69 ## 650076 650077 650078 650079 650080 650081 650082 650083 650085 650086 650087 650088 650089 650090 ## 70 71 72 73 74 75 76 77 78 79 80 81 82 83 ## 650091 650092 650093 650094 650095 650096 650097 650098 650099 650100 650101 650102 650103 650104 ## 84 85 86 87 88 89 90 91 92 93 94 1 95 96 ## 650105 650106 ## 97 98 ``` ```r #Using "FPset" ``` The following example runs Jarvis-Patrick clustering with a minimum similarity `cutoff` value (here Tanimoto coefficient). In addition, it uses the much more exhaustive `"b"` method that generates larger cluster sizes, but significantly increased the run time. For more details, consult the corresponding help file with `?jarvisPatrick`. 
```r cl<-jarvisPatrick(nearestNeighbors(fpset,cutoff=0.6, method="Tanimoto"), k=2 ,mode="b") byCluster(cl) ``` ``` ## $`11` ## [1] "650011" "650092" ## ## $`15` ## [1] "650015" "650069" ## ## $`45` ## [1] "650046" "650054" ## ## $`48` ## [1] "650049" "650050" ## ## $`52` ## [1] "650059" "650060" ## ## $`53` ## [1] "650061" "650062" ## ## $`54` ## [1] "650063" "650064" ## ## $`55` ## [1] "650065" "650066" ## ## $`62` ## [1] "650074" "650085" ``` Output nearest neighbor table (`matrix`): ```r nnm <- nearestNeighbors(fpset,numNbrs=6) nnm$names[1:4] ``` ``` ## [1] "650001" "650002" "650003" "650004" ``` ```r nnm$ids[1:4,] ``` ``` ## NULL ``` ```r nnm$similarities[1:4,] ``` ``` ## 650001 650102 650072 650015 650094 650069 ## sim 1 0.4719101 0.4288499 0.4275229 0.4247423 0.4187380 ## sim 1 0.4343891 0.4246575 0.4216867 0.3939394 0.3922078 ## sim 1 0.4152249 0.3619303 0.3610315 0.3424242 0.3367089 ## sim 1 0.5791045 0.4973958 0.4192708 0.4166667 0.4104683 ``` Trim nearest neighbor table: ```r nnm <- trimNeighbors(nnm,cutoff=0.4) nnm$similarities[1:4,] ``` ``` ## 650001 650102 650072 650015 650094 650069 ## sim 1 0.4719101 0.4288499 0.4275229 0.4247423 0.4187380 ## sim 1 0.4343891 0.4246575 0.4216867 NA NA ## sim 1 0.4152249 NA NA NA NA ## sim 1 0.5791045 0.4973958 0.4192708 0.4166667 0.4104683 ``` Perform clustering on precomputed nearest neighbor table: ```r jarvisPatrick(nnm, k=5,mode="b") ``` ``` ## 650001 650002 650003 650004 650005 650006 650007 650008 650009 650010 650011 650012 650013 650014 ## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ## 650015 650016 650017 650019 650020 650021 650022 650023 650024 650025 650026 650027 650028 650029 ## 11 15 16 17 18 19 20 21 22 23 24 25 26 27 ## 650030 650031 650032 650033 650034 650035 650036 650037 650038 650039 650040 650041 650042 650043 ## 28 29 30 31 32 33 34 35 36 37 38 39 40 41 ## 650044 650045 650046 650047 650048 650049 650050 650052 650054 650056 650058 650059 650060 650061 ## 42 43 11 44 11 45 46 47 48 49 50 51 52 53 ## 
650062 650063 650064 650065 650066 650067 650068 650069 650070 650071 650072 650073 650074 650075 ## 54 55 56 57 57 58 59 11 60 61 62 63 64 65 ## 650076 650077 650078 650079 650080 650081 650082 650083 650085 650086 650087 650088 650089 650090 ## 66 67 68 69 37 70 71 72 64 73 74 75 76 77 ## 650091 650092 650093 650094 650095 650096 650097 650098 650099 650100 650101 650102 650103 650104 ## 78 11 79 80 81 82 83 84 85 86 87 1 88 89 ## 650105 650106 ## 90 91 ``` Using a user defined nearest neighbor matrix: ```r nn <- matrix(c(1,2,2,1),2,2,dimnames=list(c('one','two'))) nn ``` ``` ## [,1] [,2] ## one 1 2 ## two 2 1 ``` ```r byCluster(jarvisPatrick(fromNNMatrix(nn),k=1)) ``` ``` ## $`1` ## [1] "one" "two" ``` <div align="right">[Back to Table of Contents]()</div> Multi-Dimensional Scaling (MDS) ------------------------------- To visualize and compare clustering results, the `cluster.visualize` function can be used. The function performs Multi-Dimensional Scaling (MDS) and visualizes the results in form of a scatter plot. It requires as input an `APset`, a clustering result from `cmp.cluster`, and a cutoff for the minimum cluster size to consider in the plot. To help determining a proper cutoff size, the `cluster.sizestat` function is provided to generate cluster size statistics. MDS clustering and scatter plot: ```r cluster.visualize(apset, clusters, size.cutoff=2, quiet = TRUE) # Color codes clusters with at least two members. cluster.visualize(apset, clusters, quiet = TRUE) # Plots all items. 
``` Create a 3D scatter plot of MDS result: ```r library(scatterplot3d) coord <- cluster.visualize(apset, clusters, size.cutoff=1, dimensions=3, quiet=TRUE) scatterplot3d(coord) ``` ![](ChemmineR_files/figure-html/mds_scatter-1.png)\ Interactive 3D scatter plot with Open GL (graphics not evaluated here): ```r library(rgl) rgl.open(); offset <- 50; par3d(windowRect=c(offset, offset, 640+offset, 640+offset)) rm(offset) rgl.clear() rgl.viewpoint(theta=45, phi=30, fov=60, zoom=1) spheres3d(coord[,1], coord[,2], coord[,3], radius=0.03, color=coord[,4], alpha=1, shininess=20) aspect3d(1, 1, 1) axes3d(col='black') title3d("", "", "", "", "", col='black') bg3d("white") # To save a snapshot of the graph, one can use the command rgl.snapshot("test.png"). ``` <div align="right">[Back to Table of Contents]()</div> Clustering with Other Algorithms -------------------------------- `ChemmineR` allows the user to take advantage of the wide spectrum of clustering utilities available in R. An example on how to perform hierarchical clustering with the hclust function is given below. Create atom pair distance matrix: ```r dummy <- cmp.cluster(db=apset, cutoff=0, save.distances="distmat.rda", quiet=TRUE) ``` ``` ## ## sorting result... 
``` ```r load("distmat.rda") ``` Hierarchical clustering with `hclust`: ```r hc <- hclust(as.dist(distmat), method="single") hc[["labels"]] <- cid(apset) # Assign correct item labels plot(as.dendrogram(hc), edgePar=list(col=4, lwd=2), horiz=T) ``` ![](ChemmineR_files/figure-html/hclust-1.png)\ Instead of atom pairs one can use PubChem's fingerprints for clustering: ```r simMA <- sapply(cid(fpset), function(x) fpSim(fpset[x], fpset, sorted=FALSE)) hc <- hclust(as.dist(1-simMA), method="single") plot(as.dendrogram(hc), edgePar=list(col=4, lwd=2), horiz=TRUE) ``` Plot dendrogram with heatmap (here similarity matrix): ```r library(gplots) ``` ``` ## ## Attaching package: 'gplots' ``` ``` ## The following object is masked from 'package:stats': ## ## lowess ``` ```r heatmap.2(1-distmat, Rowv=as.dendrogram(hc), Colv=as.dendrogram(hc), col=colorpanel(40, "darkblue", "yellow", "white"), density.info="none", trace="none") ``` ![](ChemmineR_files/figure-html/heatmap-1.png)\ <div align="right">[Back to Table of Contents]()</div> Searching PubChem ================= Get Compounds from PubChem by Id -------------------------------- The function `getIds` accepts one or more numeric PubChem compound ids and downloads the corresponding compounds from PubChem Power User Gateway (PUG) returning results in an `SDFset` container. The ChemMine Tools web service is used as an intermediate, to translate queries from plain HTTP POST to a PUG SOAP query. Fetch 2 compounds from PubChem: ```r compounds <- getIds(c(111,123)) compounds ``` <div align="right">[Back to Table of Contents]()</div> Search a SMILES Query in PubChem -------------------------------- The function `searchString` accepts one SMILES string (Simplified Molecular Input Line Entry Specification) and performs a \>0.95 similarity PubChem fingerprint search, returning the hits in an `SDFset` container. 
The ChemMine Tools web service is used as an intermediate, to translate queries from plain HTTP POST to a PubChem Power User Gateway (PUG) query. Search a SMILES string on PubChem:
```r compounds <- searchString("CC(=O)OC1=CC=CC=C1C(=O)O") compounds ```
<div align="right">[Back to Table of Contents]()</div>
Search an SDF Query in PubChem
------------------------------
The function `searchSim` performs a PubChem similarity search just like `searchString`, but accepts a query in an `SDFset` container. If the query contains more than one compound, only the first is searched. Search an `SDFset` container on PubChem:
```r data(sdfsample); sdfset <- sdfsample[1] compounds <- searchSim(sdfset) compounds ```
<div align="right">[Back to Table of Contents]()</div>
ChemMine Tools R Interface
==========================
ChemMine Web Tools is an online service for analyzing and clustering small molecules. It provides numerous cheminformatics tools which can be used directly on the website, or called remotely from within R. When called within R jobs are sent remotely to a queue on a compute cluster at UC Riverside, which is a free service offered to `ChemmineR` users. The website is free and open to all users and is available at <http://chemmine.ucr.edu>. When new tools are added to the service, they automatically become available within `ChemmineR` without updating your local R package.
List all available tools: ```r listCMTools() ``` ``` ## Category Name Input Output ## 1 Upload Upload CSV Data character data.frame ## 2 Upload Upload Tab Delimited Data character data.frame ## 3 Properties JoeLib Descriptors SDFset data.frame ## 4 Properties OpenBabel Descriptors SDFset data.frame ## 5 Clustering Binning Clustering SDFset character ## 6 Clustering Multidimensional Scaling (MDS) SDFset character ## 7 Clustering Numeric Data Clustering SDFset character ## 8 Clustering Hierarchical Clustering SDFset character ## 9 Search pubchemID2SDF data.frame SDFset ## 10 Plotting Graph Visualizer igraph character ## 11 Properties ChemmineR Properties SDFset data.frame ## 12 ChemmineR sdf.visualize SDFset SDFset ## 13 Search EI Search SDFset integer ## 14 Search Fingerprint Search SDFset integer ``` Show options and description for a tool. This also provides an example function call which can be copied verbatim, and changed as necessary: ```r toolDetails("Fingerprint Search") ``` ``` ## Category: Search ## Name: Fingerprint Search ## Input R Object: SDFset ## Input mime type: chemical/x-mdl-sdfile ## Output R Object: integer ## Output mime type: text/fp.search.result ## ###### BEGIN DESCRIPTION ###### ## PubChem Fingerprint Search ## ####### END DESCRIPTION ####### ## Option 1: 'Similarity Cutoff' ## Allowed Values: '0.5' '0.6' '0.7' '0.8' '0.85' '0.9' '0.91' '0.92' '0.93' '0.94' '0.95' '0.96' '0.97' '0.98' '0.99' ## Option 2: 'Max Compounds Returned' ## Allowed Values: '10' '50' '100' '200' '1000' ## Example function call: ## job <- launchCMTool( ## 'Fingerprint Search', ## SDFset, ## 'Similarity Cutoff'='0.5', ## 'Max Compounds Returned'='10' ## ) ``` <div align="right">[Back to Table of Contents]()</div> Launch a Job ------------------------------ When a job is launched it returns a job token which refers to the running job on the UC Riverside cluster. You can check the status of a job or obtain the results as follows. 
If `result` is called on a job that is still running, it will loop internally until the job is completed, and then return the result. Launch the tool `pubchemID2SDF` to obtain the structure for PubChem cid 2244: ```r job1 <- launchCMTool("pubchemID2SDF", 2244) status(job1) result1 <- result(job1) ``` Use the previous result to search PubChem for similar compounds: ```r job2 <- launchCMTool('Fingerprint Search', result1, 'Similarity Cutoff'=0.95, 'Max Compounds Returned'=200) result2 <- result(job2) job3 <- launchCMTool("pubchemID2SDF", result2) result3 <- result(job3) ``` Compute OpenBabel descriptors for these search results: ```r job4 <- launchCMTool("OpenBabel Descriptors", result3) result4 <- result(job4) result4[1:10,] # show first 10 lines of result ``` ``` ## cid abonds atoms bonds dbonds HBA1 HBA2 HBD logP MR MW nF sbonds tbonds TPSA ## 1 2244 6 21 21 2 12 4 1 1.3101 44.9003 180.1574 0 13 0 63.60 ## 2 5161 12 29 30 2 15 5 2 2.3096 66.8248 258.2262 0 16 0 83.83 ## 3 68484 6 24 24 2 14 4 0 1.3985 49.2205 194.1840 0 16 0 52.60 ## 4 10745 12 34 35 3 18 6 1 2.5293 76.3008 300.2629 0 20 0 89.90 ## 5 135269 6 30 30 2 18 4 1 2.4804 59.3213 222.2372 0 22 0 63.60 ## 6 67252 6 22 22 1 13 3 1 1.7835 44.7003 166.1739 0 15 0 46.53 ## 7 171511 6 25 23 2 16 5 2 1.2458 47.9481 222.4777 0 15 0 72.83 ## 8 3053800 6 39 39 2 24 4 1 3.6507 73.7423 264.3169 0 31 0 63.60 ## 9 71586929 6 38 33 2 29 7 6 1.7922 60.7157 294.2140 0 25 0 91.29 ## 10 78094 6 24 24 2 14 4 1 1.6185 49.8663 194.1840 0 16 0 63.60 ``` <div align="right">[Back to Table of Contents]()</div> View Job Result in Browser ------------------------------ The function `browseJob` launches a web browser to view the results of a job online, just as if they had been run from the ChemMine Tools website itself. If you also want the result data within R, you must first call the `result` object from within R before calling `browseJob`. 
Once `browseJob` has been called on a job token, the results are no longer accessible within R. If you have an account on ChemMine Tools and would like to save the web results from your job, you must first login to your account within the default web browser on your system before you launch `browseJob`. The job will then be assigned automatically to the currently logged in account. View OpenBabel descriptors online: ```r browseJob(job4) ``` Perform binning clustering and visualize result online: ```r job5 <- launchCMTool("Binning Clustering", result3, 'Similarity Cutoff'=0.9) browseJob(job5) ``` <div align="right">[Back to Table of Contents]()</div> Version Information =================== ```r sessionInfo() ``` R version 3.2.3 (2015-12-10) Platform: x86_64-pc-linux-gnu (64-bit) Running under: Ubuntu 14.04.3 LTS locale: [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C LC_TIME=en_US.UTF-8 [4] LC_COLLATE=en_US.UTF-8 LC_MONETARY=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 [7] LC_PAPER=en_US.UTF-8 LC_NAME=C LC_ADDRESS=C [10] LC_TELEPHONE=C LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C attached base packages: [1] stats graphics utils datasets grDevices methods base other attached packages: [1] gplots_2.17.0 scatterplot3d_0.3-36 RSQLite_1.0.0 DBI_0.3.1 [5] ggplot2_2.0.0 fmcsR_1.12.0 ChemmineOB_1.8.0 ChemmineR_2.23.1 [9] BiocStyle_1.8.0 loaded via a namespace (and not attached): [1] Rcpp_0.12.3 knitr_1.12 magrittr_1.5 zlibbioc_1.16.0 munsell_0.4.2 [6] colorspace_1.2-6 rjson_0.2.15 stringr_1.0.0 plyr_1.8.3 caTools_1.17.1 [11] tools_3.2.3 parallel_3.2.3 grid_3.2.3 gtable_0.1.2 KernSmooth_2.23-15 [16] gtools_3.5.0 htmltools_0.3 yaml_2.1.13 digest_0.6.9 formatR_1.2.1 [21] codetools_0.2-14 bitops_1.0-6 RCurl_1.95-4.7 evaluate_0.8 rmarkdown_0.9.2 [26] gdata_2.17.0 stringi_1.0-1 scales_0.3.0 <div align="right">[Back to Table of Contents]()</div> Funding ======= This software was developed with funding from the National Science Foundation: 
[ABI-0957099](http://www.nsf.gov/awardsearch/showAward.do?AwardNumber=0957099), 2010-0520325 and IGERT-0504249. References ===========
tgirke/manuals
vignettes/08_ChemmineR/ChemmineR.utf8.md
Markdown
artistic-2.0
104,746
package local

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"

	"github.com/restic/restic/backend"
)

// ErrWrongData is returned when data read back from the backend does not
// match the expected checksum.
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")

// Local is a backend that stores all data in a directory tree on the local
// file system, rooted at p.
type Local struct {
	p string
}

// paths returns the root directory followed by every subdirectory a local
// backend at dir consists of. Shared by Open and Create so the two lists
// cannot drift apart.
func paths(dir string) []string {
	return []string{
		dir,
		filepath.Join(dir, backend.Paths.Data),
		filepath.Join(dir, backend.Paths.Snapshots),
		filepath.Join(dir, backend.Paths.Index),
		filepath.Join(dir, backend.Paths.Locks),
		filepath.Join(dir, backend.Paths.Keys),
		filepath.Join(dir, backend.Paths.Temp),
	}
}

// Open opens the local backend at dir. All required subdirectories must
// already exist.
func Open(dir string) (*Local, error) {
	// test if all necessary dirs are there
	for _, d := range paths(dir) {
		if _, err := os.Stat(d); err != nil {
			return nil, fmt.Errorf("%s does not exist", d)
		}
	}

	return &Local{p: dir}, nil
}

// Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created.
func Create(dir string) (*Local, error) {
	dirs := paths(dir)

	// test if config file already exists.
	// BUG FIX: the config file lives inside dir; the previous code stat'ed
	// the bare relative path backend.Paths.Config (resolved against the
	// current working directory) and therefore never detected an existing
	// repository.
	_, err := os.Lstat(filepath.Join(dir, backend.Paths.Config))
	if err == nil {
		return nil, errors.New("config file already exists")
	}

	// test if directories already exist (skip dirs[0], the root itself,
	// which may legitimately pre-exist as an empty directory)
	for _, d := range dirs[1:] {
		if _, err := os.Stat(d); err == nil {
			return nil, fmt.Errorf("dir %s already exists", d)
		}
	}

	// create paths for data, refs and temp
	for _, d := range dirs {
		if err := os.MkdirAll(d, backend.Modes.Dir); err != nil {
			return nil, err
		}
	}

	// open backend
	return Open(dir)
}

// Location returns this backend's location (the directory name).
func (b *Local) Location() string {
	return b.p
}

// Return temp directory in correct directory for this backend.
// tempFile creates a new temporary file inside this backend's temp
// directory, so the later rename onto the final location stays on the same
// file system (rename across file systems would fail).
func (b *Local) tempFile() (*os.File, error) {
	return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-")
}

// localBlob accumulates data in a temporary file until Finalize() moves it
// to its final, read-only location.
type localBlob struct {
	f       *os.File // backing temp file
	size    uint     // bytes written so far
	final   bool     // set once Finalize() has run
	basedir string   // backend root directory
}

// Write appends p to the temporary file. Writing after Finalize() is an
// error.
func (lb *localBlob) Write(p []byte) (int, error) {
	if lb.final {
		return 0, errors.New("blob already closed")
	}

	n, err := lb.f.Write(p)
	lb.size += uint(n)

	return n, err
}

// Size returns the number of bytes written to the blob so far.
func (lb *localBlob) Size() uint {
	return lb.size
}

// Finalize closes the temporary file, moves it to the location derived from
// t and name, and marks the result read-only. It may be called at most once;
// it fails if a file already exists at the destination.
func (lb *localBlob) Finalize(t backend.Type, name string) error {
	if lb.final {
		return errors.New("Already finalized")
	}

	lb.final = true

	err := lb.f.Close()
	if err != nil {
		return fmt.Errorf("local: file.Close: %v", err)
	}

	f := filename(lb.basedir, t, name)

	// create directories if necessary, ignore errors: a failure here will
	// surface as a rename error below
	if t == backend.Data {
		os.MkdirAll(filepath.Dir(f), backend.Modes.Dir)
	}

	// test if new path already exists
	// NOTE(review): stat-then-rename is racy against concurrent writers of
	// the same name — presumably acceptable for this backend; confirm.
	if _, err := os.Stat(f); err == nil {
		return fmt.Errorf("Close(): file %v already exists", f)
	}

	if err := os.Rename(lb.f.Name(), f); err != nil {
		return err
	}

	// set mode to read-only (clear all write bits, keep everything else)
	fi, err := os.Stat(f)
	if err != nil {
		return err
	}

	return os.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
}

// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (b *Local) Create() (backend.Blob, error) {
	// TODO: make sure that tempfile is removed upon error

	// create tempfile in backend
	file, err := b.tempFile()
	if err != nil {
		return nil, err
	}

	blob := localBlob{
		f:       file,
		basedir: b.p,
	}

	return &blob, nil
}

// Construct path for given Type and name. Config blobs live directly in the
// backend root as "config"; every other type is placed below its type
// directory (see dirname).
func filename(base string, t backend.Type, name string) string {
	if t == backend.Config {
		return filepath.Join(base, "config")
	}

	return filepath.Join(dirname(base, t, name), name)
}

// Construct directory for given Type.
func dirname(base string, t backend.Type, name string) string { var n string switch t { case backend.Data: n = backend.Paths.Data if len(name) > 2 { n = filepath.Join(n, name[:2]) } case backend.Snapshot: n = backend.Paths.Snapshots case backend.Index: n = backend.Paths.Index case backend.Lock: n = backend.Paths.Locks case backend.Key: n = backend.Paths.Keys } return filepath.Join(base, n) } // Get returns a reader that yields the content stored under the given // name. The reader should be closed after draining it. func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) { return os.Open(filename(b.p, t, name)) } // GetReader returns an io.ReadCloser for the Blob with the given name of // type t at offset and length. If length is 0, the reader reads until EOF. func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) { f, err := os.Open(filename(b.p, t, name)) if err != nil { return nil, err } _, err = f.Seek(int64(offset), 0) if err != nil { return nil, err } if length == 0 { return f, nil } return backend.LimitReadCloser(f, int64(length)), nil } // Test returns true if a blob of the given type and name exists in the backend. func (b *Local) Test(t backend.Type, name string) (bool, error) { _, err := os.Stat(filename(b.p, t, name)) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } return true, nil } // Remove removes the blob with the given name and type. func (b *Local) Remove(t backend.Type, name string) error { return os.Remove(filename(b.p, t, name)) } // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. 
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { // TODO: use os.Open() and d.Readdirnames() instead of Glob() var pattern string if t == backend.Data { pattern = filepath.Join(dirname(b.p, t, ""), "*", "*") } else { pattern = filepath.Join(dirname(b.p, t, ""), "*") } ch := make(chan string) matches, err := filepath.Glob(pattern) if err != nil { close(ch) return ch } for i := range matches { matches[i] = filepath.Base(matches[i]) } sort.Strings(matches) go func() { defer close(ch) for _, m := range matches { if m == "" { continue } select { case ch <- m: case <-done: return } } }() return ch } // Delete removes the repository and all files. func (b *Local) Delete() error { return os.RemoveAll(b.p) } // Close does nothing func (b *Local) Close() error { return nil }
intfrr/restic
backend/local/local.go
GO
bsd-2-clause
6,514
/* * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef WebKit_h #define WebKit_h #include "../platform/Platform.h" #include <v8.h> namespace blink { // Initialize the entire Blink (wtf, platform, core, modules and web). // If you just need wtf and platform, use Platform::initialize instead. // // Must be called on the thread that will be the main thread before // using any other public APIs. The provided Platform; must be // non-null and must remain valid until the current thread calls shutdown. 
BLINK_EXPORT void initialize(Platform*);

// Get the V8 Isolate for the main thread.
// initialize must have been called first.
BLINK_EXPORT v8::Isolate* mainThreadIsolate();

// Once shutdown, the Platform passed to initialize will no longer
// be accessed. No other WebKit objects should be in use when this function is
// called. Any background threads created by WebKit are promised to be
// terminated by the time this function returns.
BLINK_EXPORT void shutdown();

// Alters the rendering of content to conform to a fixed set of rules.
BLINK_EXPORT void setLayoutTestMode(bool);
BLINK_EXPORT bool layoutTestMode();

// Enables or disables the use of the mock theme for layout tests. This function
// must be called only if setLayoutTestMode(true).
BLINK_EXPORT void setMockThemeEnabledForTest(bool);

// Alters the rendering of fonts for layout tests.
BLINK_EXPORT void setFontAntialiasingEnabledForTest(bool);
BLINK_EXPORT bool fontAntialiasingEnabledForTest();

// Forces the use of the complex text path for layout tests.
BLINK_EXPORT void setAlwaysUseComplexTextForTest(bool);
BLINK_EXPORT bool alwaysUseComplexTextForTest();

// Purge the plugin list cache. If |reloadPages| is true, any pages
// containing plugins will be reloaded after refreshing the plugin list.
BLINK_EXPORT void resetPluginCache(bool reloadPages = false);

// The embedder should call this periodically in an attempt to balance overall
// performance and memory usage.
BLINK_EXPORT void decommitFreeableMemory();

// Send memory pressure notification to worker thread isolate.
// NOTE(review): unlike the other free functions here this one is
// UpperCamelCase; the name is public API, so it is left as-is.
BLINK_EXPORT void MemoryPressureNotificationToWorkerThreadIsolates(
    v8::MemoryPressureLevel);

// Set the RAIL performance mode on all worker thread isolates.
BLINK_EXPORT void setRAILModeOnWorkerThreadIsolates(v8::RAILMode);

} // namespace blink

#endif
ssaroha/node-webrtc
third_party/webrtc/include/chromium/src/third_party/WebKit/public/web/WebKit.h
C
bsd-2-clause
3,845
To use builtin contracts you can refer to them with `Contracts::*`:

```ruby
Contract Contracts::Num => Contracts::Maybe(Contracts::Num)
```

It is recommended to use a short alias for `Contracts`, for example `C`:

```ruby
C = Contracts

Contract C::Num => C::Maybe(C::Num)
```

It is possible to `include Contracts` and refer to them without a namespace, but this is deprecated and not recommended.

*NOTE: in the future it will be possible to do `include Contracts::Builtin` instead.*

*NOTE: all contracts marked as (TODO) have their documentation `.feature` file as stub. Contributions to those are warmly welcome!*
smt116/contracts.ruby
features/builtin_contracts/README.md
Markdown
bsd-2-clause
612
Copyright (c) 2013, Ryan Popa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Mov1s/RafiBot
LICENSE.md
Markdown
bsd-2-clause
1,292
#ifndef Z3_H
#define Z3_H

#include "../SMT.h"

#include <memory>
#include <vector>
#include <z3++.h>

namespace SMT {

    using namespace z3;

// class Z3Expr {
//     std::shared_ptr<>
// }

    // Z3 backend: implements the generic SMTFactory interface on top of the
    // z3++ C++ API. Both the term and boolean template parameters are
    // instantiated with z3::expr.
    class Z3Solver : public SMTFactory<expr, expr> {
        public:
        // Human-readable name used to select this solver.
        static inline const std::string solver_name() {
            return "Z3";
        };
        Z3Solver();
        ~Z3Solver();

//        sort createBoolType() override;
//        sort createBVType(int size) override;
        // Variable and constant constructors (booleans and bit-vectors).
        expr createVar2(const std::string name, int size) override;
        expr createBoolVar(const std::string name) override;
        expr createBVVar(const std::string name, int size) override;
        expr createBVConst(int value, int size) override;
        expr createBoolConst(int value) override;
        expr createTrue() override;
        expr createFalse() override;
        // Propositional and bit-vector term builders.
        expr createOrExpr(expr lhs, expr rhs) override;
        expr createAndExpr(expr lhs, expr rhs) override;
        expr createNotExpr(expr _expr) override;
        expr createCondExpr(expr cond, expr choice1, expr choice2) override;
        expr createEqExpr(expr lhs, expr rhs) override;
        expr createGtExpr(expr lhs, expr rhs) override;
        expr createGEqExpr(expr lhs, expr rhs) override;
        expr createLtExpr(expr lhs, expr rhs) override;
        expr createLEqExpr(expr lhs, expr rhs) override;
        expr createImplExpr(expr lhs, expr rhs) override;
        expr createBitSet(expr container, unsigned int ith, expr value) override;
        expr createDistinct(std::list<expr> exprs) override;
        expr joinExprsWithAnd(std::list<expr>& exprs) override;
        expr joinExprsWithOr(std::list<expr>& exprs) override;

        // Assertion management and solving.
        void assertLater(expr e) override;
        void assertNow(expr e) override;

        SMTResult solve() override;
        void printModel() override;
        void loadToSolver() override;
        void clean() override;
        void deep_clean() override;
        void printExpr(expr e) override;
        void printContext() override;
        void printContext(std::string filename) override;

//        void push() override;
//        void pop() override;

        private:
        // Underlying Z3 context and solver; the context must outlive every
        // expr handed out by this factory.
        z3::context context;
        z3::solver solver;
    };
}

#endif
lamthientruc/vac
src/VACSAT/src/SMTSolvers/Z3.h
C
bsd-2-clause
2,297
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: soc2013/dpl/head/sys/i386/include/nexusvar.h 177200 2008-03-13 20:39:04Z jhb $
 */

#ifndef _MACHINE_NEXUSVAR_H_
#define	_MACHINE_NEXUSVAR_H_

/* Per-device ivars for children of the nexus bus driver. */
struct nexus_device {
	struct resource_list	nx_resources;	/* resources assigned to the child */
};

DECLARE_CLASS(nexus_driver);

/* Resource managers for the machine's IRQ, DRQ, I/O-port and memory spaces. */
extern struct rman irq_rman, drq_rman, port_rman, mem_rman;

/* One-time setup of the resource managers above. */
void nexus_init_resources(void);

#endif /* !_MACHINE_NEXUSVAR_H_ */
dplbsd/soc2013
head/sys/i386/include/nexusvar.h
C
bsd-2-clause
1,880
/* See LICENSE file for copyright and license details. */

/* PATH values exported into the environment for the superuser and for
 * ordinary users, respectively. */
#define ENV_SUPATH "/bin"
#define ENV_PATH "/bin"

/* crypt(3) salt prefix selecting the password hashing scheme. */
#define PW_CIPHER "$6$" /* SHA-512 */

/* Locations of the login accounting databases; #undef first to override
 * any defaults inherited from system headers. */
#undef UTMP_PATH
#define UTMP_PATH "/var/run/utmp"
#undef BTMP_PATH
#define BTMP_PATH "/var/log/btmp"
#undef WTMP_PATH
#define WTMP_PATH "/var/log/wtmp"
mikedlowis-prototypes/albase
source/ubase/config.h
C
bsd-2-clause
300
! Driver for the SCA forward-dependency test case: fills the first vertical
! level of q and t for every column, then calls compute_one with the data
! mapped to the accelerator via OpenMP target directives.
! NOTE(review): this file looks like generated reference output of the CLAW
! compiler test suite — confirm before reformatting further.
PROGRAM test_abstraction8
 USE mo_column_extra , ONLY: compute_one
 REAL :: q ( 1 : 20 , 1 : 60 )
 REAL :: t ( 1 : 20 , 1 : 60 )
 INTEGER :: nproma
 INTEGER :: nz
 INTEGER :: p

 nproma = 20
 nz = 60
 ! Initialize the surface level of each column
 DO p = 1 , nproma , 1
  q ( p , 1 ) = 0.0
  t ( p , 1 ) = 0.0
 END DO
!$omp target data map(alloc:q(:,:),t(:,:))
!$omp target update to(q(:,:),t(:,:))
 CALL compute_one ( nz , q ( : , : ) , t ( : , : ) , nproma = nproma )
!$omp target update from(q(:,:),t(:,:))
!$omp end target data
 PRINT * , sum ( q )
 PRINT * , sum ( t )
END PROGRAM test_abstraction8
clementval/claw-compiler
test/claw/sca/forward_dependencies/reference_main_omp.f90
FORTRAN
bsd-2-clause
558
# Perforce Triggered Build

Perforce can trigger Jenkins to build based on an event, such as a submitted change. To enable builds to be triggered by Perforce, select **Perforce triggered build** in the Freestyle job page.

A triggered build also requires an administrator to add a Perforce trigger to the Perforce server. For information about adding a trigger, see [Using triggers to customize behavior](https://www.perforce.com/perforce/doc.current/manuals/p4sag/chapter.scripting.html) in [Helix Core Server Administrator Guide: Fundamentals](https://www.perforce.com/perforce/doc.current/manuals/p4sag/index.html#P4SAG/about.html).

The trigger needs to POST a JSON payload to the Jenkins end-point `JENKINS_URL/p4/change/`.

The JSON payload must contain the `p4port` string that matches the P4Port field specified in the **Perforce Credential** (please note that the field `change` is not currently used, it has been added for future compatibility).

**For example:**

- A simple `change-commit` or `graph-push-complete` trigger can use curl:

```
#!/bin/bash
CHANGE=$1
P4PORT=perforce:1666
JUSER=admin
JPASS=pass
JSERVER=http://localhost:8080

curl --header 'Content-Type: application/json' \
     --request POST \
     --silent \
     --user $JUSER:$JPASS \
     --data payload="{change:$CHANGE,p4port:\"$P4PORT\"}" \
     $JSERVER/p4/change
```

- It must have an entry in `p4 triggers` for changes on `//depot/...`:

```
jenkins change-commit //depot/... "/p4/common/bin/triggers/jdepot.sh %change%"
```

- or for Graph content:

```
helix4git graph-push-complete //repos/... "/p4/common/bin/triggers/jgraph.sh %depotName% %repoName% %pusher%"
```

## Server Authentication

*Deprecated as of 1.8.10 (CRUMB support is embedded)*

If your Jenkins server needs authentication you will also need to provide a security `CRUMB`.
The following is an example of how you can get this and use it to trigger a job: ``` #!/bin/bash CHANGE=$1 P4PORT=perforce:1666 JUSER=admin JPASS=pass JSERVER=http://localhost:8080 # Get CRUMB CRUMB=$(curl --silent --user $JUSER:$JPASS $JSERVER/crumbIssuer/api/xml?xpath=concat\(//crumbRequestField,%22":"%22,//crumb\)) # Trigger builds across all triggered jobs (where relevant) curl --header "$CRUMB" \ --request POST \ --silent \ --user $JUSER:$JPASS \ --data payload="{change:$CHANGE,p4port:\"$P4PORT\"}" \ $JSERVER/p4/change ``` Click the browser **Back** button to go back to the previous page.
jenkinsci/p4-plugin
docs/BUILDTRIGGERPERFORCE.md
Markdown
bsd-2-clause
2,490
/*
 * Copyright (c) 2013, Jay Elliott
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef IMG_ERROR_H_
#define IMG_ERROR_H_

/* maximum length of error messages (including the terminating NUL) */
#define IMG_ERROR_LEN 256

/*
 * set the current error message.
 * Takes a printf-style format string followed by its arguments.
 * This function can be called with NULL
 * to unset the error.
 */
void img_set_error_msg(const char* msg, ...);

/* get the current error message. Will return NULL if there is no error */
const char* img_get_error_msg(void);

#endif
snickerbockers/image_loader
include/error.h
C
bsd-2-clause
1,739
# Homebrew Cask for MenuBar Stats, a macOS system monitor that lives in the
# menu bar. Requires macOS Sierra or newer.
cask 'menubar-stats' do
  version '3.0'
  sha256 '2e4987f5479a20c1279612f32a3d2a77f531e83106105ca91bfd83215417f621'

  url 'https://seense.com/menubarstats/updateapp/mbs.zip'
  appcast 'https://www.seense.com/menubarstats/updateapp/appcast.xml'
  name 'MenuBar Stats'
  homepage 'https://seense.com/menubarstats/'

  depends_on macos: '>= :sierra'

  app 'MenuBar Stats.app'
end
jasmas/homebrew-cask
Casks/menubar-stats.rb
Ruby
bsd-2-clause
379
/* ---------------------------------------------------------------------------- */ /* Atmel Microcontroller Software Support */ /* SAM Software Package License */ /* ---------------------------------------------------------------------------- */ /* Copyright (c) 2015, Atmel Corporation */ /* */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following condition is met: */ /* */ /* - Redistributions of source code must retain the above copyright notice, */ /* this list of conditions and the disclaimer below. */ /* */ /* Atmel's name may not be used to endorse or promote products derived from */ /* this software without specific prior written permission. */ /* */ /* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR */ /* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE */ /* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ /* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, */ /* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING */ /* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, */ /* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
/* ---------------------------------------------------------------------------- */

#include "sam4sp.h"

/* @cond 0 */
/**INDENT-OFF**/
#ifdef __cplusplus
extern "C" {
#endif
/**INDENT-ON**/
/* @endcond */

/* Clock Settings (120MHz) */
#define SYS_BOARD_OSCOUNT (CKGR_MOR_MOSCXTST(0x8U))
#define SYS_BOARD_PLLAR (CKGR_PLLAR_ONE \
		| CKGR_PLLAR_MULA(0x13U) \
		| CKGR_PLLAR_PLLACOUNT(0x3fU) \
		| CKGR_PLLAR_DIVA(0x1U))
#define SYS_BOARD_MCKR (PMC_MCKR_PRES_CLK_2 | PMC_MCKR_CSS_PLLA_CLK)

#define SYS_CKGR_MOR_KEY_VALUE CKGR_MOR_KEY(0x37) /* Key to unlock MOR register */

/* Current CPU clock in Hz; maintained by SystemInit/SystemCoreClockUpdate. */
uint32_t SystemCoreClock = CHIP_FREQ_MAINCK_RC_4MHZ;

/**
 * \brief Setup the microcontroller system.
 * Initialize the System and update the SystemFrequency variable.
 *
 * Sequence: raise flash wait states, start the external crystal alongside
 * the internal RC, switch MAINCK to the crystal, start PLLA, and finally
 * select PLLA (prescaled by 2) as the master clock.
 */
void SystemInit(void)
{
	/* Set FWS according to SYS_BOARD_MCKR configuration */
	EFC0->EEFC_FMR = EEFC_FMR_FWS(5);
	EFC1->EEFC_FMR = EEFC_FMR_FWS(5);

	/* Initialize main oscillator (keep the RC running while the crystal
	 * stabilises) */
	if (!(PMC->CKGR_MOR & CKGR_MOR_MOSCSEL)) {
		PMC->CKGR_MOR = SYS_CKGR_MOR_KEY_VALUE | SYS_BOARD_OSCOUNT | CKGR_MOR_MOSCRCEN | CKGR_MOR_MOSCXTEN;
		while (!(PMC->PMC_SR & PMC_SR_MOSCXTS)) {
		}
	}

	/* Switch to 3-20MHz Xtal oscillator */
	PMC->CKGR_MOR = SYS_CKGR_MOR_KEY_VALUE | SYS_BOARD_OSCOUNT | CKGR_MOR_MOSCRCEN | CKGR_MOR_MOSCXTEN | CKGR_MOR_MOSCSEL;
	while (!(PMC->PMC_SR & PMC_SR_MOSCSELS)) {
	}

	/* Run the master clock from MAINCK while reprogramming the PLL */
	PMC->PMC_MCKR = (PMC->PMC_MCKR & ~(uint32_t)PMC_MCKR_CSS_Msk) | PMC_MCKR_CSS_MAIN_CLK;
	while (!(PMC->PMC_SR & PMC_SR_MCKRDY)) {
	}

	/* Initialize PLLA */
	PMC->CKGR_PLLAR = SYS_BOARD_PLLAR;
	while (!(PMC->PMC_SR & PMC_SR_LOCKA)) {
	}

	/* Switch to main clock (prescaler first, source second, as the PMC
	 * requires) */
	PMC->PMC_MCKR = (SYS_BOARD_MCKR & ~PMC_MCKR_CSS_Msk) | PMC_MCKR_CSS_MAIN_CLK;
	while (!(PMC->PMC_SR & PMC_SR_MCKRDY)) {
	}

	/* Switch to PLLA */
	PMC->PMC_MCKR = SYS_BOARD_MCKR;
	while (!(PMC->PMC_SR & PMC_SR_MCKRDY)) {
	}

	SystemCoreClock = CHIP_FREQ_CPU_MAX;
}

/*
 * Recompute SystemCoreClock from the current PMC/SUPC register state.
 * NOTE(review): assumes a 20 MHz main crystal (CHIP_FREQ_XTAL_20M) — confirm
 * against the board's actual crystal.
 */
void SystemCoreClockUpdate(void)
{
	/* Determine clock frequency according to clock register values */
	switch (PMC->PMC_MCKR & (uint32_t) PMC_MCKR_CSS_Msk) {
	case PMC_MCKR_CSS_SLOW_CLK: /* Slow clock */
		if (SUPC->SUPC_SR & SUPC_SR_OSCSEL) {
			SystemCoreClock = CHIP_FREQ_XTAL_32K;
		} else {
			SystemCoreClock = CHIP_FREQ_SLCK_RC;
		}
		break;

	case PMC_MCKR_CSS_MAIN_CLK: /* Main clock */
		if (PMC->CKGR_MOR & CKGR_MOR_MOSCSEL) {
			SystemCoreClock = CHIP_FREQ_XTAL_20M;
		} else {
			/* Internal RC: base 4 MHz, scaled by the selected RC band */
			SystemCoreClock = CHIP_FREQ_MAINCK_RC_4MHZ;

			switch (PMC->CKGR_MOR & CKGR_MOR_MOSCRCF_Msk) {
			case CKGR_MOR_MOSCRCF_4_MHz:
				break;
			case CKGR_MOR_MOSCRCF_8_MHz:
				SystemCoreClock *= 2U;
				break;
			case CKGR_MOR_MOSCRCF_12_MHz:
				SystemCoreClock *= 3U;
				break;
			default:
				break;
			}
		}
		break;

	case PMC_MCKR_CSS_PLLA_CLK: /* PLLA clock */
	case PMC_MCKR_CSS_PLLB_CLK: /* PLLB clock */
		/* Start from the PLL input (MAINCK) frequency... */
		if (PMC->CKGR_MOR & CKGR_MOR_MOSCSEL) {
			SystemCoreClock = CHIP_FREQ_XTAL_20M;
		} else {
			SystemCoreClock = CHIP_FREQ_MAINCK_RC_4MHZ;

			switch (PMC->CKGR_MOR & CKGR_MOR_MOSCRCF_Msk) {
			case CKGR_MOR_MOSCRCF_4_MHz:
				break;
			case CKGR_MOR_MOSCRCF_8_MHz:
				SystemCoreClock *= 2U;
				break;
			case CKGR_MOR_MOSCRCF_12_MHz:
				SystemCoreClock *= 3U;
				break;
			default:
				break;
			}
		}

		/* ...then apply the multiplier/divider of the active PLL */
		if ((uint32_t) (PMC->PMC_MCKR & (uint32_t) PMC_MCKR_CSS_Msk) == PMC_MCKR_CSS_PLLA_CLK) {
			SystemCoreClock *= ((((PMC->CKGR_PLLAR) & CKGR_PLLAR_MULA_Msk) >> CKGR_PLLAR_MULA_Pos) + 1U);
			SystemCoreClock /= ((((PMC->CKGR_PLLAR) & CKGR_PLLAR_DIVA_Msk) >> CKGR_PLLAR_DIVA_Pos));
		} else {
			SystemCoreClock *= ((((PMC->CKGR_PLLBR) & CKGR_PLLBR_MULB_Msk) >> CKGR_PLLBR_MULB_Pos) + 1U);
			SystemCoreClock /= ((((PMC->CKGR_PLLBR) & CKGR_PLLBR_DIVB_Msk) >> CKGR_PLLBR_DIVB_Pos));
		}
		break;

	default:
		break;
	}

	/* Finally apply the master clock prescaler (PRES=7 means divide by 3;
	 * all other values are powers of two) */
	if ((PMC->PMC_MCKR & PMC_MCKR_PRES_Msk) == PMC_MCKR_PRES_CLK_3) {
		SystemCoreClock /= 3U;
	} else {
		SystemCoreClock >>= ((PMC->PMC_MCKR & PMC_MCKR_PRES_Msk) >> PMC_MCKR_PRES_Pos);
	}
}

/**
 * Initialize flash.
*/ void system_init_flash(uint32_t ul_clk) { /* Set FWS for embedded Flash access according to operating frequency */ if (ul_clk < CHIP_FREQ_FWS_0) { EFC0->EEFC_FMR = EEFC_FMR_FWS(0); EFC1->EEFC_FMR = EEFC_FMR_FWS(0); } else if (ul_clk < CHIP_FREQ_FWS_1) { EFC0->EEFC_FMR = EEFC_FMR_FWS(1); EFC1->EEFC_FMR = EEFC_FMR_FWS(1); } else if (ul_clk < CHIP_FREQ_FWS_2) { EFC0->EEFC_FMR = EEFC_FMR_FWS(2); EFC1->EEFC_FMR = EEFC_FMR_FWS(2); } else if (ul_clk < CHIP_FREQ_FWS_3) { EFC0->EEFC_FMR = EEFC_FMR_FWS(3); EFC1->EEFC_FMR = EEFC_FMR_FWS(3); } else if (ul_clk < CHIP_FREQ_FWS_4) { EFC0->EEFC_FMR = EEFC_FMR_FWS(4); EFC1->EEFC_FMR = EEFC_FMR_FWS(4); } else { EFC0->EEFC_FMR = EEFC_FMR_FWS(5); EFC1->EEFC_FMR = EEFC_FMR_FWS(5); } } /* @cond 0 */ /**INDENT-OFF**/ #ifdef __cplusplus } #endif /**INDENT-ON**/ /* @endcond */
AtmelUniversityFrance/SAM4E-XPRO
Examples/CMSIS/Device/ATMEL/sam4sp/source/system_sam4sp.c
C
bsd-2-clause
7,240
/* * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Daniel Eischen. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 * $FreeBSD: soc2013/dpl/head/lib/libkse/thread/thr_priority_queue.c 174155 2007-11-30 17:20:29Z deischen $
 */

#include "namespace.h"
#include <stdlib.h>
#include <sys/queue.h>
#include <string.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* Prototypes: */
static void pq_insert_prio_list(pq_queue_t *pq, int prio);

#if defined(_PTHREADS_INVARIANTS)

#define PQ_IN_SCHEDQ	(THR_FLAGS_IN_RUNQ | THR_FLAGS_IN_WAITQ)

/* Debug-build invariants: the active flag guards against re-entrant queue
 * manipulation; the remaining asserts validate a thread's queue membership. */
#define PQ_SET_ACTIVE(pq)		(pq)->pq_flags |= PQF_ACTIVE
#define PQ_CLEAR_ACTIVE(pq)		(pq)->pq_flags &= ~PQF_ACTIVE
#define PQ_ASSERT_ACTIVE(pq, msg)	do {		\
	if (((pq)->pq_flags & PQF_ACTIVE) == 0)		\
		PANIC(msg);				\
} while (0)
#define PQ_ASSERT_INACTIVE(pq, msg)	do {		\
	if (((pq)->pq_flags & PQF_ACTIVE) != 0)		\
		PANIC(msg);				\
} while (0)
#define PQ_ASSERT_IN_WAITQ(thrd, msg)	do {		\
	if (((thrd)->flags & THR_FLAGS_IN_WAITQ) == 0)	\
		PANIC(msg);				\
} while (0)
#define PQ_ASSERT_IN_RUNQ(thrd, msg)	do {		\
	if (((thrd)->flags & THR_FLAGS_IN_RUNQ) == 0)	\
		PANIC(msg);				\
} while (0)
#define PQ_ASSERT_NOT_QUEUED(thrd, msg) do {		\
	if (((thrd)->flags & PQ_IN_SCHEDQ) != 0)	\
		PANIC(msg);				\
} while (0)

#else

/* Release build: all invariant checks compile away to nothing. */
#define PQ_SET_ACTIVE(pq)
#define PQ_CLEAR_ACTIVE(pq)
#define PQ_ASSERT_ACTIVE(pq, msg)
#define PQ_ASSERT_INACTIVE(pq, msg)
#define PQ_ASSERT_IN_WAITQ(thrd, msg)
#define PQ_ASSERT_IN_RUNQ(thrd, msg)
#define PQ_ASSERT_NOT_QUEUED(thrd, msg)

#endif

/*
 * Allocate storage for a priority queue covering priorities
 * minprio..maxprio (inclusive) and initialize it.
 * Returns 0 on success, -1 on failure.
 */
int
_pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
{
	int ret = 0;
	int prioslots = maxprio - minprio + 1;

	if (pq == NULL)
		ret = -1;

	/* Create the priority queue with (maxprio - minprio + 1) slots: */
	else if ((pq->pq_lists =
	    (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL)
		ret = -1;

	else {
		/* Remember the queue size: */
		pq->pq_size = prioslots;
		ret = _pq_init(pq);
	}
	return (ret);
}

/*
 * Release the per-priority list storage allocated by _pq_alloc.
 */
void
_pq_free(pq_queue_t *pq)
{
	if ((pq != NULL) && (pq->pq_lists != NULL))
		free(pq->pq_lists);
}

/*
 * (Re)initialize an allocated priority queue: empty every priority list
 * and reset the queue state. Returns 0 on success, -1 on failure.
 */
int
_pq_init(pq_queue_t *pq)
{
	int i, ret = 0;

	if ((pq == NULL) || (pq->pq_lists == NULL))
		ret = -1;

	else {
		/* Initialize the queue for each priority slot: */
		for (i = 0; i < pq->pq_size; i++) {
			TAILQ_INIT(&pq->pq_lists[i].pl_head);
			pq->pq_lists[i].pl_prio = i;
			pq->pq_lists[i].pl_queued = 0;
		}
		/* Initialize the priority queue: */
		TAILQ_INIT(&pq->pq_queue);
		pq->pq_flags = 0;
		pq->pq_threads = 0;
	}
	return (ret);
}

/*
 * Remove a thread (which must currently be in the run queue) from the
 * priority queue.
 */
void
_pq_remove(pq_queue_t *pq, pthread_t pthread)
{
	int prio = pthread->active_priority;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_remove: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_IN_RUNQ(pthread, "_pq_remove: Not in priority queue");

	/*
	 * Remove this thread from priority list.  Note that if
	 * the priority list becomes empty, it is not removed
	 * from the priority queue because another thread may be
	 * added to the priority list (resulting in a needless
	 * removal/insertion).  Priority lists are only removed
	 * from the priority queue when _pq_first is called.
	 */
	TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe);
	pq->pq_threads--;

	/* This thread is no longer in the priority queue. */
	pthread->flags &= ~THR_FLAGS_IN_RUNQ;

	PQ_CLEAR_ACTIVE(pq);
}

/*
 * Insert a thread at the head of its priority's list, so it runs before
 * other threads of equal priority.
 */
void
_pq_insert_head(pq_queue_t *pq, pthread_t pthread)
{
	int prio;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_insert_head: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_NOT_QUEUED(pthread,
	    "_pq_insert_head: Already in priority queue");

	prio = pthread->active_priority;
	TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe);
	if (pq->pq_lists[prio].pl_queued == 0)
		/* Insert the list into the priority queue: */
		pq_insert_prio_list(pq, prio);
	pq->pq_threads++;

	/* Mark this thread as being in the priority queue. */
	pthread->flags |= THR_FLAGS_IN_RUNQ;

	PQ_CLEAR_ACTIVE(pq);
}

/*
 * Insert a thread at the tail of its priority's list, behind other
 * threads of equal priority.
 */
void
_pq_insert_tail(pq_queue_t *pq, pthread_t pthread)
{
	int prio;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_insert_tail: pq_active");
	PQ_SET_ACTIVE(pq);
	PQ_ASSERT_NOT_QUEUED(pthread,
	    "_pq_insert_tail: Already in priority queue");

	prio = pthread->active_priority;
	TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe);
	if (pq->pq_lists[prio].pl_queued == 0)
		/* Insert the list into the priority queue: */
		pq_insert_prio_list(pq, prio);
	pq->pq_threads++;

	/* Mark this thread as being in the priority queue. */
	pthread->flags |= THR_FLAGS_IN_RUNQ;

	PQ_CLEAR_ACTIVE(pq);
}

/*
 * Return the highest-priority queued thread without removing it, pruning
 * empty priority lists from the queue along the way. Returns NULL when
 * the queue is empty.
 */
pthread_t
_pq_first(pq_queue_t *pq)
{
	pq_list_t *pql;
	pthread_t pthread = NULL;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
	PQ_SET_ACTIVE(pq);

	while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) &&
	    (pthread == NULL)) {
		if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
			/*
			 * The priority list is empty; remove the list
			 * from the queue.
			 */
			TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);

			/* Mark the list as not being in the queue: */
			pql->pl_queued = 0;
		}
	}

	PQ_CLEAR_ACTIVE(pq);
	return (pthread);
}

/*
 * Select a thread which is allowed to run by debugger, we probably
 * should merge the function into _pq_first if that function is only
 * used by scheduler to select a thread.
 */
pthread_t
_pq_first_debug(pq_queue_t *pq)
{
	pq_list_t *pql, *pqlnext = NULL;
	pthread_t pthread = NULL;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
	PQ_SET_ACTIVE(pq);

	for (pql = TAILQ_FIRST(&pq->pq_queue);
	     pql != NULL && pthread == NULL; pql = pqlnext) {
		if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
			/*
			 * The priority list is empty; remove the list
			 * from the queue.
			 */
			pqlnext = TAILQ_NEXT(pql, pl_link);
			TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);

			/* Mark the list as not being in the queue: */
			pql->pl_queued = 0;
		} else {
			/*
			 * note there may be a suspension event during this
			 * test, If TMDF_SUSPEND is set after we tested it,
			 * we will run the thread, this seems be a problem,
			 * fortunatly, when we are being debugged, all context
			 * switch will be done by kse_switchin, that is a
			 * syscall, kse_switchin will check the flag again,
			 * the thread will be returned via upcall, so next
			 * time, UTS won't run the thread.
			 */
			while (pthread != NULL && !DBG_CAN_RUN(pthread)) {
				pthread = TAILQ_NEXT(pthread, pqe);
			}
			if (pthread == NULL)
				pqlnext = TAILQ_NEXT(pql, pl_link);
		}
	}

	PQ_CLEAR_ACTIVE(pq);
	return (pthread);
}

/*
 * Link the priority list for prio into the queue of non-empty lists,
 * keeping the queue sorted in descending priority order. Must only be
 * called while the queue is marked active.
 */
static void
pq_insert_prio_list(pq_queue_t *pq, int prio)
{
	pq_list_t *pql;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_ACTIVE(pq, "pq_insert_prio_list: pq_active");

	/*
	 * The priority queue is in descending priority order.  Start at
	 * the beginning of the queue and find the list before which the
	 * new list should be inserted.
	 */
	pql = TAILQ_FIRST(&pq->pq_queue);
	while ((pql != NULL) && (pql->pl_prio > prio))
		pql = TAILQ_NEXT(pql, pl_link);

	/* Insert the list: */
	if (pql == NULL)
		TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link);
	else
		TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link);

	/* Mark this list as being in the queue: */
	pq->pq_lists[prio].pl_queued = 1;
}
dplbsd/zcaplib
head/lib/libkse/thread/thr_priority_queue.c
C
bsd-2-clause
9,144
require_relative "shared_examples/requires_cask_token" require_relative "shared_examples/invalid_option" describe Hbc::CLI::Uninstall, :cask do it_behaves_like "a command that requires a Cask token" it_behaves_like "a command that handles invalid options" it "displays the uninstallation progress" do caffeine = Hbc::CaskLoader.load(cask_path("local-caffeine")) Hbc::Installer.new(caffeine).install output = Regexp.new <<~EOS ==> Uninstalling Cask local-caffeine ==> Backing App 'Caffeine.app' up to '.*Caffeine.app'. ==> Removing App '.*Caffeine.app'. ==> Purging files for version 1.2.3 of Cask local-caffeine EOS expect { described_class.run("local-caffeine") }.to output(output).to_stdout end it "shows an error when a bad Cask is provided" do expect { described_class.run("notacask") } .to raise_error(Hbc::CaskUnavailableError, /is unavailable/) end it "shows an error when a Cask is provided that's not installed" do expect { described_class.run("local-caffeine") } .to raise_error(Hbc::CaskNotInstalledError, /is not installed/) end it "tries anyway on a non-present Cask when --force is given" do expect { described_class.run("local-caffeine", "--force") }.not_to raise_error end it "can uninstall and unlink multiple Casks at once" do caffeine = Hbc::CaskLoader.load(cask_path("local-caffeine")) transmission = Hbc::CaskLoader.load(cask_path("local-transmission")) Hbc::Installer.new(caffeine).install Hbc::Installer.new(transmission).install expect(caffeine).to be_installed expect(transmission).to be_installed described_class.run("local-caffeine", "local-transmission") expect(caffeine).not_to be_installed expect(Hbc::Config.global.appdir.join("Transmission.app")).not_to exist expect(transmission).not_to be_installed expect(Hbc::Config.global.appdir.join("Caffeine.app")).not_to exist end it "calls `uninstall` before removing artifacts" do cask = Hbc::CaskLoader.load(cask_path("with-uninstall-script-app")) Hbc::Installer.new(cask).install expect(cask).to be_installed 
expect(Hbc::Config.global.appdir.join("MyFancyApp.app")).to exist expect { described_class.run("with-uninstall-script-app") }.not_to raise_error expect(cask).not_to be_installed expect(Hbc::Config.global.appdir.join("MyFancyApp.app")).not_to exist end it "can uninstall Casks when the uninstall script is missing, but only when using `--force`" do cask = Hbc::CaskLoader.load(cask_path("with-uninstall-script-app")) Hbc::Installer.new(cask).install expect(cask).to be_installed Hbc::Config.global.appdir.join("MyFancyApp.app").rmtree expect { described_class.run("with-uninstall-script-app") } .to raise_error(Hbc::CaskError, /uninstall script .* does not exist/) expect(cask).to be_installed expect { described_class.run("with-uninstall-script-app", "--force") }.not_to raise_error expect(cask).not_to be_installed end describe "when multiple versions of a cask are installed" do let(:token) { "versioned-cask" } let(:first_installed_version) { "1.2.3" } let(:last_installed_version) { "4.5.6" } let(:timestamped_versions) { [ [first_installed_version, "123000"], [last_installed_version, "456000"], ] } let(:caskroom_path) { Hbc::Caskroom.path.join(token).tap(&:mkpath) } before do timestamped_versions.each do |timestamped_version| caskroom_path.join(".metadata", *timestamped_version, "Casks").tap(&:mkpath) .join("#{token}.rb").open("w") do |caskfile| caskfile.puts <<~EOS cask '#{token}' do version '#{timestamped_version[0]}' end EOS end caskroom_path.join(timestamped_version[0]).mkpath end end it "uninstalls one version at a time" do described_class.run("versioned-cask") expect(caskroom_path.join(first_installed_version)).to exist expect(caskroom_path.join(last_installed_version)).not_to exist expect(caskroom_path).to exist described_class.run("versioned-cask") expect(caskroom_path.join(first_installed_version)).not_to exist expect(caskroom_path).not_to exist end it "displays a message when versions remain installed" do expect { expect { described_class.run("versioned-cask") 
}.not_to output.to_stderr }.to output(/#{token} #{first_installed_version} is still installed./).to_stdout end end describe "when Casks in Taps have been renamed or removed" do let(:app) { Hbc::Config.global.appdir.join("ive-been-renamed.app") } let(:caskroom_path) { Hbc::Caskroom.path.join("ive-been-renamed").tap(&:mkpath) } let(:saved_caskfile) { caskroom_path.join(".metadata", "latest", "timestamp", "Casks").join("ive-been-renamed.rb") } before do app.tap(&:mkpath) .join("Contents").tap(&:mkpath) .join("Info.plist").tap(&FileUtils.method(:touch)) caskroom_path.mkpath saved_caskfile.dirname.mkpath IO.write saved_caskfile, <<~EOS cask 'ive-been-renamed' do version :latest app 'ive-been-renamed.app' end EOS end it "can still uninstall those Casks" do described_class.run("ive-been-renamed") expect(app).not_to exist expect(caskroom_path).not_to exist end end end
ilovezfs/brew
Library/Homebrew/test/cask/cli/uninstall_spec.rb
Ruby
bsd-2-clause
5,629
"""Implementation of basic magic functions.""" import argparse import textwrap import io import sys from pprint import pformat from IPython.core import magic_arguments, page from IPython.core.error import UsageError from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes from IPython.utils.text import format_screen, dedent, indent from IPython.testing.skipdoctest import skip_doctest from IPython.utils.ipstruct import Struct from warnings import warn from logging import error class MagicsDisplay(object): def __init__(self, magics_manager, ignore=None): self.ignore = ignore if ignore else [] self.magics_manager = magics_manager def _lsmagic(self): """The main implementation of the %lsmagic""" mesc = magic_escapes['line'] cesc = magic_escapes['cell'] mman = self.magics_manager magics = mman.lsmagic() out = ['Available line magics:', mesc + (' '+mesc).join(sorted([m for m,v in magics['line'].items() if (v not in self.ignore)])), '', 'Available cell magics:', cesc + (' '+cesc).join(sorted([m for m,v in magics['cell'].items() if (v not in self.ignore)])), '', mman.auto_status()] return '\n'.join(out) def _repr_pretty_(self, p, cycle): p.text(self._lsmagic()) def __str__(self): return self._lsmagic() def _jsonable(self): """turn magics dict into jsonable dict of the same structure replaces object instances with their class names as strings """ magic_dict = {} mman = self.magics_manager magics = mman.lsmagic() for key, subdict in magics.items(): d = {} magic_dict[key] = d for name, obj in subdict.items(): try: classname = obj.__self__.__class__.__name__ except AttributeError: classname = 'Other' d[name] = classname return magic_dict def _repr_json_(self): return self._jsonable() @magics_class class BasicMagics(Magics): """Magics that provide central IPython functionality. 
These are various magics that don't fit into specific categories but that are all part of the base 'IPython experience'.""" @magic_arguments.magic_arguments() @magic_arguments.argument( '-l', '--line', action='store_true', help="""Create a line magic alias.""" ) @magic_arguments.argument( '-c', '--cell', action='store_true', help="""Create a cell magic alias.""" ) @magic_arguments.argument( 'name', help="""Name of the magic to be created.""" ) @magic_arguments.argument( 'target', help="""Name of the existing line or cell magic.""" ) @magic_arguments.argument( '-p', '--params', default=None, help="""Parameters passed to the magic function.""" ) @line_magic def alias_magic(self, line=''): """Create an alias for an existing line or cell magic. Examples -------- :: In [1]: %alias_magic t timeit Created `%t` as an alias for `%timeit`. Created `%%t` as an alias for `%%timeit`. In [2]: %t -n1 pass 1 loops, best of 3: 954 ns per loop In [3]: %%t -n1 ...: pass ...: 1 loops, best of 3: 954 ns per loop In [4]: %alias_magic --cell whereami pwd UsageError: Cell magic function `%%pwd` not found. In [5]: %alias_magic --line whereami pwd Created `%whereami` as an alias for `%pwd`. In [6]: %whereami Out[6]: u'/home/testuser' In [7]: %alias_magic h history -p "-l 30" --line Created `%h` as an alias for `%history -l 30`. """ args = magic_arguments.parse_argstring(self.alias_magic, line) shell = self.shell mman = self.shell.magics_manager escs = ''.join(magic_escapes.values()) target = args.target.lstrip(escs) name = args.name.lstrip(escs) params = args.params if (params and ((params.startswith('"') and params.endswith('"')) or (params.startswith("'") and params.endswith("'")))): params = params[1:-1] # Find the requested magics. m_line = shell.find_magic(target, 'line') m_cell = shell.find_magic(target, 'cell') if args.line and m_line is None: raise UsageError('Line magic function `%s%s` not found.' 
% (magic_escapes['line'], target)) if args.cell and m_cell is None: raise UsageError('Cell magic function `%s%s` not found.' % (magic_escapes['cell'], target)) # If --line and --cell are not specified, default to the ones # that are available. if not args.line and not args.cell: if not m_line and not m_cell: raise UsageError( 'No line or cell magic with name `%s` found.' % target ) args.line = bool(m_line) args.cell = bool(m_cell) params_str = "" if params is None else " " + params if args.line: mman.register_alias(name, target, 'line', params) print('Created `%s%s` as an alias for `%s%s%s`.' % ( magic_escapes['line'], name, magic_escapes['line'], target, params_str)) if args.cell: mman.register_alias(name, target, 'cell', params) print('Created `%s%s` as an alias for `%s%s%s`.' % ( magic_escapes['cell'], name, magic_escapes['cell'], target, params_str)) @line_magic def lsmagic(self, parameter_s=''): """List currently available magic functions.""" return MagicsDisplay(self.shell.magics_manager, ignore=[self.pip]) def _magic_docs(self, brief=False, rest=False): """Return docstrings from magic functions.""" mman = self.shell.magics_manager docs = mman.lsmagic_docs(brief, missing='No documentation') if rest: format_string = '**%s%s**::\n\n%s\n\n' else: format_string = '%s%s:\n%s\n' return ''.join( [format_string % (magic_escapes['line'], fname, indent(dedent(fndoc))) for fname, fndoc in sorted(docs['line'].items())] + [format_string % (magic_escapes['cell'], fname, indent(dedent(fndoc))) for fname, fndoc in sorted(docs['cell'].items())] ) @line_magic def magic(self, parameter_s=''): """Print information about the magic function system. 
Supported formats: -latex, -brief, -rest """ mode = '' try: mode = parameter_s.split()[0][1:] except IndexError: pass brief = (mode == 'brief') rest = (mode == 'rest') magic_docs = self._magic_docs(brief, rest) if mode == 'latex': print(self.format_latex(magic_docs)) return else: magic_docs = format_screen(magic_docs) out = [""" IPython's 'magic' functions =========================== The magic function system provides a series of functions which allow you to control the behavior of IPython itself, plus a lot of system-type features. There are two kinds of magics, line-oriented and cell-oriented. Line magics are prefixed with the % character and work much like OS command-line calls: they get as an argument the rest of the line, where arguments are passed without parentheses or quotes. For example, this will time the given statement:: %timeit range(1000) Cell magics are prefixed with a double %%, and they are functions that get as an argument not only the rest of the line, but also the lines below it in a separate argument. These magics are called with two arguments: the rest of the call line and the body of the cell, consisting of the lines below the first. For example:: %%timeit x = numpy.random.randn((100, 100)) numpy.linalg.svd(x) will time the execution of the numpy svd routine, running the assignment of x as part of the setup phase, which is not timed. In a line-oriented client (the terminal or Qt console IPython), starting a new input with %% will automatically enter cell mode, and IPython will continue reading input until a blank line is given. In the notebook, simply type the whole cell as one entity, but keep in mind that the %% escape can only be at the very start of the cell. NOTE: If you have 'automagic' enabled (via the command line option or with the %automagic function), you don't need to type in the % explicitly for line magics; cell magics always require an explicit '%%' escape. 
By default, IPython ships with automagic on, so you should only rarely need the % escape. Example: typing '%cd mydir' (without the quotes) changes your working directory to 'mydir', if it exists. For a list of the available magic functions, use %lsmagic. For a description of any of them, type %magic_name?, e.g. '%cd?'. Currently the magic system has the following functions:""", magic_docs, "Summary of magic functions (from %slsmagic):" % magic_escapes['line'], str(self.lsmagic()), ] page.page('\n'.join(out)) @line_magic def page(self, parameter_s=''): """Pretty print the object and display it through a pager. %page [options] OBJECT If no object is given, use _ (last output). Options: -r: page str(object), don't pretty-print it.""" # After a function contributed by Olivier Aubert, slightly modified. # Process options/args opts, args = self.parse_options(parameter_s, 'r') raw = 'r' in opts oname = args and args or '_' info = self.shell._ofind(oname) if info['found']: txt = (raw and str or pformat)( info['obj'] ) page.page(txt) else: print('Object `%s` not found' % oname) @line_magic def profile(self, parameter_s=''): """Print your currently active IPython profile. See Also -------- prun : run code using the Python profiler (:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`) """ raise UsageError("The `%profile` magic has been deprecated since IPython 2.0. " "and removed in IPython 6.0. Please use the value of `get_ipython().profile` instead " "to see current profile in use. Perhaps you meant to use `%prun` to profile code?") @line_magic def pprint(self, parameter_s=''): """Toggle pretty printing on/off.""" ptformatter = self.shell.display_formatter.formatters['text/plain'] ptformatter.pprint = bool(1 - ptformatter.pprint) print('Pretty printing has been turned', ['OFF','ON'][ptformatter.pprint]) @line_magic def colors(self, parameter_s=''): """Switch color scheme for prompts, info system and exception handlers. 
Currently implemented schemes: NoColor, Linux, LightBG. Color scheme names are not case-sensitive. Examples -------- To get a plain black and white terminal:: %colors nocolor """ def color_switch_err(name): warn('Error changing %s color schemes.\n%s' % (name, sys.exc_info()[1]), stacklevel=2) new_scheme = parameter_s.strip() if not new_scheme: raise UsageError( "%colors: you must specify a color scheme. See '%colors?'") # local shortcut shell = self.shell # Set shell colour scheme try: shell.colors = new_scheme shell.refresh_style() except: color_switch_err('shell') # Set exception colors try: shell.InteractiveTB.set_colors(scheme = new_scheme) shell.SyntaxTB.set_colors(scheme = new_scheme) except: color_switch_err('exception') # Set info (for 'object?') colors if shell.color_info: try: shell.inspector.set_active_scheme(new_scheme) except: color_switch_err('object inspector') else: shell.inspector.set_active_scheme('NoColor') @line_magic def xmode(self, parameter_s=''): """Switch modes for the exception handlers. Valid modes: Plain, Context and Verbose. If called without arguments, acts as a toggle.""" def xmode_switch_err(name): warn('Error changing %s exception modes.\n%s' % (name,sys.exc_info()[1])) shell = self.shell new_mode = parameter_s.strip().capitalize() try: shell.InteractiveTB.set_mode(mode=new_mode) print('Exception reporting mode:',shell.InteractiveTB.mode) except: xmode_switch_err('user') @line_magic def pip(self, args=''): """ Intercept usage of ``pip`` in IPython and direct user to run command outside of IPython. """ print(textwrap.dedent(''' The following command must be run outside of the IPython shell: $ pip {args} The Python package manager (pip) can only be used from outside of IPython. Please reissue the `pip` command in a separate terminal or command prompt. 
See the Python documentation for more informations on how to install packages: https://docs.python.org/3/installing/'''.format(args=args))) @line_magic def quickref(self, arg): """ Show a quick reference sheet """ from IPython.core.usage import quick_reference qr = quick_reference + self._magic_docs(brief=True) page.page(qr) @line_magic def doctest_mode(self, parameter_s=''): """Toggle doctest mode on and off. This mode is intended to make IPython behave as much as possible like a plain Python shell, from the perspective of how its prompts, exceptions and output look. This makes it easy to copy and paste parts of a session into doctests. It does so by: - Changing the prompts to the classic ``>>>`` ones. - Changing the exception reporting mode to 'Plain'. - Disabling pretty-printing of output. Note that IPython also supports the pasting of code snippets that have leading '>>>' and '...' prompts in them. This means that you can paste doctests from files or docstrings (even if they have leading whitespace), and the code will execute correctly. You can then use '%history -t' to see the translated history; this will give you the input after removal of all the leading prompts and whitespace, which can be pasted back into an editor. With these features, you can switch into this mode easily whenever you need to do testing and changes to doctests, without having to leave your existing IPython session. """ # Shorthands shell = self.shell meta = shell.meta disp_formatter = self.shell.display_formatter ptformatter = disp_formatter.formatters['text/plain'] # dstore is a data store kept in the instance metadata bag to track any # changes we make, so we can undo them later. 
dstore = meta.setdefault('doctest_mode',Struct()) save_dstore = dstore.setdefault # save a few values we'll need to recover later mode = save_dstore('mode',False) save_dstore('rc_pprint',ptformatter.pprint) save_dstore('xmode',shell.InteractiveTB.mode) save_dstore('rc_separate_out',shell.separate_out) save_dstore('rc_separate_out2',shell.separate_out2) save_dstore('rc_separate_in',shell.separate_in) save_dstore('rc_active_types',disp_formatter.active_types) if not mode: # turn on # Prompt separators like plain python shell.separate_in = '' shell.separate_out = '' shell.separate_out2 = '' ptformatter.pprint = False disp_formatter.active_types = ['text/plain'] shell.magic('xmode Plain') else: # turn off shell.separate_in = dstore.rc_separate_in shell.separate_out = dstore.rc_separate_out shell.separate_out2 = dstore.rc_separate_out2 ptformatter.pprint = dstore.rc_pprint disp_formatter.active_types = dstore.rc_active_types shell.magic('xmode ' + dstore.xmode) # mode here is the state before we switch; switch_doctest_mode takes # the mode we're switching to. shell.switch_doctest_mode(not mode) # Store new mode and inform dstore.mode = bool(not mode) mode_label = ['OFF','ON'][dstore.mode] print('Doctest mode is:', mode_label) @line_magic def gui(self, parameter_s=''): """Enable or disable IPython GUI event loop integration. %gui [GUINAME] This magic replaces IPython's threaded shells that were activated using the (pylab/wthread/etc.) command line flags. GUI toolkits can now be enabled at runtime and keyboard interrupts should work without any problems. 
The following toolkits are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX):: %gui wx # enable wxPython event loop integration %gui qt4|qt # enable PyQt4 event loop integration %gui qt5 # enable PyQt5 event loop integration %gui gtk # enable PyGTK event loop integration %gui gtk3 # enable Gtk3 event loop integration %gui tk # enable Tk event loop integration %gui osx # enable Cocoa event loop integration # (requires %matplotlib 1.1) %gui # disable all event loop integration WARNING: after any of these has been called you can simply create an application object, but DO NOT start the event loop yourself, as we have already handled that. """ opts, arg = self.parse_options(parameter_s, '') if arg=='': arg = None try: return self.shell.enable_gui(arg) except Exception as e: # print simple error message, rather than traceback if we can't # hook up the GUI error(str(e)) @skip_doctest @line_magic def precision(self, s=''): """Set floating point precision for pretty printing. Can set either integer precision or a format string. If numpy has been imported and precision is an int, numpy display precision will also be set, via ``numpy.set_printoptions``. If no argument is given, defaults will be restored. Examples -------- :: In [1]: from math import pi In [2]: %precision 3 Out[2]: u'%.3f' In [3]: pi Out[3]: 3.142 In [4]: %precision %i Out[4]: u'%i' In [5]: pi Out[5]: 3 In [6]: %precision %e Out[6]: u'%e' In [7]: pi**10 Out[7]: 9.364805e+04 In [8]: %precision Out[8]: u'%r' In [9]: pi**10 Out[9]: 93648.047476082982 """ ptformatter = self.shell.display_formatter.formatters['text/plain'] ptformatter.float_precision = s return ptformatter.float_format @magic_arguments.magic_arguments() @magic_arguments.argument( '-e', '--export', action='store_true', default=False, help=argparse.SUPPRESS ) @magic_arguments.argument( 'filename', type=str, help='Notebook name or filename' ) @line_magic def notebook(self, s): """Export and convert IPython notebooks. 
This function can export the current IPython history to a notebook file. For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb". The -e or --export flag is deprecated in IPython 5.2, and will be removed in the future. """ args = magic_arguments.parse_argstring(self.notebook, s) from nbformat import write, v4 cells = [] hist = list(self.shell.history_manager.get_range()) if(len(hist)<=1): raise ValueError('History is empty, cannot export') for session, execution_count, source in hist[:-1]: cells.append(v4.new_code_cell( execution_count=execution_count, source=source )) nb = v4.new_notebook(cells=cells) with io.open(args.filename, 'w', encoding='utf-8') as f: write(nb, f, version=4)
unnikrishnankgs/va
venv/lib/python3.5/site-packages/IPython/core/magics/basic.py
Python
bsd-2-clause
21,310
#include "cran.h" void Rprintf2(FILE * stream, const char *format, ...) { va_list(ap); va_start(ap, format); Rprintf(format, ap); va_end(ap); }
hadley/readxl
src/cran.c
C
bsd-2-clause
153
/*- * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * This software was developed by David Chisnall under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: soc2013/dpl/head/lib/libc/locale/xlocale_private.h 252089 2013-05-21 19:59:37Z ed $ */ #ifndef _XLOCALE_PRIVATE__H_ #define _XLOCALE_PRIVATE__H_ #include <xlocale.h> #include <locale.h> #include <stdlib.h> #include <stdint.h> #include <sys/types.h> #include <machine/atomic.h> #include "setlocale.h" enum { XLC_COLLATE = 0, XLC_CTYPE, XLC_MONETARY, XLC_NUMERIC, XLC_TIME, XLC_MESSAGES, XLC_LAST }; /** * Header used for objects that are reference counted. 
Objects may optionally * have a destructor associated, which is responsible for destroying the * structure. Global / static versions of the structure should have no * destructor set - they can then have their reference counts manipulated as * normal, but will not do anything with them. * * The header stores a retain count - objects are assumed to have a reference * count of 1 when they are created, but the retain count is 0. When the * retain count is less than 0, they are freed. */ struct xlocale_refcounted { /** Number of references to this component. */ long retain_count; /** Function used to destroy this component, if one is required*/ void(*destructor)(void*); }; /** * Header for a locale component. All locale components must begin with this * header. */ struct xlocale_component { struct xlocale_refcounted header; /** Name of the locale used for this component. */ char locale[ENCODING_LEN+1]; }; /** * xlocale structure, stores per-thread locale information. */ struct _xlocale { struct xlocale_refcounted header; /** Components for the locale. */ struct xlocale_component *components[XLC_LAST]; /** Flag indicating if components[XLC_MONETARY] has changed since the * last call to localeconv_l() with this locale. */ int monetary_locale_changed; /** Flag indicating whether this locale is actually using a locale for * LC_MONETARY (1), or if it should use the C default instead (0). */ int using_monetary_locale; /** Flag indicating if components[XLC_NUMERIC] has changed since the * last call to localeconv_l() with this locale. */ int numeric_locale_changed; /** Flag indicating whether this locale is actually using a locale for * LC_NUMERIC (1), or if it should use the C default instead (0). */ int using_numeric_locale; /** Flag indicating whether this locale is actually using a locale for * LC_TIME (1), or if it should use the C default instead (0). 
*/ int using_time_locale; /** Flag indicating whether this locale is actually using a locale for * LC_MESSAGES (1), or if it should use the C default instead (0). */ int using_messages_locale; /** The structure to be returned from localeconv_l() for this locale. */ struct lconv lconv; /** Persistent state used by mblen() calls. */ __mbstate_t mblen; /** Persistent state used by mbrlen() calls. */ __mbstate_t mbrlen; /** Persistent state used by mbrtoc16() calls. */ __mbstate_t mbrtoc16; /** Persistent state used by mbrtoc32() calls. */ __mbstate_t mbrtoc32; /** Persistent state used by mbrtowc() calls. */ __mbstate_t mbrtowc; /** Persistent state used by mbsnrtowcs() calls. */ __mbstate_t mbsnrtowcs; /** Persistent state used by mbsrtowcs() calls. */ __mbstate_t mbsrtowcs; /** Persistent state used by mbtowc() calls. */ __mbstate_t mbtowc; /** Persistent state used by c16rtomb() calls. */ __mbstate_t c16rtomb; /** Persistent state used by c32rtomb() calls. */ __mbstate_t c32rtomb; /** Persistent state used by wcrtomb() calls. */ __mbstate_t wcrtomb; /** Persistent state used by wcsnrtombs() calls. */ __mbstate_t wcsnrtombs; /** Persistent state used by wcsrtombs() calls. */ __mbstate_t wcsrtombs; /** Persistent state used by wctomb() calls. */ __mbstate_t wctomb; /** Buffer used by nl_langinfo_l() */ char *csym; }; /** * Increments the reference count of a reference-counted structure. */ __attribute__((unused)) static void* xlocale_retain(void *val) { struct xlocale_refcounted *obj = val; atomic_add_long(&(obj->retain_count), 1); return (val); } /** * Decrements the reference count of a reference-counted structure, freeing it * if this is the last reference, calling its destructor if it has one. */ __attribute__((unused)) static void xlocale_release(void *val) { struct xlocale_refcounted *obj = val; long count = atomic_fetchadd_long(&(obj->retain_count), -1) - 1; if (count < 0) { if (0 != obj->destructor) { obj->destructor(obj); } } } /** * Load functions. 
Each takes the name of a locale and a pointer to the data * to be initialised as arguments. Two special values are allowed for the */ extern void* __collate_load(const char*, locale_t); extern void* __ctype_load(const char*, locale_t); extern void* __messages_load(const char*, locale_t); extern void* __monetary_load(const char*, locale_t); extern void* __numeric_load(const char*, locale_t); extern void* __time_load(const char*, locale_t); extern struct _xlocale __xlocale_global_locale; extern struct _xlocale __xlocale_C_locale; /** * Caches the rune table in TLS for fast access. */ void __set_thread_rune_locale(locale_t loc); /** * Flag indicating whether a per-thread locale has been set. If no per-thread * locale has ever been set, then we always use the global locale. */ extern int __has_thread_locale; #ifndef __NO_TLS /** * The per-thread locale. Avoids the need to use pthread lookup functions when * getting the per-thread locale. */ extern _Thread_local locale_t __thread_locale; /** * Returns the current locale for this thread, or the global locale if none is * set. The caller does not have to free the locale. The return value from * this call is not guaranteed to remain valid after the locale changes. As * such, this should only be called within libc functions. */ static inline locale_t __get_locale(void) { if (!__has_thread_locale) { return (&__xlocale_global_locale); } return (__thread_locale ? __thread_locale : &__xlocale_global_locale); } #else locale_t __get_locale(void); #endif /** * Two magic values are allowed for locale_t objects. NULL and -1. This * function maps those to the real locales that they represent. */ static inline locale_t get_real_locale(locale_t locale) { switch ((intptr_t)locale) { case 0: return (&__xlocale_C_locale); case -1: return (&__xlocale_global_locale); default: return (locale); } } /** * Replace a placeholder locale with the real global or thread-local locale_t. */ #define FIX_LOCALE(l) (l = get_real_locale(l)) #endif
dplbsd/soc2013
head/lib/libc/locale/xlocale_private.h
C
bsd-2-clause
7,969
/*- * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD: soc2013/dpl/head/sys/dev/tsec/if_tsec.h 232801 2012-03-04 19:22:52Z raj $ */ #ifndef _IF_TSEC_H #define _IF_TSEC_H #include <dev/ofw/openfirm.h> #define TSEC_RX_NUM_DESC 256 #define TSEC_TX_NUM_DESC 256 /* Interrupt Coalescing types */ #define TSEC_IC_RX 0 #define TSEC_IC_TX 1 /* eTSEC ID */ #define TSEC_ETSEC_ID 0x0124 /* Frame sizes */ #define TSEC_MIN_FRAME_SIZE 64 #define TSEC_MAX_FRAME_SIZE 9600 struct tsec_softc { /* XXX MII bus requires that struct ifnet is first!!! 
*/ struct ifnet *tsec_ifp; struct mtx transmit_lock; /* transmitter lock */ struct mtx receive_lock; /* receiver lock */ phandle_t node; device_t dev; device_t tsec_miibus; struct mii_data *tsec_mii; /* MII media control */ int tsec_link; bus_dma_tag_t tsec_tx_dtag; /* TX descriptors tag */ bus_dmamap_t tsec_tx_dmap; /* TX descriptors map */ struct tsec_desc *tsec_tx_vaddr;/* vadress of TX descriptors */ uint32_t tsec_tx_raddr; /* real adress of TX descriptors */ bus_dma_tag_t tsec_rx_dtag; /* RX descriptors tag */ bus_dmamap_t tsec_rx_dmap; /* RX descriptors map */ struct tsec_desc *tsec_rx_vaddr; /* vadress of RX descriptors */ uint32_t tsec_rx_raddr; /* real adress of RX descriptors */ bus_dma_tag_t tsec_tx_mtag; /* TX mbufs tag */ bus_dma_tag_t tsec_rx_mtag; /* TX mbufs tag */ struct rx_data_type { bus_dmamap_t map; /* mbuf map */ struct mbuf *mbuf; uint32_t paddr; /* DMA addres of buffer */ } rx_data[TSEC_RX_NUM_DESC]; uint32_t tx_cur_desc_cnt; uint32_t tx_dirty_desc_cnt; uint32_t rx_cur_desc_cnt; struct resource *sc_rres; /* register resource */ int sc_rrid; /* register rid */ struct { bus_space_tag_t bst; bus_space_handle_t bsh; } sc_bas; struct resource *sc_transmit_ires; void *sc_transmit_ihand; int sc_transmit_irid; struct resource *sc_receive_ires; void *sc_receive_ihand; int sc_receive_irid; struct resource *sc_error_ires; void *sc_error_ihand; int sc_error_irid; int tsec_if_flags; int is_etsec; /* Watchdog and MII tick related */ struct callout tsec_callout; int tsec_watchdog; /* TX maps */ bus_dmamap_t tx_map_data[TSEC_TX_NUM_DESC]; /* unused TX maps data */ uint32_t tx_map_unused_get_cnt; uint32_t tx_map_unused_put_cnt; bus_dmamap_t *tx_map_unused_data[TSEC_TX_NUM_DESC]; /* used TX maps data */ uint32_t tx_map_used_get_cnt; uint32_t tx_map_used_put_cnt; bus_dmamap_t *tx_map_used_data[TSEC_TX_NUM_DESC]; /* mbufs in TX queue */ uint32_t tx_mbuf_used_get_cnt; uint32_t tx_mbuf_used_put_cnt; struct mbuf *tx_mbuf_used_data[TSEC_TX_NUM_DESC]; /* interrupt 
coalescing */ struct mtx ic_lock; uint32_t rx_ic_time; /* RW, valid values 0..65535 */ uint32_t rx_ic_count; /* RW, valid values 0..255 */ uint32_t tx_ic_time; uint32_t tx_ic_count; /* currently received frame */ struct mbuf *frame; int phyaddr; struct tsec_softc *phy_sc; }; /* interface to get/put generic objects */ #define TSEC_CNT_INIT(cnt, wrap) ((cnt) = ((wrap) - 1)) #define TSEC_INC(count, wrap) (count = ((count) + 1) & ((wrap) - 1)) #define TSEC_GET_GENERIC(hand, tab, count, wrap) \ ((hand)->tab[TSEC_INC((hand)->count, wrap)]) #define TSEC_PUT_GENERIC(hand, tab, count, wrap, val) \ ((hand)->tab[TSEC_INC((hand)->count, wrap)] = val) #define TSEC_BACK_GENERIC(sc, count, wrap) do { \ if ((sc)->count > 0) \ (sc)->count--; \ else \ (sc)->count = (wrap) - 1; \ } while (0) /* TX maps interface */ #define TSEC_TX_MAP_CNT_INIT(sc) do { \ TSEC_CNT_INIT((sc)->tx_map_unused_get_cnt, TSEC_TX_NUM_DESC); \ TSEC_CNT_INIT((sc)->tx_map_unused_put_cnt, TSEC_TX_NUM_DESC); \ TSEC_CNT_INIT((sc)->tx_map_used_get_cnt, TSEC_TX_NUM_DESC); \ TSEC_CNT_INIT((sc)->tx_map_used_put_cnt, TSEC_TX_NUM_DESC); \ } while (0) /* interface to get/put unused TX maps */ #define TSEC_ALLOC_TX_MAP(sc) \ TSEC_GET_GENERIC(sc, tx_map_unused_data, tx_map_unused_get_cnt, \ TSEC_TX_NUM_DESC) #define TSEC_FREE_TX_MAP(sc, val) \ TSEC_PUT_GENERIC(sc, tx_map_unused_data, tx_map_unused_put_cnt, \ TSEC_TX_NUM_DESC, val) /* interface to get/put used TX maps */ #define TSEC_GET_TX_MAP(sc) \ TSEC_GET_GENERIC(sc, tx_map_used_data, tx_map_used_get_cnt, \ TSEC_TX_NUM_DESC) #define TSEC_PUT_TX_MAP(sc, val) \ TSEC_PUT_GENERIC(sc, tx_map_used_data, tx_map_used_put_cnt, \ TSEC_TX_NUM_DESC, val) /* interface to get/put TX mbufs in send queue */ #define TSEC_TX_MBUF_CNT_INIT(sc) do { \ TSEC_CNT_INIT((sc)->tx_mbuf_used_get_cnt, TSEC_TX_NUM_DESC); \ TSEC_CNT_INIT((sc)->tx_mbuf_used_put_cnt, TSEC_TX_NUM_DESC); \ } while (0) #define TSEC_GET_TX_MBUF(sc) \ TSEC_GET_GENERIC(sc, tx_mbuf_used_data, tx_mbuf_used_get_cnt, \ 
TSEC_TX_NUM_DESC) #define TSEC_PUT_TX_MBUF(sc, val) \ TSEC_PUT_GENERIC(sc, tx_mbuf_used_data, tx_mbuf_used_put_cnt, \ TSEC_TX_NUM_DESC, val) #define TSEC_EMPTYQ_TX_MBUF(sc) \ ((sc)->tx_mbuf_used_get_cnt == (sc)->tx_mbuf_used_put_cnt) /* interface for manage tx tsec_desc */ #define TSEC_TX_DESC_CNT_INIT(sc) do { \ TSEC_CNT_INIT((sc)->tx_cur_desc_cnt, TSEC_TX_NUM_DESC); \ TSEC_CNT_INIT((sc)->tx_dirty_desc_cnt, TSEC_TX_NUM_DESC); \ } while (0) #define TSEC_GET_CUR_TX_DESC(sc) \ &TSEC_GET_GENERIC(sc, tsec_tx_vaddr, tx_cur_desc_cnt, \ TSEC_TX_NUM_DESC) #define TSEC_GET_DIRTY_TX_DESC(sc) \ &TSEC_GET_GENERIC(sc, tsec_tx_vaddr, tx_dirty_desc_cnt, \ TSEC_TX_NUM_DESC) #define TSEC_BACK_DIRTY_TX_DESC(sc) \ TSEC_BACK_GENERIC(sc, tx_dirty_desc_cnt, TSEC_TX_NUM_DESC) #define TSEC_CUR_DIFF_DIRTY_TX_DESC(sc) \ ((sc)->tx_cur_desc_cnt != (sc)->tx_dirty_desc_cnt) #define TSEC_FREE_TX_DESC(sc) \ (((sc)->tx_cur_desc_cnt < (sc)->tx_dirty_desc_cnt) ? \ ((sc)->tx_dirty_desc_cnt - (sc)->tx_cur_desc_cnt - 1) \ : \ (TSEC_TX_NUM_DESC - (sc)->tx_cur_desc_cnt \ + (sc)->tx_dirty_desc_cnt - 1)) /* interface for manage rx tsec_desc */ #define TSEC_RX_DESC_CNT_INIT(sc) do { \ TSEC_CNT_INIT((sc)->rx_cur_desc_cnt, TSEC_RX_NUM_DESC); \ } while (0) #define TSEC_GET_CUR_RX_DESC(sc) \ &TSEC_GET_GENERIC(sc, tsec_rx_vaddr, rx_cur_desc_cnt, \ TSEC_RX_NUM_DESC) #define TSEC_BACK_CUR_RX_DESC(sc) \ TSEC_BACK_GENERIC(sc, rx_cur_desc_cnt, TSEC_RX_NUM_DESC) #define TSEC_GET_CUR_RX_DESC_CNT(sc) \ ((sc)->rx_cur_desc_cnt) /* init all counters (for init only!) 
*/ #define TSEC_TX_RX_COUNTERS_INIT(sc) do { \ TSEC_TX_MAP_CNT_INIT(sc); \ TSEC_TX_MBUF_CNT_INIT(sc); \ TSEC_TX_DESC_CNT_INIT(sc); \ TSEC_RX_DESC_CNT_INIT(sc); \ } while (0) /* read/write bus functions */ #define TSEC_READ(sc, reg) \ bus_space_read_4((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg)) #define TSEC_WRITE(sc, reg, val) \ bus_space_write_4((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg), (val)) /* Lock for transmitter */ #define TSEC_TRANSMIT_LOCK(sc) do { \ mtx_assert(&(sc)->receive_lock, MA_NOTOWNED); \ mtx_lock(&(sc)->transmit_lock); \ } while (0) #define TSEC_TRANSMIT_UNLOCK(sc) mtx_unlock(&(sc)->transmit_lock) #define TSEC_TRANSMIT_LOCK_ASSERT(sc) mtx_assert(&(sc)->transmit_lock, MA_OWNED) /* Lock for receiver */ #define TSEC_RECEIVE_LOCK(sc) do { \ mtx_assert(&(sc)->transmit_lock, MA_NOTOWNED); \ mtx_lock(&(sc)->receive_lock); \ } while (0) #define TSEC_RECEIVE_UNLOCK(sc) mtx_unlock(&(sc)->receive_lock) #define TSEC_RECEIVE_LOCK_ASSERT(sc) mtx_assert(&(sc)->receive_lock, MA_OWNED) /* Lock for interrupts coalescing */ #define TSEC_IC_LOCK(sc) do { \ mtx_assert(&(sc)->ic_lock, MA_NOTOWNED); \ mtx_lock(&(sc)->ic_lock); \ } while (0) #define TSEC_IC_UNLOCK(sc) mtx_unlock(&(sc)->ic_lock) #define TSEC_IC_LOCK_ASSERT(sc) mtx_assert(&(sc)->ic_lock, MA_OWNED) /* Global tsec lock (with all locks) */ #define TSEC_GLOBAL_LOCK(sc) do { \ if ((mtx_owned(&(sc)->transmit_lock) ? 1 : 0) != \ (mtx_owned(&(sc)->receive_lock) ? 
1 : 0)) { \ panic("tsec deadlock possibility detection!"); \ } \ mtx_lock(&(sc)->transmit_lock); \ mtx_lock(&(sc)->receive_lock); \ } while (0) #define TSEC_GLOBAL_UNLOCK(sc) do { \ TSEC_RECEIVE_UNLOCK(sc); \ TSEC_TRANSMIT_UNLOCK(sc); \ } while (0) #define TSEC_GLOBAL_LOCK_ASSERT(sc) do { \ TSEC_TRANSMIT_LOCK_ASSERT(sc); \ TSEC_RECEIVE_LOCK_ASSERT(sc); \ } while (0) /* From global to {transmit,receive} */ #define TSEC_GLOBAL_TO_TRANSMIT_LOCK(sc) do { \ mtx_unlock(&(sc)->receive_lock);\ } while (0) #define TSEC_GLOBAL_TO_RECEIVE_LOCK(sc) do { \ mtx_unlock(&(sc)->transmit_lock);\ } while (0) struct tsec_desc { volatile uint16_t flags; /* descriptor flags */ volatile uint16_t length; /* buffer length */ volatile uint32_t bufptr; /* buffer pointer */ }; #define TSEC_READ_RETRY 10000 #define TSEC_READ_DELAY 100 /* Structures and defines for TCP/IP Off-load */ struct tsec_tx_fcb { volatile uint16_t flags; volatile uint8_t l4_offset; volatile uint8_t l3_offset; volatile uint16_t ph_chsum; volatile uint16_t vlan; }; struct tsec_rx_fcb { volatile uint16_t flags; volatile uint8_t rq_index; volatile uint8_t protocol; volatile uint16_t unused; volatile uint16_t vlan; }; #define TSEC_CHECKSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #define TSEC_TX_FCB_IP4 TSEC_TX_FCB_L3_IS_IP #define TSEC_TX_FCB_IP6 (TSEC_TX_FCB_L3_IS_IP | TSEC_TX_FCB_L3_IS_IP6) #define TSEC_TX_FCB_TCP TSEC_TX_FCB_L4_IS_TCP_UDP #define TSEC_TX_FCB_UDP (TSEC_TX_FCB_L4_IS_TCP_UDP | TSEC_TX_FCB_L4_IS_UDP) #define TSEC_RX_FCB_IP_CSUM_CHECKED(flags) \ ((flags & (TSEC_RX_FCB_IP_FOUND | TSEC_RX_FCB_IP6_FOUND | \ TSEC_RX_FCB_IP_CSUM | TSEC_RX_FCB_PARSE_ERROR)) \ == (TSEC_RX_FCB_IP_FOUND | TSEC_RX_FCB_IP_CSUM)) #define TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) \ ((flags & (TSEC_RX_FCB_TCP_UDP_FOUND | TSEC_RX_FCB_TCP_UDP_CSUM \ | TSEC_RX_FCB_PARSE_ERROR)) \ == (TSEC_RX_FCB_TCP_UDP_FOUND | TSEC_RX_FCB_TCP_UDP_CSUM)) /* Prototypes */ extern devclass_t tsec_devclass; int tsec_attach(struct tsec_softc *sc); int 
tsec_detach(struct tsec_softc *sc); void tsec_error_intr(void *arg); void tsec_receive_intr(void *arg); void tsec_transmit_intr(void *arg); int tsec_miibus_readreg(device_t dev, int phy, int reg); int tsec_miibus_writereg(device_t dev, int phy, int reg, int value); void tsec_miibus_statchg(device_t dev); int tsec_resume(device_t dev); /* XXX */ int tsec_shutdown(device_t dev); int tsec_suspend(device_t dev); /* XXX */ void tsec_get_hwaddr(struct tsec_softc *sc, uint8_t *addr); #endif /* _IF_TSEC_H */
dplbsd/soc2013
head/sys/dev/tsec/if_tsec.h
C
bsd-2-clause
11,770
create table t (re float, im float); \set file `echo "'$TMPDIR/examples/test5000.avro'"` copy t from :file with parser AvroParser() no commit; select * from t; drop table t;
vertica/Vertica-Extension-Packages
avro_parser/examples/test5000.sql
SQL
bsd-2-clause
177
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_MUS_WINDOW_MANAGER_H_ #define ASH_MUS_WINDOW_MANAGER_H_ #include <stdint.h> #include <memory> #include <set> #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/observer_list.h" #include "services/ui/common/types.h" #include "services/ui/public/cpp/window_manager_delegate.h" #include "services/ui/public/cpp/window_tree_client_delegate.h" #include "services/ui/public/interfaces/display/display_controller.mojom.h" #include "services/ui/public/interfaces/window_manager.mojom.h" namespace base { class SequencedWorkerPool; } namespace display { class Display; class ScreenBase; } namespace service_manager { class Connector; } namespace views { class PointerWatcherEventRouter; } namespace ash { namespace mus { class AcceleratorHandler; class RootWindowController; class ShadowController; class WindowManagerObserver; class WmShellMus; class WmLookupMus; class WmTestHelper; // WindowManager serves as the WindowManagerDelegate and // WindowTreeClientDelegate for mash. WindowManager creates (and owns) // a RootWindowController per Display. WindowManager takes ownership of // the WindowTreeClient. 
class WindowManager : public ui::WindowManagerDelegate, public ui::WindowTreeClientDelegate { public: explicit WindowManager(service_manager::Connector* connector); ~WindowManager() override; void Init(std::unique_ptr<ui::WindowTreeClient> window_tree_client, const scoped_refptr<base::SequencedWorkerPool>& blocking_pool); WmShellMus* shell() { return shell_.get(); } display::ScreenBase* screen() { return screen_.get(); } ui::WindowTreeClient* window_tree_client() { return window_tree_client_.get(); } ui::WindowManagerClient* window_manager_client() { return window_manager_client_; } service_manager::Connector* connector() { return connector_; } void SetScreenLocked(bool is_locked); // Creates a new top level window. ui::Window* NewTopLevelWindow( std::map<std::string, std::vector<uint8_t>>* properties); std::set<RootWindowController*> GetRootWindowControllers(); // Returns the next accelerator namespace id by value in |id|. Returns true // if there is another slot available, false if all slots are taken up. bool GetNextAcceleratorNamespaceId(uint16_t* id); void AddAcceleratorHandler(uint16_t id_namespace, AcceleratorHandler* handler); void RemoveAcceleratorHandler(uint16_t id_namespace); void AddObserver(WindowManagerObserver* observer); void RemoveObserver(WindowManagerObserver* observer); // Returns the DisplayController interface if available. Will be null if no // service_manager::Connector was available, for example in some tests. display::mojom::DisplayController* GetDisplayController(); private: friend class WmTestHelper; using RootWindowControllers = std::set<std::unique_ptr<RootWindowController>>; RootWindowController* CreateRootWindowController( ui::Window* window, const display::Display& display); // Deletes the specified RootWindowController. Called when a display is // removed. void DestroyRootWindowController( RootWindowController* root_window_controller); void Shutdown(); // Returns an iterator into |root_window_controllers_|. 
Returns // root_window_controllers_.end() if |window| is not the root of a // RootWindowController. RootWindowControllers::iterator FindRootWindowControllerByWindow( ui::Window* window); RootWindowController* GetPrimaryRootWindowController(); // Returns the RootWindowController where new top levels are created. // |properties| is the properties supplied during window creation. RootWindowController* GetRootWindowControllerForNewTopLevelWindow( std::map<std::string, std::vector<uint8_t>>* properties); // WindowTreeClientDelegate: void OnEmbed(ui::Window* root) override; void OnEmbedRootDestroyed(ui::Window* root) override; void OnLostConnection(ui::WindowTreeClient* client) override; void OnPointerEventObserved(const ui::PointerEvent& event, ui::Window* target) override; // WindowManagerDelegate: void SetWindowManagerClient(ui::WindowManagerClient* client) override; bool OnWmSetBounds(ui::Window* window, gfx::Rect* bounds) override; bool OnWmSetProperty( ui::Window* window, const std::string& name, std::unique_ptr<std::vector<uint8_t>>* new_data) override; ui::Window* OnWmCreateTopLevelWindow( std::map<std::string, std::vector<uint8_t>>* properties) override; void OnWmClientJankinessChanged(const std::set<ui::Window*>& client_windows, bool not_responding) override; void OnWmNewDisplay(ui::Window* window, const display::Display& display) override; void OnWmDisplayRemoved(ui::Window* window) override; void OnWmDisplayModified(const display::Display& display) override; void OnWmPerformMoveLoop(ui::Window* window, ui::mojom::MoveLoopSource source, const gfx::Point& cursor_location, const base::Callback<void(bool)>& on_done) override; void OnWmCancelMoveLoop(ui::Window* window) override; ui::mojom::EventResult OnAccelerator(uint32_t id, const ui::Event& event) override; service_manager::Connector* connector_; display::mojom::DisplayControllerPtr display_controller_; std::unique_ptr<ui::WindowTreeClient> window_tree_client_; ui::WindowManagerClient* window_manager_client_ 
= nullptr; std::unique_ptr<views::PointerWatcherEventRouter> pointer_watcher_event_router_; std::unique_ptr<ShadowController> shadow_controller_; RootWindowControllers root_window_controllers_; base::ObserverList<WindowManagerObserver> observers_; std::unique_ptr<display::ScreenBase> screen_; std::unique_ptr<WmShellMus> shell_; std::unique_ptr<WmLookupMus> lookup_; std::map<uint16_t, AcceleratorHandler*> accelerator_handlers_; uint16_t next_accelerator_namespace_id_ = 0u; DISALLOW_COPY_AND_ASSIGN(WindowManager); }; } // namespace mus } // namespace ash #endif // ASH_MUS_WINDOW_MANAGER_H_
ssaroha/node-webrtc
third_party/webrtc/include/chromium/src/ash/mus/window_manager.h
C
bsd-2-clause
6,409
// Copyright (c) 2003-present, Jodd Team (http://jodd.org) // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
package jodd.htmlstapler; import jodd.bean.BeanUtil; import jodd.datetime.TimeUtil; import jodd.io.StreamUtil; import jodd.lagarto.TagVisitor; import jodd.lagarto.TagWriter; import jodd.lagarto.adapter.StripHtmlTagAdapter; import jodd.lagarto.filter.SimpleLagartoServletFilter; import jodd.servlet.DispatcherUtil; import jodd.servlet.ServletUtil; import jodd.util.MimeTypes; import jodd.util.StringPool; import jodd.log.Logger; import jodd.log.LoggerFactory; import javax.servlet.FilterConfig; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.OutputStream; import static jodd.htmlstapler.HtmlStaplerBundlesManager.Strategy; /** * HtmlStapler filter. * Part of the parameters are here, the other part is in the * {@link #createBundleManager(javax.servlet.ServletContext, jodd.htmlstapler.HtmlStaplerBundlesManager.Strategy)} bundle manager}. 
*/ public class HtmlStaplerFilter extends SimpleLagartoServletFilter { private static final Logger log = LoggerFactory.getLogger(HtmlStaplerFilter.class); protected HtmlStaplerBundlesManager bundlesManager; protected boolean enabled = true; protected boolean stripHtml = true; protected boolean resetOnStart = true; protected boolean useGzip; protected int cacheMaxAge = TimeUtil.SECONDS_IN_DAY * 30; protected Strategy staplerStrategy = Strategy.RESOURCES_ONLY; @Override public void init(FilterConfig filterConfig) throws ServletException { super.init(filterConfig); bundlesManager = createBundleManager(filterConfig.getServletContext(), staplerStrategy); readFilterConfigParameters(filterConfig, this, "enabled", "stripHtml", "resetOnStart", "useGzip", "cacheMaxAge" ); String staplerStrategyName = filterConfig.getInitParameter("strategy"); if (staplerStrategyName != null) { if (staplerStrategyName.equalsIgnoreCase("ACTION_MANAGED")) { staplerStrategy = Strategy.ACTION_MANAGED; } } readFilterConfigParameters(filterConfig, bundlesManager, "bundleFolder", "downloadLocal", "localAddressAndPort", "localFilesEncoding", "notFoundExceptionEnabled", "sortResources", "staplerPath", "randomDigestChars" ); if (resetOnStart) { bundlesManager.reset(); } } /** * Reads filter config parameters and set into destination target. */ protected void readFilterConfigParameters(FilterConfig filterConfig, Object target, String... parameters) { for (String parameter : parameters) { String value = filterConfig.getInitParameter(parameter); if (value != null) { BeanUtil.setDeclaredProperty(target, parameter, value); } } } /** * Creates {@link HtmlStaplerBundlesManager} instance. 
*/ protected HtmlStaplerBundlesManager createBundleManager(ServletContext servletContext, Strategy strategy) { String webRoot = servletContext.getRealPath(StringPool.EMPTY); String contextPath = ServletUtil.getContextPath(servletContext); return new HtmlStaplerBundlesManager(contextPath, webRoot, strategy); } @Override protected LagartoParsingProcessor createParsingProcessor() { if (enabled == false) { return null; } return new LagartoParsingProcessor(true) { @Override protected char[] parse(TagWriter rootTagWriter, HttpServletRequest request) { TagVisitor visitor = rootTagWriter; if (stripHtml) { visitor = new StripHtmlTagAdapter(rootTagWriter) { @Override public void end() { super.end(); if (log.isDebugEnabled()) { log.debug("Stripped: " + getStrippedCharsCount() + " chars"); } } }; } String servletPath = DispatcherUtil.getServletPath(request); HtmlStaplerTagAdapter htmlStaplerTagAdapter = new HtmlStaplerTagAdapter(bundlesManager, servletPath, visitor); // todo add more adapters char[] content = invokeLagarto(htmlStaplerTagAdapter); return htmlStaplerTagAdapter.postProcess(content); } }; } @Override protected boolean processActionPath(HttpServletRequest servletRequest, HttpServletResponse servletResponse, String actionPath) throws IOException { String bundlePath = '/' + bundlesManager.getStaplerPath() + '/'; if (actionPath.startsWith(bundlePath) == false) { return false; } String bundleId = actionPath.substring(bundlePath.length()); File file = bundlesManager.lookupBundleFile(bundleId); if (log.isDebugEnabled()) { log.debug("bundle: " + bundleId); } int ndx = bundleId.lastIndexOf('.'); String extension = bundleId.substring(ndx + 1); String contentType = MimeTypes.getMimeType(extension); servletResponse.setContentType(contentType); if (useGzip && ServletUtil.isGzipSupported(servletRequest)) { file = bundlesManager.lookupGzipBundleFile(file); servletResponse.setHeader("Content-Encoding", "gzip"); } if (file.exists() == false) { throw new IOException("bundle not 
found: " + bundleId); } servletResponse.setHeader("Content-Length", String.valueOf(file.length())); servletResponse.setHeader("Last-Modified", TimeUtil.formatHttpDate(file.lastModified())); if (cacheMaxAge > 0) { servletResponse.setHeader("Cache-Control", "max-age=" + cacheMaxAge); } sendBundleFile(servletResponse, file); return true; } /** * Outputs bundle file to the response. */ protected void sendBundleFile(HttpServletResponse resp, File bundleFile) throws IOException { OutputStream out = resp.getOutputStream(); FileInputStream fileInputStream = new FileInputStream(bundleFile); try { StreamUtil.copy(fileInputStream, out); } finally { StreamUtil.close(fileInputStream); } } }
tempbottle/jodd
jodd-lagarto-web/src/main/java/jodd/htmlstapler/HtmlStaplerFilter.java
Java
bsd-2-clause
7,417
/*- * Copyright (c) 2003-2009 RMI Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of RMI Corporation, nor the names of its contributors, * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* $FreeBSD: release/9.1.0/sys/mips/rmi/dev/nlge/if_nlge.h 215939 2010-11-27 13:35:19Z jchandra $ * * RMI_BSD */ /* #define MAC_SPLIT_MODE */ #define MAC_SPACING 0x400 #define XGMAC_SPACING 0x400 /* PE-MCXMAC register and bit field definitions */ #define R_MAC_CONFIG_1 0x00 #define O_MAC_CONFIG_1__srst 31 #define O_MAC_CONFIG_1__simr 30 #define O_MAC_CONFIG_1__hrrmc 18 #define W_MAC_CONFIG_1__hrtmc 2 #define O_MAC_CONFIG_1__hrrfn 16 #define W_MAC_CONFIG_1__hrtfn 2 #define O_MAC_CONFIG_1__intlb 8 #define O_MAC_CONFIG_1__rxfc 5 #define O_MAC_CONFIG_1__txfc 4 #define O_MAC_CONFIG_1__srxen 3 #define O_MAC_CONFIG_1__rxen 2 #define O_MAC_CONFIG_1__stxen 1 #define O_MAC_CONFIG_1__txen 0 #define R_MAC_CONFIG_2 0x01 #define O_MAC_CONFIG_2__prlen 12 #define W_MAC_CONFIG_2__prlen 4 #define O_MAC_CONFIG_2__speed 8 #define W_MAC_CONFIG_2__speed 2 #define O_MAC_CONFIG_2__hugen 5 #define O_MAC_CONFIG_2__flchk 4 #define O_MAC_CONFIG_2__crce 1 #define O_MAC_CONFIG_2__fulld 0 #define R_IPG_IFG 0x02 #define O_IPG_IFG__ipgr1 24 #define W_IPG_IFG__ipgr1 7 #define O_IPG_IFG__ipgr2 16 #define W_IPG_IFG__ipgr2 7 #define O_IPG_IFG__mifg 8 #define W_IPG_IFG__mifg 8 #define O_IPG_IFG__ipgt 0 #define W_IPG_IFG__ipgt 7 #define R_HALF_DUPLEX 0x03 #define O_HALF_DUPLEX__abebt 24 #define W_HALF_DUPLEX__abebt 4 #define O_HALF_DUPLEX__abebe 19 #define O_HALF_DUPLEX__bpnb 18 #define O_HALF_DUPLEX__nobo 17 #define O_HALF_DUPLEX__edxsdfr 16 #define O_HALF_DUPLEX__retry 12 #define W_HALF_DUPLEX__retry 4 #define O_HALF_DUPLEX__lcol 0 #define W_HALF_DUPLEX__lcol 10 #define R_MAXIMUM_FRAME_LENGTH 0x04 #define O_MAXIMUM_FRAME_LENGTH__maxf 0 #define W_MAXIMUM_FRAME_LENGTH__maxf 16 #define R_TEST 0x07 #define O_TEST__mbof 3 #define O_TEST__rthdf 2 #define O_TEST__tpause 1 #define O_TEST__sstct 0 #define R_MII_MGMT_CONFIG 0x08 #define O_MII_MGMT_CONFIG__scinc 5 #define O_MII_MGMT_CONFIG__spre 4 #define O_MII_MGMT_CONFIG__clks 3 #define W_MII_MGMT_CONFIG__clks 3 #define R_MII_MGMT_COMMAND 0x09 #define 
O_MII_MGMT_COMMAND__scan 1 /* continuation of a #define split at the chunk boundary above */
/*
 * MII management / GMAC interface registers.
 * Naming convention (apparently, confirm against the XLR datasheet):
 *   R_*          register word index
 *   O_REG__field bit offset of a field within the register
 *   W_REG__field field width in bits
 */
#define O_MII_MGMT_COMMAND__rstat 0
#define R_MII_MGMT_ADDRESS 0x0A
#define O_MII_MGMT_ADDRESS__fiad 8
#define W_MII_MGMT_ADDRESS__fiad 5
#define O_MII_MGMT_ADDRESS__fgad 5
#define W_MII_MGMT_ADDRESS__fgad 0
#define R_MII_MGMT_WRITE_DATA 0x0B
#define O_MII_MGMT_WRITE_DATA__ctld 0
#define W_MII_MGMT_WRITE_DATA__ctld 16
#define R_MII_MGMT_STATUS 0x0C
#define R_MII_MGMT_INDICATORS 0x0D
#define O_MII_MGMT_INDICATORS__nvalid 2
#define O_MII_MGMT_INDICATORS__scan 1
#define O_MII_MGMT_INDICATORS__busy 0
#define R_INTERFACE_CONTROL 0x0E
#define O_INTERFACE_CONTROL__hrstint 31
#define O_INTERFACE_CONTROL__tbimode 27
#define O_INTERFACE_CONTROL__ghdmode 26
#define O_INTERFACE_CONTROL__lhdmode 25
#define O_INTERFACE_CONTROL__phymod 24
#define O_INTERFACE_CONTROL__hrrmi 23
#define O_INTERFACE_CONTROL__rspd 16
#define O_INTERFACE_CONTROL__hr100 15
#define O_INTERFACE_CONTROL__frcq 10
#define O_INTERFACE_CONTROL__nocfr 9
#define O_INTERFACE_CONTROL__dlfct 8
#define O_INTERFACE_CONTROL__enjab 0
#define R_INTERFACE_STATUS 0x0F
#define O_INTERFACE_STATUS__xsdfr 9
#define O_INTERFACE_STATUS__ssrr 8
#define W_INTERFACE_STATUS__ssrr 5
#define O_INTERFACE_STATUS__miilf 3
#define O_INTERFACE_STATUS__locar 2
#define O_INTERFACE_STATUS__sqerr 1
#define O_INTERFACE_STATUS__jabber 0
#define R_STATION_ADDRESS_LS 0x10
#define R_STATION_ADDRESS_MS 0x11
/* A-XGMAC register and bit field definitions */
#define R_XGMAC_CONFIG_0 0x00
#define O_XGMAC_CONFIG_0__hstmacrst 31
#define O_XGMAC_CONFIG_0__hstrstrctl 23
#define O_XGMAC_CONFIG_0__hstrstrfn 22
#define O_XGMAC_CONFIG_0__hstrsttctl 18
#define O_XGMAC_CONFIG_0__hstrsttfn 17
#define O_XGMAC_CONFIG_0__hstrstmiim 16
#define O_XGMAC_CONFIG_0__hstloopback 8
#define R_XGMAC_CONFIG_1 0x01
#define O_XGMAC_CONFIG_1__hsttctlen 31
#define O_XGMAC_CONFIG_1__hsttfen 30
#define O_XGMAC_CONFIG_1__hstrctlen 29
#define O_XGMAC_CONFIG_1__hstrfen 28
#define O_XGMAC_CONFIG_1__tfen 26
#define O_XGMAC_CONFIG_1__rfen 24
#define O_XGMAC_CONFIG_1__hstrctlshrtp 12
#define O_XGMAC_CONFIG_1__hstdlyfcstx 10
#define W_XGMAC_CONFIG_1__hstdlyfcstx 2
#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8
#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2
#define O_XGMAC_CONFIG_1__hstppen 7
#define O_XGMAC_CONFIG_1__hstbytswp 6
#define O_XGMAC_CONFIG_1__hstdrplt64 5
#define O_XGMAC_CONFIG_1__hstprmscrx 4
#define O_XGMAC_CONFIG_1__hstlenchk 3
#define O_XGMAC_CONFIG_1__hstgenfcs 2
#define O_XGMAC_CONFIG_1__hstpadmode 0
#define W_XGMAC_CONFIG_1__hstpadmode 2
#define R_XGMAC_CONFIG_2 0x02
#define O_XGMAC_CONFIG_2__hsttctlfrcp 31
#define O_XGMAC_CONFIG_2__hstmlnkflth 27
#define O_XGMAC_CONFIG_2__hstalnkflth 26
#define O_XGMAC_CONFIG_2__rflnkflt 24
#define W_XGMAC_CONFIG_2__rflnkflt 2
#define O_XGMAC_CONFIG_2__hstipgextmod 16
#define W_XGMAC_CONFIG_2__hstipgextmod 5
#define O_XGMAC_CONFIG_2__hstrctlfrcp 15
#define O_XGMAC_CONFIG_2__hstipgexten 5
#define O_XGMAC_CONFIG_2__hstmipgext 0
#define W_XGMAC_CONFIG_2__hstmipgext 5
#define R_XGMAC_CONFIG_3 0x03
#define O_XGMAC_CONFIG_3__hstfltrfrm 31
#define W_XGMAC_CONFIG_3__hstfltrfrm 16
#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15
#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16
#define R_XGMAC_STATION_ADDRESS_LS 0x04
#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0
#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32
#define R_XGMAC_STATION_ADDRESS_MS 0x05
#define R_XGMAC_MAX_FRAME_LEN 0x08
#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16
#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14
#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0
#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16
#define R_XGMAC_REV_LEVEL 0x0B
#define O_XGMAC_REV_LEVEL__revlvl 0
#define W_XGMAC_REV_LEVEL__revlvl 15
#define R_XGMAC_MIIM_COMMAND 0x10
#define O_XGMAC_MIIM_COMMAND__hstldcmd 3
#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0
#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3
/* NOTE(review): "FILED" below looks like a typo for "FIELD"; names kept as-is since callers may reference them */
#define R_XGMAC_MIIM_FILED 0x11
#define O_XGMAC_MIIM_FILED__hststfield 30
#define W_XGMAC_MIIM_FILED__hststfield 2
#define O_XGMAC_MIIM_FILED__hstopfield 28
#define W_XGMAC_MIIM_FILED__hstopfield 2
#define O_XGMAC_MIIM_FILED__hstphyadx 23
#define W_XGMAC_MIIM_FILED__hstphyadx 5
#define O_XGMAC_MIIM_FILED__hstregadx 18
#define W_XGMAC_MIIM_FILED__hstregadx 5
#define O_XGMAC_MIIM_FILED__hsttafield 16
#define W_XGMAC_MIIM_FILED__hsttafield 2
#define O_XGMAC_MIIM_FILED__miimrddat 0
#define W_XGMAC_MIIM_FILED__miimrddat 16
#define R_XGMAC_MIIM_CONFIG 0x12
#define O_XGMAC_MIIM_CONFIG__hstnopram 7
#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0
#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7
#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13
#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0
#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32
#define R_XGMAC_MIIM_INDICATOR 0x14
#define O_XGMAC_MIIM_INDICATOR__miimphylf 4
#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3
#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2
#define O_XGMAC_MIIM_INDICATOR__miimmon 1
#define O_XGMAC_MIIM_INDICATOR__miimbusy 0
/* GMAC stats registers */
#define R_RBYT 0x27
#define R_RPKT 0x28
#define R_RFCS 0x29
#define R_RMCA 0x2A
#define R_RBCA 0x2B
#define R_RXCF 0x2C
#define R_RXPF 0x2D
#define R_RXUO 0x2E
#define R_RALN 0x2F
#define R_RFLR 0x30
#define R_RCDE 0x31
#define R_RCSE 0x32
#define R_RUND 0x33
#define R_ROVR 0x34
#define R_TBYT 0x38
#define R_TPKT 0x39
#define R_TMCA 0x3A
#define R_TBCA 0x3B
#define R_TXPF 0x3C
#define R_TDFR 0x3D
#define R_TEDF 0x3E
#define R_TSCL 0x3F
#define R_TMCL 0x40
#define R_TLCL 0x41
#define R_TXCL 0x42
#define R_TNCL 0x43
#define R_TJBR 0x46
#define R_TFCS 0x47
#define R_TXCF 0x48
#define R_TOVR 0x49
#define R_TUND 0x4A
#define R_TFRG 0x4B
/* Glue logic register and bit field definitions */
#define R_MAC_ADDR0 0x50
#define R_MAC_ADDR1 0x52
#define R_MAC_ADDR2 0x54
#define R_MAC_ADDR3 0x56
#define R_MAC_ADDR_MASK2 0x58
#define R_MAC_ADDR_MASK3 0x5A
#define R_MAC_FILTER_CONFIG 0x5C
#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10
#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9
#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8
#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7
#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6
#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5
#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4
#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3
#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2
#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1
#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0
/* NOTE(review): 0x30 here overlaps R_RFLR above — presumably distinct register spaces (stats vs. glue logic); confirm */
#define R_HASH_TABLE_VECTOR 0x30
#define R_TX_CONTROL 0x0A0
#define O_TX_CONTROL__Tx15Halt 31
#define O_TX_CONTROL__Tx14Halt 30
#define O_TX_CONTROL__Tx13Halt 29
#define O_TX_CONTROL__Tx12Halt 28
#define O_TX_CONTROL__Tx11Halt 27
#define O_TX_CONTROL__Tx10Halt 26
#define O_TX_CONTROL__Tx9Halt 25
#define O_TX_CONTROL__Tx8Halt 24
#define O_TX_CONTROL__Tx7Halt 23
#define O_TX_CONTROL__Tx6Halt 22
#define O_TX_CONTROL__Tx5Halt 21
#define O_TX_CONTROL__Tx4Halt 20
#define O_TX_CONTROL__Tx3Halt 19
#define O_TX_CONTROL__Tx2Halt 18
#define O_TX_CONTROL__Tx1Halt 17
#define O_TX_CONTROL__Tx0Halt 16
#define O_TX_CONTROL__TxIdle 15
#define O_TX_CONTROL__TxEnable 14
#define O_TX_CONTROL__TxThreshold 0
#define W_TX_CONTROL__TxThreshold 14
#define R_RX_CONTROL 0x0A1
#define O_RX_CONTROL__RGMII 10
#define O_RX_CONTROL__SoftReset 2
#define O_RX_CONTROL__RxHalt 1
#define O_RX_CONTROL__RxEnable 0
#define R_DESC_PACK_CTRL 0x0A2
#define O_DESC_PACK_CTRL__ByteOffset 17
#define W_DESC_PACK_CTRL__ByteOffset 3
#define O_DESC_PACK_CTRL__PrePadEnable 16
#define O_DESC_PACK_CTRL__MaxEntry 14
#define W_DESC_PACK_CTRL__MaxEntry 2
#define O_DESC_PACK_CTRL__RegularSize 0
#define W_DESC_PACK_CTRL__RegularSize 14
#define R_STATCTRL 0x0A3
#define O_STATCTRL__OverFlowEn 4
#define O_STATCTRL__GIG 3
#define O_STATCTRL__Sten 2
#define O_STATCTRL__ClrCnt 1
#define O_STATCTRL__AutoZ 0
#define R_L2ALLOCCTRL 0x0A4
#define O_L2ALLOCCTRL__TxL2Allocate 9
#define W_L2ALLOCCTRL__TxL2Allocate 9
#define O_L2ALLOCCTRL__RxL2Allocate 0
#define W_L2ALLOCCTRL__RxL2Allocate 9
#define R_INTMASK 0x0A5
#define O_INTMASK__Spi4TxError 28
#define O_INTMASK__Spi4RxError 27
/*
 * Interrupt mask/status, TX retry, core control, classifier tables,
 * DMA credit registers, spill-memory windows, SPI-4 thresholds and
 * per-port data-FIFO carving for the XLR GMAC block.
 * NOTE(review): Spi4RxError and RGMIIHalfDupCollision share bit 27 —
 * presumably the bit's meaning depends on the interface mode (SPI-4 vs.
 * RGMII); confirm against the XLR datasheet.
 */
#define O_INTMASK__RGMIIHalfDupCollision 27
#define O_INTMASK__Abort 26
#define O_INTMASK__Underrun 25
#define O_INTMASK__DiscardPacket 24
#define O_INTMASK__AsyncFifoFull 23
#define O_INTMASK__TagFull 22
#define O_INTMASK__Class3Full 21
#define O_INTMASK__C3EarlyFull 20
#define O_INTMASK__Class2Full 19
#define O_INTMASK__C2EarlyFull 18
#define O_INTMASK__Class1Full 17
#define O_INTMASK__C1EarlyFull 16
#define O_INTMASK__Class0Full 15
#define O_INTMASK__C0EarlyFull 14
#define O_INTMASK__RxDataFull 13
#define O_INTMASK__RxEarlyFull 12
#define O_INTMASK__RFreeEmpty 9
#define O_INTMASK__RFEarlyEmpty 8
#define O_INTMASK__P2PSpillEcc 7
#define O_INTMASK__FreeDescFull 5
#define O_INTMASK__FreeEarlyFull 4
#define O_INTMASK__TxFetchError 3
#define O_INTMASK__StatCarry 2
#define O_INTMASK__MDInt 1
#define O_INTMASK__TxIllegal 0
/* R_INTREG mirrors the O_INTMASK__* bit layout */
#define R_INTREG 0x0A6
#define O_INTREG__Spi4TxError 28
#define O_INTREG__Spi4RxError 27
#define O_INTREG__RGMIIHalfDupCollision 27
#define O_INTREG__Abort 26
#define O_INTREG__Underrun 25
#define O_INTREG__DiscardPacket 24
#define O_INTREG__AsyncFifoFull 23
#define O_INTREG__TagFull 22
#define O_INTREG__Class3Full 21
#define O_INTREG__C3EarlyFull 20
#define O_INTREG__Class2Full 19
#define O_INTREG__C2EarlyFull 18
#define O_INTREG__Class1Full 17
#define O_INTREG__C1EarlyFull 16
#define O_INTREG__Class0Full 15
#define O_INTREG__C0EarlyFull 14
#define O_INTREG__RxDataFull 13
#define O_INTREG__RxEarlyFull 12
#define O_INTREG__RFreeEmpty 9
#define O_INTREG__RFEarlyEmpty 8
#define O_INTREG__P2PSpillEcc 7
#define O_INTREG__FreeDescFull 5
#define O_INTREG__FreeEarlyFull 4
#define O_INTREG__TxFetchError 3
#define O_INTREG__StatCarry 2
#define O_INTREG__MDInt 1
#define O_INTREG__TxIllegal 0
#define R_TXRETRY 0x0A7
#define O_TXRETRY__CollisionRetry 6
#define O_TXRETRY__BusErrorRetry 5
#define O_TXRETRY__UnderRunRetry 4
#define O_TXRETRY__Retries 0
#define W_TXRETRY__Retries 4
#define R_CORECONTROL 0x0A8
#define O_CORECONTROL__ErrorThread 4
#define W_CORECONTROL__ErrorThread 7
#define O_CORECONTROL__Shutdown 2
#define O_CORECONTROL__Speed 0
#define W_CORECONTROL__Speed 2
#define R_BYTEOFFSET0 0x0A9
#define R_BYTEOFFSET1 0x0AA
#define R_L2TYPE_0 0x0F0
#define O_L2TYPE__ExtraHdrProtoSize 26
#define W_L2TYPE__ExtraHdrProtoSize 5
#define O_L2TYPE__ExtraHdrProtoOffset 20
#define W_L2TYPE__ExtraHdrProtoOffset 6
#define O_L2TYPE__ExtraHeaderSize 14
#define W_L2TYPE__ExtraHeaderSize 6
#define O_L2TYPE__ProtoOffset 8
#define W_L2TYPE__ProtoOffset 6
#define O_L2TYPE__L2HdrOffset 2
#define W_L2TYPE__L2HdrOffset 6
#define O_L2TYPE__L2Proto 0
#define W_L2TYPE__L2Proto 2
/* NOTE(review): L2TYPE_1..3 all carry the same value as L2TYPE_0 (0xF0) — looks like a copy-paste placeholder; verify intended indices */
#define R_L2TYPE_1 0xF0
#define R_L2TYPE_2 0xF0
#define R_L2TYPE_3 0xF0
#define R_PARSERCONFIGREG 0x100
#define O_PARSERCONFIGREG__CRCHashPoly 8
#define W_PARSERCONFIGREG__CRCHashPoly 7
#define O_PARSERCONFIGREG__PrePadOffset 4
#define W_PARSERCONFIGREG__PrePadOffset 4
#define O_PARSERCONFIGREG__UseCAM 2
#define O_PARSERCONFIGREG__UseHASH 1
#define O_PARSERCONFIGREG__UseProto 0
#define R_L3CTABLE 0x140
#define O_L3CTABLE__Offset0 25
#define W_L3CTABLE__Offset0 7
#define O_L3CTABLE__Len0 21
#define W_L3CTABLE__Len0 4
#define O_L3CTABLE__Offset1 14
#define W_L3CTABLE__Offset1 7
#define O_L3CTABLE__Len1 10
#define W_L3CTABLE__Len1 4
#define O_L3CTABLE__Offset2 4
#define W_L3CTABLE__Offset2 6
#define O_L3CTABLE__Len2 0
#define W_L3CTABLE__Len2 4
#define O_L3CTABLE__L3HdrOffset 26
#define W_L3CTABLE__L3HdrOffset 6
#define O_L3CTABLE__L4ProtoOffset 20
#define W_L3CTABLE__L4ProtoOffset 6
#define O_L3CTABLE__IPChksumCompute 19
#define O_L3CTABLE__L4Classify 18
#define O_L3CTABLE__L2Proto 16
#define W_L3CTABLE__L2Proto 2
#define O_L3CTABLE__L3ProtoKey 0
#define W_L3CTABLE__L3ProtoKey 16
#define R_L4CTABLE 0x160
#define O_L4CTABLE__Offset0 21
#define W_L4CTABLE__Offset0 6
#define O_L4CTABLE__Len0 17
#define W_L4CTABLE__Len0 4
#define O_L4CTABLE__Offset1 11
#define W_L4CTABLE__Offset1 6
#define O_L4CTABLE__Len1 7
#define W_L4CTABLE__Len1 4
#define O_L4CTABLE__TCPChksumEnable 0
#define R_CAM4X128TABLE 0x172
#define O_CAM4X128TABLE__ClassId 7
#define W_CAM4X128TABLE__ClassId 2
#define O_CAM4X128TABLE__BucketId 1
#define W_CAM4X128TABLE__BucketId 6
#define O_CAM4X128TABLE__UseBucket 0
#define R_CAM4X128KEY 0x180
#define R_TRANSLATETABLE 0x1A0
/* DMA credit registers: 3-bit write/read max-credit fields per data channel */
#define R_DMACR0 0x200
#define O_DMACR0__Data0WrMaxCr 27
#define W_DMACR0__Data0WrMaxCr 3
#define O_DMACR0__Data0RdMaxCr 24
#define W_DMACR0__Data0RdMaxCr 3
#define O_DMACR0__Data1WrMaxCr 21
#define W_DMACR0__Data1WrMaxCr 3
#define O_DMACR0__Data1RdMaxCr 18
#define W_DMACR0__Data1RdMaxCr 3
#define O_DMACR0__Data2WrMaxCr 15
#define W_DMACR0__Data2WrMaxCr 3
#define O_DMACR0__Data2RdMaxCr 12
#define W_DMACR0__Data2RdMaxCr 3
#define O_DMACR0__Data3WrMaxCr 9
#define W_DMACR0__Data3WrMaxCr 3
#define O_DMACR0__Data3RdMaxCr 6
#define W_DMACR0__Data3RdMaxCr 3
#define O_DMACR0__Data4WrMaxCr 3
#define W_DMACR0__Data4WrMaxCr 3
#define O_DMACR0__Data4RdMaxCr 0
#define W_DMACR0__Data4RdMaxCr 3
#define R_DMACR1 0x201
#define O_DMACR1__Data5WrMaxCr 27
#define W_DMACR1__Data5WrMaxCr 3
#define O_DMACR1__Data5RdMaxCr 24
#define W_DMACR1__Data5RdMaxCr 3
#define O_DMACR1__Data6WrMaxCr 21
#define W_DMACR1__Data6WrMaxCr 3
#define O_DMACR1__Data6RdMaxCr 18
#define W_DMACR1__Data6RdMaxCr 3
#define O_DMACR1__Data7WrMaxCr 15
#define W_DMACR1__Data7WrMaxCr 3
#define O_DMACR1__Data7RdMaxCr 12
#define W_DMACR1__Data7RdMaxCr 3
#define O_DMACR1__Data8WrMaxCr 9
#define W_DMACR1__Data8WrMaxCr 3
#define O_DMACR1__Data8RdMaxCr 6
#define W_DMACR1__Data8RdMaxCr 3
#define O_DMACR1__Data9WrMaxCr 3
#define W_DMACR1__Data9WrMaxCr 3
#define O_DMACR1__Data9RdMaxCr 0
#define W_DMACR1__Data9RdMaxCr 3
#define R_DMACR2 0x202
#define O_DMACR2__Data10WrMaxCr 27
#define W_DMACR2__Data10WrMaxCr 3
#define O_DMACR2__Data10RdMaxCr 24
#define W_DMACR2__Data10RdMaxCr 3
#define O_DMACR2__Data11WrMaxCr 21
#define W_DMACR2__Data11WrMaxCr 3
#define O_DMACR2__Data11RdMaxCr 18
#define W_DMACR2__Data11RdMaxCr 3
#define O_DMACR2__Data12WrMaxCr 15
#define W_DMACR2__Data12WrMaxCr 3
#define O_DMACR2__Data12RdMaxCr 12
#define W_DMACR2__Data12RdMaxCr 3
#define O_DMACR2__Data13WrMaxCr 9
#define W_DMACR2__Data13WrMaxCr 3
#define O_DMACR2__Data13RdMaxCr 6
#define W_DMACR2__Data13RdMaxCr 3
#define O_DMACR2__Data14WrMaxCr 3
#define W_DMACR2__Data14WrMaxCr 3
#define O_DMACR2__Data14RdMaxCr 0
#define W_DMACR2__Data14RdMaxCr 3
#define R_DMACR3 0x203
#define O_DMACR3__Data15WrMaxCr 27
#define W_DMACR3__Data15WrMaxCr 3
#define O_DMACR3__Data15RdMaxCr 24
#define W_DMACR3__Data15RdMaxCr 3
#define O_DMACR3__SpClassWrMaxCr 21
#define W_DMACR3__SpClassWrMaxCr 3
#define O_DMACR3__SpClassRdMaxCr 18
#define W_DMACR3__SpClassRdMaxCr 3
#define O_DMACR3__JumFrInWrMaxCr 15
#define W_DMACR3__JumFrInWrMaxCr 3
#define O_DMACR3__JumFrInRdMaxCr 12
#define W_DMACR3__JumFrInRdMaxCr 3
#define O_DMACR3__RegFrInWrMaxCr 9
#define W_DMACR3__RegFrInWrMaxCr 3
#define O_DMACR3__RegFrInRdMaxCr 6
#define W_DMACR3__RegFrInRdMaxCr 3
#define O_DMACR3__FrOutWrMaxCr 3
#define W_DMACR3__FrOutWrMaxCr 3
#define O_DMACR3__FrOutRdMaxCr 0
#define W_DMACR3__FrOutRdMaxCr 3
/* Spill-memory windows: each is start (two words) + size */
#define R_REG_FRIN_SPILL_MEM_START_0 0x204
#define O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 0
#define W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 32
#define R_REG_FRIN_SPILL_MEM_START_1 0x205
#define O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 0
#define W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 3
#define R_REG_FRIN_SPILL_MEM_SIZE 0x206
#define O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 0
#define W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 32
#define R_FROUT_SPILL_MEM_START_0 0x207
#define O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 0
#define W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 32
#define R_FROUT_SPILL_MEM_START_1 0x208
#define O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 0
#define W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 3
#define R_FROUT_SPILL_MEM_SIZE 0x209
#define O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 0
#define W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 32
#define R_CLASS0_SPILL_MEM_START_0 0x20A
#define O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 0
#define W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 32
#define R_CLASS0_SPILL_MEM_START_1 0x20B
#define O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 0
#define W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 3
#define R_CLASS0_SPILL_MEM_SIZE 0x20C
#define O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 0
#define W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 32
/* NOTE(review): "Star0" (not "Start0") in the two names below is in the original; kept for source compatibility */
#define R_JUMFRIN_SPILL_MEM_START_0 0x20D
#define O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 0
#define W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 32
#define R_JUMFRIN_SPILL_MEM_START_1 0x20E
#define O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 0
#define W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 3
#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F
#define O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 0
#define W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 32
#define R_CLASS1_SPILL_MEM_START_0 0x210
#define O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 0
#define W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 32
#define R_CLASS1_SPILL_MEM_START_1 0x211
#define O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 0
#define W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 3
#define R_CLASS1_SPILL_MEM_SIZE 0x212
#define O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 0
#define W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 32
#define R_CLASS2_SPILL_MEM_START_0 0x213
#define O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 0
#define W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 32
#define R_CLASS2_SPILL_MEM_START_1 0x214
#define O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 0
#define W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 3
#define R_CLASS2_SPILL_MEM_SIZE 0x215
#define O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 0
#define W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 32
#define R_CLASS3_SPILL_MEM_START_0 0x216
#define O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 0
#define W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 32
#define R_CLASS3_SPILL_MEM_START_1 0x217
#define O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 0
#define W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 3
#define R_CLASS3_SPILL_MEM_SIZE 0x218
#define O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 0
#define W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 32
/* NOTE(review): 0x219-0x21b are also claimed by R_SPIHNGY0..2 below — presumably mode-dependent (FRIN1 vs. SPI-4); confirm */
#define R_REG_FRIN1_SPILL_MEM_START_0 0x219
#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a
#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b
#define R_SPIHNGY0 0x219
#define O_SPIHNGY0__EG_HNGY_THRESH_0 24
#define W_SPIHNGY0__EG_HNGY_THRESH_0 7
#define O_SPIHNGY0__EG_HNGY_THRESH_1 16
#define W_SPIHNGY0__EG_HNGY_THRESH_1 7
#define O_SPIHNGY0__EG_HNGY_THRESH_2 8
#define W_SPIHNGY0__EG_HNGY_THRESH_2 7
#define O_SPIHNGY0__EG_HNGY_THRESH_3 0
#define W_SPIHNGY0__EG_HNGY_THRESH_3 7
#define R_SPIHNGY1 0x21A
#define O_SPIHNGY1__EG_HNGY_THRESH_4 24
#define W_SPIHNGY1__EG_HNGY_THRESH_4 7
#define O_SPIHNGY1__EG_HNGY_THRESH_5 16
#define W_SPIHNGY1__EG_HNGY_THRESH_5 7
#define O_SPIHNGY1__EG_HNGY_THRESH_6 8
#define W_SPIHNGY1__EG_HNGY_THRESH_6 7
#define O_SPIHNGY1__EG_HNGY_THRESH_7 0
#define W_SPIHNGY1__EG_HNGY_THRESH_7 7
#define R_SPIHNGY2 0x21B
#define O_SPIHNGY2__EG_HNGY_THRESH_8 24
#define W_SPIHNGY2__EG_HNGY_THRESH_8 7
#define O_SPIHNGY2__EG_HNGY_THRESH_9 16
#define W_SPIHNGY2__EG_HNGY_THRESH_9 7
#define O_SPIHNGY2__EG_HNGY_THRESH_10 8
#define W_SPIHNGY2__EG_HNGY_THRESH_10 7
#define O_SPIHNGY2__EG_HNGY_THRESH_11 0
#define W_SPIHNGY2__EG_HNGY_THRESH_11 7
#define R_SPIHNGY3 0x21C
#define O_SPIHNGY3__EG_HNGY_THRESH_12 24
#define W_SPIHNGY3__EG_HNGY_THRESH_12 7
#define O_SPIHNGY3__EG_HNGY_THRESH_13 16
#define W_SPIHNGY3__EG_HNGY_THRESH_13 7
#define O_SPIHNGY3__EG_HNGY_THRESH_14 8
#define W_SPIHNGY3__EG_HNGY_THRESH_14 7
#define O_SPIHNGY3__EG_HNGY_THRESH_15 0
#define W_SPIHNGY3__EG_HNGY_THRESH_15 7
#define R_SPISTRV0 0x21D
#define O_SPISTRV0__EG_STRV_THRESH_0 24
#define W_SPISTRV0__EG_STRV_THRESH_0 7
#define O_SPISTRV0__EG_STRV_THRESH_1 16
#define W_SPISTRV0__EG_STRV_THRESH_1 7
#define O_SPISTRV0__EG_STRV_THRESH_2 8
#define W_SPISTRV0__EG_STRV_THRESH_2 7
#define O_SPISTRV0__EG_STRV_THRESH_3 0
#define W_SPISTRV0__EG_STRV_THRESH_3 7
#define R_SPISTRV1 0x21E
#define O_SPISTRV1__EG_STRV_THRESH_4 24
#define W_SPISTRV1__EG_STRV_THRESH_4 7
#define O_SPISTRV1__EG_STRV_THRESH_5 16
#define W_SPISTRV1__EG_STRV_THRESH_5 7
#define O_SPISTRV1__EG_STRV_THRESH_6 8
#define W_SPISTRV1__EG_STRV_THRESH_6 7
#define O_SPISTRV1__EG_STRV_THRESH_7 0
#define W_SPISTRV1__EG_STRV_THRESH_7 7
#define R_SPISTRV2 0x21F
#define O_SPISTRV2__EG_STRV_THRESH_8 24
#define W_SPISTRV2__EG_STRV_THRESH_8 7
#define O_SPISTRV2__EG_STRV_THRESH_9 16
#define W_SPISTRV2__EG_STRV_THRESH_9 7
#define O_SPISTRV2__EG_STRV_THRESH_10 8
#define W_SPISTRV2__EG_STRV_THRESH_10 7
#define O_SPISTRV2__EG_STRV_THRESH_11 0
#define W_SPISTRV2__EG_STRV_THRESH_11 7
#define R_SPISTRV3 0x220
#define O_SPISTRV3__EG_STRV_THRESH_12 24
#define W_SPISTRV3__EG_STRV_THRESH_12 7
#define O_SPISTRV3__EG_STRV_THRESH_13 16
#define W_SPISTRV3__EG_STRV_THRESH_13 7
#define O_SPISTRV3__EG_STRV_THRESH_14 8
#define W_SPISTRV3__EG_STRV_THRESH_14 7
#define O_SPISTRV3__EG_STRV_THRESH_15 0
#define W_SPISTRV3__EG_STRV_THRESH_15 7
/* Per-port TX data FIFO carving: start + size fields, two ports per register */
#define R_TXDATAFIFO0 0x221
#define O_TXDATAFIFO0__Tx0DataFifoStart 24
#define W_TXDATAFIFO0__Tx0DataFifoStart 7
#define O_TXDATAFIFO0__Tx0DataFifoSize 16
#define W_TXDATAFIFO0__Tx0DataFifoSize 7
#define O_TXDATAFIFO0__Tx1DataFifoStart 8
#define W_TXDATAFIFO0__Tx1DataFifoStart 7
#define O_TXDATAFIFO0__Tx1DataFifoSize 0
#define W_TXDATAFIFO0__Tx1DataFifoSize 7
#define R_TXDATAFIFO1 0x222
#define O_TXDATAFIFO1__Tx2DataFifoStart 24
#define W_TXDATAFIFO1__Tx2DataFifoStart 7
#define O_TXDATAFIFO1__Tx2DataFifoSize 16
#define W_TXDATAFIFO1__Tx2DataFifoSize 7
#define O_TXDATAFIFO1__Tx3DataFifoStart 8
#define W_TXDATAFIFO1__Tx3DataFifoStart 7
#define O_TXDATAFIFO1__Tx3DataFifoSize 0
#define W_TXDATAFIFO1__Tx3DataFifoSize 7
#define R_TXDATAFIFO2 0x223
#define O_TXDATAFIFO2__Tx4DataFifoStart 24
#define W_TXDATAFIFO2__Tx4DataFifoStart 7
#define O_TXDATAFIFO2__Tx4DataFifoSize 16
#define W_TXDATAFIFO2__Tx4DataFifoSize 7
#define O_TXDATAFIFO2__Tx5DataFifoStart 8
#define W_TXDATAFIFO2__Tx5DataFifoStart 7
#define O_TXDATAFIFO2__Tx5DataFifoSize 0
#define W_TXDATAFIFO2__Tx5DataFifoSize 7
#define R_TXDATAFIFO3 0x224
#define O_TXDATAFIFO3__Tx6DataFifoStart 24
#define W_TXDATAFIFO3__Tx6DataFifoStart 7
#define O_TXDATAFIFO3__Tx6DataFifoSize 16
#define W_TXDATAFIFO3__Tx6DataFifoSize 7
#define O_TXDATAFIFO3__Tx7DataFifoStart 8
#define W_TXDATAFIFO3__Tx7DataFifoStart 7
#define O_TXDATAFIFO3__Tx7DataFifoSize 0
#define W_TXDATAFIFO3__Tx7DataFifoSize 7
#define R_TXDATAFIFO4 0x225
#define O_TXDATAFIFO4__Tx8DataFifoStart 24
#define W_TXDATAFIFO4__Tx8DataFifoStart 7
#define O_TXDATAFIFO4__Tx8DataFifoSize 16
#define W_TXDATAFIFO4__Tx8DataFifoSize 7
#define O_TXDATAFIFO4__Tx9DataFifoStart 8
#define W_TXDATAFIFO4__Tx9DataFifoStart 7
#define O_TXDATAFIFO4__Tx9DataFifoSize 0
#define W_TXDATAFIFO4__Tx9DataFifoSize 7
#define R_TXDATAFIFO5 0x226
#define O_TXDATAFIFO5__Tx10DataFifoStart 24
#define W_TXDATAFIFO5__Tx10DataFifoStart 7
#define O_TXDATAFIFO5__Tx10DataFifoSize 16
#define W_TXDATAFIFO5__Tx10DataFifoSize 7
#define O_TXDATAFIFO5__Tx11DataFifoStart 8
#define W_TXDATAFIFO5__Tx11DataFifoStart 7
#define O_TXDATAFIFO5__Tx11DataFifoSize 0
#define W_TXDATAFIFO5__Tx11DataFifoSize 7
#define R_TXDATAFIFO6 0x227
#define O_TXDATAFIFO6__Tx12DataFifoStart 24
#define W_TXDATAFIFO6__Tx12DataFifoStart 7
#define O_TXDATAFIFO6__Tx12DataFifoSize 16
#define W_TXDATAFIFO6__Tx12DataFifoSize 7
#define O_TXDATAFIFO6__Tx13DataFifoStart 8
#define W_TXDATAFIFO6__Tx13DataFifoStart 7
#define O_TXDATAFIFO6__Tx13DataFifoSize 0
#define W_TXDATAFIFO6__Tx13DataFifoSize 7
#define R_TXDATAFIFO7 0x228
#define O_TXDATAFIFO7__Tx14DataFifoStart 24
#define W_TXDATAFIFO7__Tx14DataFifoStart 7
#define O_TXDATAFIFO7__Tx14DataFifoSize 16
#define W_TXDATAFIFO7__Tx14DataFifoSize 7
#define O_TXDATAFIFO7__Tx15DataFifoStart 8
#define W_TXDATAFIFO7__Tx15DataFifoStart 7
#define O_TXDATAFIFO7__Tx15DataFifoSize 0
#define W_TXDATAFIFO7__Tx15DataFifoSize 7
/* Per-port RX data FIFO carving, same layout as TXDATAFIFO* */
#define R_RXDATAFIFO0 0x229
#define O_RXDATAFIFO0__Rx0DataFifoStart 24
#define W_RXDATAFIFO0__Rx0DataFifoStart 7
#define O_RXDATAFIFO0__Rx0DataFifoSize 16
#define W_RXDATAFIFO0__Rx0DataFifoSize 7
#define O_RXDATAFIFO0__Rx1DataFifoStart 8
#define W_RXDATAFIFO0__Rx1DataFifoStart 7
#define O_RXDATAFIFO0__Rx1DataFifoSize 0
#define W_RXDATAFIFO0__Rx1DataFifoSize 7
#define R_RXDATAFIFO1 0x22A
#define O_RXDATAFIFO1__Rx2DataFifoStart 24
#define W_RXDATAFIFO1__Rx2DataFifoStart 7
#define O_RXDATAFIFO1__Rx2DataFifoSize 16
#define W_RXDATAFIFO1__Rx2DataFifoSize 7
#define O_RXDATAFIFO1__Rx3DataFifoStart 8
#define W_RXDATAFIFO1__Rx3DataFifoStart 7
#define O_RXDATAFIFO1__Rx3DataFifoSize 0
#define W_RXDATAFIFO1__Rx3DataFifoSize 7
#define R_RXDATAFIFO2 0x22B
#define O_RXDATAFIFO2__Rx4DataFifoStart 24
#define W_RXDATAFIFO2__Rx4DataFifoStart 7
#define O_RXDATAFIFO2__Rx4DataFifoSize 16
#define W_RXDATAFIFO2__Rx4DataFifoSize 7
#define O_RXDATAFIFO2__Rx5DataFifoStart 8
#define W_RXDATAFIFO2__Rx5DataFifoStart 7
#define O_RXDATAFIFO2__Rx5DataFifoSize 0
#define W_RXDATAFIFO2__Rx5DataFifoSize 7
#define R_RXDATAFIFO3 0x22C
#define O_RXDATAFIFO3__Rx6DataFifoStart 24
#define W_RXDATAFIFO3__Rx6DataFifoStart 7
#define O_RXDATAFIFO3__Rx6DataFifoSize 16
#define W_RXDATAFIFO3__Rx6DataFifoSize 7
#define O_RXDATAFIFO3__Rx7DataFifoStart 8
#define W_RXDATAFIFO3__Rx7DataFifoStart 7
#define O_RXDATAFIFO3__Rx7DataFifoSize 0
#define W_RXDATAFIFO3__Rx7DataFifoSize 7
#define R_RXDATAFIFO4 0x22D
#define O_RXDATAFIFO4__Rx8DataFifoStart 24
#define W_RXDATAFIFO4__Rx8DataFifoStart 7
#define O_RXDATAFIFO4__Rx8DataFifoSize 16
#define W_RXDATAFIFO4__Rx8DataFifoSize 7
#define O_RXDATAFIFO4__Rx9DataFifoStart 8
#define W_RXDATAFIFO4__Rx9DataFifoStart 7
#define O_RXDATAFIFO4__Rx9DataFifoSize 0
#define W_RXDATAFIFO4__Rx9DataFifoSize 7
#define R_RXDATAFIFO5 0x22E
#define O_RXDATAFIFO5__Rx10DataFifoStart 24
#define W_RXDATAFIFO5__Rx10DataFifoStart 7
#define O_RXDATAFIFO5__Rx10DataFifoSize 16
#define W_RXDATAFIFO5__Rx10DataFifoSize 7
#define O_RXDATAFIFO5__Rx11DataFifoStart 8
#define W_RXDATAFIFO5__Rx11DataFifoStart 7
#define O_RXDATAFIFO5__Rx11DataFifoSize 0
#define W_RXDATAFIFO5__Rx11DataFifoSize 7
#define R_RXDATAFIFO6 0x22F
#define O_RXDATAFIFO6__Rx12DataFifoStart 24
#define W_RXDATAFIFO6__Rx12DataFifoStart 7
#define O_RXDATAFIFO6__Rx12DataFifoSize 16
#define W_RXDATAFIFO6__Rx12DataFifoSize 7
#define O_RXDATAFIFO6__Rx13DataFifoStart 8
#define W_RXDATAFIFO6__Rx13DataFifoStart 7
#define O_RXDATAFIFO6__Rx13DataFifoSize 0
#define W_RXDATAFIFO6__Rx13DataFifoSize 7
#define R_RXDATAFIFO7 0x230
#define O_RXDATAFIFO7__Rx14DataFifoStart 24
#define W_RXDATAFIFO7__Rx14DataFifoStart 7
#define O_RXDATAFIFO7__Rx14DataFifoSize 16
#define W_RXDATAFIFO7__Rx14DataFifoSize 7
#define O_RXDATAFIFO7__Rx15DataFifoStart 8
#define W_RXDATAFIFO7__Rx15DataFifoStart 7
#define O_RXDATAFIFO7__Rx15DataFifoSize 0
#define W_RXDATAFIFO7__Rx15DataFifoSize 7
#define R_XGMACPADCALIBRATION 0x231
#define R_FREEQCARVE 0x233
/* SPI-4 static receive-delay calibration, 4-bit delay per data line */
#define R_SPI4STATICDELAY0 0x240
#define O_SPI4STATICDELAY0__DataLine7 28
#define W_SPI4STATICDELAY0__DataLine7 4
#define O_SPI4STATICDELAY0__DataLine6 24
#define W_SPI4STATICDELAY0__DataLine6 4
#define O_SPI4STATICDELAY0__DataLine5 20
#define W_SPI4STATICDELAY0__DataLine5 4
#define O_SPI4STATICDELAY0__DataLine4 16
#define W_SPI4STATICDELAY0__DataLine4 4
#define O_SPI4STATICDELAY0__DataLine3 12
#define W_SPI4STATICDELAY0__DataLine3 4
#define O_SPI4STATICDELAY0__DataLine2 8
#define W_SPI4STATICDELAY0__DataLine2 4
#define O_SPI4STATICDELAY0__DataLine1 4
#define W_SPI4STATICDELAY0__DataLine1 4
#define O_SPI4STATICDELAY0__DataLine0 0
#define W_SPI4STATICDELAY0__DataLine0 4
#define R_SPI4STATICDELAY1 0x241
#define O_SPI4STATICDELAY1__DataLine15 28 #define W_SPI4STATICDELAY1__DataLine15 4 #define O_SPI4STATICDELAY1__DataLine14 24 #define W_SPI4STATICDELAY1__DataLine14 4 #define O_SPI4STATICDELAY1__DataLine13 20 #define W_SPI4STATICDELAY1__DataLine13 4 #define O_SPI4STATICDELAY1__DataLine12 16 #define W_SPI4STATICDELAY1__DataLine12 4 #define O_SPI4STATICDELAY1__DataLine11 12 #define W_SPI4STATICDELAY1__DataLine11 4 #define O_SPI4STATICDELAY1__DataLine10 8 #define W_SPI4STATICDELAY1__DataLine10 4 #define O_SPI4STATICDELAY1__DataLine9 4 #define W_SPI4STATICDELAY1__DataLine9 4 #define O_SPI4STATICDELAY1__DataLine8 0 #define W_SPI4STATICDELAY1__DataLine8 4 #define R_SPI4STATICDELAY2 0x242 #define O_SPI4STATICDELAY0__TxStat1 8 #define W_SPI4STATICDELAY0__TxStat1 4 #define O_SPI4STATICDELAY0__TxStat0 4 #define W_SPI4STATICDELAY0__TxStat0 4 #define O_SPI4STATICDELAY0__RxControl 0 #define W_SPI4STATICDELAY0__RxControl 4 #define R_SPI4CONTROL 0x243 #define O_SPI4CONTROL__StaticDelay 2 #define O_SPI4CONTROL__LVDS_LVTTL 1 #define O_SPI4CONTROL__SPI4Enable 0 #define R_CLASSWATERMARKS 0x244 #define O_CLASSWATERMARKS__Class0Watermark 24 #define W_CLASSWATERMARKS__Class0Watermark 5 #define O_CLASSWATERMARKS__Class1Watermark 16 #define W_CLASSWATERMARKS__Class1Watermark 5 #define O_CLASSWATERMARKS__Class3Watermark 0 #define W_CLASSWATERMARKS__Class3Watermark 5 #define R_RXWATERMARKS1 0x245 #define O_RXWATERMARKS__Rx0DataWatermark 24 #define W_RXWATERMARKS__Rx0DataWatermark 7 #define O_RXWATERMARKS__Rx1DataWatermark 16 #define W_RXWATERMARKS__Rx1DataWatermark 7 #define O_RXWATERMARKS__Rx3DataWatermark 0 #define W_RXWATERMARKS__Rx3DataWatermark 7 #define R_RXWATERMARKS2 0x246 #define O_RXWATERMARKS__Rx4DataWatermark 24 #define W_RXWATERMARKS__Rx4DataWatermark 7 #define O_RXWATERMARKS__Rx5DataWatermark 16 #define W_RXWATERMARKS__Rx5DataWatermark 7 #define O_RXWATERMARKS__Rx6DataWatermark 8 #define W_RXWATERMARKS__Rx6DataWatermark 7 #define O_RXWATERMARKS__Rx7DataWatermark 0 #define 
W_RXWATERMARKS__Rx7DataWatermark 7 #define R_RXWATERMARKS3 0x247 #define O_RXWATERMARKS__Rx8DataWatermark 24 #define W_RXWATERMARKS__Rx8DataWatermark 7 #define O_RXWATERMARKS__Rx9DataWatermark 16 #define W_RXWATERMARKS__Rx9DataWatermark 7 #define O_RXWATERMARKS__Rx10DataWatermark 8 #define W_RXWATERMARKS__Rx10DataWatermark 7 #define O_RXWATERMARKS__Rx11DataWatermark 0 #define W_RXWATERMARKS__Rx11DataWatermark 7 #define R_RXWATERMARKS4 0x248 #define O_RXWATERMARKS__Rx12DataWatermark 24 #define W_RXWATERMARKS__Rx12DataWatermark 7 #define O_RXWATERMARKS__Rx13DataWatermark 16 #define W_RXWATERMARKS__Rx13DataWatermark 7 #define O_RXWATERMARKS__Rx14DataWatermark 8 #define W_RXWATERMARKS__Rx14DataWatermark 7 #define O_RXWATERMARKS__Rx15DataWatermark 0 #define W_RXWATERMARKS__Rx15DataWatermark 7 #define R_FREEWATERMARKS 0x249 #define O_FREEWATERMARKS__FreeOutWatermark 16 #define W_FREEWATERMARKS__FreeOutWatermark 16 #define O_FREEWATERMARKS__JumFrWatermark 8 #define W_FREEWATERMARKS__JumFrWatermark 7 #define O_FREEWATERMARKS__RegFrWatermark 0 #define W_FREEWATERMARKS__RegFrWatermark 7 #define R_EGRESSFIFOCARVINGSLOTS 0x24a #define CTRL_RES0 0 #define CTRL_RES1 1 #define CTRL_REG_FREE 2 #define CTRL_JUMBO_FREE 3 #define CTRL_CONT 4 #define CTRL_EOP 5 #define CTRL_START 6 #define CTRL_SNGL 7 #define CTRL_B0_NOT_EOP 0 #define CTRL_B0_EOP 1 #define R_ROUND_ROBIN_TABLE 0 #define R_PDE_CLASS_0 0x300 #define R_PDE_CLASS_1 0x302 #define R_PDE_CLASS_2 0x304 #define R_PDE_CLASS_3 0x306 #define R_MSG_TX_THRESHOLD 0x308 #define R_GMAC_JFR0_BUCKET_SIZE 0x320 #define R_GMAC_RFR0_BUCKET_SIZE 0x321 #define R_GMAC_TX0_BUCKET_SIZE 0x322 #define R_GMAC_TX1_BUCKET_SIZE 0x323 #define R_GMAC_TX2_BUCKET_SIZE 0x324 #define R_GMAC_TX3_BUCKET_SIZE 0x325 #define R_GMAC_JFR1_BUCKET_SIZE 0x326 #define R_GMAC_RFR1_BUCKET_SIZE 0x327 #define R_XGS_TX0_BUCKET_SIZE 0x320 #define R_XGS_TX1_BUCKET_SIZE 0x321 #define R_XGS_TX2_BUCKET_SIZE 0x322 #define R_XGS_TX3_BUCKET_SIZE 0x323 #define R_XGS_TX4_BUCKET_SIZE 
0x324 #define R_XGS_TX5_BUCKET_SIZE 0x325 #define R_XGS_TX6_BUCKET_SIZE 0x326 #define R_XGS_TX7_BUCKET_SIZE 0x327 #define R_XGS_TX8_BUCKET_SIZE 0x328 #define R_XGS_TX9_BUCKET_SIZE 0x329 #define R_XGS_TX10_BUCKET_SIZE 0x32A #define R_XGS_TX11_BUCKET_SIZE 0x32B #define R_XGS_TX12_BUCKET_SIZE 0x32C #define R_XGS_TX13_BUCKET_SIZE 0x32D #define R_XGS_TX14_BUCKET_SIZE 0x32E #define R_XGS_TX15_BUCKET_SIZE 0x32F #define R_XGS_JFR_BUCKET_SIZE 0x330 #define R_XGS_RFR_BUCKET_SIZE 0x331 #define R_CC_CPU0_0 0x380 #define R_CC_CPU1_0 0x388 #define R_CC_CPU2_0 0x390 #define R_CC_CPU3_0 0x398 #define R_CC_CPU4_0 0x3a0 #define R_CC_CPU5_0 0x3a8 #define R_CC_CPU6_0 0x3b0 #define R_CC_CPU7_0 0x3b8 #define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \ XLR_IO_GMAC_0_OFFSET) /* Constants used for configuring the devices */ #define RGE_TX_THRESHOLD 1024 #define RGE_TX_Q_SIZE 1024 #define MAC_B2B_IPG 88 #define NLGE_PREPAD_LEN 32 /* frame sizes need to be cacheline aligned */ #define MAX_FRAME_SIZE (1536 + NLGE_PREPAD_LEN) #define MAX_FRAME_SIZE_JUMBO 9216 #define RGE_TX_THRESHOLD_BYTES ETHER_MAX_LEN #define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES #define MAC_PREPAD 0 #define BYTE_OFFSET 2 #define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE + BYTE_OFFSET + \ MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES) #define MAC_CRC_LEN 4 #define MAX_NUM_MSGRNG_STN_CC 128 #define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn + headroom */ #define MAC_FRIN_TO_BE_SENT_THRESHOLD 16 #define MAX_NUM_DESC_SPILL 1024 #define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2) #define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2) #define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2) #define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2) #define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2) #define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2) #define XLR_MAX_CORE 8 #define XLR_MAX_NLNA 3 #define XLR_MAX_MACS 8 #define XLR_MAX_TX_FRAGS 14 #define MAX_P2D_DESC_PER_PORT 512 #define PHY_STATUS_RETRIES 25000 /* Structs representing hardware 
data structures */ struct size_1_desc { uint64_t entry0; }; struct size_2_desc { uint64_t entry0; uint64_t entry1; }; struct size_3_desc { uint64_t entry0; uint64_t entry1; uint64_t entry2; }; struct size_4_desc { uint64_t entry0; uint64_t entry1; uint64_t entry2; uint64_t entry3; }; struct fr_desc { struct size_1_desc d1; }; union rx_tx_desc { struct size_2_desc d2; /* struct size_3_desc d3; */ /* struct size_4_desc d4; */ }; extern unsigned char xlr_base_mac_addr[]; /* Driver data structures and enums */ typedef enum { xlr_mac_speed_10, xlr_mac_speed_100, xlr_mac_speed_1000, xlr_mac_speed_rsvd } xlr_mac_speed_t; typedef enum { xlr_mac_duplex_auto, xlr_mac_duplex_half, xlr_mac_duplex_full } xlr_mac_duplex_t; typedef enum { xlr_mac_link_down, xlr_mac_link_up, } xlr_mac_link_t; typedef enum { xlr_mac_fc_auto, xlr_mac_fc_disabled, xlr_mac_fc_frame, xlr_mac_fc_collision, xlr_mac_fc_carrier } xlr_mac_fc_t; enum { SGMII_SPEED_10 = 0x00000000, SGMII_SPEED_100 = 0x02000000, SGMII_SPEED_1000 = 0x04000000, }; struct nlge_softc; /* * A data-structure to hold a set of related ports. The "sense" in which they * are related is defined by the user of this data-structure. * * One example: a set of ports that are controlled thru a single MDIO line. */ struct nlge_port_set { struct nlge_softc **port_vec; uint32_t vec_sz; }; /* * nlna_softc has Network Accelerator (NA) attributes that are necessary to * configure the h/w registers of this block. All the commmon configuration * for a set of GMAC ports controlled by an NA is done from here. 
*/ struct nlna_softc { device_t nlna_dev; uint32_t num_ports; int na_type; int mac_type; xlr_reg_t *base; struct fr_desc *frin_spill; struct fr_desc *frout_spill; union rx_tx_desc *class_0_spill; union rx_tx_desc *class_1_spill; union rx_tx_desc *class_2_spill; union rx_tx_desc *class_3_spill; uint32_t rfrbucket; uint32_t station_id; struct nlge_softc *child_sc[XLR_MAX_MACS]; /* * Set of ports controlled/configured by the MII line * of this network accelerator. */ struct nlge_port_set mdio_set; struct nlge_softc *mdio_sc[XLR_MAX_MACS]; }; struct nlge_softc { struct ifnet *nlge_if; /* should be first member - cf. mii.c:miibus_attach() */ struct mii_data nlge_mii; struct nlge_port_set *mdio_pset; device_t nlge_dev; device_t mii_bus; xlr_reg_t *base; xlr_reg_t *mii_base; xlr_reg_t *pcs_addr; xlr_reg_t *serdes_addr; int port_type; int if_flags; xlr_mac_speed_t speed; xlr_mac_duplex_t duplex; xlr_mac_link_t link; xlr_mac_fc_t flow_ctrl; uint32_t id; uint32_t instance; uint32_t phy_addr; uint32_t tx_bucket_id; uint8_t dev_addr[ETHER_ADDR_LEN]; struct mtx sc_lock; }; struct nlge_tx_desc { uint64_t frag[XLR_MAX_TX_FRAGS + 2]; }; #define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT *\ sizeof(struct p2d_tx_desc)) #define NLGE_WRITE(base, off, val) xlr_write_reg(base, off, val) #define NLGE_READ(base, off) xlr_read_reg(base, off) #define NLGE_UPDATE(base, off, val, mask) \ do { \ uint32_t rd_val, wrt_val; \ rd_val = NLGE_READ(base, off); \ wrt_val = (rd_val & ~mask) | (val & mask); \ NLGE_WRITE(base, off, wrt_val); \ } while (0) #define NLGE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->sc_lock, _name, MTX_NETWORK_LOCK, MTX_DEF) #define NLGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_lock) #define NLGE_LOCK(_sc) mtx_lock(&(_sc)->sc_lock) #define NLGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock) #define NLGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED)
diegows/wanproxy
network/uinet/sys/mips/rmi/dev/nlge/if_nlge.h
C
bsd-2-clause
68,836
using System;
using System.Collections.Generic;
using System.Reflection;
using UnityEngine;

namespace SomaSim {

    /// <summary>
    /// Editor-only unit test runner. On Start it discovers every type tagged
    /// with [TestClass] in the current AppDomain, instantiates it, invokes each
    /// of its [TestMethod] methods, and logs failures plus total timing to the
    /// Unity console.
    /// </summary>
    public class UnitTestRunner : MonoBehaviour {

#if UNITY_EDITOR
        public void Start () {
            System.Diagnostics.Stopwatch timer = new System.Diagnostics.Stopwatch();
            timer.Start();
            int count = RunUnitTests();
            timer.Stop();
            Debug.Log("Unit tests: " + count + " tests ran in " + timer.Elapsed.TotalSeconds + " seconds");
        }

        // Instantiates each discovered test class (via its public parameterless
        // constructor) and runs its test methods; returns the total number of
        // test methods executed.
        private int RunUnitTests () {
            int sum = 0;
            foreach (Type testClass in GetTestClasses()) {
                sum += RunTestMethods(Activator.CreateInstance(testClass));
            }
            return sum;
        }

        // Runs every [TestMethod] on the given instance.
        //
        // BUG FIX: MethodInfo.Invoke wraps any exception thrown inside the
        // invoked method in a TargetInvocationException, so the original
        // `catch (UnitTestException e)` clause could never match and every
        // assertion failure was reported as a generic "UNIT TEST ERROR".
        // We now catch TargetInvocationException and unwrap InnerException to
        // distinguish test failures from unexpected errors.
        private int RunTestMethods (object testInstance) {
            int sum = 0;
            foreach (MethodInfo method in testInstance.GetType().GetMethods()) {
                if (method.GetCustomAttributes(typeof(TestMethod), true).Length == 0) {
                    continue;
                }
                sum++;
                try {
                    method.Invoke(testInstance, null);
                } catch (TargetInvocationException tie) {
                    // The test body itself threw; the real exception is InnerException.
                    UnitTestException failure = tie.InnerException as UnitTestException;
                    if (failure != null) {
                        Debug.LogError("UNIT TEST FAILURE in " + method.ToString() + "\n" + failure.Message + "\n" + failure.StackTrace);
                    } else {
                        Debug.LogError("UNIT TEST ERROR in " + method.ToString() + "\n" + tie.InnerException);
                    }
                } catch (Exception e) {
                    // The invocation itself failed (e.g. bad argument count) —
                    // not a failure raised by the test body.
                    Debug.LogError("UNIT TEST ERROR in " + method.ToString() + "\n" + e);
                }
            }
            return sum;
        }

        // Lazily yields every type in every loaded assembly that is marked
        // with the [TestClass] attribute (inherited attributes included).
        private static IEnumerable<Type> GetTestClasses () {
            foreach (Assembly assembly in AppDomain.CurrentDomain.GetAssemblies()) {
                foreach (Type type in assembly.GetTypes()) {
                    if (type.GetCustomAttributes(typeof(TestClass), true).Length > 0) {
                        yield return type;
                    }
                }
            }
        }
#endif
    }
}
rzubek/UnityGameTools
Source/Testing/UnitTestRunner.cs
C#
bsd-2-clause
1,964
# Homebrew formula for the W3C eot-utils package (eotfast / eotinfo):
# command-line tools that convert OTF/TTF fonts to the legacy Embedded
# OpenType (EOT) format and inspect existing EOT files.
class EotUtils < Formula
  desc "Tools to convert fonts from OTF/TTF to EOT format"
  homepage "https://www.w3.org/Tools/eot-utils/"
  url "https://www.w3.org/Tools/eot-utils/eot-utilities-1.1.tar.gz"
  sha256 "4eed49dac7052e4147deaddbe025c7dfb404fc847d9fe71e1c42eba5620e6431"
  license "W3C"

  # Prebuilt bottles; checksums are per-OS/arch and must match the CI builds.
  bottle do
    cellar :any_skip_relocation
    sha256 "8d2e463b47a858921b972403f2aa79c6fe80318973fbe5e3f272dc0e1b6dc5b0" => :catalina
    sha256 "9e3a062c4d2e5345703442a1428f51bcc1554d07a94f6e540d8a631c2ba2633d" => :mojave
    sha256 "56f30e1b637149b8b34d003ff4c43865175950eb25d528e0cda69dd4e9261b06" => :high_sierra
    sha256 "320909b9801c96b10491dca13de7c793dae8b0d0864839c6b7a65cbaa1e8e036" => :sierra
    sha256 "b2a4e0f385fa861baf54ac3c483f5599bc96994b3797fe00430653f1a5c28ba4" => :el_capitan
    sha256 "3276e755d84fda54851733b693e56922ddb597f1ac4f14792f4221ce794832da" => :yosemite
    sha256 "d22988bd2c4ba4bb945a80d997fb06532579a09a3bc0c8be86c832f7bbc57a42" => :mavericks
    sha256 "b5b6c465872f4fc93b590a8b06db5b964d8d8549d78ac79ab13918dee2245efd" => :x86_64_linux
  end

  # Sample .eot font fixture used only by the `test do` block below.
  resource "eot" do
    url "https://github.com/RoelN/font-face-render-check/raw/98f0adda9cfe44fe97f6d538aa893a37905a7add/dev/pixelambacht-dash.eot"
    sha256 "23d6fbe778abe8fe51cfc5ea22f8e061b4c8d32b096ef4a252ba6f2f00406c91"
  end

  # Standard autotools build: configure into the keg prefix, then install.
  def install
    system "./configure", "--prefix=#{prefix}"
    system "make", "install"
  end

  # Smoke test: stage the sample EOT file and run eotinfo against it;
  # a nonzero exit status fails the test.
  test do
    resource("eot").stage do
      system "#{bin}/eotinfo", "pixelambacht-dash.eot"
    end
  end
end
rwhogg/homebrew-core
Formula/eot-utils.rb
Ruby
bsd-2-clause
1,522
# Fish-like fast/unobtrusive autosuggestions for zsh. # https://github.com/zsh-users/zsh-autosuggestions # v0.4.3 # Copyright (c) 2013 Thiago de Arruda # Copyright (c) 2016-2018 Eric Freese # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
#--------------------------------------------------------------------# # Setup # #--------------------------------------------------------------------# # Precmd hooks for initializing the library and starting pty's autoload -Uz add-zsh-hook # Asynchronous suggestions are generated in a pty autoload zsh/zpty #--------------------------------------------------------------------# # Global Configuration Variables # #--------------------------------------------------------------------# # Color to use when highlighting suggestion # Uses format of `region_highlight` # More info: http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html#Zle-Widgets ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=8' # Prefix to use when saving original versions of bound widgets ZSH_AUTOSUGGEST_ORIGINAL_WIDGET_PREFIX=autosuggest-orig- ZSH_AUTOSUGGEST_STRATEGY=default # Widgets that clear the suggestion ZSH_AUTOSUGGEST_CLEAR_WIDGETS=( history-search-forward history-search-backward history-beginning-search-forward history-beginning-search-backward history-substring-search-up history-substring-search-down up-line-or-beginning-search down-line-or-beginning-search up-line-or-history down-line-or-history accept-line ) # Widgets that accept the entire suggestion ZSH_AUTOSUGGEST_ACCEPT_WIDGETS=( forward-char end-of-line vi-forward-char vi-end-of-line vi-add-eol ) # Widgets that accept the entire suggestion and execute it ZSH_AUTOSUGGEST_EXECUTE_WIDGETS=( ) # Widgets that accept the suggestion as far as the cursor moves ZSH_AUTOSUGGEST_PARTIAL_ACCEPT_WIDGETS=( forward-word emacs-forward-word vi-forward-word vi-forward-word-end vi-forward-blank-word vi-forward-blank-word-end vi-find-next-char vi-find-next-char-skip ) # Widgets that should be ignored (globbing supported but must be escaped) ZSH_AUTOSUGGEST_IGNORE_WIDGETS=( orig-\* beep run-help set-local-history which-command yank yank-pop ) # Max size of buffer to trigger autosuggestion. Leave undefined for no upper bound. 
ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE= # Pty name for calculating autosuggestions asynchronously ZSH_AUTOSUGGEST_ASYNC_PTY_NAME=zsh_autosuggest_pty #--------------------------------------------------------------------# # Utility Functions # #--------------------------------------------------------------------# _zsh_autosuggest_escape_command() { setopt localoptions EXTENDED_GLOB # Escape special chars in the string (requires EXTENDED_GLOB) echo -E "${1//(#m)[\"\'\\()\[\]|*?~]/\\$MATCH}" } #--------------------------------------------------------------------# # Feature Detection # #--------------------------------------------------------------------# _zsh_autosuggest_feature_detect_zpty_returns_fd() { typeset -g _ZSH_AUTOSUGGEST_ZPTY_RETURNS_FD typeset -h REPLY zpty zsh_autosuggest_feature_detect '{ zshexit() { kill -KILL $$; sleep 1 } }' if (( REPLY )); then _ZSH_AUTOSUGGEST_ZPTY_RETURNS_FD=1 else _ZSH_AUTOSUGGEST_ZPTY_RETURNS_FD=0 fi zpty -d zsh_autosuggest_feature_detect } #--------------------------------------------------------------------# # Widget Helpers # #--------------------------------------------------------------------# _zsh_autosuggest_incr_bind_count() { if ((${+_ZSH_AUTOSUGGEST_BIND_COUNTS[$1]})); then ((_ZSH_AUTOSUGGEST_BIND_COUNTS[$1]++)) else _ZSH_AUTOSUGGEST_BIND_COUNTS[$1]=1 fi typeset -gi bind_count=$_ZSH_AUTOSUGGEST_BIND_COUNTS[$1] } _zsh_autosuggest_get_bind_count() { if ((${+_ZSH_AUTOSUGGEST_BIND_COUNTS[$1]})); then typeset -gi bind_count=$_ZSH_AUTOSUGGEST_BIND_COUNTS[$1] else typeset -gi bind_count=0 fi } # Bind a single widget to an autosuggest widget, saving a reference to the original widget _zsh_autosuggest_bind_widget() { typeset -gA _ZSH_AUTOSUGGEST_BIND_COUNTS local widget=$1 local autosuggest_action=$2 local prefix=$ZSH_AUTOSUGGEST_ORIGINAL_WIDGET_PREFIX local -i bind_count # Save a reference to the original widget case $widgets[$widget] in # Already bound user:_zsh_autosuggest_(bound|orig)_*);; # User-defined widget user:*) 
_zsh_autosuggest_incr_bind_count $widget zle -N $prefix${bind_count}-$widget ${widgets[$widget]#*:} ;; # Built-in widget builtin) _zsh_autosuggest_incr_bind_count $widget eval "_zsh_autosuggest_orig_${(q)widget}() { zle .${(q)widget} }" zle -N $prefix${bind_count}-$widget _zsh_autosuggest_orig_$widget ;; # Completion widget completion:*) _zsh_autosuggest_incr_bind_count $widget eval "zle -C $prefix${bind_count}-${(q)widget} ${${(s.:.)widgets[$widget]}[2,3]}" ;; esac _zsh_autosuggest_get_bind_count $widget # Pass the original widget's name explicitly into the autosuggest # function. Use this passed in widget name to call the original # widget instead of relying on the $WIDGET variable being set # correctly. $WIDGET cannot be trusted because other plugins call # zle without the `-w` flag (e.g. `zle self-insert` instead of # `zle self-insert -w`). eval "_zsh_autosuggest_bound_${bind_count}_${(q)widget}() { _zsh_autosuggest_widget_$autosuggest_action $prefix$bind_count-${(q)widget} \$@ }" # Create the bound widget zle -N $widget _zsh_autosuggest_bound_${bind_count}_$widget } # Map all configured widgets to the right autosuggest widgets _zsh_autosuggest_bind_widgets() { local widget local ignore_widgets ignore_widgets=( .\* _\* zle-\* autosuggest-\* $ZSH_AUTOSUGGEST_ORIGINAL_WIDGET_PREFIX\* $ZSH_AUTOSUGGEST_IGNORE_WIDGETS ) # Find every widget we might want to bind and bind it appropriately for widget in ${${(f)"$(builtin zle -la)"}:#${(j:|:)~ignore_widgets}}; do if [[ -n ${ZSH_AUTOSUGGEST_CLEAR_WIDGETS[(r)$widget]} ]]; then _zsh_autosuggest_bind_widget $widget clear elif [[ -n ${ZSH_AUTOSUGGEST_ACCEPT_WIDGETS[(r)$widget]} ]]; then _zsh_autosuggest_bind_widget $widget accept elif [[ -n ${ZSH_AUTOSUGGEST_EXECUTE_WIDGETS[(r)$widget]} ]]; then _zsh_autosuggest_bind_widget $widget execute elif [[ -n ${ZSH_AUTOSUGGEST_PARTIAL_ACCEPT_WIDGETS[(r)$widget]} ]]; then _zsh_autosuggest_bind_widget $widget partial_accept else # Assume any unspecified widget might modify the buffer 
_zsh_autosuggest_bind_widget $widget modify fi done } # Given the name of an original widget and args, invoke it, if it exists _zsh_autosuggest_invoke_original_widget() { # Do nothing unless called with at least one arg (( $# )) || return 0 local original_widget_name="$1" shift if (( ${+widgets[$original_widget_name]} )); then zle $original_widget_name -- $@ fi } #--------------------------------------------------------------------# # Highlighting # #--------------------------------------------------------------------# # If there was a highlight, remove it _zsh_autosuggest_highlight_reset() { typeset -g _ZSH_AUTOSUGGEST_LAST_HIGHLIGHT if [[ -n "$_ZSH_AUTOSUGGEST_LAST_HIGHLIGHT" ]]; then region_highlight=("${(@)region_highlight:#$_ZSH_AUTOSUGGEST_LAST_HIGHLIGHT}") unset _ZSH_AUTOSUGGEST_LAST_HIGHLIGHT fi } # If there's a suggestion, highlight it _zsh_autosuggest_highlight_apply() { typeset -g _ZSH_AUTOSUGGEST_LAST_HIGHLIGHT if (( $#POSTDISPLAY )); then typeset -g _ZSH_AUTOSUGGEST_LAST_HIGHLIGHT="$#BUFFER $(($#BUFFER + $#POSTDISPLAY)) $ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE" region_highlight+=("$_ZSH_AUTOSUGGEST_LAST_HIGHLIGHT") else unset _ZSH_AUTOSUGGEST_LAST_HIGHLIGHT fi } #--------------------------------------------------------------------# # Autosuggest Widget Implementations # #--------------------------------------------------------------------# # Disable suggestions _zsh_autosuggest_disable() { typeset -g _ZSH_AUTOSUGGEST_DISABLED _zsh_autosuggest_clear } # Enable suggestions _zsh_autosuggest_enable() { unset _ZSH_AUTOSUGGEST_DISABLED if (( $#BUFFER )); then _zsh_autosuggest_fetch fi } # Toggle suggestions (enable/disable) _zsh_autosuggest_toggle() { if [[ -n "${_ZSH_AUTOSUGGEST_DISABLED+x}" ]]; then _zsh_autosuggest_enable else _zsh_autosuggest_disable fi } # Clear the suggestion _zsh_autosuggest_clear() { # Remove the suggestion unset POSTDISPLAY _zsh_autosuggest_invoke_original_widget $@ } # Modify the buffer and get a new suggestion _zsh_autosuggest_modify() { 
local -i retval # Only available in zsh >= 5.4 local -i KEYS_QUEUED_COUNT # Save the contents of the buffer/postdisplay local orig_buffer="$BUFFER" local orig_postdisplay="$POSTDISPLAY" # Clear suggestion while waiting for next one unset POSTDISPLAY # Original widget may modify the buffer _zsh_autosuggest_invoke_original_widget $@ retval=$? # Don't fetch a new suggestion if there's more input to be read immediately if (( $PENDING > 0 )) || (( $KEYS_QUEUED_COUNT > 0 )); then POSTDISPLAY="$orig_postdisplay" return $retval fi # Optimize if manually typing in the suggestion if (( $#BUFFER > $#orig_buffer )); then local added=${BUFFER#$orig_buffer} # If the string added matches the beginning of the postdisplay if [[ "$added" = "${orig_postdisplay:0:$#added}" ]]; then POSTDISPLAY="${orig_postdisplay:$#added}" return $retval fi fi # Don't fetch a new suggestion if the buffer hasn't changed if [[ "$BUFFER" = "$orig_buffer" ]]; then POSTDISPLAY="$orig_postdisplay" return $retval fi # Bail out if suggestions are disabled if [[ -n "${_ZSH_AUTOSUGGEST_DISABLED+x}" ]]; then return $? 
fi # Get a new suggestion if the buffer is not empty after modification if (( $#BUFFER > 0 )); then if [[ -z "$ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE" ]] || (( $#BUFFER <= $ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE )); then _zsh_autosuggest_fetch fi fi return $retval } # Fetch a new suggestion based on what's currently in the buffer _zsh_autosuggest_fetch() { if zpty -t "$ZSH_AUTOSUGGEST_ASYNC_PTY_NAME" &>/dev/null; then _zsh_autosuggest_async_request "$BUFFER" else local suggestion _zsh_autosuggest_strategy_$ZSH_AUTOSUGGEST_STRATEGY "$BUFFER" _zsh_autosuggest_suggest "$suggestion" fi } # Offer a suggestion _zsh_autosuggest_suggest() { local suggestion="$1" if [[ -n "$suggestion" ]] && (( $#BUFFER )); then POSTDISPLAY="${suggestion#$BUFFER}" else unset POSTDISPLAY fi } # Accept the entire suggestion _zsh_autosuggest_accept() { local -i max_cursor_pos=$#BUFFER # When vicmd keymap is active, the cursor can't move all the way # to the end of the buffer if [[ "$KEYMAP" = "vicmd" ]]; then max_cursor_pos=$((max_cursor_pos - 1)) fi # Only accept if the cursor is at the end of the buffer if [[ $CURSOR = $max_cursor_pos ]]; then # Add the suggestion to the buffer BUFFER="$BUFFER$POSTDISPLAY" # Remove the suggestion unset POSTDISPLAY # Move the cursor to the end of the buffer CURSOR=${#BUFFER} fi _zsh_autosuggest_invoke_original_widget $@ } # Accept the entire suggestion and execute it _zsh_autosuggest_execute() { # Add the suggestion to the buffer BUFFER="$BUFFER$POSTDISPLAY" # Remove the suggestion unset POSTDISPLAY # Call the original `accept-line` to handle syntax highlighting or # other potential custom behavior _zsh_autosuggest_invoke_original_widget "accept-line" } # Partially accept the suggestion _zsh_autosuggest_partial_accept() { local -i retval cursor_loc # Save the contents of the buffer so we can restore later if needed local original_buffer="$BUFFER" # Temporarily accept the suggestion. 
BUFFER="$BUFFER$POSTDISPLAY" # Original widget moves the cursor _zsh_autosuggest_invoke_original_widget $@ retval=$? # Normalize cursor location across vi/emacs modes cursor_loc=$CURSOR if [[ "$KEYMAP" = "vicmd" ]]; then cursor_loc=$((cursor_loc + 1)) fi # If we've moved past the end of the original buffer if (( $cursor_loc > $#original_buffer )); then # Set POSTDISPLAY to text right of the cursor POSTDISPLAY="${BUFFER[$(($cursor_loc + 1)),$#BUFFER]}" # Clip the buffer at the cursor BUFFER="${BUFFER[1,$cursor_loc]}" else # Restore the original buffer BUFFER="$original_buffer" fi return $retval } for action in clear modify fetch suggest accept partial_accept execute enable disable toggle; do eval "_zsh_autosuggest_widget_$action() { local -i retval _zsh_autosuggest_highlight_reset _zsh_autosuggest_$action \$@ retval=\$? _zsh_autosuggest_highlight_apply zle -R return \$retval }" done zle -N autosuggest-fetch _zsh_autosuggest_widget_fetch zle -N autosuggest-suggest _zsh_autosuggest_widget_suggest zle -N autosuggest-accept _zsh_autosuggest_widget_accept zle -N autosuggest-clear _zsh_autosuggest_widget_clear zle -N autosuggest-execute _zsh_autosuggest_widget_execute zle -N autosuggest-enable _zsh_autosuggest_widget_enable zle -N autosuggest-disable _zsh_autosuggest_widget_disable zle -N autosuggest-toggle _zsh_autosuggest_widget_toggle #--------------------------------------------------------------------# # Default Suggestion Strategy # #--------------------------------------------------------------------# # Suggests the most recent history item that matches the given # prefix. # _zsh_autosuggest_strategy_default() { # Reset options to defaults and enable LOCAL_OPTIONS emulate -L zsh # Enable globbing flags so that we can use (#m) setopt EXTENDED_GLOB # Escape backslashes and all of the glob operators so we can use # this string as a pattern to search the $history associative array. 
# - (#m) globbing flag enables setting references for match data # TODO: Use (b) flag when we can drop support for zsh older than v5.0.8 local prefix="${1//(#m)[\\*?[\]<>()|^~#]/\\$MATCH}" # Get the history items that match # - (r) subscript flag makes the pattern match on values typeset -g suggestion="${history[(r)${prefix}*]}" } #--------------------------------------------------------------------# # Match Previous Command Suggestion Strategy # #--------------------------------------------------------------------# # Suggests the most recent history item that matches the given # prefix and whose preceding history item also matches the most # recently executed command. # # For example, suppose your history has the following entries: # - pwd # - ls foo # - ls bar # - pwd # # Given the history list above, when you type 'ls', the suggestion # will be 'ls foo' rather than 'ls bar' because your most recently # executed command (pwd) was previously followed by 'ls foo'. # # Note that this strategy won't work as expected with ZSH options that don't # preserve the history order such as `HIST_IGNORE_ALL_DUPS` or # `HIST_EXPIRE_DUPS_FIRST`. 
_zsh_autosuggest_strategy_match_prev_cmd() { # Reset options to defaults and enable LOCAL_OPTIONS emulate -L zsh # Enable globbing flags so that we can use (#m) setopt EXTENDED_GLOB # TODO: Use (b) flag when we can drop support for zsh older than v5.0.8 local prefix="${1//(#m)[\\*?[\]<>()|^~#]/\\$MATCH}" # Get all history event numbers that correspond to history # entries that match pattern $prefix* local history_match_keys history_match_keys=(${(k)history[(R)$prefix*]}) # By default we use the first history number (most recent history entry) local histkey="${history_match_keys[1]}" # Get the previously executed command local prev_cmd="$(_zsh_autosuggest_escape_command "${history[$((HISTCMD-1))]}")" # Iterate up to the first 200 history event numbers that match $prefix for key in "${(@)history_match_keys[1,200]}"; do # Stop if we ran out of history [[ $key -gt 1 ]] || break # See if the history entry preceding the suggestion matches the # previous command, and use it if it does if [[ "${history[$((key - 1))]}" == "$prev_cmd" ]]; then histkey="$key" break fi done # Give back the matched history entry typeset -g suggestion="$history[$histkey]" } #--------------------------------------------------------------------# # Async # #--------------------------------------------------------------------# # Zpty process is spawned running this function _zsh_autosuggest_async_server() { emulate -R zsh # There is a bug in zpty module (fixed in zsh/master) by which a # zpty that exits will kill all zpty processes that were forked # before it. Here we set up a zsh exit hook to SIGKILL the zpty # process immediately, before it has a chance to kill any other # zpty processes. 
zshexit() { kill -KILL $$ sleep 1 # Block for long enough for the signal to come through } # Output only newlines (not carriage return + newline) stty -onlcr # Silence any error messages exec 2>/dev/null local last_pid while IFS='' read -r -d $'\0' query; do # Kill last bg process kill -KILL $last_pid &>/dev/null # Run suggestion search in the background ( local suggestion _zsh_autosuggest_strategy_$ZSH_AUTOSUGGEST_STRATEGY "$query" echo -n -E "$suggestion"$'\0' ) & last_pid=$! done } _zsh_autosuggest_async_request() { # Write the query to the zpty process to fetch a suggestion zpty -w -n $ZSH_AUTOSUGGEST_ASYNC_PTY_NAME "${1}"$'\0' } # Called when new data is ready to be read from the pty # First arg will be fd ready for reading # Second arg will be passed in case of error _zsh_autosuggest_async_response() { setopt LOCAL_OPTIONS EXTENDED_GLOB local suggestion zpty -rt $ZSH_AUTOSUGGEST_ASYNC_PTY_NAME suggestion '*'$'\0' 2>/dev/null zle autosuggest-suggest -- "${suggestion%%$'\0'##}" } _zsh_autosuggest_async_pty_create() { # With newer versions of zsh, REPLY stores the fd to read from typeset -h REPLY # If we won't get a fd back from zpty, try to guess it if (( ! $_ZSH_AUTOSUGGEST_ZPTY_RETURNS_FD )); then integer -l zptyfd exec {zptyfd}>&1 # Open a new file descriptor (above 10). exec {zptyfd}>&- # Close it so it's free to be used by zpty. 
fi # Fork a zpty process running the server function zpty -b $ZSH_AUTOSUGGEST_ASYNC_PTY_NAME _zsh_autosuggest_async_server # Store the fd so we can remove the handler later if (( REPLY )); then _ZSH_AUTOSUGGEST_PTY_FD=$REPLY else _ZSH_AUTOSUGGEST_PTY_FD=$zptyfd fi # Set up input handler from the zpty zle -F $_ZSH_AUTOSUGGEST_PTY_FD _zsh_autosuggest_async_response } _zsh_autosuggest_async_pty_destroy() { # Remove the input handler zle -F $_ZSH_AUTOSUGGEST_PTY_FD &>/dev/null # Destroy the zpty zpty -d $ZSH_AUTOSUGGEST_ASYNC_PTY_NAME &>/dev/null } _zsh_autosuggest_async_pty_recreate() { _zsh_autosuggest_async_pty_destroy _zsh_autosuggest_async_pty_create } _zsh_autosuggest_async_start() { typeset -g _ZSH_AUTOSUGGEST_PTY_FD _zsh_autosuggest_feature_detect_zpty_returns_fd _zsh_autosuggest_async_pty_recreate # We recreate the pty to get a fresh list of history events add-zsh-hook precmd _zsh_autosuggest_async_pty_recreate } #--------------------------------------------------------------------# # Start # #--------------------------------------------------------------------# # Start the autosuggestion widgets _zsh_autosuggest_start() { add-zsh-hook -d precmd _zsh_autosuggest_start _zsh_autosuggest_bind_widgets # Re-bind widgets on every precmd to ensure we wrap other wrappers. # Specifically, highlighting breaks if our widgets are wrapped by # zsh-syntax-highlighting widgets. This also allows modifications # to the widget list variables to take effect on the next precmd. add-zsh-hook precmd _zsh_autosuggest_bind_widgets if [[ -n "${ZSH_AUTOSUGGEST_USE_ASYNC+x}" ]]; then _zsh_autosuggest_async_start fi } # Start the autosuggestion widgets on the next precmd add-zsh-hook precmd _zsh_autosuggest_start
VerKnowSys/ServeD-OS
shell/zsh-autosuggestions/zsh-autosuggestions.zsh
Shell
bsd-2-clause
21,199
#ifndef _ACMIVIEW_H_
#define _ACMIVIEW_H_

// ACMI tape viewer driver: owns the loaded tape, the renderer/viewpoint,
// camera state, and the entity/list-box UI mappings.
// NOTE(review): summary inferred from the declarations below -- confirm
// against the implementation (acmiview/acmvwinl sources).

#include "f4thread.h"
#include "f4vu.h"
#include "AcmiCam.h"
#include "Graphics\Include\Tex.h"

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Camera mode identifiers (presumably values for _cameraState /
// SelectCamera -- TODO confirm; the gaps in numbering suggest additional
// modes are defined elsewhere).
#define EXTERNAL 0
#define CHASE 1
#define SATELLITE 8
#define REPLAY 9
#define FREE 10
#define STARTPOS 15

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Forward declarations (full definitions are not needed by this header).
class SimBaseClass;
class RViewPoint;
class RenderOTW;
class ImageBuffer;
class Render2D;
class DrawableObject;
class SimObjectType;
class C_Window;
class ACMITape;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Generic doubly-linked list node carrying two opaque payload pointers.
typedef struct DBLIST {
	void * node; /* pointer to node data */
	void * user; /* pointer to user data */
	struct DBLIST * next; /* next list node */
	struct DBLIST * prev; /* prev list node */
} DBLIST;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// Associates an ACMI entity with its UI list-box entry and menu item.
typedef struct {
	int listboxId;
	int menuId;
	char name[40]; // display name (fixed 40-char buffer)
} ACMIEntityUIMap;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

class ACMIView {
public:

	// Constructors.
	ACMIView();

	// Destructor.
	~ACMIView();

	// Access.
	ACMITape *Tape();
	RViewPoint* Viewpoint();
	void ToggleScreenShot();

	// An array of sim entity pointers.
	ACMIEntityUIMap *_entityUIMappings;

	// display toggles
	void ToggleLabel(int doIDTags);
	void ToggleHeading(int val);
	void ToggleAirSpeed(int val);
	void ToggleAltitude(int val);
	void ToggleTurnRate(int val);
	void ToggleTurnRadius(int val);
	void ToggleWireFrame(int val);
	void TogglePoles(int val);
	void ToggleLockLines(int val);
	void Togglelockrange(int val);//me123

	// Graphics/render-loop lifecycle.
	void InitGraphics(C_Window *win);
	int ExitGraphics();
	void Exec();
	void Draw();

	void GetObjectName(SimBaseClass* theObject, char *tmpStr); // BING - 4/15/98
	void SetObjectName(SimBaseClass* theObject, char *tmpStr);

	// UI vector helpers (NOTE(review): exact semantics not visible here --
	// confirm against the implementation).
	void InitUIVector();
	void SetUIVector(Tpoint *tVect);
	void VectorTranslate(Tpoint *tVector);
	void VectorToVectorTranslation(Tpoint *tVector, Tpoint *offSetV);

	// List box functions.
	char *SetListBoxID(int objectNum, long listID);
	long ListBoxID(int objectNum, long filter);

	// Camera selection.
	void IncrementCameraObject(int inc);
	void SetCameraObject(int theObject);
	int CameraObject();
	void IncrementTrackingObject(int inc);
	void SetTrackingObject(int theObject);
	int TrackingObject();

	// More camera selection.
	void SelectCamera(long camSel);
	void SwitchCameraObject(long cameraObject);
	void SwitchTrackingObject(long cameraObject);

	// Load and unload a tape
	BOOL LoadTape(char *fname, BOOL reload);
	void UnloadTape(BOOL reload);
	BOOL TapeHasLoaded( void ) { return _tapeHasLoaded; };

	// panner/camera control functions
	void SetPannerXYZ( float x, float y, float z );
	void SetPannerAzEl( float az, float el );
	void ResetPanner( void );
	void UpdateViewPosRot( void );
	void ToggleTracking( void ) { _tracking ^= 1; };

public:
	int _initialGraphicsLoad;

	int IsFinished() { return _drawingFinished; };

	char _fileName[MAX_PATH];
	int _cameraState;
	RViewPoint *_viewPoint;
	RenderOTW *_renderer;
	Texture wireTexture;
	HWND _win;

	// currentCam is the object we're attached to
	// currentEntityCam is the object we're tracking
	int _currentCam;
	int _currentEntityCam;

	float _objectScale;

	// Render-loop / load-state flags (int-as-bool style used throughout).
	int _drawing;
	int _drawingFinished;
	int _isReady;
	int _tapeHasLoaded;
	int _doWeather;
	int _takeScreenShot;

	SimBaseClass *_platform;
	ACMITape *_tape;

	// camera view controls
	float _pannerX;
	float _pannerY;
	float _pannerZ;
	float _pannerAz;
	float _pannerEl;
	float _chaseX;
	float _chaseY;
	float _chaseZ;
	BOOL _tracking;
	float _camYaw;
	float _camPitch;
	float _camRoll;
	float _camRange;
	int _doWireFrame;
	int _doLockLine;

	// view position and rotation of camera
	Trotation _camRot;
	Tpoint _camPos;
	Tpoint _camWorldPos;

	// Initialize, used by constructor and destructor.
	void Init();

	// Setup functions. Allocate and initialize data.
	void SetupEntityUIMappings();

	// Misc functions.
	void DrawIDTags();
	void ShowVersionString();

	// Other random functions.
	void StopGraphicsLoop();
	void InsertObjectIntoDrawList(SimBaseClass*);

	// Take a screen shot.
	void TakeScreenShot();
};

// Inline member implementations are textually included here (legacy idiom).
#include "acmvwinl.cpp"

// The single global viewer instance (defined in the implementation file).
extern ACMIView ACMIDriver;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

#endif // _ACMIVIEW_H_
markbb1957/FFalconSource
src/acmi/src/include/acmiview.h
C
bsd-2-clause
5,585
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <title>Ignite Tools: Presenter Class Reference</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/search.js"></script> <link href="doxygen.css" rel="stylesheet" type="text/css"/> </head> <body onload='searchBox.OnSelectItem(0);'> <!-- Generated by Doxygen 1.7.4 --> <script type="text/javascript"><!-- var searchBox = new SearchBox("searchBox", "search",false,'Search'); --></script> <div id="top"> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectlogo"><img alt="Logo" src="ASLogo.png"/></td> <td style="padding-left: 0.5em;"> <div id="projectname">Ignite Tools</div> </td> </tr> </tbody> </table> </div> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li class="current"><a href="annotated.html"><span>Classes</span></a></li> <li><a href="files.html"><span>Files</span></a></li> <li id="searchli"> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a 
href="annotated.html"><span>Class&#160;List</span></a></li> <li><a href="classes.html"><span>Class&#160;Index</span></a></li> <li><a href="hierarchy.html"><span>Class&#160;Hierarchy</span></a></li> <li><a href="functions.html"><span>Class&#160;Members</span></a></li> </ul> </div> </div> <div class="header"> <div class="summary"> <a href="#properties">Properties</a> </div> <div class="headertitle"> <div class="title">Presenter Class Reference</div> </div> </div> <div class="contents"> <!-- doxytag: class="Presenter" --> <p>The class corresponding to the <a class="el" href="interface_presenter.html" title="The class corresponding to the Presenter Core Data entity.">Presenter</a> Core Data entity. <a href="interface_presenter.html#details">More...</a></p> <p><code>#import &lt;<a class="el" href="_presenter_8h_source.html">Presenter.h</a>&gt;</code></p> <p><a href="class_presenter-members.html">List of all members.</a></p> <table class="memberdecls"> <tr><td colspan="2"><h2><a name="properties"></a> Properties</h2></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a0cdc66450fcd4dd873be53d4f230c736"></a><!-- doxytag: member="Presenter::name" ref="a0cdc66450fcd4dd873be53d4f230c736" args="" --> NSString *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a0cdc66450fcd4dd873be53d4f230c736">name</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Name of the presenter. <br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a1832f6e66606fa1ec474961ee74646ed"></a><!-- doxytag: member="Presenter::portrait" ref="a1832f6e66606fa1ec474961ee74646ed" args="" --> NSData *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a1832f6e66606fa1ec474961ee74646ed">portrait</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">An avatar (probably from Twitter) of the presenter. 
<br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ae40f1b402228d35d9f157c922528c249"></a><!-- doxytag: member="Presenter::twitterID" ref="ae40f1b402228d35d9f157c922528c249" args="" --> NSString *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#ae40f1b402228d35d9f157c922528c249">twitterID</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">The twitter ID (including @) of the presenter. <br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a7e6b8d2fd5d346edb8ad6bd5f42c23f3"></a><!-- doxytag: member="Presenter::websiteURL" ref="a7e6b8d2fd5d346edb8ad6bd5f42c23f3" args="" --> NSString *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a7e6b8d2fd5d346edb8ad6bd5f42c23f3">websiteURL</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">URL of presenter's website. <br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a4d04f57843085e5eb53f9847a0c9c055"></a><!-- doxytag: member="Presenter::events" ref="a4d04f57843085e5eb53f9847a0c9c055" args="" --> NSSet *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a4d04f57843085e5eb53f9847a0c9c055">events</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Set of <a class="el" href="interface_event.html" title="The class that matches the Core Data Event entity.">Event</a> objects at which the presenter has spoken. 
<br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a7c46225fa83cefa5ece43f01460d094c"></a><!-- doxytag: member="Presenter::sparks" ref="a7c46225fa83cefa5ece43f01460d094c" args="" --> NSSet *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a7c46225fa83cefa5ece43f01460d094c">sparks</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">Set of <a class="el" href="interface_spark.html" title="The class that corresponds to the Spark Core Data entity.">Spark</a> objects which the presenter has given. <br/></td></tr> <tr><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a3b66b12456c3be92b7ffe8fe62b4c9c5"></a><!-- doxytag: member="Presenter::uppercaseFirstLetter" ref="a3b66b12456c3be92b7ffe8fe62b4c9c5" args="" --> NSString *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="interface_presenter.html#a3b66b12456c3be92b7ffe8fe62b4c9c5">uppercaseFirstLetter</a></td></tr> <tr><td class="mdescLeft">&#160;</td><td class="mdescRight">The first letter of spark. <br/></td></tr> </table> <hr/><a name="details" id="details"></a><h2>Detailed Description</h2> <div class="textblock"><p>The class corresponding to the <a class="el" href="interface_presenter.html" title="The class corresponding to the Presenter Core Data entity.">Presenter</a> Core Data entity. 
</p> </div><hr/>The documentation for this class was generated from the following files:<ul> <li>Shared/Model Classes/<a class="el" href="_presenter_8h_source.html">Presenter.h</a></li> <li>Shared/Model Classes/Presenter.m</li> </ul> </div> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> <a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Properties</a></div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <hr class="footer"/><address class="footer"><small>Generated on Fri Mar 16 2012 11:55:44 for Ignite Tools by&#160; <a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.7.4 </small></address> </body> </html>
jflasher/IgniteTools
DoxygenDocs.docset/html/interface_presenter.html
HTML
bsd-2-clause
8,769
# $FreeBSD: soc2013/dpl/head/lib/librt/Makefile 219027 2011-02-23 21:17:38Z dim $ LIB=rt SHLIB_MAJOR= 1 CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR} .ifndef NO_THREAD_STACK_UNWIND CFLAGS+=-fexceptions .endif CFLAGS+=-Winline -Wall DPADD= ${LIBPTHREAD} LDADD= -lpthread WARNS?= 2 SRCS+= aio.c mq.c sigev_thread.c timer.c PRECIOUSLIB= VERSION_MAP= ${.CURDIR}/Version.map .include <bsd.lib.mk>
dplbsd/zcaplib
head/lib/librt/Makefile
Makefile
bsd-2-clause
402