code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.text.movingwindow;
import org.apache.commons.lang3.StringUtils;
import org.deeplearning4j.text.tokenization.tokenizer.Tokenizer;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.common.base.Preconditions;
import org.nd4j.common.collection.MultiDimensionalMap;
import org.nd4j.common.primitives.Pair;
import java.util.ArrayList;
import java.util.List;
/**
* Context Label Retriever
* @author Adam Gibson
*/
public class ContextLabelRetriever {

    /** Matches an opening label token such as {@code <PERSON>} or {@code <12>}. */
    private static final String BEGIN_LABEL = "<([A-Za-z]+|\\d+)>";
    /** Matches a closing label token such as {@code </PERSON>} or {@code </12>}. */
    private static final String END_LABEL = "</([A-Za-z]+|\\d+)>";
    /**
     * Label assigned to token runs that fall outside any begin/end marker pair.
     * (The original code used "NONE" in one branch and "none" in another; it is
     * now consistently {@code "NONE"}.)
     */
    private static final String NO_LABEL = "NONE";

    private ContextLabelRetriever() {}

    /**
     * Returns a stripped sentence with the indices of words
     * with certain kinds of labels.
     *
     * @param sentence the sentence to process, containing inline markers such as
     *                 {@code <LABEL> ... </LABEL>}
     * @param tokenizerFactory factory used to tokenize the sentence
     * @return a pair of the post-processed sentence with labels stripped, and a
     *         map from (begin, end) character offsets in the stripped sentence to
     *         the label covering that span ({@code "NONE"} for unlabeled spans)
     * @throws IllegalStateException if an end label appears without a matching
     *         begin label, or the begin/end label names do not match
     */
    public static Pair<String, MultiDimensionalMap<Integer, Integer, String>> stringWithLabels(String sentence,
                    TokenizerFactory tokenizerFactory) {
        MultiDimensionalMap<Integer, Integer, String> map = MultiDimensionalMap.newHashBackedMap();
        Tokenizer t = tokenizerFactory.create(sentence);
        List<String> currTokens = new ArrayList<>();
        String currLabel = null;
        String endLabel = null;
        List<Pair<String, List<String>>> tokensWithSameLabel = new ArrayList<>();
        while (t.hasMoreTokens()) {
            String token = t.nextToken();
            if (token.matches(BEGIN_LABEL)) {
                if (endLabel != null)
                    throw new IllegalStateException(
                                    "Tried parsing sentence; found an end label when the begin label has not been cleared");
                currLabel = token;
                //tokens seen before this label carry no label; flush them as NO_LABEL
                if (!currTokens.isEmpty()) {
                    tokensWithSameLabel.add(new Pair<String, List<String>>(NO_LABEL, new ArrayList<>(currTokens)));
                    currTokens.clear();
                }
            } else if (token.matches(END_LABEL)) {
                if (currLabel == null)
                    throw new IllegalStateException("Found an ending label with no matching begin label");
                endLabel = token;
            } else {
                currTokens.add(token);
            }
            //a begin/end pair has been completed; record the labeled token run
            if (currLabel != null && endLabel != null) {
                currLabel = currLabel.replaceAll("[<>/]", "");
                endLabel = endLabel.replaceAll("[<>/]", "");
                Preconditions.checkState(!currLabel.isEmpty(), "Current label is empty!");
                Preconditions.checkState(!endLabel.isEmpty(), "End label is empty!");
                Preconditions.checkState(currLabel.equals(endLabel),
                                "Current label begin and end did not match for the parse. Was: %s ending with %s",
                                currLabel, endLabel);
                tokensWithSameLabel.add(new Pair<String, List<String>>(currLabel, new ArrayList<>(currTokens)));
                currTokens.clear();
                //clear out the labels for the next pair
                currLabel = null;
                endLabel = null;
            }
        }
        //trailing tokens after the last label also carry no label
        if (!currTokens.isEmpty()) {
            tokensWithSameLabel.add(new Pair<String, List<String>>(NO_LABEL, new ArrayList<>(currTokens)));
            currTokens.clear();
        }
        //now join the output, recording each segment's span as it is appended.
        //NOTE: the previous implementation used indexOf(joinedSentence), which
        //returns the FIRST occurrence and records wrong spans when the same
        //text appears more than once; the offset is now taken from the builder
        //length at append time.
        StringBuilder strippedSentence = new StringBuilder();
        for (Pair<String, List<String>> tokensWithLabel : tokensWithSameLabel) {
            String joinedSentence = StringUtils.join(tokensWithLabel.getSecond(), " ");
            //spaces between separate parts of the sentence
            if (strippedSentence.length() > 0)
                strippedSentence.append(" ");
            int begin = strippedSentence.length();
            strippedSentence.append(joinedSentence);
            int end = begin + joinedSentence.length();
            map.put(begin, end, tokensWithLabel.getFirst());
        }
        return new Pair<>(strippedSentence.toString(), map);
    }
}
| deeplearning4j/deeplearning4j | deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/text/movingwindow/ContextLabelRetriever.java | Java | apache-2.0 | 5,049 |
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pdata
import (
"encoding/base64"
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
otlpcommon "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1"
)
// TestAttributeValue verifies that each typed constructor (String/Int/Double/
// Bool/Empty/Bytes) and each setter on an existing value reports the matching
// Type() and returns the stored value through the corresponding accessor.
func TestAttributeValue(t *testing.T) {
	v := NewAttributeValueString("abc")
	assert.EqualValues(t, AttributeValueTypeString, v.Type())
	assert.EqualValues(t, "abc", v.StringVal())
	v = NewAttributeValueInt(123)
	assert.EqualValues(t, AttributeValueTypeInt, v.Type())
	assert.EqualValues(t, 123, v.IntVal())
	v = NewAttributeValueDouble(3.4)
	assert.EqualValues(t, AttributeValueTypeDouble, v.Type())
	assert.EqualValues(t, 3.4, v.DoubleVal())
	v = NewAttributeValueBool(true)
	assert.EqualValues(t, AttributeValueTypeBool, v.Type())
	assert.True(t, v.BoolVal())
	v = NewAttributeValueEmpty()
	assert.EqualValues(t, AttributeValueTypeEmpty, v.Type())
	// Setters on an existing value must switch the type accordingly.
	v.SetStringVal("abc")
	assert.EqualValues(t, AttributeValueTypeString, v.Type())
	assert.EqualValues(t, "abc", v.StringVal())
	v.SetIntVal(123)
	assert.EqualValues(t, AttributeValueTypeInt, v.Type())
	assert.EqualValues(t, 123, v.IntVal())
	v.SetDoubleVal(3.4)
	assert.EqualValues(t, AttributeValueTypeDouble, v.Type())
	assert.EqualValues(t, 3.4, v.DoubleVal())
	v.SetBoolVal(true)
	assert.EqualValues(t, AttributeValueTypeBool, v.Type())
	assert.True(t, v.BoolVal())
	bytesValue := []byte{1, 2, 3, 4}
	v = NewAttributeValueBytes(bytesValue)
	assert.EqualValues(t, AttributeValueTypeBytes, v.Type())
	assert.EqualValues(t, bytesValue, v.BytesVal())
}

// TestAttributeValueType checks the String() representation of every
// AttributeValueType enum constant.
func TestAttributeValueType(t *testing.T) {
	assert.EqualValues(t, "EMPTY", AttributeValueTypeEmpty.String())
	assert.EqualValues(t, "STRING", AttributeValueTypeString.String())
	assert.EqualValues(t, "BOOL", AttributeValueTypeBool.String())
	assert.EqualValues(t, "INT", AttributeValueTypeInt.String())
	assert.EqualValues(t, "DOUBLE", AttributeValueTypeDouble.String())
	assert.EqualValues(t, "MAP", AttributeValueTypeMap.String())
	assert.EqualValues(t, "ARRAY", AttributeValueTypeArray.String())
	assert.EqualValues(t, "BYTES", AttributeValueTypeBytes.String())
}
// TestAttributeValueMap exercises map-typed values: insertion, that Insert of a
// child map performs a deep copy (so later mutation of either side does not
// affect the other), removal, and the nil-KvlistValue fallback of MapVal().
func TestAttributeValueMap(t *testing.T) {
	m1 := NewAttributeValueMap()
	assert.Equal(t, AttributeValueTypeMap, m1.Type())
	assert.Equal(t, NewAttributeMap(), m1.MapVal())
	assert.Equal(t, 0, m1.MapVal().Len())
	m1.MapVal().InsertDouble("double_key", 123)
	assert.Equal(t, 1, m1.MapVal().Len())
	got, exists := m1.MapVal().Get("double_key")
	assert.True(t, exists)
	assert.Equal(t, NewAttributeValueDouble(123), got)
	// Create a second map.
	m2 := NewAttributeValueMap()
	assert.Equal(t, 0, m2.MapVal().Len())
	// Modify the source map that was inserted.
	m2.MapVal().UpsertString("key_in_child", "somestr")
	assert.Equal(t, 1, m2.MapVal().Len())
	got, exists = m2.MapVal().Get("key_in_child")
	assert.True(t, exists)
	assert.Equal(t, NewAttributeValueString("somestr"), got)
	// Insert the second map as a child. This should perform a deep copy.
	m1.MapVal().Insert("child_map", m2)
	assert.EqualValues(t, 2, m1.MapVal().Len())
	got, exists = m1.MapVal().Get("double_key")
	assert.True(t, exists)
	assert.Equal(t, NewAttributeValueDouble(123), got)
	got, exists = m1.MapVal().Get("child_map")
	assert.True(t, exists)
	assert.Equal(t, m2, got)
	// Modify the source map m2 that was inserted into m1.
	m2.MapVal().UpdateString("key_in_child", "somestr2")
	assert.EqualValues(t, 1, m2.MapVal().Len())
	got, exists = m2.MapVal().Get("key_in_child")
	assert.True(t, exists)
	assert.Equal(t, NewAttributeValueString("somestr2"), got)
	// The child map inside m1 should not be modified.
	childMap, childMapExists := m1.MapVal().Get("child_map")
	require.True(t, childMapExists)
	got, exists = childMap.MapVal().Get("key_in_child")
	require.True(t, exists)
	assert.Equal(t, NewAttributeValueString("somestr"), got)
	// Now modify the inserted map (not the source)
	childMap.MapVal().UpdateString("key_in_child", "somestr3")
	assert.EqualValues(t, 1, childMap.MapVal().Len())
	got, exists = childMap.MapVal().Get("key_in_child")
	require.True(t, exists)
	assert.Equal(t, NewAttributeValueString("somestr3"), got)
	// The source child map should not be modified.
	got, exists = m2.MapVal().Get("key_in_child")
	require.True(t, exists)
	assert.Equal(t, NewAttributeValueString("somestr2"), got)
	removed := m1.MapVal().Remove("double_key")
	assert.True(t, removed)
	assert.EqualValues(t, 1, m1.MapVal().Len())
	_, exists = m1.MapVal().Get("double_key")
	assert.False(t, exists)
	removed = m1.MapVal().Remove("child_map")
	assert.True(t, removed)
	assert.EqualValues(t, 0, m1.MapVal().Len())
	_, exists = m1.MapVal().Get("child_map")
	assert.False(t, exists)
	// Test nil KvlistValue case for MapVal() func.
	orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: nil}}
	m1 = AttributeValue{orig: orig}
	assert.EqualValues(t, NewAttributeMap(), m1.MapVal())
}
// TestNilOrigSetAttributeValue checks that every setter works on a freshly
// created empty value (i.e. one whose underlying proto Value is still nil).
func TestNilOrigSetAttributeValue(t *testing.T) {
	av := NewAttributeValueEmpty()
	av.SetStringVal("abc")
	assert.EqualValues(t, "abc", av.StringVal())
	av = NewAttributeValueEmpty()
	av.SetIntVal(123)
	assert.EqualValues(t, 123, av.IntVal())
	av = NewAttributeValueEmpty()
	av.SetBoolVal(true)
	assert.True(t, av.BoolVal())
	av = NewAttributeValueEmpty()
	av.SetDoubleVal(1.23)
	assert.EqualValues(t, 1.23, av.DoubleVal())
	av = NewAttributeValueEmpty()
	av.SetBytesVal([]byte{1, 2, 3})
	assert.Equal(t, []byte{1, 2, 3}, av.BytesVal())
}

// TestAttributeValueEqual walks through Equal for every value type: empty,
// string, int, double, bool, bytes, array, and map, checking both the
// equal and not-equal direction (and symmetry across type mismatches).
func TestAttributeValueEqual(t *testing.T) {
	av1 := NewAttributeValueEmpty()
	av2 := NewAttributeValueEmpty()
	assert.True(t, av1.Equal(av2))
	av2 = NewAttributeValueString("abc")
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av1 = NewAttributeValueString("abc")
	assert.True(t, av1.Equal(av2))
	av2 = NewAttributeValueString("edf")
	assert.False(t, av1.Equal(av2))
	av2 = NewAttributeValueInt(123)
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av1 = NewAttributeValueInt(234)
	assert.False(t, av1.Equal(av2))
	av1 = NewAttributeValueInt(123)
	assert.True(t, av1.Equal(av2))
	av2 = NewAttributeValueDouble(123)
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av1 = NewAttributeValueDouble(234)
	assert.False(t, av1.Equal(av2))
	av1 = NewAttributeValueDouble(123)
	assert.True(t, av1.Equal(av2))
	av2 = NewAttributeValueBool(false)
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av1 = NewAttributeValueBool(true)
	assert.False(t, av1.Equal(av2))
	av1 = NewAttributeValueBool(false)
	assert.True(t, av1.Equal(av2))
	av2 = NewAttributeValueBytes([]byte{1, 2, 3})
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av1 = NewAttributeValueBytes([]byte{1, 2, 4})
	assert.False(t, av1.Equal(av2))
	av1 = NewAttributeValueBytes([]byte{1, 2, 3})
	assert.True(t, av1.Equal(av2))
	// Arrays: differ by element type, then by element value, then by length.
	av1 = NewAttributeValueArray()
	av1.SliceVal().AppendEmpty().SetIntVal(123)
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av2 = NewAttributeValueArray()
	av2.SliceVal().AppendEmpty().SetDoubleVal(123)
	assert.False(t, av1.Equal(av2))
	NewAttributeValueInt(123).CopyTo(av2.SliceVal().At(0))
	assert.True(t, av1.Equal(av2))
	av1.CopyTo(av2.SliceVal().AppendEmpty())
	assert.False(t, av1.Equal(av2))
	// Maps: equal when contents match, unequal after mutating one entry.
	av1 = NewAttributeValueMap()
	av1.MapVal().UpsertString("foo", "bar")
	assert.False(t, av1.Equal(av2))
	assert.False(t, av2.Equal(av1))
	av2 = NewAttributeValueMap()
	av2.MapVal().UpsertString("foo", "bar")
	assert.True(t, av1.Equal(av2))
	fooVal, ok := av2.MapVal().Get("foo")
	if !ok {
		assert.Fail(t, "expected to find value with key foo")
	}
	fooVal.SetStringVal("not-bar")
	assert.False(t, av1.Equal(av2))
}
// TestNilAttributeMap verifies every Insert*/Update*/Upsert*/Remove variant
// against a brand-new (empty) AttributeMap: inserts and upserts add the entry,
// updates on a missing key are no-ops, and Remove on a missing key returns
// false. Expected states come from the generateTest*AttributeMap fixtures.
func TestNilAttributeMap(t *testing.T) {
	assert.EqualValues(t, 0, NewAttributeMap().Len())
	val, exist := NewAttributeMap().Get("test_key")
	assert.False(t, exist)
	assert.EqualValues(t, AttributeValue{nil}, val)
	insertMap := NewAttributeMap()
	insertMap.Insert("k", NewAttributeValueString("v"))
	assert.EqualValues(t, generateTestAttributeMap(), insertMap)
	insertMapString := NewAttributeMap()
	insertMapString.InsertString("k", "v")
	assert.EqualValues(t, generateTestAttributeMap(), insertMapString)
	insertMapNull := NewAttributeMap()
	insertMapNull.InsertNull("k")
	assert.EqualValues(t, generateTestEmptyAttributeMap(), insertMapNull)
	insertMapInt := NewAttributeMap()
	insertMapInt.InsertInt("k", 123)
	assert.EqualValues(t, generateTestIntAttributeMap(), insertMapInt)
	insertMapDouble := NewAttributeMap()
	insertMapDouble.InsertDouble("k", 12.3)
	assert.EqualValues(t, generateTestDoubleAttributeMap(), insertMapDouble)
	insertMapBool := NewAttributeMap()
	insertMapBool.InsertBool("k", true)
	assert.EqualValues(t, generateTestBoolAttributeMap(), insertMapBool)
	insertMapBytes := NewAttributeMap()
	insertMapBytes.InsertBytes("k", []byte{1, 2, 3, 4, 5})
	assert.EqualValues(t, generateTestBytesAttributeMap(), insertMapBytes)
	// Update* on an empty map must not create the key.
	updateMap := NewAttributeMap()
	updateMap.Update("k", NewAttributeValueString("v"))
	assert.EqualValues(t, NewAttributeMap(), updateMap)
	updateMapString := NewAttributeMap()
	updateMapString.UpdateString("k", "v")
	assert.EqualValues(t, NewAttributeMap(), updateMapString)
	updateMapInt := NewAttributeMap()
	updateMapInt.UpdateInt("k", 123)
	assert.EqualValues(t, NewAttributeMap(), updateMapInt)
	updateMapDouble := NewAttributeMap()
	updateMapDouble.UpdateDouble("k", 12.3)
	assert.EqualValues(t, NewAttributeMap(), updateMapDouble)
	updateMapBool := NewAttributeMap()
	updateMapBool.UpdateBool("k", true)
	assert.EqualValues(t, NewAttributeMap(), updateMapBool)
	updateMapBytes := NewAttributeMap()
	updateMapBytes.UpdateBytes("k", []byte{1, 2, 3})
	assert.EqualValues(t, NewAttributeMap(), updateMapBytes)
	// Upsert* on an empty map behaves like an insert.
	upsertMap := NewAttributeMap()
	upsertMap.Upsert("k", NewAttributeValueString("v"))
	assert.EqualValues(t, generateTestAttributeMap(), upsertMap)
	upsertMapString := NewAttributeMap()
	upsertMapString.UpsertString("k", "v")
	assert.EqualValues(t, generateTestAttributeMap(), upsertMapString)
	upsertMapInt := NewAttributeMap()
	upsertMapInt.UpsertInt("k", 123)
	assert.EqualValues(t, generateTestIntAttributeMap(), upsertMapInt)
	upsertMapDouble := NewAttributeMap()
	upsertMapDouble.UpsertDouble("k", 12.3)
	assert.EqualValues(t, generateTestDoubleAttributeMap(), upsertMapDouble)
	upsertMapBool := NewAttributeMap()
	upsertMapBool.UpsertBool("k", true)
	assert.EqualValues(t, generateTestBoolAttributeMap(), upsertMapBool)
	upsertMapBytes := NewAttributeMap()
	upsertMapBytes.UpsertBytes("k", []byte{1, 2, 3, 4, 5})
	assert.EqualValues(t, generateTestBytesAttributeMap(), upsertMapBytes)
	removeMap := NewAttributeMap()
	assert.False(t, removeMap.Remove("k"))
	assert.EqualValues(t, NewAttributeMap(), removeMap)
	// Test Sort
	assert.EqualValues(t, NewAttributeMap(), NewAttributeMap().Sort())
}
// TestAttributeMapWithEmpty builds a map directly over a raw KeyValue slice
// containing an all-zero entry and a nil-Value entry, then runs the full
// Insert*/Update*/Upsert*/Remove cycle against it, finally checking that the
// pre-existing entries survived untouched. The nil Value must read back as an
// Empty-typed value with "" StringVal.
func TestAttributeMapWithEmpty(t *testing.T) {
	origWithNil := []otlpcommon.KeyValue{
		{},
		{
			Key:   "test_key",
			Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}},
		},
		{
			Key:   "test_key2",
			Value: otlpcommon.AnyValue{Value: nil},
		},
	}
	sm := AttributeMap{
		orig: &origWithNil,
	}
	val, exist := sm.Get("test_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "test_value", val.StringVal())
	val, exist = sm.Get("test_key2")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeEmpty, val.Type())
	assert.EqualValues(t, "", val.StringVal())
	// Insert* for every value type.
	sm.Insert("other_key", NewAttributeValueString("other_value"))
	val, exist = sm.Get("other_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "other_value", val.StringVal())
	sm.InsertString("other_key_string", "other_value")
	val, exist = sm.Get("other_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "other_value", val.StringVal())
	sm.InsertInt("other_key_int", 123)
	val, exist = sm.Get("other_key_int")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeInt, val.Type())
	assert.EqualValues(t, 123, val.IntVal())
	sm.InsertDouble("other_key_double", 1.23)
	val, exist = sm.Get("other_key_double")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeDouble, val.Type())
	assert.EqualValues(t, 1.23, val.DoubleVal())
	sm.InsertBool("other_key_bool", true)
	val, exist = sm.Get("other_key_bool")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBool, val.Type())
	assert.True(t, val.BoolVal())
	sm.InsertBytes("other_key_bytes", []byte{1, 2, 3})
	val, exist = sm.Get("other_key_bytes")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBytes, val.Type())
	assert.EqualValues(t, []byte{1, 2, 3}, val.BytesVal())
	// Update* on existing keys replaces the values.
	sm.Update("other_key", NewAttributeValueString("yet_another_value"))
	val, exist = sm.Get("other_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "yet_another_value", val.StringVal())
	sm.UpdateString("other_key_string", "yet_another_value")
	val, exist = sm.Get("other_key_string")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "yet_another_value", val.StringVal())
	sm.UpdateInt("other_key_int", 456)
	val, exist = sm.Get("other_key_int")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeInt, val.Type())
	assert.EqualValues(t, 456, val.IntVal())
	sm.UpdateDouble("other_key_double", 4.56)
	val, exist = sm.Get("other_key_double")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeDouble, val.Type())
	assert.EqualValues(t, 4.56, val.DoubleVal())
	sm.UpdateBool("other_key_bool", false)
	val, exist = sm.Get("other_key_bool")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBool, val.Type())
	assert.False(t, val.BoolVal())
	sm.UpdateBytes("other_key_bytes", []byte{4, 5, 6})
	val, exist = sm.Get("other_key_bytes")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBytes, val.Type())
	assert.EqualValues(t, []byte{4, 5, 6}, val.BytesVal())
	// Upsert* on existing keys overwrites...
	sm.Upsert("other_key", NewAttributeValueString("other_value"))
	val, exist = sm.Get("other_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "other_value", val.StringVal())
	sm.UpsertString("other_key_string", "other_value")
	val, exist = sm.Get("other_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "other_value", val.StringVal())
	sm.UpsertInt("other_key_int", 123)
	val, exist = sm.Get("other_key_int")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeInt, val.Type())
	assert.EqualValues(t, 123, val.IntVal())
	sm.UpsertDouble("other_key_double", 1.23)
	val, exist = sm.Get("other_key_double")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeDouble, val.Type())
	assert.EqualValues(t, 1.23, val.DoubleVal())
	sm.UpsertBool("other_key_bool", true)
	val, exist = sm.Get("other_key_bool")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBool, val.Type())
	assert.True(t, val.BoolVal())
	sm.UpsertBytes("other_key_bytes", []byte{7, 8, 9})
	val, exist = sm.Get("other_key_bytes")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBytes, val.Type())
	assert.EqualValues(t, []byte{7, 8, 9}, val.BytesVal())
	// ...and Upsert* on fresh keys inserts.
	sm.Upsert("yet_another_key", NewAttributeValueString("yet_another_value"))
	val, exist = sm.Get("yet_another_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "yet_another_value", val.StringVal())
	sm.UpsertString("yet_another_key_string", "yet_another_value")
	val, exist = sm.Get("yet_another_key_string")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "yet_another_value", val.StringVal())
	sm.UpsertInt("yet_another_key_int", 456)
	val, exist = sm.Get("yet_another_key_int")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeInt, val.Type())
	assert.EqualValues(t, 456, val.IntVal())
	sm.UpsertDouble("yet_another_key_double", 4.56)
	val, exist = sm.Get("yet_another_key_double")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeDouble, val.Type())
	assert.EqualValues(t, 4.56, val.DoubleVal())
	sm.UpsertBool("yet_another_key_bool", false)
	val, exist = sm.Get("yet_another_key_bool")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBool, val.Type())
	assert.False(t, val.BoolVal())
	sm.UpsertBytes("yet_another_key_bytes", []byte{1})
	val, exist = sm.Get("yet_another_key_bytes")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeBytes, val.Type())
	assert.EqualValues(t, []byte{1}, val.BytesVal())
	// Remove everything that was added; a second Remove returns false.
	assert.True(t, sm.Remove("other_key"))
	assert.True(t, sm.Remove("other_key_string"))
	assert.True(t, sm.Remove("other_key_int"))
	assert.True(t, sm.Remove("other_key_double"))
	assert.True(t, sm.Remove("other_key_bool"))
	assert.True(t, sm.Remove("other_key_bytes"))
	assert.True(t, sm.Remove("yet_another_key"))
	assert.True(t, sm.Remove("yet_another_key_string"))
	assert.True(t, sm.Remove("yet_another_key_int"))
	assert.True(t, sm.Remove("yet_another_key_double"))
	assert.True(t, sm.Remove("yet_another_key_bool"))
	assert.True(t, sm.Remove("yet_another_key_bytes"))
	assert.False(t, sm.Remove("other_key"))
	assert.False(t, sm.Remove("yet_another_key"))
	// Test that the initial key is still there.
	val, exist = sm.Get("test_key")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "test_value", val.StringVal())
	val, exist = sm.Get("test_key2")
	assert.True(t, exist)
	assert.EqualValues(t, AttributeValueTypeEmpty, val.Type())
	assert.EqualValues(t, "", val.StringVal())
	_, exist = sm.Get("test_key3")
	assert.False(t, exist)
	// Test Sort
	assert.EqualValues(t, AttributeMap{orig: &origWithNil}, sm.Sort())
}
// TestAttributeMapIterationNil ensures Range over an empty map never invokes
// the callback.
func TestAttributeMapIterationNil(t *testing.T) {
	NewAttributeMap().Range(func(k string, v AttributeValue) bool {
		// Fail if any element is returned
		t.Fail()
		return true
	})
}

// TestAttributeMap_Range checks that returning false from the Range callback
// stops iteration after one element, and that a full Range visits every entry
// exactly once (verified by deleting visited keys from the source map).
func TestAttributeMap_Range(t *testing.T) {
	rawMap := map[string]AttributeValue{
		"k_string": NewAttributeValueString("123"),
		"k_int":    NewAttributeValueInt(123),
		"k_double": NewAttributeValueDouble(1.23),
		"k_bool":   NewAttributeValueBool(true),
		"k_empty":  NewAttributeValueEmpty(),
		"k_bytes":  NewAttributeValueBytes([]byte{}),
	}
	am := NewAttributeMapFromMap(rawMap)
	assert.Equal(t, 6, am.Len())
	calls := 0
	am.Range(func(k string, v AttributeValue) bool {
		calls++
		return false
	})
	assert.Equal(t, 1, calls)
	am.Range(func(k string, v AttributeValue) bool {
		assert.True(t, v.Equal(rawMap[k]))
		delete(rawMap, k)
		return true
	})
	assert.EqualValues(t, 0, len(rawMap))
}

// TestAttributeMap_InitFromMap verifies NewAttributeMapFromMap on a nil map
// (yields an empty AttributeMap) and on a fully populated map, comparing the
// sorted result against the equivalent raw KeyValue slice.
func TestAttributeMap_InitFromMap(t *testing.T) {
	am := NewAttributeMapFromMap(map[string]AttributeValue(nil))
	assert.EqualValues(t, NewAttributeMap(), am)
	rawMap := map[string]AttributeValue{
		"k_string": NewAttributeValueString("123"),
		"k_int":    NewAttributeValueInt(123),
		"k_double": NewAttributeValueDouble(1.23),
		"k_bool":   NewAttributeValueBool(true),
		"k_null":   NewAttributeValueEmpty(),
		"k_bytes":  NewAttributeValueBytes([]byte{1, 2, 3}),
	}
	rawOrig := []otlpcommon.KeyValue{
		newAttributeKeyValueString("k_string", "123"),
		newAttributeKeyValueInt("k_int", 123),
		newAttributeKeyValueDouble("k_double", 1.23),
		newAttributeKeyValueBool("k_bool", true),
		newAttributeKeyValueNull("k_null"),
		newAttributeKeyValueBytes("k_bytes", []byte{1, 2, 3}),
	}
	am = NewAttributeMapFromMap(rawMap)
	assert.EqualValues(t, AttributeMap{orig: &rawOrig}.Sort(), am.Sort())
}
// TestAttributeValue_CopyTo covers CopyTo edge cases: a nil KvlistValue, a nil
// ArrayValue, and a completely empty AnyValue must each copy through as nil.
func TestAttributeValue_CopyTo(t *testing.T) {
	// Test nil KvlistValue case for MapVal() func.
	dest := NewAttributeValueEmpty()
	orig := &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: nil}}
	AttributeValue{orig: orig}.CopyTo(dest)
	assert.Nil(t, dest.orig.Value.(*otlpcommon.AnyValue_KvlistValue).KvlistValue)
	// Test nil ArrayValue case for SliceVal() func.
	dest = NewAttributeValueEmpty()
	orig = &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: nil}}
	AttributeValue{orig: orig}.CopyTo(dest)
	assert.Nil(t, dest.orig.Value.(*otlpcommon.AnyValue_ArrayValue).ArrayValue)
	// Test copy empty value.
	AttributeValue{orig: &otlpcommon.AnyValue{}}.CopyTo(dest)
	assert.Nil(t, dest.orig.Value)
}

// TestAttributeMap_CopyTo exercises AttributeMap.CopyTo into an empty
// destination, a larger destination, a same-size destination, and a
// destination holding an empty Value.
func TestAttributeMap_CopyTo(t *testing.T) {
	dest := NewAttributeMap()
	// Test CopyTo to empty
	NewAttributeMap().CopyTo(dest)
	assert.EqualValues(t, 0, dest.Len())
	// Test CopyTo larger slice
	generateTestAttributeMap().CopyTo(dest)
	assert.EqualValues(t, generateTestAttributeMap(), dest)
	// Test CopyTo same size slice
	generateTestAttributeMap().CopyTo(dest)
	assert.EqualValues(t, generateTestAttributeMap(), dest)
	// Test CopyTo with an empty Value in the destination
	(*dest.orig)[0].Value = otlpcommon.AnyValue{}
	generateTestAttributeMap().CopyTo(dest)
	assert.EqualValues(t, generateTestAttributeMap(), dest)
}

// TestAttributeValue_copyTo checks the unexported copyTo: copying an empty
// value over an int-typed destination clears the destination's Value.
func TestAttributeValue_copyTo(t *testing.T) {
	av := NewAttributeValueEmpty()
	destVal := otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{}}
	av.copyTo(&destVal)
	assert.EqualValues(t, nil, destVal.Value)
}

// TestAttributeMap_Update verifies that values returned by Get alias the
// underlying storage: mutating the returned AttributeValue (including one
// backed by a nil Value) is visible on a subsequent Get.
func TestAttributeMap_Update(t *testing.T) {
	origWithNil := []otlpcommon.KeyValue{
		{
			Key:   "test_key",
			Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}},
		},
		{
			Key:   "test_key2",
			Value: otlpcommon.AnyValue{Value: nil},
		},
	}
	sm := AttributeMap{
		orig: &origWithNil,
	}
	av, exists := sm.Get("test_key")
	assert.True(t, exists)
	assert.EqualValues(t, AttributeValueTypeString, av.Type())
	assert.EqualValues(t, "test_value", av.StringVal())
	av.SetIntVal(123)
	av2, exists := sm.Get("test_key")
	assert.True(t, exists)
	assert.EqualValues(t, AttributeValueTypeInt, av2.Type())
	assert.EqualValues(t, 123, av2.IntVal())
	av, exists = sm.Get("test_key2")
	assert.True(t, exists)
	assert.EqualValues(t, AttributeValueTypeEmpty, av.Type())
	assert.EqualValues(t, "", av.StringVal())
	av.SetIntVal(123)
	av2, exists = sm.Get("test_key2")
	assert.True(t, exists)
	assert.EqualValues(t, AttributeValueTypeInt, av2.Type())
	assert.EqualValues(t, 123, av2.IntVal())
}
// TestAttributeMap_EnsureCapacity_Zero: EnsureCapacity(0) leaves both length
// and capacity at zero.
func TestAttributeMap_EnsureCapacity_Zero(t *testing.T) {
	am := NewAttributeMap()
	am.EnsureCapacity(0)
	assert.Equal(t, 0, am.Len())
	assert.Equal(t, 0, cap(*am.orig))
}

// TestAttributeMap_EnsureCapacity: capacity grows to the requested size, never
// shrinks on a smaller request, and grows again on a larger one.
func TestAttributeMap_EnsureCapacity(t *testing.T) {
	am := NewAttributeMap()
	am.EnsureCapacity(5)
	assert.Equal(t, 0, am.Len())
	assert.Equal(t, 5, cap(*am.orig))
	am.EnsureCapacity(3)
	assert.Equal(t, 0, am.Len())
	assert.Equal(t, 5, cap(*am.orig))
	am.EnsureCapacity(8)
	assert.Equal(t, 0, am.Len())
	assert.Equal(t, 8, cap(*am.orig))
}

// TestAttributeMap_Clear: Clear resets the underlying slice to nil, both on an
// already-nil map and after capacity has been allocated.
func TestAttributeMap_Clear(t *testing.T) {
	am := NewAttributeMap()
	assert.Nil(t, *am.orig)
	am.Clear()
	assert.Nil(t, *am.orig)
	am.EnsureCapacity(5)
	assert.NotNil(t, *am.orig)
	am.Clear()
	assert.Nil(t, *am.orig)
}

// TestAttributeMap_RemoveIf deletes entries matched either by key or by value
// type and verifies only the matched entries are gone.
func TestAttributeMap_RemoveIf(t *testing.T) {
	rawMap := map[string]AttributeValue{
		"k_string": NewAttributeValueString("123"),
		"k_int":    NewAttributeValueInt(123),
		"k_double": NewAttributeValueDouble(1.23),
		"k_bool":   NewAttributeValueBool(true),
		"k_empty":  NewAttributeValueEmpty(),
		"k_bytes":  NewAttributeValueBytes([]byte{}),
	}
	am := NewAttributeMapFromMap(rawMap)
	assert.Equal(t, 6, am.Len())
	am.RemoveIf(func(key string, val AttributeValue) bool {
		return key == "k_int" || val.Type() == AttributeValueTypeBool
	})
	assert.Equal(t, 4, am.Len())
	_, exists := am.Get("k_string")
	assert.True(t, exists)
	_, exists = am.Get("k_bool")
	assert.False(t, exists)
	_, exists = am.Get("k_int")
	assert.False(t, exists)
}
// BenchmarkAttributeValue_CopyTo measures the unexported copyTo of an int
// value onto a string value's underlying AnyValue.
func BenchmarkAttributeValue_CopyTo(b *testing.B) {
	av := NewAttributeValueString("k")
	c := NewAttributeValueInt(123)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		c.copyTo(av.orig)
	}
	if av.IntVal() != 123 {
		b.Fail()
	}
}

// BenchmarkAttributeValue_SetIntVal measures SetIntVal on a value that starts
// out string-typed.
func BenchmarkAttributeValue_SetIntVal(b *testing.B) {
	av := NewAttributeValueString("k")
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		av.SetIntVal(int64(n))
	}
	if av.IntVal() != int64(b.N-1) {
		b.Fail()
	}
}

// BenchmarkAttributeMap_Range measures Range over a 20-element map built
// directly on a raw KeyValue slice.
func BenchmarkAttributeMap_Range(b *testing.B) {
	const numElements = 20
	rawOrig := make([]otlpcommon.KeyValue, numElements)
	for i := 0; i < numElements; i++ {
		rawOrig[i] = otlpcommon.KeyValue{
			Key:   "k" + strconv.Itoa(i),
			Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "v" + strconv.Itoa(i)}},
		}
	}
	am := AttributeMap{
		orig: &rawOrig,
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		numEls := 0
		am.Range(func(k string, v AttributeValue) bool {
			numEls++
			return true
		})
		if numEls != numElements {
			b.Fail()
		}
	}
}

// BenchmarkAttributeMap_RangeOverMap is the native-Go-map baseline for
// BenchmarkAttributeMap_Range.
func BenchmarkAttributeMap_RangeOverMap(b *testing.B) {
	const numElements = 20
	rawOrig := make(map[string]AttributeValue, numElements)
	for i := 0; i < numElements; i++ {
		key := "k" + strconv.Itoa(i)
		rawOrig[key] = NewAttributeValueString("v" + strconv.Itoa(i))
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		numEls := 0
		for _, v := range rawOrig {
			if v.orig == nil {
				continue
			}
			numEls++
		}
		if numEls != numElements {
			b.Fail()
		}
	}
}

// BenchmarkAttributeMap_Remove measures removing 50 of 100 keys one by one;
// map setup is excluded from the timed region via StopTimer/StartTimer.
func BenchmarkAttributeMap_Remove(b *testing.B) {
	b.StopTimer()
	// Remove all of the even keys
	keysToRemove := map[string]struct{}{}
	for j := 0; j < 50; j++ {
		keysToRemove[fmt.Sprintf("%d", j*2)] = struct{}{}
	}
	for i := 0; i < b.N; i++ {
		m := NewAttributeMap()
		for j := 0; j < 100; j++ {
			m.InsertString(fmt.Sprintf("%d", j), "string value")
		}
		b.StartTimer()
		for k := range keysToRemove {
			m.Remove(k)
		}
		b.StopTimer()
	}
}

// BenchmarkAttributeMap_RemoveIf measures the same workload as
// BenchmarkAttributeMap_Remove but using a single RemoveIf pass.
func BenchmarkAttributeMap_RemoveIf(b *testing.B) {
	b.StopTimer()
	// Remove all of the even keys
	keysToRemove := map[string]struct{}{}
	for j := 0; j < 50; j++ {
		keysToRemove[fmt.Sprintf("%d", j*2)] = struct{}{}
	}
	for i := 0; i < b.N; i++ {
		m := NewAttributeMap()
		for j := 0; j < 100; j++ {
			m.InsertString(fmt.Sprintf("%d", j), "string value")
		}
		b.StartTimer()
		m.RemoveIf(func(key string, _ AttributeValue) bool {
			_, remove := keysToRemove[key]
			return remove
		})
		b.StopTimer()
	}
}

// BenchmarkStringMap_RangeOverMap is the plain map[string]string baseline for
// the Range benchmarks above.
func BenchmarkStringMap_RangeOverMap(b *testing.B) {
	const numElements = 20
	rawOrig := make(map[string]string, numElements)
	for i := 0; i < numElements; i++ {
		key := "k" + strconv.Itoa(i)
		rawOrig[key] = "v" + strconv.Itoa(i)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		numEls := 0
		for _, v := range rawOrig {
			if v == "" {
				continue
			}
			numEls++
		}
		if numEls != numElements {
			b.Fail()
		}
	}
}
func fillTestAttributeValue(dest AttributeValue) {
dest.SetStringVal("v")
}
func generateTestAttributeValue() AttributeValue {
av := NewAttributeValueEmpty()
fillTestAttributeValue(av)
return av
}
func generateTestAttributeMap() AttributeMap {
am := NewAttributeMap()
fillTestAttributeMap(am)
return am
}
func fillTestAttributeMap(dest AttributeMap) {
NewAttributeMapFromMap(map[string]AttributeValue{
"k": NewAttributeValueString("v"),
}).CopyTo(dest)
}
func generateTestEmptyAttributeMap() AttributeMap {
return NewAttributeMapFromMap(map[string]AttributeValue{
"k": NewAttributeValueEmpty(),
})
}
func generateTestIntAttributeMap() AttributeMap {
return NewAttributeMapFromMap(map[string]AttributeValue{
"k": NewAttributeValueInt(123),
})
}
func generateTestDoubleAttributeMap() AttributeMap {
return NewAttributeMapFromMap(map[string]AttributeValue{
"k": NewAttributeValueDouble(12.3),
})
}
func generateTestBoolAttributeMap() AttributeMap {
return NewAttributeMapFromMap(map[string]AttributeValue{
"k": NewAttributeValueBool(true),
})
}
// generateTestBytesAttributeMap returns a map whose single entry "k" holds
// the byte sequence {1, 2, 3, 4, 5}.
func generateTestBytesAttributeMap() AttributeMap {
	attrs := map[string]AttributeValue{
		"k": NewAttributeValueBytes([]byte{1, 2, 3, 4, 5}),
	}
	return NewAttributeMapFromMap(attrs)
}
// TestAttributeValueArray exercises array-typed AttributeValues: creation,
// appending elements, copying one array in as a child of another, and the
// nil-backing fallback of SliceVal().
func TestAttributeValueArray(t *testing.T) {
	a1 := NewAttributeValueArray()
	assert.EqualValues(t, AttributeValueTypeArray, a1.Type())
	assert.EqualValues(t, NewAttributeValueSlice(), a1.SliceVal())
	assert.EqualValues(t, 0, a1.SliceVal().Len())
	a1.SliceVal().AppendEmpty().SetDoubleVal(123)
	assert.EqualValues(t, 1, a1.SliceVal().Len())
	assert.EqualValues(t, NewAttributeValueDouble(123), a1.SliceVal().At(0))
	// Create a second array.
	a2 := NewAttributeValueArray()
	assert.EqualValues(t, 0, a2.SliceVal().Len())
	a2.SliceVal().AppendEmpty().SetStringVal("somestr")
	assert.EqualValues(t, 1, a2.SliceVal().Len())
	assert.EqualValues(t, NewAttributeValueString("somestr"), a2.SliceVal().At(0))
	// Insert the second array as a child.
	a2.CopyTo(a1.SliceVal().AppendEmpty())
	assert.EqualValues(t, 2, a1.SliceVal().Len())
	assert.EqualValues(t, NewAttributeValueDouble(123), a1.SliceVal().At(0))
	assert.EqualValues(t, a2, a1.SliceVal().At(1))
	// Check that the array was correctly inserted.
	childArray := a1.SliceVal().At(1)
	assert.EqualValues(t, AttributeValueTypeArray, childArray.Type())
	assert.EqualValues(t, 1, childArray.SliceVal().Len())
	v := childArray.SliceVal().At(0)
	assert.EqualValues(t, AttributeValueTypeString, v.Type())
	assert.EqualValues(t, "somestr", v.StringVal())
	// Test nil values case for SliceVal() func: a nil ArrayValue must still
	// yield a usable (empty) slice rather than panic.
	a1 = AttributeValue{orig: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: nil}}}
	assert.EqualValues(t, NewAttributeValueSlice(), a1.SliceVal())
}
// TestAttributeSliceWithNilValues verifies that a slice backed by proto
// values containing an unset entry is readable (the unset entry reports the
// Empty type and "" string) and still accepts new appends afterwards.
func TestAttributeSliceWithNilValues(t *testing.T) {
	origWithNil := []otlpcommon.AnyValue{
		{},
		{Value: &otlpcommon.AnyValue_StringValue{StringValue: "test_value"}},
	}
	sm := AttributeValueSlice{
		orig: &origWithNil,
	}
	// The unset first element must behave as an empty value.
	val := sm.At(0)
	assert.EqualValues(t, AttributeValueTypeEmpty, val.Type())
	assert.EqualValues(t, "", val.StringVal())
	val = sm.At(1)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "test_value", val.StringVal())
	// Appending after construction from a raw backing slice must still work.
	sm.AppendEmpty().SetStringVal("other_value")
	val = sm.At(2)
	assert.EqualValues(t, AttributeValueTypeString, val.Type())
	assert.EqualValues(t, "other_value", val.StringVal())
}
// TestAsString checks the AsString rendering of every value kind: scalars
// rendered directly, maps/arrays rendered as JSON, empty values as "", and
// bytes as base64.
func TestAsString(t *testing.T) {
	tests := []struct {
		name     string
		input    AttributeValue
		expected string
	}{
		{
			name:     "string",
			input:    NewAttributeValueString("string value"),
			expected: "string value",
		},
		{
			name:     "int64",
			input:    NewAttributeValueInt(42),
			expected: "42",
		},
		{
			name:     "float64",
			input:    NewAttributeValueDouble(1.61803399),
			expected: "1.61803399",
		},
		{
			name:     "boolean",
			input:    NewAttributeValueBool(true),
			expected: "true",
		},
		{
			name:     "empty_map",
			input:    NewAttributeValueMap(),
			expected: "{}",
		},
		{
			name:     "simple_map",
			input:    simpleAttributeValueMap(),
			expected: "{\"arrKey\":[\"strOne\",\"strTwo\"],\"boolKey\":false,\"floatKey\":18.6,\"intKey\":7,\"mapKey\":{\"keyOne\":\"valOne\",\"keyTwo\":\"valTwo\"},\"nullKey\":null,\"strKey\":\"strVal\"}",
		},
		{
			name:     "empty_array",
			input:    NewAttributeValueArray(),
			expected: "[]",
		},
		{
			name:     "simple_array",
			input:    simpleAttributeValueArray(),
			expected: "[\"strVal\",7,18.6,false,null]",
		},
		{
			name:     "empty",
			input:    NewAttributeValueEmpty(),
			expected: "",
		},
		{
			name:     "bytes",
			input:    NewAttributeValueBytes([]byte("String bytes")),
			expected: base64.StdEncoding.EncodeToString([]byte("String bytes")),
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actual := test.input.AsString()
			assert.Equal(t, test.expected, actual)
		})
	}
}
// TestAsRaw checks that AsRaw converts an AttributeMap into plain Go types:
// bool, int64, float64, string, []byte, nested []interface{} for arrays,
// nested map[string]interface{} for maps, and nil for empty values.
func TestAsRaw(t *testing.T) {
	arr := NewAttributeValueArray()
	arr.SliceVal().AppendEmpty().SetBoolVal(false)
	arr.SliceVal().AppendEmpty().SetBytesVal([]byte("test"))
	arr.SliceVal().AppendEmpty().SetDoubleVal(12.9)
	arr.SliceVal().AppendEmpty().SetIntVal(91)
	arr.SliceVal().AppendEmpty().SetStringVal("another string")
	tests := []struct {
		name     string
		input    AttributeMap
		expected map[string]interface{}
	}{
		{
			name: "asraw",
			input: NewAttributeMapFromMap(
				map[string]AttributeValue{
					"array":  arr,
					"bool":   NewAttributeValueBool(true),
					"bytes":  NewAttributeValueBytes([]byte("bytes value")),
					"double": NewAttributeValueDouble(1.2),
					"empty":  NewAttributeValueEmpty(),
					"int":    NewAttributeValueInt(900),
					"map":    NewAttributeValueMap(),
					"string": NewAttributeValueString("string value"),
				},
			),
			expected: map[string]interface{}{
				"array":  []interface{}{false, []byte("test"), 12.9, int64(91), "another string"},
				"bool":   true,
				"bytes":  []byte("bytes value"),
				"double": 1.2,
				"empty":  interface{}(nil),
				"int":    int64(900),
				"map":    map[string]interface{}{},
				"string": "string value",
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actual := test.input.AsRaw()
			assert.Equal(t, test.expected, actual)
		})
	}
}
// simpleAttributeValueMap builds a map-typed AttributeValue containing
// string, int, double, bool and empty entries plus a nested map and a
// nested array.
func simpleAttributeValueMap() AttributeValue {
	av := NewAttributeValueMap()
	m := av.MapVal()
	m.UpsertString("strKey", "strVal")
	m.UpsertInt("intKey", 7)
	m.UpsertDouble("floatKey", 18.6)
	m.UpsertBool("boolKey", false)
	m.Upsert("nullKey", NewAttributeValueEmpty())
	m.Upsert("mapKey", constructTestAttributeSubmap())
	m.Upsert("arrKey", constructTestAttributeSubarray())
	return av
}
// simpleAttributeValueArray builds an array-typed AttributeValue holding a
// string, an int, a double, a bool and a trailing empty element.
func simpleAttributeValueArray() AttributeValue {
	av := NewAttributeValueArray()
	elems := av.SliceVal()
	elems.AppendEmpty().SetStringVal("strVal")
	elems.AppendEmpty().SetIntVal(7)
	elems.AppendEmpty().SetDoubleVal(18.6)
	elems.AppendEmpty().SetBoolVal(false)
	// Last element is left unset (renders as null).
	elems.AppendEmpty()
	return av
}
// constructTestAttributeSubmap builds the nested two-entry string map used
// inside simpleAttributeValueMap.
func constructTestAttributeSubmap() AttributeValue {
	sub := NewAttributeValueMap()
	m := sub.MapVal()
	m.UpsertString("keyOne", "valOne")
	m.UpsertString("keyTwo", "valTwo")
	return sub
}
// constructTestAttributeSubarray builds the nested two-element string array
// used inside simpleAttributeValueMap.
func constructTestAttributeSubarray() AttributeValue {
	sub := NewAttributeValueArray()
	elems := sub.SliceVal()
	elems.AppendEmpty().SetStringVal("strOne")
	elems.AppendEmpty().SetStringVal("strTwo")
	return sub
}
| open-telemetry/opentelemetry-collector | model/internal/pdata/common_test.go | GO | apache-2.0 | 34,929 |
This directory contains all of the immutable models that contain the data that powers Slate. They are built using [Immutable.js](https://facebook.github.io/immutable-js/). Here's what each of them does:
- [Block](#block)
- [Character](#character)
- [Data](#data)
- [Document](#document)
- [Inline](#inline)
- [Mark](#mark)
- [Node](#node)
- [Selection](#selection)
- [State](#state)
- [Text](#text)
- [Transform](#transform)
#### Block
Just like in the DOM, `Block` nodes are ones that contain other inline content. They can be split apart, and wrapped in other blocks, but they will always contain at least a single [`Text`](#text) node of inline content. They can also contain associated [`Data`](#data).
#### Character
The content of each [`Text`](#text) node is modeled as a `Character` list. Each character contains a single character string, and any associated [`Marks`](#mark) that are applied to it.
#### Data
`Data` is just a thin wrapper around [`Immutable.Map`](https://facebook.github.io/immutable-js/docs/#/Map), which allows for more easily creating maps without having to require [`Immutable`](https://facebook.github.io/immutable-js/) itself.
#### Document
The `Document` is where all of the content in the editor is stored. It is a recursively nested tree of [`Nodes`](#node), just like the DOM itself. Which can either be [`Block`](#block), [`Inline`](#inline), or [`Text`](#text) nodes.
#### Inline
Similar to [`Block`](#block) nodes, but containing inline content instead of block-level content. They too can be nested to any depth, but at the lowest level will always contain a single [`Text`](#text) node.
#### Mark
Marks are the pieces of "formatting" that can be applied to strings of text in the editor. Unlike [`Nodes`](#node), `Marks` are modeled as a flat set, such that each character can have multiple marks associated with it. This allows for cases where a link (ie. an inline node) can also have bold (ie. a mark) formatting attached to part of it.
#### Node
`Node` isn't actually a model that is exposed, but instead it's an interface full of convenience methods that [`Document`](#document), [`Block`](#block), [`Inline`](#inline) all implement.
#### Selection
The `Selection` keeps track of where the user's cursor is. It's modeled after the [DOM Selection API](https://developer.mozilla.org/en-US/docs/Web/API/Selection), using terms like "anchor", "focus" and "collapsed".
#### State
The `State` is the highest-level model. It is really just a convenient wrapper around a few other models: [`Document`](#document), [`Selection`](#selection), and a `History` which is not publicly exposed.
Since `State` has knowledge of both the [`Document`](#document) and the [`Selection`](#selection), it provides a handful of convenience methods for updating the both at the same time. For example, when inserting a new content fragment, it inserts the fragment and then moves the selection to the end of the newly inserted content.
The `State` is the object that lets you apply "transforms" that change the current document or selection. By having them all be applied through the top-level state, it can keep track of changes in the `History`, allowing for undoing and redoing changes.
#### Text
`Text` is the lowest-level [`Node`](#node) in the tree. Each `Text` node contains a list of [`Characters`](#character), which can optionally be dynamically decorated.
#### Transform
`Transform` is not publicly exposed; you access it by calling the `transform()` method on a [`State`](#state) model. It's simply a wrapper around the somewhat-complex transformation logic that allows for a state's history to be populated correctly.
| standardhealth/flux | src/lib/slate/models/Readme.md | Markdown | apache-2.0 | 3,689 |
# Zaleya galericulata subsp. galericulata SUBSPECIES
#### Status
ACCEPTED
#### According to
NUB Generator [autonym]
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Aizoaceae/Zaleya/Zaleya galericulata/Zaleya galericulata galericulata/README.md | Markdown | apache-2.0 | 184 |
---
layout: base
title: 'Statistics of flat in UD_Turkish-FrameNet'
udver: '2'
---
## Treebank Statistics: UD_Turkish-FrameNet: Relations: `flat`
This relation is universal.
4 nodes (0%) are attached to their parents as `flat`.
4 instances of `flat` (100%) are left-to-right (parent precedes child).
Average distance between parent and child is 1.25.
The following 2 pairs of parts of speech are connected with `flat`: <tt><a href="tr_framenet-pos-PROPN.html">PROPN</a></tt>-<tt><a href="tr_framenet-pos-PROPN.html">PROPN</a></tt> (3; 75% instances), <tt><a href="tr_framenet-pos-PROPN.html">PROPN</a></tt>-<tt><a href="tr_framenet-pos-NOUN.html">NOUN</a></tt> (1; 25% instances).
~~~ conllu
# visual-style 2 bgColor:blue
# visual-style 2 fgColor:white
# visual-style 1 bgColor:blue
# visual-style 1 fgColor:white
# visual-style 1 2 flat color:blue
1 Jeff jeff PROPN _ Case=Nom|Number=Sing 4 obl _ _
2 Bezos Bezos PROPN _ Case=Nom|Number=Sing 1 flat _ _
3 gibi gibi ADP _ _ 1 case _ _
4 olursa ol VERB _ Aspect=Hab|Mood=CndGen|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 6 advcl _ _
5 dünyalığı dünyalık NOUN _ Case=Acc|Number=Sing|Person=3 6 compound _ _
6 doğrultur doğrult VERB _ Aspect=Hab|Mood=Gen|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 0 root _ _
7 . . PUNCT _ _ 6 punct _ _
~~~
~~~ conllu
# visual-style 6 bgColor:blue
# visual-style 6 fgColor:white
# visual-style 5 bgColor:blue
# visual-style 5 fgColor:white
# visual-style 5 6 flat color:blue
1 Kafa kafa NOUN _ Case=Nom|Number=Sing|Person=3 2 compound _ _
2 göz göz NOUN _ Case=Nom|Number=Sing|Person=3 3 compound _ _
3 yara yar VERB _ Mood=Opt|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 4 compound _ _
4 yara yar VERB _ Mood=Opt|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 8 advcl _ _
5 Hüsn Hüsn PROPN _ Case=Nom|Number=Sing 8 obj _ _
6 ü ü NOUN _ Case=Nom|Number=Sing|Person=3 5 flat _ _
7 Aşk'ı aşk PROPN _ Case=Acc|Number=Sing 5 flat _ _
8 okuyor oku VERB _ Aspect=Prog|Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 16 parataxis _ _
9 , , PUNCT _ _ 8 punct _ _
10 hayranları hayran NOUN _ Case=Nom|Number=Plur|Number[psor]=Sing|Person=3|Person[psor]=3 16 nsubj _ _
11 da da CCONJ _ _ 10 advmod _ _
12 koyun koy NOUN _ Case=Gen|Number=Sing|Person=3 13 compound _ _
13 kaval kaval NOUN _ Case=Nom|Number=Sing|Person=3 14 compound _ _
14 dinler dinle VERB _ Aspect=Hab|Mood=Gen|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 15 compound _ _
15 gibi gibi ADP _ _ 16 compound _ _
16 dinliyorlardı dinle VERB _ Aspect=Perf|Mood=Ind|Number=Plur|Person=3|Polarity=Pos|Tense=Past|VerbForm=Fin 0 root _ _
17 . . PUNCT _ _ 16 punct _ _
~~~
| UniversalDependencies/docs | treebanks/tr_framenet/tr_framenet-dep-flat.md | Markdown | apache-2.0 | 2,722 |
package com.shadow.repository;
import com.shadow.domain.User;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * Spring Data repository for {@link User} entities.
 *
 * <p>Created by qq65827 on 2015/1/26.
 */
@Repository
public interface UserRepository extends MyCustomRepository<User, Long> {

    /** Finds the user with the given username (query derived from the method name). */
    User findByUsername(String username);

    /** Returns all users. */
    List<User> findAll();
}
| kidmonster/GraduationProject | src/main/java/com/shadow/repository/UserRepository.java | Java | apache-2.0 | 355 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>TypeSafeCollectionsTest xref</title>
<link type="text/css" rel="stylesheet" href="../../../../stylesheet.css" />
</head>
<body>
<div id="overview"><a href="../../../../../testapidocs/org/yaml/snakeyaml/constructor/TypeSafeCollectionsTest.html">View Javadoc</a></div><pre>
<a class="jxr_linenumber" name="1" href="#1">1</a> <em class="jxr_javadoccomment">/**</em>
<a class="jxr_linenumber" name="2" href="#2">2</a> <em class="jxr_javadoccomment"> * Copyright (c) 2008-2012, <a href="http://www.snakeyaml.org" target="alexandria_uri">http://www.snakeyaml.org</a></em>
<a class="jxr_linenumber" name="3" href="#3">3</a> <em class="jxr_javadoccomment"> *</em>
<a class="jxr_linenumber" name="4" href="#4">4</a> <em class="jxr_javadoccomment"> * Licensed under the Apache License, Version 2.0 (the "License");</em>
<a class="jxr_linenumber" name="5" href="#5">5</a> <em class="jxr_javadoccomment"> * you may not use this file except in compliance with the License.</em>
<a class="jxr_linenumber" name="6" href="#6">6</a> <em class="jxr_javadoccomment"> * You may obtain a copy of the License at</em>
<a class="jxr_linenumber" name="7" href="#7">7</a> <em class="jxr_javadoccomment"> *</em>
<a class="jxr_linenumber" name="8" href="#8">8</a> <em class="jxr_javadoccomment"> * <a href="http://www.apache.org/licenses/LICENSE-2.0" target="alexandria_uri">http://www.apache.org/licenses/LICENSE-2.0</a></em>
<a class="jxr_linenumber" name="9" href="#9">9</a> <em class="jxr_javadoccomment"> *</em>
<a class="jxr_linenumber" name="10" href="#10">10</a> <em class="jxr_javadoccomment"> * Unless required by applicable law or agreed to in writing, software</em>
<a class="jxr_linenumber" name="11" href="#11">11</a> <em class="jxr_javadoccomment"> * distributed under the License is distributed on an "AS IS" BASIS,</em>
<a class="jxr_linenumber" name="12" href="#12">12</a> <em class="jxr_javadoccomment"> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</em>
<a class="jxr_linenumber" name="13" href="#13">13</a> <em class="jxr_javadoccomment"> * See the License for the specific language governing permissions and</em>
<a class="jxr_linenumber" name="14" href="#14">14</a> <em class="jxr_javadoccomment"> * limitations under the License.</em>
<a class="jxr_linenumber" name="15" href="#15">15</a> <em class="jxr_javadoccomment"> */</em>
<a class="jxr_linenumber" name="16" href="#16">16</a> <strong class="jxr_keyword">package</strong> org.yaml.snakeyaml.constructor;
<a class="jxr_linenumber" name="17" href="#17">17</a>
<a class="jxr_linenumber" name="18" href="#18">18</a> <strong class="jxr_keyword">import</strong> java.util.Date;
<a class="jxr_linenumber" name="19" href="#19">19</a> <strong class="jxr_keyword">import</strong> java.util.List;
<a class="jxr_linenumber" name="20" href="#20">20</a> <strong class="jxr_keyword">import</strong> java.util.Map;
<a class="jxr_linenumber" name="21" href="#21">21</a> <strong class="jxr_keyword">import</strong> java.util.TreeMap;
<a class="jxr_linenumber" name="22" href="#22">22</a>
<a class="jxr_linenumber" name="23" href="#23">23</a> <strong class="jxr_keyword">import</strong> junit.framework.TestCase;
<a class="jxr_linenumber" name="24" href="#24">24</a>
<a class="jxr_linenumber" name="25" href="#25">25</a> <strong class="jxr_keyword">import</strong> org.yaml.snakeyaml.TypeDescription;
<a class="jxr_linenumber" name="26" href="#26">26</a> <strong class="jxr_keyword">import</strong> org.yaml.snakeyaml.Util;
<a class="jxr_linenumber" name="27" href="#27">27</a> <strong class="jxr_keyword">import</strong> org.yaml.snakeyaml.Yaml;
<a class="jxr_linenumber" name="28" href="#28">28</a> <strong class="jxr_keyword">import</strong> org.yaml.snakeyaml.nodes.Tag;
<a class="jxr_linenumber" name="29" href="#29">29</a> <strong class="jxr_keyword">import</strong> org.yaml.snakeyaml.representer.Representer;
<a class="jxr_linenumber" name="30" href="#30">30</a>
<a class="jxr_linenumber" name="31" href="#31">31</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">class</strong> <a href="../../../../org/yaml/snakeyaml/constructor/TypeSafeCollectionsTest.html">TypeSafeCollectionsTest</a> <strong class="jxr_keyword">extends</strong> TestCase {
<a class="jxr_linenumber" name="32" href="#32">32</a>
<a class="jxr_linenumber" name="33" href="#33">33</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> testTypeSafeList() {
<a class="jxr_linenumber" name="34" href="#34">34</a> Constructor constructor = <strong class="jxr_keyword">new</strong> Constructor(Car.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="35" href="#35">35</a> TypeDescription carDescription = <strong class="jxr_keyword">new</strong> TypeDescription(Car.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="36" href="#36">36</a> carDescription.putListPropertyType(<span class="jxr_string">"wheels"</span>, Wheel.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="37" href="#37">37</a> constructor.addTypeDescription(carDescription);
<a class="jxr_linenumber" name="38" href="#38">38</a> Yaml yaml = <strong class="jxr_keyword">new</strong> Yaml(constructor);
<a class="jxr_linenumber" name="39" href="#39">39</a> <a href="../../../../org/yaml/snakeyaml/constructor/Car.html">Car</a> car = (Car) yaml.load(Util.getLocalResource(<span class="jxr_string">"constructor/car-no-root-class.yaml"</span>));
<a class="jxr_linenumber" name="40" href="#40">40</a> assertEquals(<span class="jxr_string">"12-XP-F4"</span>, car.getPlate());
<a class="jxr_linenumber" name="41" href="#41">41</a> List<Wheel> wheels = car.getWheels();
<a class="jxr_linenumber" name="42" href="#42">42</a> assertNotNull(wheels);
<a class="jxr_linenumber" name="43" href="#43">43</a> assertEquals(5, wheels.size());
<a class="jxr_linenumber" name="44" href="#44">44</a> <strong class="jxr_keyword">for</strong> (Wheel wheel : wheels) {
<a class="jxr_linenumber" name="45" href="#45">45</a> assertTrue(wheel.getId() > 0);
<a class="jxr_linenumber" name="46" href="#46">46</a> }
<a class="jxr_linenumber" name="47" href="#47">47</a> }
<a class="jxr_linenumber" name="48" href="#48">48</a>
<a class="jxr_linenumber" name="49" href="#49">49</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> testTypeSafeMap() {
<a class="jxr_linenumber" name="50" href="#50">50</a> Constructor constructor = <strong class="jxr_keyword">new</strong> Constructor(MyCar.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="51" href="#51">51</a> TypeDescription carDescription = <strong class="jxr_keyword">new</strong> TypeDescription(MyCar.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="52" href="#52">52</a> carDescription.putMapPropertyType(<span class="jxr_string">"wheels"</span>, MyWheel.<strong class="jxr_keyword">class</strong>, Object.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="53" href="#53">53</a> constructor.addTypeDescription(carDescription);
<a class="jxr_linenumber" name="54" href="#54">54</a> Yaml yaml = <strong class="jxr_keyword">new</strong> Yaml(constructor);
<a class="jxr_linenumber" name="55" href="#55">55</a> <a href="../../../../org/yaml/snakeyaml/constructor/MyCar.html">MyCar</a> car = (MyCar) yaml.load(Util
<a class="jxr_linenumber" name="56" href="#56">56</a> .getLocalResource(<span class="jxr_string">"constructor/car-no-root-class-map.yaml"</span>));
<a class="jxr_linenumber" name="57" href="#57">57</a> assertEquals(<span class="jxr_string">"00-FF-Q2"</span>, car.getPlate());
<a class="jxr_linenumber" name="58" href="#58">58</a> Map<MyWheel, Date> wheels = car.getWheels();
<a class="jxr_linenumber" name="59" href="#59">59</a> assertNotNull(wheels);
<a class="jxr_linenumber" name="60" href="#60">60</a> assertEquals(5, wheels.size());
<a class="jxr_linenumber" name="61" href="#61">61</a> <strong class="jxr_keyword">for</strong> (MyWheel wheel : wheels.keySet()) {
<a class="jxr_linenumber" name="62" href="#62">62</a> assertTrue(wheel.getId() > 0);
<a class="jxr_linenumber" name="63" href="#63">63</a> Date date = wheels.get(wheel);
<a class="jxr_linenumber" name="64" href="#64">64</a> <strong class="jxr_keyword">long</strong> time = date.getTime();
<a class="jxr_linenumber" name="65" href="#65">65</a> assertTrue(<span class="jxr_string">"It must be midnight."</span>, time % 10000 == 0);
<a class="jxr_linenumber" name="66" href="#66">66</a> }
<a class="jxr_linenumber" name="67" href="#67">67</a> }
<a class="jxr_linenumber" name="68" href="#68">68</a>
<a class="jxr_linenumber" name="69" href="#69">69</a> <strong class="jxr_keyword">public</strong> <strong class="jxr_keyword">void</strong> testWithGlobalTag() {
<a class="jxr_linenumber" name="70" href="#70">70</a> Map<MyWheel, Date> wheels = <strong class="jxr_keyword">new</strong> TreeMap<MyWheel, Date>();
<a class="jxr_linenumber" name="71" href="#71">71</a> <strong class="jxr_keyword">long</strong> time = 1248212168084L;
<a class="jxr_linenumber" name="72" href="#72">72</a> <strong class="jxr_keyword">for</strong> (<strong class="jxr_keyword">int</strong> i = 1; i < 6; i++) {
<a class="jxr_linenumber" name="73" href="#73">73</a> <a href="../../../../org/yaml/snakeyaml/constructor/MyWheel.html">MyWheel</a> mw = <strong class="jxr_keyword">new</strong> <a href="../../../../org/yaml/snakeyaml/constructor/MyWheel.html">MyWheel</a>();
<a class="jxr_linenumber" name="74" href="#74">74</a> mw.setId(i);
<a class="jxr_linenumber" name="75" href="#75">75</a> mw.setBrand(mw.getBrand() + String.valueOf(i));
<a class="jxr_linenumber" name="76" href="#76">76</a> wheels.put(mw, <strong class="jxr_keyword">new</strong> Date(time + i));
<a class="jxr_linenumber" name="77" href="#77">77</a> }
<a class="jxr_linenumber" name="78" href="#78">78</a> <a href="../../../../org/yaml/snakeyaml/constructor/MyCar.html">MyCar</a> c = <strong class="jxr_keyword">new</strong> <a href="../../../../org/yaml/snakeyaml/constructor/MyCar.html">MyCar</a>();
<a class="jxr_linenumber" name="79" href="#79">79</a> c.setPlate(<span class="jxr_string">"00-FF-Q2"</span>);
<a class="jxr_linenumber" name="80" href="#80">80</a> c.setWheels(wheels);
<a class="jxr_linenumber" name="81" href="#81">81</a> Representer representer = <strong class="jxr_keyword">new</strong> Representer();
<a class="jxr_linenumber" name="82" href="#82">82</a> representer.addClassTag(MyWheel.<strong class="jxr_keyword">class</strong>, Tag.MAP);
<a class="jxr_linenumber" name="83" href="#83">83</a> Yaml yaml = <strong class="jxr_keyword">new</strong> Yaml(representer);
<a class="jxr_linenumber" name="84" href="#84">84</a> String output = yaml.dump(c);
<a class="jxr_linenumber" name="85" href="#85">85</a> assertEquals(Util.getLocalResource(<span class="jxr_string">"javabeans/mycar-with-global-tag1.yaml"</span>), output);
<a class="jxr_linenumber" name="86" href="#86">86</a> <em class="jxr_comment">// load</em>
<a class="jxr_linenumber" name="87" href="#87">87</a> Yaml beanLoader = <strong class="jxr_keyword">new</strong> Yaml();
<a class="jxr_linenumber" name="88" href="#88">88</a> <a href="../../../../org/yaml/snakeyaml/constructor/MyCar.html">MyCar</a> car = beanLoader.loadAs(output, MyCar.<strong class="jxr_keyword">class</strong>);
<a class="jxr_linenumber" name="89" href="#89">89</a> assertNotNull(car);
<a class="jxr_linenumber" name="90" href="#90">90</a> assertEquals(<span class="jxr_string">"00-FF-Q2"</span>, car.getPlate());
<a class="jxr_linenumber" name="91" href="#91">91</a> assertEquals(5, car.getWheels().size());
<a class="jxr_linenumber" name="92" href="#92">92</a> <strong class="jxr_keyword">for</strong> (Date d : car.getWheels().values()) {
<a class="jxr_linenumber" name="93" href="#93">93</a> <em class="jxr_comment">// give a day for any timezone</em>
<a class="jxr_linenumber" name="94" href="#94">94</a> assertTrue(d.before(<strong class="jxr_keyword">new</strong> Date(time + 1000 * 60 * 60 * 24)));
<a class="jxr_linenumber" name="95" href="#95">95</a> assertTrue(d.after(<strong class="jxr_keyword">new</strong> Date(time)));
<a class="jxr_linenumber" name="96" href="#96">96</a> }
<a class="jxr_linenumber" name="97" href="#97">97</a> Object wheel = car.getWheels().keySet().iterator().next();
<a class="jxr_linenumber" name="98" href="#98">98</a> assertTrue(wheel instanceof <a href="../../../../org/yaml/snakeyaml/constructor/MyWheel.html">MyWheel</a>);
<a class="jxr_linenumber" name="99" href="#99">99</a> <a href="../../../../org/yaml/snakeyaml/constructor/MyWheel.html">MyWheel</a> w = (MyWheel) wheel;
<a class="jxr_linenumber" name="100" href="#100">100</a> assertEquals(1, w.getId());
<a class="jxr_linenumber" name="101" href="#101">101</a> assertEquals(<span class="jxr_string">"Pirelli1"</span>, w.getBrand());
<a class="jxr_linenumber" name="102" href="#102">102</a> }
<a class="jxr_linenumber" name="103" href="#103">103</a> }
</pre>
<hr/><div id="footer">This page was automatically generated by <a href="http://maven.apache.org/">Maven</a></div></body>
</html>
| Mohitsharma44/Citysynth | Citysynth_v2/Yaml_reader/snakeyaml/target/site/xref-test/org/yaml/snakeyaml/constructor/TypeSafeCollectionsTest.html | HTML | apache-2.0 | 14,151 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrift/lib/cpp2/reflection/diff.h>
#include <thrift/test/reflection/gen-cpp2/reflection_fatal_types.h>
#include <folly/String.h>
#include <folly/portability/GTest.h>
#include <sstream>
#include <string>
#include <vector>
using namespace test_cpp2::cpp_reflection;
// Builds the canonical struct3 instance shared by all tests below:
// scalar fields, both unions, nested struct fieldG, and every container
// populated; fieldH is deliberately left empty.
static struct3 test_data() {
  // Five structA fixtures used by fieldL and fieldQ.
  structA a1;
  *a1.a_ref() = 99;
  *a1.b_ref() = "abc";
  structA a2;
  *a2.a_ref() = 1001;
  *a2.b_ref() = "foo";
  structA a3;
  *a3.a_ref() = 654;
  *a3.b_ref() = "bar";
  structA a4;
  *a4.a_ref() = 9791;
  *a4.b_ref() = "baz";
  structA a5;
  *a5.a_ref() = 111;
  *a5.b_ref() = "gaz";
  // Five structB fixtures used by fieldP.
  structB b1;
  *b1.c_ref() = 1.23;
  *b1.d_ref() = true;
  structB b2;
  *b2.c_ref() = 9.8;
  *b2.d_ref() = false;
  structB b3;
  *b3.c_ref() = 10.01;
  *b3.d_ref() = true;
  structB b4;
  *b4.c_ref() = 159.73;
  *b4.d_ref() = false;
  structB b5;
  *b5.c_ref() = 468.02;
  *b5.d_ref() = true;
  struct3 pod;
  *pod.fieldA_ref() = 141;
  *pod.fieldB_ref() = "this is a test";
  *pod.fieldC_ref() = enum1::field0;
  *pod.fieldD_ref() = enum2::field1_2;
  pod.fieldE_ref()->set_ud(5.6);
  pod.fieldF_ref()->set_us_2("this is a variant");
  *pod.fieldG_ref()->field0_ref() = 98;
  pod.fieldG_ref()->field1_ref() = "hello, world";
  *pod.fieldG_ref()->field2_ref() = enum1::field2;
  *pod.fieldG_ref()->field3_ref() = enum2::field0_2;
  pod.fieldG_ref()->field4_ref() = {};
  pod.fieldG_ref()->field4_ref()->set_ui(19937);
  pod.fieldG_ref()->field5_ref()->set_ue_2(enum1::field1);
  // fieldH intentionally left empty
  *pod.fieldI_ref() = {3, 5, 7, 9};
  *pod.fieldJ_ref() = {"a", "b", "c", "d"};
  *pod.fieldK_ref() = {};
  pod.fieldL_ref()->push_back(a1);
  pod.fieldL_ref()->push_back(a2);
  pod.fieldL_ref()->push_back(a3);
  pod.fieldL_ref()->push_back(a4);
  pod.fieldL_ref()->push_back(a5);
  *pod.fieldM_ref() = {2, 4, 6, 8};
  *pod.fieldN_ref() = {"w", "x", "y", "z"};
  *pod.fieldO_ref() = {};
  *pod.fieldP_ref() = {b1, b2, b3, b4, b5};
  *pod.fieldQ_ref() = {{"a1", a1}, {"a2", a2}, {"a3", a3}};
  *pod.fieldR_ref() = {};
  *pod.fieldS_ref() = {{"123", "456"}, {"abc", "ABC"}, {"def", "DEF"}};
  return pod;
}
// Runs apache::thrift::debug_equals on LHS vs RHS, capturing the rendered
// diff, and asserts it equals EXPECTED after stripping the raw-string
// literal's common left margin.
#define TEST_IMPL(LHS, RHS, EXPECTED) \
  do { \
    using namespace apache::thrift; \
    auto const& expected = folly::stripLeftMargin(EXPECTED); \
    std::ostringstream actualStream; \
    debug_equals(LHS, RHS, make_diff_output_callback(actualStream)); \
    EXPECT_EQ(expected, actualStream.str()); \
  } while (false)
// Identical values must produce an empty diff.
TEST(fatal_diff, equal) {
  TEST_IMPL(test_data(), test_data(), "");
}
// Two maps holding the same key/value pairs must compare equal regardless
// of insertion order (the diff output is expected to be empty).
TEST(fatal_diff, Failure) {
  // Note: the original test constructed an unused `test_data()` copy here;
  // it was dead code and has been removed.
  struct3 pod1, pod2;
  *pod1.fieldR_ref()["a"].c_ref() = 1;
  *pod1.fieldR_ref()["b"].c_ref() = 2;
  *pod1.fieldR_ref()["c"].c_ref() = 3;
  *pod1.fieldR_ref()["d"].c_ref() = 4;
  // Same entries, reversed insertion order.
  *pod2.fieldR_ref()["d"].c_ref() = 4;
  *pod2.fieldR_ref()["c"].c_ref() = 3;
  *pod2.fieldR_ref()["b"].c_ref() = 2;
  *pod2.fieldR_ref()["a"].c_ref() = 1;
  TEST_IMPL(pod1, pod2, "");
}
// Changing the scalar int fieldA produces a single-path diff; restoring it
// makes the diff empty again.
TEST(fatal_diff, fieldA) {
  auto pod = test_data();
  *pod.fieldA_ref() = 90;
  TEST_IMPL(pod, test_data(), R"(
    $.fieldA:
    - 90
    + 141
  )");
  *pod.fieldA_ref() = 141;
  TEST_IMPL(pod, test_data(), "");
}
// Changing the string fieldB produces a single-path diff; restoring it
// makes the diff empty again.
TEST(fatal_diff, fieldB) {
  auto pod = test_data();
  *pod.fieldB_ref() = "should mismatch";
  TEST_IMPL(pod, test_data(), R"(
    $.fieldB:
    - "should mismatch"
    + "this is a test"
  )");
  *pod.fieldB_ref() = "this is a test";
  TEST_IMPL(pod, test_data(), "");
}
// Enum mismatches are rendered using the enumerator names, not numbers.
TEST(fatal_diff, fieldC) {
  auto pod = test_data();
  *pod.fieldC_ref() = enum1::field2;
  TEST_IMPL(pod, test_data(), R"(
    $.fieldC:
    - field2
    + field0
  )");
  *pod.fieldC_ref() = enum1::field0;
  TEST_IMPL(pod, test_data(), "");
}
// Union fieldE: diffs are reported per active member — an unset union shows
// only the other side's member, a different active member shows both, and
// the same member with different values shows a -/+ pair.
TEST(fatal_diff, fieldE) {
  auto pod = test_data();
  pod.fieldE_ref() = {};
  TEST_IMPL(pod, test_data(), R"(
    $.fieldE.ud:
    + 5.6
  )");
  // Reversed operands flip the sign of the report.
  TEST_IMPL(test_data(), pod, R"(
    $.fieldE.ud:
    - 5.6
  )");
  pod.fieldE_ref()->set_ui(5);
  TEST_IMPL(pod, test_data(), R"(
    $.fieldE.ui:
    - 5
    $.fieldE.ud:
    + 5.6
  )");
  pod.fieldE_ref() = {};
  TEST_IMPL(pod, test_data(), R"(
    $.fieldE.ud:
    + 5.6
  )");
  pod.fieldE_ref()->set_ud(4);
  TEST_IMPL(pod, test_data(), R"(
    $.fieldE.ud:
    - 4
    + 5.6
  )");
  pod.fieldE_ref()->set_ud(5.6);
  TEST_IMPL(pod, test_data(), "");
}
// fieldH is empty in test_data(); setting a member on one side shows up as
// a removal, and clearing it restores equality.
TEST(fatal_diff, fieldH) {
  auto pod = test_data();
  pod.fieldH_ref()->set_ui_2(3);
  TEST_IMPL(pod, test_data(), R"(
    $.fieldH.ui_2:
    - 3
  )");
  pod.fieldH_ref() = {};
  TEST_IMPL(pod, test_data(), "");
}
// List fieldI: element changes are reported by index; extra trailing
// elements appear as removals, and a cleared list reports every missing
// element as an addition.
TEST(fatal_diff, fieldI) {
  auto pod = test_data();
  pod.fieldI_ref()[0] = 4;
  TEST_IMPL(pod, test_data(), R"(
    $.fieldI[0]:
    - 4
    + 3
  )");
  pod.fieldI_ref()[0] = 3;
  TEST_IMPL(pod, test_data(), "");
  pod.fieldI_ref()[2] = 10;
  TEST_IMPL(pod, test_data(), R"(
    $.fieldI[2]:
    - 10
    + 7
  )");
  pod.fieldI_ref()->push_back(11);
  TEST_IMPL(pod, test_data(), R"(
    $.fieldI[2]:
    - 10
    + 7
    $.fieldI[4]:
    - 11
  )");
  pod.fieldI_ref()->clear();
  TEST_IMPL(pod, test_data(), R"(
    $.fieldI[0]:
    + 3
    $.fieldI[1]:
    + 5
    $.fieldI[2]:
    + 7
    $.fieldI[3]:
    + 9
  )");
}
// Set field: entries are addressed by value; lhs-only entries come first with
// "-", then rhs-only entries with "+".
TEST(fatal_diff, fieldM) {
auto pod = test_data();
pod.fieldM_ref()->clear();
TEST_IMPL(
pod,
test_data(),
R"(
$.fieldM[2]:
+ 2
$.fieldM[4]:
+ 4
$.fieldM[6]:
+ 6
$.fieldM[8]:
+ 8
)");
pod.fieldM_ref()->insert(11);
pod.fieldM_ref()->insert(12);
pod.fieldM_ref()->insert(13);
pod.fieldM_ref()->insert(14);
TEST_IMPL(
pod,
test_data(),
R"(
$.fieldM[11]:
- 11
$.fieldM[12]:
- 12
$.fieldM[13]:
- 13
$.fieldM[14]:
- 14
$.fieldM[2]:
+ 2
$.fieldM[4]:
+ 4
$.fieldM[6]:
+ 6
$.fieldM[8]:
+ 8
)");
*pod.fieldM_ref() = *test_data().fieldM_ref();
TEST_IMPL(pod, test_data(), "");
}
// Map-of-struct field: entries are addressed by key and struct values are
// rendered in a multi-line "<struct>{...}" form; disjoint key sets report
// every lhs entry with "-" and every rhs entry with "+".
TEST(fatal_diff, fieldQ) {
auto pod = test_data();
pod.fieldQ_ref()->clear();
TEST_IMPL(
pod,
test_data(),
R"(
$.fieldQ["a1"]:
+ <struct>{
+ a: 99,
+ b: "abc"
+ }
$.fieldQ["a2"]:
+ <struct>{
+ a: 1001,
+ b: "foo"
+ }
$.fieldQ["a3"]:
+ <struct>{
+ a: 654,
+ b: "bar"
+ }
)");
structA a1;
*a1.a_ref() = 1;
*a1.b_ref() = "1";
structA a2;
*a2.a_ref() = 2;
*a2.b_ref() = "2";
structA a3;
*a3.a_ref() = 3;
*a3.b_ref() = "3";
pod.fieldQ_ref()["A1"] = a1;
pod.fieldQ_ref()["A2"] = a2;
pod.fieldQ_ref()["A3"] = a3;
TEST_IMPL(
pod,
test_data(),
R"(
$.fieldQ["A1"]:
- <struct>{
- a: 1,
- b: "1"
- }
$.fieldQ["A2"]:
- <struct>{
- a: 2,
- b: "2"
- }
$.fieldQ["A3"]:
- <struct>{
- a: 3,
- b: "3"
- }
$.fieldQ["a1"]:
+ <struct>{
+ a: 99,
+ b: "abc"
+ }
$.fieldQ["a2"]:
+ <struct>{
+ a: 1001,
+ b: "foo"
+ }
$.fieldQ["a3"]:
+ <struct>{
+ a: 654,
+ b: "bar"
+ }
)");
*pod.fieldQ_ref() = *test_data().fieldQ_ref();
TEST_IMPL(pod, test_data(), "");
}
// Nested struct members: paths are dotted ($.fieldG.field0, etc.).
TEST(fatal_diff, fieldG_field0) {
auto pod = test_data();
*pod.fieldG_ref()->field0_ref() = 12;
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field0:
- 12
+ 98
)");
*pod.fieldG_ref()->field0_ref() = 98;
TEST_IMPL(pod, test_data(), "");
}
TEST(fatal_diff, fieldG_field1) {
auto pod = test_data();
pod.fieldG_ref()->field1_ref() = "should mismatch";
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field1:
- "should mismatch"
+ "hello, world"
)");
pod.fieldG_ref()->field1_ref() = "hello, world";
TEST_IMPL(pod, test_data(), "");
}
TEST(fatal_diff, fieldG_field2) {
auto pod = test_data();
*pod.fieldG_ref()->field2_ref() = enum1::field1;
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field2:
- field1
+ field2
)");
*pod.fieldG_ref()->field2_ref() = enum1::field2;
TEST_IMPL(pod, test_data(), "");
}
// Union nested two levels deep: each active-member mismatch is reported under
// the member's own path.
TEST(fatal_diff, fieldG_field5) {
auto pod = test_data();
pod.fieldG_ref()->field5_ref()->set_ui_2(5);
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field5.ui_2:
- 5
$.fieldG.field5.ue_2:
+ field1
)");
pod.fieldG_ref()->field5_ref() = {};
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field5.ue_2:
+ field1
)");
pod.fieldG_ref()->field5_ref()->set_ue_2(enum1::field0);
TEST_IMPL(pod, test_data(), R"(
$.fieldG.field5.ue_2:
- field0
+ field1
)");
pod.fieldG_ref()->field5_ref()->set_ue_2(enum1::field1);
TEST_IMPL(pod, test_data(), "");
}
// Map with binary-ish keys: keys are rendered as hex ("0x...") and only the
// differing entries ("ghi" vs "def") appear in the diff.
TEST(fatal_diff, fieldS) {
auto pod = test_data();
*pod.fieldS_ref() = {{"123", "456"}, {"abc", "ABC"}, {"ghi", "GHI"}};
TEST_IMPL(pod, test_data(), R"(
$.fieldS["0x676869"]:
- GHI
$.fieldS["0x646566"]:
+ DEF
)");
}
// Binary field: values are rendered unquoted in the diff output.
TEST(fatal_diff, struct_binary) {
struct_binary lhs;
*lhs.bi_ref() = "hello";
struct_binary rhs;
*rhs.bi_ref() = "world";
TEST_IMPL(lhs, rhs, R"(
$.bi:
- hello
+ world
)");
}
namespace {
// Factory policies used by ref_test() below so the same test body can be
// instantiated for unique, shared, and shared-const reference fields.
struct UniqueHelper {
template <typename T, typename... Args>
static std::unique_ptr<T> build(Args&&... args) {
return std::make_unique<T>(std::forward<Args>(args)...);
}
};
struct SharedHelper {
template <typename T, typename... Args>
static std::shared_ptr<T> build(Args&&... args) {
return std::make_shared<T>(std::forward<Args>(args)...);
}
};
struct SharedConstHelper {
template <typename T, typename... Args>
static std::shared_ptr<T const> build(Args&&... args) {
return std::make_shared<T const>(std::forward<Args>(args)...);
}
};
} // namespace
// Shared body for the struct_ref_* tests: builds one instance whose optional
// ref fields are all null and one whose ref fields all hold default-constructed
// values, then checks the diff in both directions. Only the optional fields
// show up: both sides' non-optional fields hold equal defaults.
template <typename Structure, typename Helper>
void ref_test() {
Structure allNull;
allNull.aStruct_ref() = Helper::template build<structA>();
allNull.aList_ref() = Helper::template build<std::deque<std::string>>();
allNull.aSet_ref() =
Helper::template build<std::unordered_set<std::string>>();
allNull.aMap_ref() =
Helper::template build<std::unordered_map<std::string, std::string>>();
allNull.aUnion_ref() = Helper::template build<unionA>();
allNull.anOptionalStruct_ref() = nullptr;
allNull.anOptionalList_ref() = nullptr;
allNull.anOptionalSet_ref() = nullptr;
allNull.anOptionalMap_ref() = nullptr;
allNull.anOptionalUnion_ref() = nullptr;
Structure allDefault;
allDefault.aStruct_ref() = Helper::template build<structA>();
allDefault.anOptionalStruct_ref() = Helper::template build<structA>();
allDefault.aList_ref() = Helper::template build<std::deque<std::string>>();
allDefault.anOptionalList_ref() =
Helper::template build<std::deque<std::string>>();
allDefault.aSet_ref() =
Helper::template build<std::unordered_set<std::string>>();
allDefault.anOptionalSet_ref() =
Helper::template build<std::unordered_set<std::string>>();
allDefault.aMap_ref() =
Helper::template build<std::unordered_map<std::string, std::string>>();
allDefault.anOptionalMap_ref() =
Helper::template build<std::unordered_map<std::string, std::string>>();
allDefault.aUnion_ref() = Helper::template build<unionA>();
allDefault.anOptionalUnion_ref() = Helper::template build<unionA>();
TEST_IMPL(allNull, allDefault, R"(
$.anOptionalStruct:
+ <struct>{
+ a: 0,
+ b: ""
+ }
$.anOptionalList:
+ <list>[]
$.anOptionalSet:
+ <set>{}
$.anOptionalMap:
+ <map>{}
$.anOptionalUnion:
+ <variant>{}
)");
TEST_IMPL(allDefault, allNull, R"(
$.anOptionalStruct:
- <struct>{
- a: 0,
- b: ""
- }
$.anOptionalList:
- <list>[]
$.anOptionalSet:
- <set>{}
$.anOptionalMap:
- <map>{}
$.anOptionalUnion:
- <variant>{}
)");
}
// Instantiate the shared ref_test body for each smart-pointer flavor.
TEST(fatal_diff, struct_ref_unique) {
ref_test<hasRefUnique, UniqueHelper>();
}
TEST(fatal_diff, ref_shared) {
ref_test<hasRefShared, SharedHelper>();
}
TEST(fatal_diff, ref_shared_const) {
ref_test<hasRefSharedConst, SharedConstHelper>();
}
// Optional field isset semantics: a field whose value was written but whose
// isset bit was cleared (unset_unsafe) diffs like an unset field, while a
// field whose isset bit was forced on (ensure_isset_unsafe) diffs as holding
// its default value ("").
TEST(fatal_diff, optional_members) {
struct1 field1Set;
field1Set.field1_ref() = "1";
struct1 field1Unset;
struct1 field1SetButNotIsset;
field1SetButNotIsset.field1_ref() = "2";
apache::thrift::unset_unsafe(field1SetButNotIsset.field1_ref());
struct1 field1SetDefault;
apache::thrift::ensure_isset_unsafe(field1SetDefault.field1_ref());
TEST_IMPL(field1Set, field1Unset, R"(
$.field1:
- "1"
)");
TEST_IMPL(field1Unset, field1Set, R"(
$.field1:
+ "1"
)");
TEST_IMPL(field1Set, field1SetButNotIsset, R"(
$.field1:
- "1"
)");
TEST_IMPL(field1SetButNotIsset, field1Set, R"(
$.field1:
+ "1"
)");
TEST_IMPL(field1Unset, field1SetButNotIsset, "");
TEST_IMPL(field1SetButNotIsset, field1Unset, "");
TEST_IMPL(field1Set, field1SetDefault, R"(
$.field1:
- "1"
+ ""
)");
TEST_IMPL(field1SetDefault, field1Set, R"(
$.field1:
- ""
+ "1"
)");
TEST_IMPL(field1SetDefault, field1SetButNotIsset, R"(
$.field1:
- ""
)");
TEST_IMPL(field1SetButNotIsset, field1SetDefault, R"(
$.field1:
+ ""
)");
TEST_IMPL(field1Unset, field1SetDefault, R"(
$.field1:
+ ""
)");
TEST_IMPL(field1SetDefault, field1Unset, R"(
$.field1:
- ""
)");
}
// Union holding a ref member: a null aStruct vs. a default-constructed one
// diffs like a missing/present struct, in both directions.
TEST(fatal_diff, variant_ref_unique) {
variantHasRefUnique allNull;
allNull.set_aStruct() = nullptr;
variantHasRefUnique allDefault;
allDefault.set_aStruct();
TEST_IMPL(allNull, allDefault, R"(
$.aStruct:
+ <struct>{
+ a: 0,
+ b: ""
+ }
)");
TEST_IMPL(allDefault, allNull, R"(
$.aStruct:
- <struct>{
- a: 0,
- b: ""
- }
)");
}
#undef TEST_IMPL
| facebook/fbthrift | thrift/test/reflection/fatal_diff_test.cpp | C++ | apache-2.0 | 14,687 |
function Get-LogonEvents {
    <#
    .SYNOPSIS
        Reports logon/logoff sessions from the Microsoft-Windows-Winlogon event log.
    .DESCRIPTION
        Pairs Winlogon logon events (ID 7001) with logoff events (ID 7002) within
        the last -DaysBefore days and emits one object per session with Date,
        Login, Logout and Elapsed columns. Optionally writes the report to a
        text file and/or a CSV file.
    .PARAMETER DaysBefore
        How many days back to scan the event log. Default: 21.
    .PARAMETER OutputFilePath
        Optional path of a formatted text report; the file content is also echoed.
    .PARAMETER OutputCSVFilePath
        Optional path of a CSV export (Date, Login, Logout, Elapsed).
    .PARAMETER FormatTable
        Emit a formatted table instead of the raw session objects.
    #>
    # FIX: [CmdletBinding()] must precede param(), not sit inside it, where it
    # would be parsed as an attribute of the first parameter.
    [CmdletBinding()]
    param(
        [int] $DaysBefore = 21,
        [string] $OutputFilePath,
        [string] $OutputCSVFilePath,
        [switch] $FormatTable
    )
    begin {
        $ErrorActionPreference = "Stop"
        $logins = @()
        [System.Diagnostics.Debug]::WriteLine("DaysBefore $DaysBefore")
        [System.Diagnostics.Debug]::WriteLine("OutputFilePath $OutputFilePath")
        # FIX: previously traced an undefined variable ($DoNotWaitInTheEnd).
        [System.Diagnostics.Debug]::WriteLine("OutputCSVFilePath $OutputCSVFilePath")
        $LoginEventID = 7001
        $LogoutEventID = 7002
        $DateFrom = $(Get-Date).AddDays(-1 * $DaysBefore)
    }
    process {
        #deprecated
        #Get-EventLog System -source Microsoft-Windows-Winlogon -After $((Get-Date).AddDays(-1*$DaysBefore)) | Sort-Object -Property TimeWritten |
        Get-WinEvent -ProviderName Microsoft-Windows-Winlogon |
            Where-Object { $_.ID -in ($LoginEventID, $LogoutEventID) -and $_.TimeCreated -gt $DateFrom } |
            Sort-Object -Property TimeCreated |
            ForEach-Object {
                $time = $_.TimeCreated
                if ($_.ID -eq $LogoutEventID -and $logins.Length -gt 0) {
                    # Close the most recently opened session.
                    $last = $logins.Length - 1
                    $logins[$last].Logout = $time.ToString("HH:mm")
                    $logins[$last].LogoutTime = $time
                    $logins[$last].Elapsed = (New-TimeSpan -Start $logins[$last].LoginTime -End $time).ToString("dd\.hh\:mm").Replace("00.", "")
                }
                elseif ($_.ID -eq $LoginEventID) {
                    # Open a new session; Elapsed is provisional (until now) and
                    # is overwritten when the matching logoff event arrives.
                    $logins += [PSCustomObject]@{
                        Date       = $time.ToString("yyyy.MM.dd")
                        LoginTime  = $time
                        Login      = $time.ToString("HH:mm")
                        Logout     = ""
                        LogoutTime = $null
                        Elapsed    = (New-TimeSpan -Start $time -End $(Get-Date)).ToString("dd\.hh\:mm").Replace("00.", "")
                    }
                }
            }
        if ($OutputCSVFilePath) {
            $logins | Select-Object -Property Date, Login, Logout, Elapsed | Export-Csv -NoTypeInformation -Path $OutputCSVFilePath
        }
        if ($OutputFilePath) {
            $content = $logins | Format-Table -HideTableHeaders -Property Date, Login, Logout, Elapsed | Out-String
            # FIX: Out-String never returns $null, so the old `$null -eq $content`
            # branch was dead; an empty pipeline yields an empty/whitespace string.
            if ([string]::IsNullOrWhiteSpace($content)) {
                $content = "No event found."
            }
            $msg = "Writing $($content.Length) chars to $OutputFilePath"
            [System.Diagnostics.Debug]::WriteLine($msg)
            $content.Replace(' ', ' ') | Out-File $OutputFilePath
            Get-Content $OutputFilePath
        }
        if ($FormatTable) {
            $logins | Format-Table -Property Date, Login, Logout, Elapsed
        }
        else {
            $logins
        }
    }
    end {
    }
}
Export-ModuleMember -Function Get-LogonEvents | mattia72/powershell | Modules/Get-LogonEvents/Get-LogonEvents.psm1 | PowerShell | apache-2.0 | 2,559 |
package fr.inra.maiage.bibliome.alvisnlp.bibliomefactory.modules.pubannotation;
import java.util.Iterator;
import fr.inra.maiage.bibliome.alvisnlp.bibliomefactory.converters.expression.parser.ExpressionParser;
import fr.inra.maiage.bibliome.alvisnlp.bibliomefactory.modules.DefaultExpressions;
import fr.inra.maiage.bibliome.alvisnlp.bibliomefactory.modules.pubannotation.RelationSpecification.Resolved;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.Element;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.Section;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.EvaluationContext;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.Evaluator;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.Expression;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.LibraryResolver;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.Resolvable;
import fr.inra.maiage.bibliome.alvisnlp.core.corpus.expressions.ResolverException;
import fr.inra.maiage.bibliome.alvisnlp.core.module.ModuleException;
import fr.inra.maiage.bibliome.alvisnlp.core.module.NameUsage;
import fr.inra.maiage.bibliome.alvisnlp.core.module.NameUser;
import fr.inra.maiage.bibliome.util.Iterators;
/**
 * Describes how to export tuples (relations) of a section into the
 * PubAnnotation "relations" JSON array: which tuples to select and which
 * expressions yield the predicate, subject and object of each one.
 */
public class RelationSpecification implements Resolvable<Resolved> {
	private final Expression instances;
	private final Expression pred;
	private final Expression subj;
	private final Expression obj;

	/**
	 * @param instances expression selecting the tuples to export.
	 * @param pred expression evaluated to the relation predicate (string).
	 * @param subj expression selecting the subject element.
	 * @param obj expression selecting the object element.
	 */
	public RelationSpecification(Expression instances, Expression pred, Expression subj, Expression obj) {
		this.instances = instances;
		this.pred = pred;
		this.subj = subj;
		this.obj = obj;
	}

	/**
	 * Default specification: all section tuples, predicate from the "type"
	 * feature, subject and object from the first and second tuple arguments.
	 */
	public RelationSpecification() {
		this(DefaultExpressions.SECTION_TUPLES, DefaultExpressions.feature("type"), ExpressionParser.parseUnsafe("args{0}"), ExpressionParser.parseUnsafe("args{1}"));
	}

	@Override
	public Resolved resolveExpressions(LibraryResolver resolver) throws ResolverException {
		return new Resolved(resolver, this);
	}

	/**
	 * Expression-resolved counterpart of the specification, ready for
	 * evaluation against a section.
	 */
	public static class Resolved implements NameUser {
		private final Evaluator instances;
		private final Evaluator pred;
		private final Evaluator subj;
		private final Evaluator obj;

		private Resolved(LibraryResolver resolver, RelationSpecification source) throws ResolverException {
			this.instances = source.instances.resolveExpressions(resolver);
			this.pred = source.pred.resolveExpressions(resolver);
			this.subj = source.subj.resolveExpressions(resolver);
			this.obj = source.obj.resolveExpressions(resolver);
		}

		@Override
		public void collectUsedNames(NameUsage nameUsage, String defaultType) throws ModuleException {
			instances.collectUsedNames(nameUsage, defaultType);
			pred.collectUsedNames(nameUsage, defaultType);
			subj.collectUsedNames(nameUsage, defaultType);
			obj.collectUsedNames(nameUsage, defaultType);
		}

		/** Appends one JSON object per selected tuple of {@code sec} to {@code relations}. */
		@SuppressWarnings("unchecked")
		void addRelations(EvaluationContext ctx, Section sec, JSONArray relations) {
			for (Element relationElement : Iterators.loop(instances.evaluateElements(ctx, sec))) {
				relations.add(convertRelation(ctx, relationElement));
			}
		}

		/** Builds the PubAnnotation JSON form {id, pred, subj, obj} of one tuple. */
		@SuppressWarnings("unchecked")
		private JSONObject convertRelation(EvaluationContext ctx, Element e) {
			JSONObject json = new JSONObject();
			json.put("id", e.getStringId());
			json.put("pred", pred.evaluateString(ctx, e));
			json.put("subj", getArgument(ctx, e, subj));
			json.put("obj", getArgument(ctx, e, obj));
			return json;
		}

		/** Id of the first element produced by {@code eval}, or null if there is none. */
		private static String getArgument(EvaluationContext ctx, Element e, Evaluator eval) {
			Iterator<Element> argIt = eval.evaluateElements(ctx, e);
			return argIt.hasNext() ? argIt.next().getStringId() : null;
		}
	}
}
| Bibliome/alvisnlp | alvisnlp-bibliome/src/main/java/fr/inra/maiage/bibliome/alvisnlp/bibliomefactory/modules/pubannotation/RelationSpecification.java | Java | apache-2.0 | 3,786 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides process view
This module provides a view for
visualizing processes in human-readable formm
"""
import cinder.openstack.common.report.views.jinja_view as jv
class ProcessView(jv.JinjaView):
    """A Process View

    This view displays process models defined by
    :class:`openstack.common.report.models.process.ProcessModel`
    """

    # Jinja template consumed by the JinjaView base class: renders one line
    # per process (pid, parent pid, user, uids, state) followed by the
    # recursively rendered children views.
    VIEW_TEXT = (
        "Process {{ pid }} (under {{ parent_pid }}) "
        "[ run by: {{ username }} ({{ uids.real|default('unknown uid') }}),"
        " state: {{ state }} ]\n"
        "{% for child in children %}"
        " {{ child }}"
        "{% endfor %}"
    )
| saeki-masaki/cinder | cinder/openstack/common/report/views/text/process.py | Python | apache-2.0 | 1,233 |
/**
* Copyright 2015 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package io.confluent.connect.jdbc.source;
import org.apache.kafka.connect.data.Date;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.data.Time;
import org.apache.kafka.connect.data.Timestamp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URL;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Types;
import io.confluent.connect.jdbc.util.DateTimeUtils;
/**
* DataConverter handles translating table schemas to Kafka Connect schemas and row data to Kafka
* Connect records.
*/
/**
 * DataConverter handles translating table schemas to Kafka Connect schemas and row data to Kafka
 * Connect records.
 */
public class DataConverter {
  // FIX: the logger was previously attributed to JdbcSourceTask.class, which
  // made this class's warnings appear under the wrong logger name.
  private static final Logger log = LoggerFactory.getLogger(DataConverter.class);

  /**
   * Builds a Connect {@link Schema} named after {@code tableName} with one field per
   * result-set column.
   *
   * @param tableName   name given to the resulting struct schema
   * @param metadata    JDBC metadata describing the columns
   * @param mapNumerics if true, NUMERIC columns with scale 0 and small precision are
   *                    mapped to integer types instead of Decimal
   * @throws SQLException if column metadata cannot be read
   */
  public static Schema convertSchema(
      String tableName,
      ResultSetMetaData metadata,
      boolean mapNumerics
  ) throws SQLException {
    // TODO: Detect changes to metadata, which will require schema updates
    SchemaBuilder builder = SchemaBuilder.struct().name(tableName);
    for (int col = 1; col <= metadata.getColumnCount(); col++) {
      addFieldSchema(metadata, col, builder, mapNumerics);
    }
    return builder.build();
  }

  /**
   * Converts the current row of {@code resultSet} into a {@link Struct} matching
   * {@code schema}. Fields whose conversion fails are logged and skipped rather than
   * failing the whole record.
   */
  public static Struct convertRecord(Schema schema, ResultSet resultSet, boolean mapNumerics)
      throws SQLException {
    ResultSetMetaData metadata = resultSet.getMetaData();
    Struct struct = new Struct(schema);
    for (int col = 1; col <= metadata.getColumnCount(); col++) {
      try {
        convertFieldValue(resultSet, col, metadata.getColumnType(col), struct,
                          metadata.getColumnLabel(col), mapNumerics);
      } catch (IOException e) {
        log.warn("Ignoring record because processing failed:", e);
      } catch (SQLException e) {
        log.warn("Ignoring record due to SQL error:", e);
      }
    }
    return struct;
  }

  /**
   * Adds one field to {@code builder} for column {@code col}, translating the JDBC
   * type to the closest Connect schema type and honoring nullability.
   */
  private static void addFieldSchema(ResultSetMetaData metadata, int col,
                                     SchemaBuilder builder, boolean mapNumerics)
      throws SQLException {
    // Label is what the query requested the column name be using an "AS" clause, name is the
    // original
    String label = metadata.getColumnLabel(col);
    String name = metadata.getColumnName(col);
    String fieldName = label != null && !label.isEmpty() ? label : name;

    int sqlType = metadata.getColumnType(col);
    boolean optional = false;
    if (metadata.isNullable(col) == ResultSetMetaData.columnNullable
        || metadata.isNullable(col) == ResultSetMetaData.columnNullableUnknown) {
      optional = true;
    }
    switch (sqlType) {
      case Types.NULL: {
        log.warn("JDBC type {} not currently supported", sqlType);
        break;
      }

      case Types.BOOLEAN: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_BOOLEAN_SCHEMA);
        } else {
          builder.field(fieldName, Schema.BOOLEAN_SCHEMA);
        }
        break;
      }

      // ints <= 8 bits
      case Types.BIT: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_INT8_SCHEMA);
        } else {
          builder.field(fieldName, Schema.INT8_SCHEMA);
        }
        break;
      }

      case Types.TINYINT: {
        // Unsigned TINYINT does not fit in int8, so widen to int16.
        if (optional) {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.OPTIONAL_INT8_SCHEMA);
          } else {
            builder.field(fieldName, Schema.OPTIONAL_INT16_SCHEMA);
          }
        } else {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.INT8_SCHEMA);
          } else {
            builder.field(fieldName, Schema.INT16_SCHEMA);
          }
        }
        break;
      }

      // 16 bit ints
      case Types.SMALLINT: {
        if (optional) {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.OPTIONAL_INT16_SCHEMA);
          } else {
            builder.field(fieldName, Schema.OPTIONAL_INT32_SCHEMA);
          }
        } else {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.INT16_SCHEMA);
          } else {
            builder.field(fieldName, Schema.INT32_SCHEMA);
          }
        }
        break;
      }

      // 32 bit ints
      case Types.INTEGER: {
        if (optional) {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.OPTIONAL_INT32_SCHEMA);
          } else {
            builder.field(fieldName, Schema.OPTIONAL_INT64_SCHEMA);
          }
        } else {
          if (metadata.isSigned(col)) {
            builder.field(fieldName, Schema.INT32_SCHEMA);
          } else {
            builder.field(fieldName, Schema.INT64_SCHEMA);
          }
        }
        break;
      }

      // 64 bit ints
      case Types.BIGINT: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_INT64_SCHEMA);
        } else {
          builder.field(fieldName, Schema.INT64_SCHEMA);
        }
        break;
      }

      // REAL is a single precision floating point value, i.e. a Java float
      case Types.REAL: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_FLOAT32_SCHEMA);
        } else {
          builder.field(fieldName, Schema.FLOAT32_SCHEMA);
        }
        break;
      }

      // FLOAT is, confusingly, double precision and effectively the same as DOUBLE. See REAL
      // for single precision
      case Types.FLOAT:
      case Types.DOUBLE: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_FLOAT64_SCHEMA);
        } else {
          builder.field(fieldName, Schema.FLOAT64_SCHEMA);
        }
        break;
      }

      case Types.NUMERIC:
        if (mapNumerics) {
          // Integral NUMERIC (scale 0, precision < 19) maps to the smallest
          // Connect integer type that can hold it; otherwise fall through to
          // the Decimal logical type below.
          int precision = metadata.getPrecision(col);
          if (metadata.getScale(col) == 0 && precision < 19) { // integer
            Schema schema;
            if (precision > 9) {
              schema = (optional) ? Schema.OPTIONAL_INT64_SCHEMA :
                       Schema.INT64_SCHEMA;
            } else if (precision > 4) {
              schema = (optional) ? Schema.OPTIONAL_INT32_SCHEMA :
                       Schema.INT32_SCHEMA;
            } else if (precision > 2) {
              schema = (optional) ? Schema.OPTIONAL_INT16_SCHEMA :
                       Schema.INT16_SCHEMA;
            } else {
              schema = (optional) ? Schema.OPTIONAL_INT8_SCHEMA :
                       Schema.INT8_SCHEMA;
            }
            builder.field(fieldName, schema);
            break;
          }
        }
        // fallthrough

      case Types.DECIMAL: {
        int scale = metadata.getScale(col);
        if (scale == -127) { //NUMBER without precision defined for OracleDB
          scale = 127;
        }
        SchemaBuilder fieldBuilder = Decimal.builder(scale);
        if (optional) {
          fieldBuilder.optional();
        }
        builder.field(fieldName, fieldBuilder.build());
        break;
      }

      case Types.CHAR:
      case Types.VARCHAR:
      case Types.LONGVARCHAR:
      case Types.NCHAR:
      case Types.NVARCHAR:
      case Types.LONGNVARCHAR:
      case Types.CLOB:
      case Types.NCLOB:
      case Types.DATALINK:
      case Types.SQLXML: {
        // Some of these types will have fixed size, but we drop this from the schema conversion
        // since only fixed byte arrays can have a fixed size
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_STRING_SCHEMA);
        } else {
          builder.field(fieldName, Schema.STRING_SCHEMA);
        }
        break;
      }

      // Binary == fixed bytes
      // BLOB, VARBINARY, LONGVARBINARY == bytes
      case Types.BINARY:
      case Types.BLOB:
      case Types.VARBINARY:
      case Types.LONGVARBINARY: {
        if (optional) {
          builder.field(fieldName, Schema.OPTIONAL_BYTES_SCHEMA);
        } else {
          builder.field(fieldName, Schema.BYTES_SCHEMA);
        }
        break;
      }

      // Date is day + month + year
      case Types.DATE: {
        SchemaBuilder dateSchemaBuilder = Date.builder();
        if (optional) {
          dateSchemaBuilder.optional();
        }
        builder.field(fieldName, dateSchemaBuilder.build());
        break;
      }

      // Time is a time of day -- hour, minute, seconds, nanoseconds
      case Types.TIME: {
        SchemaBuilder timeSchemaBuilder = Time.builder();
        if (optional) {
          timeSchemaBuilder.optional();
        }
        builder.field(fieldName, timeSchemaBuilder.build());
        break;
      }

      // Timestamp is a date + time
      case Types.TIMESTAMP: {
        SchemaBuilder tsSchemaBuilder = Timestamp.builder();
        if (optional) {
          tsSchemaBuilder.optional();
        }
        builder.field(fieldName, tsSchemaBuilder.build());
        break;
      }

      case Types.ARRAY:
      case Types.JAVA_OBJECT:
      case Types.OTHER:
      case Types.DISTINCT:
      case Types.STRUCT:
      case Types.REF:
      case Types.ROWID:
      default: {
        log.warn("JDBC type {} not currently supported", sqlType);
        break;
      }
    }
  }

  /**
   * Reads column {@code col} from the current row and stores the converted value in
   * {@code struct} under {@code fieldName}. Unsupported types are silently skipped
   * (a warning was already logged during schema conversion).
   *
   * @throws IOException  if a BLOB/CLOB exceeds {@link Integer#MAX_VALUE} bytes
   * @throws SQLException if the column cannot be read
   */
  private static void convertFieldValue(ResultSet resultSet, int col, int colType,
                                        Struct struct, String fieldName, boolean mapNumerics)
      throws SQLException, IOException {
    final Object colValue;
    switch (colType) {
      case Types.NULL: {
        colValue = null;
        break;
      }

      case Types.BOOLEAN: {
        colValue = resultSet.getBoolean(col);
        break;
      }

      case Types.BIT: {
        /**
         * BIT should be either 0 or 1.
         * TODO: Postgres handles this differently, returning a string "t" or "f". See the
         * elasticsearch-jdbc plugin for an example of how this is handled
         */
        colValue = resultSet.getByte(col);
        break;
      }

      // 8 bits int
      case Types.TINYINT: {
        if (resultSet.getMetaData().isSigned(col)) {
          colValue = resultSet.getByte(col);
        } else {
          colValue = resultSet.getShort(col);
        }
        break;
      }

      // 16 bits int
      case Types.SMALLINT: {
        if (resultSet.getMetaData().isSigned(col)) {
          colValue = resultSet.getShort(col);
        } else {
          colValue = resultSet.getInt(col);
        }
        break;
      }

      // 32 bits int
      case Types.INTEGER: {
        if (resultSet.getMetaData().isSigned(col)) {
          colValue = resultSet.getInt(col);
        } else {
          colValue = resultSet.getLong(col);
        }
        break;
      }

      // 64 bits int
      case Types.BIGINT: {
        colValue = resultSet.getLong(col);
        break;
      }

      // REAL is a single precision floating point value, i.e. a Java float
      case Types.REAL: {
        colValue = resultSet.getFloat(col);
        break;
      }

      // FLOAT is, confusingly, double precision and effectively the same as DOUBLE. See REAL
      // for single precision
      case Types.FLOAT:
      case Types.DOUBLE: {
        colValue = resultSet.getDouble(col);
        break;
      }

      case Types.NUMERIC:
        if (mapNumerics) {
          // Mirrors the schema mapping in addFieldSchema: integral NUMERIC is
          // read as the matching integer type; otherwise fall through to
          // BigDecimal.
          ResultSetMetaData metadata = resultSet.getMetaData();
          int precision = metadata.getPrecision(col);
          if (metadata.getScale(col) == 0 && precision < 19) { // integer
            if (precision > 9) {
              colValue = resultSet.getLong(col);
            } else if (precision > 4) {
              colValue = resultSet.getInt(col);
            } else if (precision > 2) {
              colValue = resultSet.getShort(col);
            } else {
              colValue = resultSet.getByte(col);
            }
            break;
          }
        }
        // fallthrough

      case Types.DECIMAL: {
        ResultSetMetaData metadata = resultSet.getMetaData();
        int scale = metadata.getScale(col);
        if (scale == -127) {
          scale = 127;
        }
        colValue = resultSet.getBigDecimal(col, scale);
        break;
      }

      case Types.CHAR:
      case Types.VARCHAR:
      case Types.LONGVARCHAR: {
        colValue = resultSet.getString(col);
        break;
      }

      case Types.NCHAR:
      case Types.NVARCHAR:
      case Types.LONGNVARCHAR: {
        colValue = resultSet.getNString(col);
        break;
      }

      // Binary == fixed, VARBINARY and LONGVARBINARY == bytes
      case Types.BINARY:
      case Types.VARBINARY:
      case Types.LONGVARBINARY: {
        colValue = resultSet.getBytes(col);
        break;
      }

      // Date is day + month + year
      case Types.DATE: {
        colValue = resultSet.getDate(col, DateTimeUtils.UTC_CALENDAR.get());
        break;
      }

      // Time is a time of day -- hour, minute, seconds, nanoseconds
      case Types.TIME: {
        colValue = resultSet.getTime(col, DateTimeUtils.UTC_CALENDAR.get());
        break;
      }

      // Timestamp is a date + time
      case Types.TIMESTAMP: {
        colValue = resultSet.getTimestamp(col, DateTimeUtils.UTC_CALENDAR.get());
        break;
      }

      // Datalink is basically a URL -> string
      case Types.DATALINK: {
        URL url = resultSet.getURL(col);
        colValue = (url != null ? url.toString() : null);
        break;
      }

      // BLOB == fixed
      case Types.BLOB: {
        Blob blob = resultSet.getBlob(col);
        if (blob == null) {
          colValue = null;
        } else {
          if (blob.length() > Integer.MAX_VALUE) {
            throw new IOException("Can't process BLOBs longer than Integer.MAX_VALUE");
          }
          colValue = blob.getBytes(1, (int) blob.length());
          blob.free();
        }
        break;
      }

      case Types.CLOB:
      case Types.NCLOB: {
        Clob clob = (colType == Types.CLOB ? resultSet.getClob(col) : resultSet.getNClob(col));
        if (clob == null) {
          colValue = null;
        } else {
          if (clob.length() > Integer.MAX_VALUE) {
            // FIX: message previously said "BLOBs" in the CLOB branch.
            throw new IOException("Can't process CLOBs longer than Integer.MAX_VALUE");
          }
          colValue = clob.getSubString(1, (int) clob.length());
          clob.free();
        }
        break;
      }

      // XML -> string
      case Types.SQLXML: {
        SQLXML xml = resultSet.getSQLXML(col);
        colValue = (xml != null ? xml.getString() : null);
        break;
      }

      case Types.ARRAY:
      case Types.JAVA_OBJECT:
      case Types.OTHER:
      case Types.DISTINCT:
      case Types.STRUCT:
      case Types.REF:
      case Types.ROWID:
      default: {
        // These are not currently supported, but we don't want to log something for every single
        // record we translate. There will already be errors logged for the schema translation
        return;
      }
    }

    // FIXME: Would passing in some extra info about the schema so we can get the Field by index
    // be faster than setting this by name?
    struct.put(fieldName, resultSet.wasNull() ? null : colValue);
  }
}
| cotedm/kafka-connect-jdbc | src/main/java/io/confluent/connect/jdbc/source/DataConverter.java | Java | apache-2.0 | 15,870 |
<?php
/**
* @version CVS: 1.0.2
* @package Com_Akrecipes
* @author Rutvik Doshi <rutvik@archanaskitchen.com>
* @copyright Copyright (C) 2015. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
// No direct access
defined('_JEXEC') or die;
jimport('joomla.application.component.view');
/**
* View class for a list of Akrecipes.
*
* @since 1.6
*/
/**
 * Frontend view listing recipe ingredients.
 *
 * @since 1.6
 */
class AkrecipesViewIngredients extends JViewLegacy
{
	protected $items;

	protected $pagination;

	protected $state;

	protected $params;

	/**
	 * Display the view
	 *
	 * @param string $tpl Template name
	 *
	 * @return void
	 *
	 * @throws Exception on model errors
	 */
	public function display($tpl = null)
	{
		$app = JFactory::getApplication();

		// Pull everything the layout needs from the model and component params.
		$this->state      = $this->get('State');
		$this->items      = $this->get('Items');
		$this->pagination = $this->get('Pagination');
		$this->params     = $app->getParams('com_akrecipes');

		$errors = $this->get('Errors');

		if (count($errors))
		{
			throw new Exception(implode("\n", $errors));
		}

		$this->_prepareDocument();

		parent::display($tpl);
	}

	/**
	 * Prepares the document: page heading, browser title and meta tags.
	 *
	 * @return void
	 *
	 * @throws Exception
	 */
	protected function _prepareDocument()
	{
		$app  = JFactory::getApplication();
		$menu = $app->getMenu()->getActive();

		// The application sets a default page title, so derive the heading
		// from the active menu item when one exists.
		if ($menu)
		{
			$this->params->def('page_heading', $this->params->get('page_title', $menu->title));
		}
		else
		{
			$this->params->def('page_heading', JText::_('COM_AKRECIPES_DEFAULT_PAGE_TITLE'));
		}

		$title    = $this->params->get('page_title', '');
		$sitename = $app->get('sitename');

		if (empty($title))
		{
			$title = $sitename;
		}
		else
		{
			// sitename_pagetitles: 1 = "Site - Title", 2 = "Title - Site".
			switch ((int) $app->get('sitename_pagetitles', 0))
			{
				case 1:
					$title = JText::sprintf('JPAGETITLE', $sitename, $title);
					break;

				case 2:
					$title = JText::sprintf('JPAGETITLE', $title, $sitename);
					break;
			}
		}

		$this->document->setTitle($title);

		if ($this->params->get('menu-meta_description'))
		{
			$this->document->setDescription($this->params->get('menu-meta_description'));
		}

		if ($this->params->get('menu-meta_keywords'))
		{
			$this->document->setMetadata('keywords', $this->params->get('menu-meta_keywords'));
		}

		if ($this->params->get('robots'))
		{
			$this->document->setMetadata('robots', $this->params->get('robots'));
		}
	}

	/**
	 * Check if state is set
	 *
	 * @param mixed $state State
	 *
	 * @return bool
	 */
	public function getState($state)
	{
		if (isset($this->state->{$state}))
		{
			return $this->state->{$state};
		}

		return false;
	}
}
| rutvikd/ak-recipes | com_akrecipes-1.0.2/site/views/ingredients/view.html.php | PHP | apache-2.0 | 2,724 |
---
layout: default
description: This is an introduction to ArangoDB's HTTP interface for administration and monitoring of the server
---
HTTP Interface for Administration and Monitoring
================================================
This is an introduction to ArangoDB's HTTP interface for administration and
monitoring of the server.
<!-- lib/Admin/RestAdminLogHandler.cpp -->
{% docublock JSF_get_admin_log %}
{% docublock JSF_get_admin_loglevel %}
{% docublock JSF_put_admin_loglevel %}
<!-- js/actions/api-system.js -->
{% docublock JSF_get_admin_routing_reloads %}
<!-- js/actions/api-system.js -->
{% docublock JSF_get_admin_statistics %}
<!-- js/actions/api-system.js -->
{% docublock JSF_get_admin_statistics_description %}
<!-- js/actions/api-system.js -->
{% docublock JSF_get_admin_server_role %}
<!-- js/actions/api-system.js -->
{% docublock JSF_get_admin_server_id %}
{% docublock get_admin_server_availability %}
## Cluster
<!-- js/actions/api-cluster.js -->
{% docublock JSF_cluster_statistics_GET %}
{% docublock get_cluster_health %}
| arangodb/docs | 3.3/http/administration-and-monitoring.md | Markdown | apache-2.0 | 1,067 |
using System;
using System.Configuration;
namespace HtmlToPdf.Parser
{
/// <summary>
/// Typed convenience accessors over <see cref="ConfigurationManager.AppSettings"/>.
/// </summary>
public static class AppSettingsHelper
{
    /// <summary>Returns the raw string value for <paramref name="key"/>, or null when absent.</summary>
    public static string GetValue(string key) => GetValue<string>(key);

    /// <summary>
    /// Returns the setting for <paramref name="key"/> converted to <typeparamref name="T"/>,
    /// or <paramref name="defaultValue"/> when the setting is missing or empty.
    /// </summary>
    public static T GetValue<T>(string key, T defaultValue = default(T))
    {
        var raw = ConfigurationManager.AppSettings[key];

        return string.IsNullOrEmpty(raw)
            ? defaultValue
            : (T)Convert.ChangeType(raw, typeof(T));
    }
}
} | elyor0529/HtmlToPdf | HtmlToPdf.Parser/AppSettingsHelper.cs | C# | apache-2.0 | 547 |
/*-------------------------------------------------------------------------
*
* vacuum.h
* header file for postgres vacuum cleaner and statistics analyzer
*
*
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/commands/vacuum.h
*
*-------------------------------------------------------------------------
*/
#ifndef VACUUM_H
#define VACUUM_H
#include "access/htup.h"
#include "catalog/pg_class.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_type.h"
#include "nodes/parsenodes.h"
#include "storage/buf.h"
#include "storage/lock.h"
#include "utils/relcache.h"
/*----------
* ANALYZE builds one of these structs for each attribute (column) that is
* to be analyzed. The struct and subsidiary data are in anl_context,
* so they live until the end of the ANALYZE operation.
*
* The type-specific typanalyze function is passed a pointer to this struct
* and must return true to continue analysis, false to skip analysis of this
* column. In the true case it must set the compute_stats and minrows fields,
* and can optionally set extra_data to pass additional info to compute_stats.
* minrows is its request for the minimum number of sample rows to be gathered
* (but note this request might not be honored, eg if there are fewer rows
* than that in the table).
*
* The compute_stats routine will be called after sample rows have been
* gathered. Aside from this struct, it is passed:
* fetchfunc: a function for accessing the column values from the
* sample rows
* samplerows: the number of sample tuples
* totalrows: estimated total number of rows in relation
* The fetchfunc may be called with rownum running from 0 to samplerows-1.
* It returns a Datum and an isNull flag.
*
* compute_stats should set stats_valid true if it is able to compute
* any useful statistics. If it does, the remainder of the struct holds
* the information to be stored in a pg_statistic row for the column. Be
* careful to allocate any pointed-to data in anl_context, which will NOT
* be CurrentMemoryContext when compute_stats is called.
*
* Note: all comparisons done for statistical purposes should use the
* underlying column's collation (attcollation), except in situations
* where a noncollatable container type contains a collatable type;
* in that case use the type's default collation. Be sure to record
* the appropriate collation in stacoll.
*----------
*/
typedef struct VacAttrStats *VacAttrStatsP;
typedef Datum (*AnalyzeAttrFetchFunc) (VacAttrStatsP stats, int rownum,
bool *isNull);
typedef void (*AnalyzeAttrComputeStatsFunc) (VacAttrStatsP stats,
AnalyzeAttrFetchFunc fetchfunc,
int samplerows,
double totalrows);
typedef struct VacAttrStats
{
/*
* These fields are set up by the main ANALYZE code before invoking the
* type-specific typanalyze function.
*
* Note: do not assume that the data being analyzed has the same datatype
* shown in attr, ie do not trust attr->atttypid, attlen, etc. This is
* because some index opclasses store a different type than the underlying
* column/expression. Instead use attrtypid, attrtypmod, and attrtype for
* information about the datatype being fed to the typanalyze function.
* Likewise, use attrcollid not attr->attcollation.
*/
Form_pg_attribute attr; /* copy of pg_attribute row for column */
Oid attrtypid; /* type of data being analyzed */
int32 attrtypmod; /* typmod of data being analyzed */
Form_pg_type attrtype; /* copy of pg_type row for attrtypid */
Oid attrcollid; /* collation of data being analyzed */
MemoryContext anl_context; /* where to save long-lived data */
/*
* These fields must be filled in by the typanalyze routine, unless it
* returns false.
*/
AnalyzeAttrComputeStatsFunc compute_stats; /* function pointer */
int minrows; /* Minimum # of rows wanted for stats */
void *extra_data; /* for extra type-specific data */
/*
* These fields are to be filled in by the compute_stats routine. (They
* are initialized to zero when the struct is created.)
*/
bool stats_valid;
float4 stanullfrac; /* fraction of entries that are NULL */
int32 stawidth; /* average width of column values */
float4 stadistinct; /* # distinct values */
int16 stakind[STATISTIC_NUM_SLOTS];
Oid staop[STATISTIC_NUM_SLOTS];
Oid stacoll[STATISTIC_NUM_SLOTS];
int numnumbers[STATISTIC_NUM_SLOTS];
float4 *stanumbers[STATISTIC_NUM_SLOTS];
int numvalues[STATISTIC_NUM_SLOTS];
Datum *stavalues[STATISTIC_NUM_SLOTS];
/*
* These fields describe the stavalues[n] element types. They will be
* initialized to match attrtypid, but a custom typanalyze function might
* want to store an array of something other than the analyzed column's
* elements. It should then overwrite these fields.
*/
Oid statypid[STATISTIC_NUM_SLOTS];
int16 statyplen[STATISTIC_NUM_SLOTS];
bool statypbyval[STATISTIC_NUM_SLOTS];
char statypalign[STATISTIC_NUM_SLOTS];
/*
* These fields are private to the main ANALYZE code and should not be
* looked at by type-specific functions.
*/
int tupattnum; /* attribute number within tuples */
HeapTuple *rows; /* access info for std fetch function */
TupleDesc tupDesc;
Datum *exprvals; /* access info for index fetch function */
bool *exprnulls;
int rowstride;
} VacAttrStats;
typedef enum VacuumOption
{
VACOPT_VACUUM = 1 << 0, /* do VACUUM */
VACOPT_ANALYZE = 1 << 1, /* do ANALYZE */
VACOPT_VERBOSE = 1 << 2, /* print progress info */
VACOPT_FREEZE = 1 << 3, /* FREEZE option */
VACOPT_FULL = 1 << 4, /* FULL (non-concurrent) vacuum */
VACOPT_SKIP_LOCKED = 1 << 5, /* skip if cannot get lock */
VACOPT_SKIPTOAST = 1 << 6, /* don't process the TOAST table, if any */
VACOPT_DISABLE_PAGE_SKIPPING = 1 << 7 /* don't skip any pages */
} VacuumOption;
/*
* A ternary value used by vacuum parameters.
*
* DEFAULT value is used to determine the value based on other
* configurations, e.g. reloptions.
*/
typedef enum VacOptTernaryValue
{
VACOPT_TERNARY_DEFAULT = 0,
VACOPT_TERNARY_DISABLED,
VACOPT_TERNARY_ENABLED,
} VacOptTernaryValue;
/*
* Parameters customizing behavior of VACUUM and ANALYZE.
*
* Note that at least one of VACOPT_VACUUM and VACOPT_ANALYZE must be set
* in options.
*/
typedef struct VacuumParams
{
int options; /* bitmask of VacuumOption */
int freeze_min_age; /* min freeze age, -1 to use default */
int freeze_table_age; /* age at which to scan whole table */
int multixact_freeze_min_age; /* min multixact freeze age, -1 to
* use default */
int multixact_freeze_table_age; /* multixact age at which to scan
* whole table */
bool is_wraparound; /* force a for-wraparound vacuum */
int log_min_duration; /* minimum execution threshold in ms at
* which verbose logs are activated, -1
* to use default */
VacOptTernaryValue index_cleanup; /* Do index vacuum and cleanup,
* default value depends on reloptions */
VacOptTernaryValue truncate; /* Truncate empty pages at the end,
* default value depends on reloptions */
} VacuumParams;
/* GUC parameters */
extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */
extern int vacuum_freeze_min_age;
extern int vacuum_freeze_table_age;
extern int vacuum_multixact_freeze_min_age;
extern int vacuum_multixact_freeze_table_age;
/* in commands/vacuum.c */
extern void ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel);
extern void vacuum(List *relations, VacuumParams *params,
BufferAccessStrategy bstrategy, bool isTopLevel);
extern void vac_open_indexes(Relation relation, LOCKMODE lockmode,
int *nindexes, Relation **Irel);
extern void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode);
extern double vac_estimate_reltuples(Relation relation,
BlockNumber total_pages,
BlockNumber scanned_pages,
double scanned_tuples);
extern void vac_update_relstats(Relation relation,
BlockNumber num_pages,
double num_tuples,
BlockNumber num_all_visible_pages,
bool hasindex,
TransactionId frozenxid,
MultiXactId minmulti,
bool in_outer_xact);
extern void vacuum_set_xid_limits(Relation rel,
int freeze_min_age, int freeze_table_age,
int multixact_freeze_min_age,
int multixact_freeze_table_age,
TransactionId *oldestXmin,
TransactionId *freezeLimit,
TransactionId *xidFullScanLimit,
MultiXactId *multiXactCutoff,
MultiXactId *mxactFullScanLimit);
extern void vac_update_datfrozenxid(void);
extern void vacuum_delay_point(void);
extern bool vacuum_is_relation_owner(Oid relid, Form_pg_class reltuple,
int options);
extern Relation vacuum_open_relation(Oid relid, RangeVar *relation,
int options, bool verbose, LOCKMODE lmode);
/* in commands/analyze.c */
extern void analyze_rel(Oid relid, RangeVar *relation,
VacuumParams *params, List *va_cols, bool in_outer_xact,
BufferAccessStrategy bstrategy);
extern bool std_typanalyze(VacAttrStats *stats);
/* in utils/misc/sampling.c --- duplicate of declarations in utils/sampling.h */
extern double anl_random_fract(void);
extern double anl_init_selection_state(int n);
extern double anl_get_next_S(double t, int n, double *stateptr);
#endif /* VACUUM_H */
| francelabs/datafari | linux/pgsql/include/server/commands/vacuum.h | C | apache-2.0 | 9,572 |
/*
* Copyright 1&1 Internet AG, https://github.com/1and1/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.oneandone.stool.cli.command;
import net.oneandone.inline.ArgumentException;
import net.oneandone.stool.cli.Globals;
import net.oneandone.stool.cli.Reference;
import net.oneandone.sushi.util.Strings;
import java.util.LinkedHashMap;
import java.util.Map;
public class Config extends IteratedStageCommand {
private final Map<String, String> values;
private boolean get;
private boolean set;
public Config(Globals globals, String stage) {
super(globals, stage);
values = new LinkedHashMap<>();
}
public void value(String str) {
int idx;
String key;
String value;
if (str.endsWith("-")) {
key = str.substring(0, str.length() - 1);
value = null;
set = true;
} else {
idx = str.indexOf('=');
if (idx == -1) {
key = str;
value = null;
get = true;
} else {
key = str.substring(0, idx);
value = str.substring(idx + 1);
set = true;
}
}
if (values.containsKey(key)) {
throw new ArgumentException("duplicate value: " + key);
}
if (get && set) {
throw new ArgumentException("cannot mix get and set arguments");
}
values.put(key, value);
}
@Override
public void doMain(Reference reference) throws Exception {
Map<String, Map<String, String>> loaded;
int width;
Map<String, String> map;
if (set) {
for (Map.Entry<String, String> entry : reference.client.setValues(reference.stage, values).entrySet()) {
console.info.println(entry.getKey() + "=" + entry.getValue());
}
} else {
loaded = reference.client.getValues(reference.stage);
if (get) {
loaded = selectedValues(loaded);
} else {
// neither get nor set -> show all
}
width = 9; // "layer" with angle brackets with indet 2
if (loaded.size() > 1) {
for (String name : loaded.keySet()) {
width = Math.max(width, name.length());
}
width += 3;
}
for (Map.Entry<String, Map<String, String>> entry : loaded.entrySet()) {
map = entry.getValue();
console.info.println(Strings.padLeft(entry.getKey(), width) + " : " + map.get("value"));
if (console.getVerbose()) {
printOpt(width, map.get("layer"), map.get("expr"));
printOpt(width, map, "doc");
}
}
}
}
private void printOpt(int width, Map<String, String> map, String key) {
printOpt(width, key, map.get(key));
}
private void printOpt(int width, String key, String value) {
if (value != null) {
console.info.println(Strings.padLeft("[" + key + "]", width) + " : " + value);
}
}
private Map<String, Map<String, String>> selectedValues(Map<String, Map<String, String>> all) {
Map<String, Map<String, String>> result;
Map<String, String> map;
result = new LinkedHashMap<>();
for (String name : values.keySet()) {
map = all.get(name);
if (map == null) {
throw new ArgumentException("unknown value: " + name);
}
result.put(name, map);
}
return result;
}
}
| mlhartme/stool | src/main/java/net/oneandone/stool/cli/command/Config.java | Java | apache-2.0 | 4,196 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_26) on Mon Sep 12 10:43:51 CEST 2011 -->
<TITLE>
it.essepuntato.earmark.core.test
</TITLE>
<META NAME="date" CONTENT="2011-09-12">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="it.essepuntato.earmark.core.test";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Package</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-use.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../it/essepuntato/earmark/core/io/package-summary.html"><B>PREV PACKAGE</B></A>
<A HREF="../../../../../it/essepuntato/earmark/core/xml/package-summary.html"><B>NEXT PACKAGE</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?it/essepuntato/earmark/core/test/package-summary.html" target="_top"><B>FRAMES</B></A>
<A HREF="package-summary.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<H2>
Package it.essepuntato.earmark.core.test
</H2>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Interface Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/EARMARKTest.html" title="interface in it.essepuntato.earmark.core.test">EARMARKTest</A></B></TD>
<TD>This interface defines the common methods of any test belonging to the EARMARK test suite.</TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Class Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/AbstractTest.html" title="class in it.essepuntato.earmark.core.test">AbstractTest</A></B></TD>
<TD>An abstract implementation of a test.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/AVeryVeryGeneralTest.html" title="class in it.essepuntato.earmark.core.test">AVeryVeryGeneralTest</A></B></TD>
<TD> </TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/DocumentCreation.html" title="class in it.essepuntato.earmark.core.test">DocumentCreation</A></B></TD>
<TD>A test aims to check the creation of new EARMARK documents.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/DocumentStructure.html" title="class in it.essepuntato.earmark.core.test">DocumentStructure</A></B></TD>
<TD>A test aims to check the structure of EARMARK documents.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/EqualityTest.html" title="class in it.essepuntato.earmark.core.test">EqualityTest</A></B></TD>
<TD>A test aims to check the equivalence between EARMARK documents and their nodes.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/FrancescoPoggiTestOne.html" title="class in it.essepuntato.earmark.core.test">FrancescoPoggiTestOne</A></B></TD>
<TD> </TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/LaunchTests.html" title="class in it.essepuntato.earmark.core.test">LaunchTests</A></B></TD>
<TD>The class running the entire test suite.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/LoadTest.html" title="class in it.essepuntato.earmark.core.test">LoadTest</A></B></TD>
<TD>A test aims to check the loading of EARMARK documents.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/ModifyingDocument.html" title="class in it.essepuntato.earmark.core.test">ModifyingDocument</A></B></TD>
<TD>A test aims to check modifications of part of EARMARK documents.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/StoreTest.html" title="class in it.essepuntato.earmark.core.test">StoreTest</A></B></TD>
<TD>A test aims to check the storing of EARMARK documents.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD WIDTH="15%"><B><A HREF="../../../../../it/essepuntato/earmark/core/test/TestPattern.html" title="class in it.essepuntato.earmark.core.test">TestPattern</A></B></TD>
<TD> </TD>
</TR>
</TABLE>
<P>
<DL>
</DL>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Package</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-use.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../it/essepuntato/earmark/core/io/package-summary.html"><B>PREV PACKAGE</B></A>
<A HREF="../../../../../it/essepuntato/earmark/core/xml/package-summary.html"><B>NEXT PACKAGE</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?it/essepuntato/earmark/core/test/package-summary.html" target="_top"><B>FRAMES</B></A>
<A HREF="package-summary.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| essepuntato/EarmarkDataStructure | doc/it/essepuntato/earmark/core/test/package-summary.html | HTML | apache-2.0 | 9,750 |
/**
* @param {string} foo
* @param {string} [bar]
* @param {string=} baz some blurb
* @param {string} [quuz=Nope Nope Nope] more blurb
* @return {number}
*/
function allTheOptionalForms(foo, bar, baz, quuz) {
return 4;
};
/**
* @param {string} foo
* @param {string} [bar]
* @param {string=} baz some blurb
* @param {string} [quuz=Nope Nope Nope] more blurb
* @return {number}
*/
function allTheOptionalFormsWithDefaults(foo, bar, baz = 'Nope', quuz = 'Nope Nope Nope') {
return 4;
};
/**
* @param {number} foo
* @return {number}
*/
function optionalParamWithWrongJSdoc(foo = '4') {
return Number(foo);
}; | Kegsay/flow-jsdoc | tests/input/11-optional-params.js | JavaScript | apache-2.0 | 624 |
angular.module("emiolo").factory("apiInterceptor", function ($q) {
return {
request: function (config) {
return config;
},
responseError: function (rejection) {
console.log(rejection.config.url);
return $q.reject(rejection);
}
};
});
angular.module("emiolo").config(function ($httpProvider) {
$httpProvider.interceptors.push("apiInterceptor");
});
/*angular.module("emiolo").run(["$rootScope", "$location", function($rootScope, $location) {
$rootScope.$on("$routeChangeSuccess", function(userInfo) {
console.log(userInfo);
});
$rootScope.$on("$routeChangeError", function(event, current, previous, eventObj) {
if (eventObj.authenticated === false) {
$location.path("/login");
}
});
}]);*/ | bimnascimento/TesteConhecimento | src/main/webapp/resources/js/interceptors/apiInterceptor.js | JavaScript | apache-2.0 | 722 |
#
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
from util import OpenCenterTestCase
import opencenter.db.api as db_api
from opencenter.webapp import ast
api = db_api.api_from_models()
class ExpressionTestCase(OpenCenterTestCase):
def setUp(self):
self.nodes = {}
self.interfaces = {}
self.nodes['node-1'] = self._model_create('nodes', name='node-1')
self.interfaces['chef'] = self._model_create('filters', name='chef',
filter_type='interface',
expr='facts.x = true')
self.nodes['container'] = self._model_create('nodes', name='container')
def tearDown(self):
self._clean_all()
def _run_expression(self, node, expression, ns={}):
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression,
api=api)
root_node = builder.build()
return root_node.eval_node(node, symbol_table=ns)
def _simple_expression(self, expression):
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
return self._run_expression(node,
'nodes: %s' % expression)
def _invert_expression(self, expression, ns={}):
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression)
root_node = builder.build()
return root_node.invert()
def _eval_expression(self, expression, node_id, ns={}):
ephemeral_api = db_api.ephemeral_api_from_api(api)
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression,
api=ephemeral_api)
node = ephemeral_api._model_get_by_id('nodes', node_id)
builder.eval_node(node, symbol_table=ns)
new_node = ephemeral_api._model_get_by_id('nodes', node_id)
return new_node
def test_bad_interface(self):
expression = "ifcount('blahblah') > 0"
self.assertRaises(SyntaxError, self._run_expression,
self.nodes['node-1'], expression)
def test_zero_ifcount(self):
expression = "ifcount('chef') > 0"
result = self._run_expression(self.nodes['node-1'], expression)
self.logger.debug('Got result: %s' % result)
self.assertEquals(result, False)
def test_valid_ifcount(self):
expression = "ifcount('chef') > 0"
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='x', value=True)
result = self._run_expression(self.nodes['node-1'], expression)
self.logger.debug('Got result: %s' % result)
self.assertEquals(result, True)
def test_invert_equals(self):
expression = "facts.test = 'test'"
result = self._invert_expression(expression)
self.assertEquals(result, ["facts.test := 'test'"])
def test_invert_and(self):
expression = "facts.test='test' and facts.x='x'"
result = self._invert_expression(expression)
self.assertTrue("facts.test := 'test'" in result)
self.assertTrue("facts.x := 'x'" in result)
def test_invert_in(self):
expression = "'test' in facts.foo"
result = self._invert_expression(expression)
self.assertTrue("facts.foo := union(facts.foo, 'test')" in result)
self.assertEquals(len(result), 1)
def test_invert_not_in(self):
expression = "'test' !in facts.foo"
result = self._invert_expression(expression)
self.assertTrue("facts.foo := remove(facts.foo, 'test')" in result)
self.assertEquals(len(result), 1)
def test_eval_assign(self):
node_id = self.nodes['node-1']['id']
expression = "facts.parent_id := %d" % int(
self.nodes['container']['id'])
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts'].get('parent_id', None),
self.nodes['container']['id'])
def test_eval_union(self):
node_id = self.nodes['node-1']['id']
expression = "facts.woof := union(facts.woof, 3)"
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['woof'], [3])
def test_eval_remove(self):
node_id = self.nodes['node-1']['id']
fact = self._model_create('facts', node_id=node_id,
key='array_fact', value=[1, 2])
expression = 'facts.array_fact := remove(facts.array_fact, 2)'
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['array_fact'], [1])
# verify removing from none returns none. This is perhaps
# questionable, but is inline with the rest of the none/empty
# behavior. It could probably also return [], but enforce
# current behavior
self._model_delete('facts', fact['id'])
expression = 'facts.array_fact := remove(facts.array_fact, "test")'
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['array_fact'], None)
# verify removing from a non-list raises SyntaxError
self._model_create('facts', node_id=node_id,
key='array_fact', value='non-array')
expression = 'facts.array_fact := remove(facts.array_fact, "whoops")'
self.assertRaises(SyntaxError, self._eval_expression,
expression, node_id)
def test_eval_namespaces(self):
node_id = self.nodes['node-1']['id']
expression = "facts.parent_id := value"
ns = {"value": self.nodes['container']['id']}
node = self._eval_expression(expression, node_id, ns)
self.assertEquals(node['facts'].get('parent_id', None),
self.nodes['container']['id'])
# test the inverter and regularizer functions
def test_regularize_expression(self):
expression = 'foo=value'
regular = ast.regularize_expression(expression)
self.logger.debug('Got regularized expression "%s" for "%s"' %
(regular, expression))
self.assertEquals(regular, 'foo = value')
def test_inverted_expression(self):
expression = 'foo=value'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'foo := value')
def test_inverted_union(self):
expression = 'facts.test := union(facts.test, test)'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'test in facts.test')
def test_inverted_remove(self):
expression = 'facts.test := remove(facts.test, test)'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'test !in facts.test')
def test_concrete_expression(self):
expression = "foo = value"
ns = {"value": 3}
concrete = ast.concrete_expression(expression, ns)
self.logger.debug('Got concrete expression "%s" for "%s"' %
(concrete, expression))
# TODO(rpedde): This does not work like you think it does
# self.assertTrue('foo = 3', concrete)
# Using an assertEquals of the above fails
# self.assertEquals(concrete, 'foo = 3')
# But this works
self.assertEquals(concrete, 'foo = value')
def test_apply_expression(self):
expression = 'facts.test := union(facts.test, "test")'
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
# make sure we are applying into an empty fact
self.assertFalse('test' in node['facts'])
ast.apply_expression(self.nodes['node-1']['id'], expression, api)
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
self.assertTrue('test' in node['facts'])
self.assertEquals(node['facts']['test'], ['test'])
# FIXME: when we get types
def test_util_nth_with_none(self):
expression = 'nth(0, facts.test)' # nth of none?
res = self._simple_expression(expression)
self.assertIsNone(res)
# FIXME: when we get types
def test_util_nth_not_integer(self):
expression = 'nth("a", facts.test)' # raise with type error?
res = self._simple_expression(expression)
self.assertIsNone(res)
# FIXME: when we get types
def test_util_nth_index_out_of_range(self):
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=[1, 2, 3])
self.assertTrue(self._simple_expression('nth(2, facts.test)') is 3)
self.assertIsNone(self._simple_expression('nth(3, facts.test)'))
# FIXME: when we get types
def test_str_casting_none(self):
# this should fail, too, I think
self.assertIsNone(self._simple_expression('str(facts.test)'))
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=[1, 2, 3])
self.assertEquals(self._simple_expression('str(facts.test)'),
'[1, 2, 3]')
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=1)
self.assertEquals(self._simple_expression('str(facts.test)'), '1')
| rcbops/opencenter | tests/test_expressions.py | Python | apache-2.0 | 11,034 |
/*
* Copyright 2011-2014, by Vladimir Kostyukov and Contributors.
*
* This file is part of la4j project (http://la4j.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributor(s): -
*
*/
package org.la4j.io;
/**
 * An iterator over the entries of a matrix that, in addition to the linear
 * cursor inherited from {@code CursorIterator}, exposes the row and column
 * position of the current entry.  The {@code orElse*} / {@code andAlso*}
 * combinators pair this iterator with another matrix iterator via the
 * corresponding {@code JoinFunction} and wrap the result back into a matrix
 * iterator of the same shape.
 */
public abstract class MatrixIterator extends CursorIterator {

    protected final int rows;
    protected final int columns;

    public MatrixIterator(int rows, int columns) {
        this.rows = rows;
        this.columns = columns;
    }

    /** Row index of the entry this iterator currently points at. */
    public abstract int rowIndex();

    /** Column index of the entry this iterator currently points at. */
    public abstract int columnIndex();

    @Override
    protected int cursor() {
        // Row-major linear offset of the current (row, column) position.
        return rowIndex() * columns + columnIndex();
    }

    /** Combines with {@code other} via {@code orElse} using {@code JoinFunction.ADD}. */
    public MatrixIterator orElseAdd(final MatrixIterator other) {
        return new CursorToMatrixIterator(super.orElse(other, JoinFunction.ADD), rows, columns);
    }

    /** Combines with {@code other} via {@code orElse} using {@code JoinFunction.SUB}. */
    public MatrixIterator orElseSubtract(final MatrixIterator other) {
        return new CursorToMatrixIterator(super.orElse(other, JoinFunction.SUB), rows, columns);
    }

    /** Combines with {@code other} via {@code andAlso} using {@code JoinFunction.MUL}. */
    public MatrixIterator andAlsoMultiply(final MatrixIterator other) {
        return new CursorToMatrixIterator(super.andAlso(other, JoinFunction.MUL), rows, columns);
    }

    /** Combines with {@code other} via {@code andAlso} using {@code JoinFunction.DIV}. */
    public MatrixIterator andAlsoDivide(final MatrixIterator other) {
        return new CursorToMatrixIterator(super.andAlso(other, JoinFunction.DIV), rows, columns);
    }
}
| fernandoj92/mvca-parkinson | ltm-analysis/src/main/java/org/la4j/io/MatrixIterator.java | Java | apache-2.0 | 1,858 |
module Fog
  module AWS
    class ELB
      class Real
        require 'fog/aws/parsers/elb/empty'

        # Creates an application cookie stickiness policy on a load balancer.
        #
        # ==== Parameters
        # * lb_name<~String> - Name of the ELB
        # * policy_name<~String> - The name of the policy being created.
        #   The name must be unique within the set of policies for this Load Balancer.
        # * cookie_name<~String> - Name of the application cookie used for stickiness.
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'ResponseMetadata'<~Hash>:
        #       * 'RequestId'<~String> - Id of request
        def create_app_cookie_stickiness_policy(lb_name, policy_name, cookie_name)
          request(
            'Action'           => 'CreateAppCookieStickinessPolicy',
            'LoadBalancerName' => lb_name,
            'CookieName'       => cookie_name,
            'PolicyName'       => policy_name,
            :parser            => Fog::Parsers::AWS::ELB::Empty.new
          )
        end
      end

      class Mock
        def create_app_cookie_stickiness_policy(lb_name, policy_name, cookie_name)
          load_balancer = self.data[:load_balancers][lb_name]
          # Mirror the real API: unknown load balancer names raise NotFound.
          raise Fog::AWS::ELB::NotFound unless load_balancer

          load_balancer['Policies']['AppCookieStickinessPolicies'] << {
            'CookieName' => cookie_name, 'PolicyName' => policy_name
          }
          create_load_balancer_policy(lb_name, policy_name,
                                      'AppCookieStickinessPolicyType',
                                      'CookieName' => cookie_name)

          response = Excon::Response.new
          response.status = 200
          response.body = {
            'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }
          }
          response
        end
      end
    end
  end
end
| krobertson/knife-xenserver | vendor/fog/lib/fog/aws/requests/elb/create_app_cookie_stickiness_policy.rb | Ruby | apache-2.0 | 1,874 |
/*
* Copyright (c) 2010-2018 Stardog Union. <https://stardog.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.complexible.stardog.examples.api;
import java.util.Arrays;
import com.complexible.common.rdf.query.resultio.TextTableQueryResultWriter;
import com.complexible.stardog.ContextSets;
import com.complexible.stardog.Stardog;
import com.complexible.stardog.api.Connection;
import com.complexible.stardog.api.ConnectionConfiguration;
import com.complexible.stardog.api.SelectQuery;
import com.complexible.stardog.api.admin.AdminConnection;
import com.complexible.stardog.api.admin.AdminConnectionConfiguration;
import com.complexible.stardog.db.DatabaseOptions;
import com.complexible.stardog.icv.api.ICVConnection;
import com.complexible.stardog.prov.ProvVocabulary;
import com.stardog.stark.IRI;
import com.stardog.stark.Resource;
import com.stardog.stark.Values;
import com.stardog.stark.query.io.QueryResultWriters;
import com.stardog.stark.vocabs.DC;
import com.stardog.stark.vocabs.RDF;
import com.stardog.stark.vocabs.SKOS;
import static com.stardog.stark.Values.iri;
/**
* <p>Example code illustrating use of the built-in ontologies in Stardog, specifically for PROV and SKOS ontologies.</p>
*
* @author Evren Sirin
* @version 6.0
* @since 2.0
*/
public class ProvSkosExample {
	// Very simple publication vocabulary used in this example
	private static final String publicationNamespace = "urn:example:publication:";
	private static final IRI Book = iri(publicationNamespace, "Book");
	private static final IRI Fiction = iri(publicationNamespace, "Fiction");
	private static final IRI ScienceFiction = iri(publicationNamespace, "ScienceFiction");
	private static final IRI Author = iri(publicationNamespace, "Author");
	// Database Archetypes
	// ---
	// A Database Archetype is a built-in "type" of a Database; they include common axioms and constraints for
	// a particular type of data. The default archetypes built into Stardog are currently SKOS and PROV.
	public static void main(String[] args) throws Exception {
		// First need to initialize the Stardog instance which will automatically start the embedded server.
		Stardog aStardog = Stardog.builder().create();
		try {
			String db = "exampleProvSkos";
			// Create an `AdminConnection` to Stardog so we can manage databases
			try (AdminConnection dbms = AdminConnectionConfiguration.toEmbeddedServer()
			                                                        .credentials("admin", "admin")
			                                                        .connect()) {
				// Drop the example database if it exists so we can create it fresh
				if (dbms.list().contains(db)) {
					dbms.drop(db);
				}
				// Enable both `PROV` and `SKOS` ontologies for the current database
				dbms.newDatabase(db).set(DatabaseOptions.ARCHETYPES, Arrays.asList("skos", "prov")).create();
				// Obtain a reasoning-enabled connection to the database
				try (Connection aConn = ConnectionConfiguration
					                        .to(db)
					                        .credentials("admin", "admin")
					                        .reasoning(true)
					                        .connect()) {
					// First create some SKOS data and introduce an error (related and transitive broader relations should be disjoint)
					aConn.begin();
					aConn.add()
					     .statement(Book, RDF.TYPE, SKOS.Concept)
					     .statement(Fiction, RDF.TYPE, SKOS.Concept)
					     .statement(ScienceFiction, RDF.TYPE, SKOS.Concept)
					     .statement(Book, SKOS.narrower, Fiction)
					     .statement(ScienceFiction, SKOS.broader, Fiction)
					     .statement(ScienceFiction, SKOS.related, Book);
					aConn.commit();
					// Let's validate the SKOS data we just created. Note that SKOS inferences and constraints are automatically
					// included in the database because it uses the SKOS archetype. So there's no extra work we have to do;
					// we just insert our SKOS data and we're good to go.
					ICVConnection aValidator = aConn.as(ICVConnection.class);
					// For simplicity, we will just print that the data is not valid (explanations can be retrieved separately)
					System.out.println("The data " + (aValidator.isValid(ContextSets.DEFAULT_ONLY)
					                                  ? "is"
					                                  : "is NOT") + " valid!");
					// Let's remove the problematic triple and add some PROV data
					IRI The_War_of_the_Worlds = Values.iri("http://dbpedia.org/resource/The_War_of_the_Worlds");
					IRI H_G_Wells = Values.iri("http://dbpedia.org/resource/H._G._Wells");
					Resource attr = Values.bnode();
					aConn.begin();
					aConn.remove()
					     .statements(ScienceFiction, SKOS.related, Book);
					aConn.add()
					     .statement(The_War_of_the_Worlds, RDF.TYPE, ProvVocabulary.Entity)
					     .statement(The_War_of_the_Worlds, DC.subject, ScienceFiction)
					     .statement(The_War_of_the_Worlds, ProvVocabulary.qualifiedAttribution, attr)
					     .statement(attr, RDF.TYPE, ProvVocabulary.Attribution)
					     .statement(attr, ProvVocabulary.agent, H_G_Wells)
					     .statement(attr, ProvVocabulary.hadRole, Author);
					aConn.commit();
					// Now that the problematic triple is removed, the data will be valid
					System.out.println("The data " + (aValidator.isValid(ContextSets.DEFAULT_ONLY)
					                                  ? "is"
					                                  : "is NOT") + " valid!");
					// Finally run a query that will retrieve all fiction books and their authors.
					// This query uses both PROV and SKOS inferences that are automatically included with the archetypes.
					// Using `Book -[skos:narrower]-> Fiction <-[skos:broader]- ScienceFiction` triples, we infer `ScienceFiction -[skos:broaderTransitive]-> Book`
					// Using `The_War_of_the_Worlds -[prov:qualifiedAttribution]-> :_attr -[prov:agent]-> H_G_Wells`, we infer `The_War_of_the_Worlds -[prov:wasAttributedTo]-> H_G_Wells`
					// Also note that we don't need to define prefixes for skos and prov which are automatically registered
					// to the database when the archetypes are loaded
					SelectQuery aQuery = aConn.select(
						"PREFIX pub: <" + publicationNamespace + ">" +
						"PREFIX dc: <" + DC.NAMESPACE + ">" +
						"SELECT * WHERE {\n" +
						"  ?book dc:subject/skos:broaderTransitive pub:Book;\n" +
						"        prov:wasAttributedTo ?author\n" +
						"}");
					// Print the query results
					QueryResultWriters.write(aQuery.execute(), System.out, TextTableQueryResultWriter.FORMAT);
				}
				finally {
					// Always clean up the example database, even if something above threw
					if (dbms.list().contains(db)) {
						dbms.drop(db);
					}
				}
			}
		}
		finally {
			// Shut down the embedded server started by Stardog.builder().create()
			aStardog.shutdown();
		}
	}
}
| Complexible/stardog-examples | examples/api/main/src/com/complexible/stardog/examples/api/ProvSkosExample.java | Java | apache-2.0 | 7,161 |
# Dinophysis ovum Schutt SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Protozoa/Dinophyta/Dinophyceae/Dinophysiales/Dinophysiaceae/Dinophysis/Dinophysis ovum/README.md | Markdown | apache-2.0 | 180 |
package cn.bingoogolapple.refreshlayout.demo.ui.fragment;
import android.os.AsyncTask;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.TextView;
import cn.bingoogolapple.refreshlayout.BGARefreshLayout;
import cn.bingoogolapple.refreshlayout.demo.R;
import cn.bingoogolapple.refreshlayout.demo.ui.activity.MainActivity;
import cn.bingoogolapple.refreshlayout.demo.ui.activity.ViewPagerActivity;
/**
* 作者:王浩 邮件:bingoogolapple@gmail.com
* 创建时间:15/9/27 下午12:53
* 描述:
*/
public class StickyNavScrollViewFragment extends BaseFragment implements BGARefreshLayout.BGARefreshLayoutDelegate {
    private TextView mClickableLabelTv;

    @Override
    protected void initView(Bundle savedInstanceState) {
        setContentView(R.layout.fragment_scrollview_sticky_nav);
        mClickableLabelTv = getViewById(R.id.tv_scrollview_clickablelabel);
    }

    @Override
    protected void setListener() {
        // Reuse the view cached in initView() instead of a second lookup.
        // NOTE(review): assumes BaseFragment calls initView() before
        // setListener() -- confirm against the base class.
        mClickableLabelTv.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                showToast("点击了测试文本");
            }
        });
    }

    @Override
    protected void processLogic(Bundle savedInstanceState) {
    }

    @Override
    protected void onFirstUserVisible() {
    }

    @Override
    public void onBGARefreshLayoutBeginRefreshing(BGARefreshLayout refreshLayout) {
        simulateLoading(new Runnable() {
            @Override
            public void run() {
                ((ViewPagerActivity) getActivity()).endRefreshing();
                mClickableLabelTv.setText("加载最新数据完成");
            }
        });
    }

    @Override
    public boolean onBGARefreshLayoutBeginLoadingMore(BGARefreshLayout refreshLayout) {
        simulateLoading(new Runnable() {
            @Override
            public void run() {
                ((ViewPagerActivity) getActivity()).endLoadingMore();
                Log.i(TAG, "上拉加载更多完成");
            }
        });
        return true;
    }

    /**
     * Shows the loading dialog, sleeps for {@link MainActivity#LOADING_DURATION}
     * on a background thread to simulate network work, then dismisses the
     * dialog and runs {@code onDone} on the UI thread.  Extracted because the
     * refresh and load-more callbacks duplicated this AsyncTask verbatim.
     */
    private void simulateLoading(final Runnable onDone) {
        new AsyncTask<Void, Void, Void>() {
            @Override
            protected void onPreExecute() {
                showLoadingDialog();
            }

            @Override
            protected Void doInBackground(Void... params) {
                try {
                    Thread.sleep(MainActivity.LOADING_DURATION);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of just printing it.
                    Thread.currentThread().interrupt();
                }
                return null;
            }

            @Override
            protected void onPostExecute(Void aVoid) {
                dismissLoadingDialog();
                onDone.run();
            }
        }.execute();
    }
}
| snmlm/MyObject | GRTT/demo/src/main/java/cn/bingoogolapple/refreshlayout/demo/ui/fragment/StickyNavScrollViewFragment.java | Java | apache-2.0 | 3,157 |
//===--- SILGenPattern.cpp - Pattern matching codegen ---------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "patternmatch-silgen"
#include "Cleanup.h"
#include "ExitableFullExpr.h"
#include "Initialization.h"
#include "LValue.h"
#include "RValue.h"
#include "SILGen.h"
#include "Scope.h"
#include "swift/AST/ASTWalker.h"
#include "swift/AST/DiagnosticsSIL.h"
#include "swift/AST/Pattern.h"
#include "swift/AST/SILOptions.h"
#include "swift/AST/SubstitutionMap.h"
#include "swift/AST/Types.h"
#include "swift/Basic/Defer.h"
#include "swift/Basic/ProfileCounter.h"
#include "swift/Basic/STLExtras.h"
#include "swift/SIL/DynamicCasts.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILUndef.h"
#include "swift/SIL/TypeLowering.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
using namespace swift;
using namespace Lowering;
//===----------------------------------------------------------------------===//
// Pattern Utilities
//===----------------------------------------------------------------------===//
// TODO: These routines should probably be refactored into their own file since
// they have nothing to do with the implementation of SILGenPattern
// specifically.
/// Shallow-dump a pattern node one level deep for debug purposes.
static void dumpPattern(const Pattern *p, llvm::raw_ostream &os) {
if (!p) {
// We use null to represent a synthetic wildcard.
os << '_';
return;
}
p = p->getSemanticsProvidingPattern();
switch (p->getKind()) {
case PatternKind::Any:
os << '_';
return;
case PatternKind::Expr:
os << "<expr>";
return;
case PatternKind::Named:
os << "var " << cast<NamedPattern>(p)->getBoundName();
return;
case PatternKind::Tuple: {
unsigned numFields = cast<TuplePattern>(p)->getNumElements();
if (numFields == 0)
os << "()";
else if (numFields == 1)
os << "(_)";
else {
os << '(';
for (unsigned i = 0; i < numFields - 1; ++i)
os << ',';
os << ')';
}
return;
}
case PatternKind::Is:
os << "is ";
cast<IsPattern>(p)->getCastType()->print(os);
break;
case PatternKind::EnumElement: {
auto eep = cast<EnumElementPattern>(p);
os << '.' << eep->getName();
return;
}
case PatternKind::OptionalSome:
os << ".some";
return;
case PatternKind::Bool:
os << (cast<BoolPattern>(p)->getValue() ? "true" : "false");
return;
case PatternKind::Paren:
case PatternKind::Typed:
case PatternKind::Var:
llvm_unreachable("not semantic");
}
}
/// Is the given specializable pattern directly refutable, as opposed
/// to containing some refutability in a nested position?
/// Is the given specializable pattern refutable at its own top level, as
/// opposed to only containing refutability in nested positions?
static bool isDirectlyRefutablePattern(const Pattern *p) {
  if (!p)
    return false;

  switch (p->getKind()) {
  // Wildcard-like patterns are not specializable at all.
  case PatternKind::Any:
  case PatternKind::Named:
  case PatternKind::Expr:
    llvm_unreachable("non-specializable patterns");

  // A tuple pattern itself always matches; refutability can only come
  // from its element patterns.
  case PatternKind::Tuple:
    return false;

  // Type checks, enum cases, optional-some and bool patterns can all
  // fail to match, at least in theory.
  case PatternKind::Is:
  case PatternKind::EnumElement:
  case PatternKind::OptionalSome:
  case PatternKind::Bool:
    return true;

  // Look through simple wrapping sugar.
  case PatternKind::Paren:
  case PatternKind::Typed:
  case PatternKind::Var:
    return isDirectlyRefutablePattern(p->getSemanticsProvidingPattern());
  }
  llvm_unreachable("bad pattern");
}
const unsigned AlwaysRefutable = ~0U;
/// Return the number of times a pattern must be specialized
/// before becoming irrefutable.
///
/// \return AlwaysRefutable if the pattern is never irrefutable
/// Return the number of times a pattern must be specialized
/// before becoming irrefutable.
///
/// \param p the pattern to measure; must be non-null
/// \param n the running count accumulated so far
/// \return AlwaysRefutable if the pattern is never irrefutable
static unsigned getNumSpecializationsRecursive(const Pattern *p, unsigned n) {
  // n is partially here to make simple cases tail-recursive, but it
  // also gives us a simple opportunity to bail out early when we see
  // an always-refutable pattern.
  if (n == AlwaysRefutable) return n;

  switch (p->getKind()) {
  // True wildcards cost nothing: they match without specialization.
  case PatternKind::Any:
  case PatternKind::Named:
    return n;
  // Expressions are always-refutable wildcards.
  case PatternKind::Expr:
    return AlwaysRefutable;

  // Tuple and nominal-type patterns are not themselves directly refutable;
  // only their element patterns contribute to the count.
  case PatternKind::Tuple: {
    auto tuple = cast<TuplePattern>(p);
    for (auto &elt : tuple->getElements())
      n = getNumSpecializationsRecursive(elt.getPattern(), n);
    return n;
  }

  // isa and enum-element patterns are refutable, at least in theory:
  // each costs one specialization, plus whatever its subpattern needs.
  case PatternKind::Is: {
    auto isa = cast<IsPattern>(p);
    ++n;
    if (auto sub = isa->getSubPattern())
      return getNumSpecializationsRecursive(sub, n);
    return n;
  }
  case PatternKind::EnumElement: {
    auto en = cast<EnumElementPattern>(p);
    ++n;
    if (en->hasSubPattern())
      n = getNumSpecializationsRecursive(en->getSubPattern(), n);
    return n;
  }
  case PatternKind::OptionalSome: {
    // Sugar for .some(sub): one specialization plus the subpattern's cost.
    auto en = cast<OptionalSomePattern>(p);
    return getNumSpecializationsRecursive(en->getSubPattern(), n+1);
  }
  case PatternKind::Bool:
    return n+1;

  // Recur into simple wrapping patterns.
  case PatternKind::Paren:
  case PatternKind::Typed:
  case PatternKind::Var:
    return getNumSpecializationsRecursive(p->getSemanticsProvidingPattern(), n);
  }

  llvm_unreachable("bad pattern");
}
/// Return the number of times a pattern must be specialized
/// before becoming irrefutable.
///
/// \return AlwaysRefutable if the pattern is never irrefutable
/// Return the number of times a pattern must be specialized
/// before becoming irrefutable.  A null pattern is a synthetic
/// wildcard and needs no specialization at all.
///
/// \return AlwaysRefutable if the pattern is never irrefutable
static unsigned getNumSpecializations(const Pattern *p) {
  if (!p)
    return 0;
  return getNumSpecializationsRecursive(p, 0);
}
/// True if a pattern is a wildcard, meaning it matches any value. '_' and
/// variable patterns are wildcards. We also consider ExprPatterns to be
/// wildcards; we test the match expression as a guard outside of the normal
/// pattern clause matrix. When destructuring wildcard patterns, we also use
/// nullptr to represent newly-constructed wildcards.
/// True if a pattern is a wildcard, meaning it matches any value.  '_' and
/// variable patterns are wildcards.  ExprPatterns also count as wildcards:
/// their match expression is tested as a guard outside of the normal
/// pattern clause matrix.  When destructuring wildcard patterns, nullptr
/// represents a newly-constructed wildcard.
static bool isWildcardPattern(const Pattern *p) {
  if (!p)
    return true;

  switch (p->getKind()) {
  // Patterns that match unconditionally.
  case PatternKind::Any:
  case PatternKind::Expr:
  case PatternKind::Named:
    return true;

  // Patterns that actually inspect the value.
  case PatternKind::Tuple:
  case PatternKind::Is:
  case PatternKind::EnumElement:
  case PatternKind::OptionalSome:
  case PatternKind::Bool:
    return false;

  // Look through simple wrapping sugar.
  case PatternKind::Paren:
  case PatternKind::Typed:
  case PatternKind::Var:
    return isWildcardPattern(p->getSemanticsProvidingPattern());
  }
  llvm_unreachable("Unhandled PatternKind in switch.");
}
/// Check to see if the given pattern is a specializing pattern,
/// and return a semantic pattern for it.
/// Check to see if the given pattern is a specializing pattern, and
/// return a semantic pattern for it; wildcards (including the null
/// pattern, which behaves like an AnyPattern) yield null.
Pattern *getSpecializingPattern(Pattern *p) {
  if (!p)
    return nullptr;
  Pattern *semantic = p->getSemanticsProvidingPattern();
  if (isWildcardPattern(semantic))
    return nullptr;
  return semantic;
}
/// Given a pattern stored in a clause matrix, check to see whether it
/// can be specialized the same way as the first one.
/// Given a pattern stored in a clause matrix, check to see whether it
/// can be specialized the same way as the first one.
///
/// \param p the candidate pattern (may be null, meaning a wildcard)
/// \param first the already-chosen specializing pattern; must satisfy
///        getSpecializingPattern(first) == first
/// \return the semantic form of \p p if it specializes like \p first,
///         otherwise null
static Pattern *getSimilarSpecializingPattern(Pattern *p, Pattern *first) {
  // Empty entries are basically AnyPatterns.
  if (!p) return nullptr;

  assert(first && getSpecializingPattern(first) == first);

  // Map down to the semantics-providing pattern.
  p = p->getSemanticsProvidingPattern();

  // If the patterns are exactly the same kind, we might be able to treat them
  // similarly.
  switch (p->getKind()) {
  case PatternKind::EnumElement:
  case PatternKind::OptionalSome: {
    // If one is an OptionalSomePattern and one is an EnumElementPattern, then
    // they are the same since the OptionalSomePattern is just sugar for
    // .Some(x).
    if ((isa<OptionalSomePattern>(p) && isa<EnumElementPattern>(first)) ||
        (isa<OptionalSomePattern>(first) && isa<EnumElementPattern>(p)))
      return p;
    // Otherwise fall through to the exact-kind comparison below.
    LLVM_FALLTHROUGH;
  }
  case PatternKind::Tuple:
  case PatternKind::Named:
  case PatternKind::Any:
  case PatternKind::Bool:
  case PatternKind::Expr: {
    // These kinds are only similar to the same kind.
    if (p->getKind() == first->getKind())
      return p;
    return nullptr;
  }
  case PatternKind::Is: {
    auto pIs = cast<IsPattern>(p);
    // 'is' patterns are only similar to matches to the same type.
    if (auto firstIs = dyn_cast<IsPattern>(first)) {
      if (firstIs->getCastType()->isEqual(pIs->getCastType()))
        return p;
    }
    return nullptr;
  }

  case PatternKind::Paren:
  case PatternKind::Var:
  case PatternKind::Typed:
    llvm_unreachable("not semantic");
  }

  llvm_unreachable("Unhandled PatternKind in switch.");
}
//===----------------------------------------------------------------------===//
// SILGenPattern Emission
//===----------------------------------------------------------------------===//
namespace {
/// A row which we intend to specialize.
struct RowToSpecialize {
  /// The pattern from this row which we are specializing upon.
  swift::Pattern *Pattern;

  /// The index of the target row.
  unsigned RowIndex;

  /// Whether the row will be irrefutable after this specialization.
  bool Irrefutable;

  /// Profile count of the row we intend to specialize.
  ProfileCounter Count;
};
/// Changes that we wish to apply to a row which we have specialized.
struct SpecializedRow {
  /// The patterns which should replace the specialized pattern.
  SmallVector<Pattern *, 4> Patterns;

  /// The index of the target row.
  unsigned RowIndex;
};
/// An array of arguments.
using ArgArray = ArrayRef<ConsumableManagedValue>;
/// A callback which dispatches a failure case.
using FailureHandler =
std::function<void(SILLocation failureLoc)>;
/// A callback which redispatches a set of specialized rows.
using SpecializationHandler =
std::function<void(ArgArray values, ArrayRef<SpecializedRow> rowChanges,
const FailureHandler &contDest)>;
class ClauseMatrix;
class ClauseRow;
/// A class controlling the emission of the decision tree for a pattern match
/// statement (switch, if/let, or while/let condition).
///
/// The value cleanup rules during pattern match emission are complicated
/// because we're trying to allow as much borrowing/forwarding of
/// values as possible, so that we only need to actually copy/retain
/// values as late as possible. This means we end up having to do
/// a pretty delicate dance to manage the active set of cleanups.
///
/// We split values into three categories:
/// - TakeAlways (which are owned by the current portion of the
/// decision tree)
/// - CopyOnSuccess (which are not owned at all by the current
/// portion of the decision tree)
/// - TakeOnSuccess (which are owned only if the decision tree
/// actually passes all guards and enters a case block)
/// In particular, it is important that a TakeOnSuccess value not be
/// destructively modified unless success is assured.
///
/// Whenever the decision tree branches, it must forward values down
/// correctly. A TakeAlways value becomes TakeOnSuccess for all but
/// last branch of the tree.
///
/// Values should be forwarded down the decision tree with the
/// appropriate cleanups. CopyOnSuccess values should not have
/// attached cleanups. TakeAlways or TakeOnSuccess values should have
/// cleanups when their types are non-trivial. When a value is
/// forwarded down into a branch of the decision tree, its cleanup
/// might be deactivated within that subtree; to protect against the
/// cleanup being removed when this happens, the cleanup must be first
/// put in the PersistentlyActive state before the emission of the
/// subtree, then restored to its current state when the subtree is
/// finished.
///
/// The set of active cleanups should always be instantaneously
/// consistent: that is, there should always be exactly one cleanup
/// tracking a +1 value. It's okay to deactivate a cleanup for a
/// TakeOnSuccess value and then introduce new cleanups for all of its
/// subobjects. Jumps outside of the decision tree entirely will be
/// fine: the jump will simply destroy the subobjects instead of the
/// aggregate. However, jumps to somewhere else within the decision
/// tree require careful attention if the jump could lead to a
/// cleanups depth outside the subobject cleanups (causing them to be
/// run) but inside the old cleanup (in which case it will be
/// reactivated). Therefore, such borrowings must be "unforwarded"
/// during the emission of such jumps by disabling the new cleanups
/// and re-enabling the outer cleanup. It's okay to re-enable
/// cleanups like this because these jumps only occur when a branch of
/// the decision tree fails with a non-exhaustive match, which means
/// the value should have been passed down as TakeOnSuccess, and the
/// decision tree is not allowed to destructively modify objects that
/// are TakeOnSuccess when failure is still a possibility.
class PatternMatchEmission {
  PatternMatchEmission(const PatternMatchEmission &) = delete;
  PatternMatchEmission &operator=(const PatternMatchEmission &) = delete;

  SILGenFunction &SGF;

  /// PatternMatchStmt - The 'switch', or do-catch statement that we're emitting
  /// this pattern match for.
  Stmt *PatternMatchStmt;
  CleanupsDepth PatternMatchStmtDepth;

  /// Per-case shared destination blocks, keyed by CaseStmt; the bool records
  /// the hasFallthroughTo flag passed to initSharedCaseBlockDest.
  llvm::MapVector<CaseStmt*, std::pair<SILBasicBlock*, bool>> SharedCases;

  /// Address-only temporaries for pattern variables, populated by
  /// emitAddressOnlyInitialization and read by getAddressOnlyTemporary.
  llvm::DenseMap<VarDecl*, SILValue> Temporaries;

  /// Callback invoked for a row once its patterns have fully matched.
  using CompletionHandlerTy =
    llvm::function_ref<void(PatternMatchEmission &, ArgArray, ClauseRow &)>;
  CompletionHandlerTy CompletionHandler;

public:
  PatternMatchEmission(SILGenFunction &SGF, Stmt *S,
                       CompletionHandlerTy completionHandler)
    : SGF(SGF), PatternMatchStmt(S),
      CompletionHandler(completionHandler) {}

  /// For a non-implicit 'switch', prefer the subject expression's location
  /// when emitting diagnostics/debug info for the subject value.
  Optional<SILLocation> getSubjectLocationOverride(SILLocation loc) const {
    if (auto *Switch = dyn_cast<SwitchStmt>(PatternMatchStmt))
      if (!Switch->isImplicit())
        return SILLocation(Switch->getSubjectExpr());
    return None;
  }

  void emitDispatch(ClauseMatrix &matrix, ArgArray args,
                    const FailureHandler &failure);

  void initSharedCaseBlockDest(CaseStmt *caseBlock, bool hasFallthroughTo);

  void emitAddressOnlyAllocations();

  void emitAddressOnlyInitialization(VarDecl *dest, SILValue value);

  JumpDest getSharedCaseBlockDest(CaseStmt *caseStmt);

  void emitSharedCaseBlocks(llvm::function_ref<void(CaseStmt *)> bodyEmitter);

  void emitCaseBody(CaseStmt *caseBlock);

  /// Look up the buffer previously recorded for \p decl; asserts if the
  /// variable has no address-only temporary.
  SILValue getAddressOnlyTemporary(VarDecl *decl) {
    auto found = Temporaries.find(decl);
    assert(found != Temporaries.end());
    return found->second;
  }

private:
  void emitWildcardDispatch(ClauseMatrix &matrix, ArgArray args, unsigned row,
                            const FailureHandler &failure);

  void bindRefutablePatterns(const ClauseRow &row, ArgArray args,
                             const FailureHandler &failure);

  void emitGuardBranch(SILLocation loc, Expr *guard,
                       const FailureHandler &failure);

  void bindIrrefutablePatterns(const ClauseRow &row, ArgArray args,
                               bool forIrrefutableRow, bool hasMultipleItems);

  void bindVariable(Pattern *pattern, VarDecl *var,
                    ConsumableManagedValue value, bool isIrrefutable,
                    bool hasMultipleItems);

  void emitSpecializedDispatch(ClauseMatrix &matrix, ArgArray args,
                               unsigned &lastRow, unsigned column,
                               const FailureHandler &failure);
  void emitTupleObjectDispatch(ArrayRef<RowToSpecialize> rows,
                               ConsumableManagedValue src,
                               const SpecializationHandler &handleSpec,
                               const FailureHandler &failure);
  void emitTupleDispatch(ArrayRef<RowToSpecialize> rows,
                         ConsumableManagedValue src,
                         const SpecializationHandler &handleSpec,
                         const FailureHandler &failure);
  void emitIsDispatch(ArrayRef<RowToSpecialize> rows,
                      ConsumableManagedValue src,
                      const SpecializationHandler &handleSpec,
                      const FailureHandler &failure);
  void emitEnumElementObjectDispatch(ArrayRef<RowToSpecialize> rows,
                                     ConsumableManagedValue src,
                                     const SpecializationHandler &handleSpec,
                                     const FailureHandler &failure,
                                     ProfileCounter defaultCaseCount);
  void emitEnumElementDispatch(ArrayRef<RowToSpecialize> rows,
                               ConsumableManagedValue src,
                               const SpecializationHandler &handleSpec,
                               const FailureHandler &failure,
                               ProfileCounter defaultCaseCount);
  void emitBoolDispatch(ArrayRef<RowToSpecialize> rows,
                        ConsumableManagedValue src,
                        const SpecializationHandler &handleSpec,
                        const FailureHandler &failure);
};
/// A handle to a row in a clause matrix. Does not own memory; use of the
/// ClauseRow must be dominated by its originating ClauseMatrix.
///
/// TODO: This should be refactored into a more general formulation that uses a
/// child template pattern to inject our logic. This will then allow us to
/// inject "mock" objects in a unittest file.
class ClauseRow {
friend class ClauseMatrix;
Stmt *ClientData;
Pattern *CasePattern;
Expr *CaseGuardExpr;
/// HasFallthroughTo - True if there is a fallthrough into this case.
bool HasFallthroughTo;
/// The number of remaining specializations until this row becomes
/// irrefutable.
unsigned NumRemainingSpecializations;
SmallVector<Pattern*, 4> Columns;
public:
ClauseRow(Stmt *clientData, Pattern *CasePattern, Expr *CaseGuardExpr,
bool HasFallthroughTo)
: ClientData(clientData),
CasePattern(CasePattern),
CaseGuardExpr(CaseGuardExpr),
HasFallthroughTo(HasFallthroughTo) {
Columns.push_back(CasePattern);
if (CaseGuardExpr)
NumRemainingSpecializations = AlwaysRefutable;
else
NumRemainingSpecializations = getNumSpecializations(Columns[0]);
}
template<typename T>
T *getClientData() const {
return static_cast<T*>(ClientData);
}
Pattern *getCasePattern() const { return CasePattern; }
Expr *getCaseGuardExpr() const { return CaseGuardExpr; }
bool hasFallthroughTo() const { return HasFallthroughTo; }
ArrayRef<Pattern *> getColumns() const {
return Columns;
}
MutableArrayRef<Pattern *> getColumns() {
return Columns;
}
/// Specialize the given column to the given array of new columns.
///
/// Places the new columns using the column-specialization algorithm.
void specializeInPlace(unsigned column, ArrayRef<Pattern *> newColumns) {
// We assume that this method always removes one level of pattern
// and replacing it with its direct sub-patterns. Therefore, we
// can adjust the number of remaining specializations very easily.
//
// We don't need to test whether NumRemainingSpecializations is
// AlwaysRefutable before decrementing because we only ever test
// this value against zero.
if (isDirectlyRefutablePattern(Columns[column]))
--NumRemainingSpecializations;
if (newColumns.size() == 1) {
Columns[column] = newColumns[0];
} else if (newColumns.empty()) {
if (column + 1 == Columns.size()) {
Columns.pop_back();
} else {
Columns[column] = Columns.pop_back_val();
}
} else {
Columns[column] = newColumns[0];
Columns.append(newColumns.begin() + 1, newColumns.end());
}
}
/// Is this row currently irrefutable?
bool isIrrefutable() const {
  return NumRemainingSpecializations == 0;
}
/// Will this row be irrefutable after we single-step specialize the
/// given column?
bool isIrrefutableAfterSpecializing(unsigned column) const {
  // If exactly one refutable node remains, the row becomes irrefutable
  // iff that node is the one we are about to specialize away.
  if (NumRemainingSpecializations == 1)
    return isDirectlyRefutablePattern(Columns[column]);
  return NumRemainingSpecializations == 0;
}
// Iteration and indexing forward to the column storage, so a row can be
// used directly as a sequence of patterns.
Pattern * const *begin() const {
  return getColumns().begin();
}
Pattern * const *end() const {
  return getColumns().end();
}
Pattern **begin() {
  return getColumns().begin();
}
Pattern **end() {
  return getColumns().end();
}
Pattern *operator[](unsigned column) const {
  return getColumns()[column];
}
Pattern *&operator[](unsigned column) {
  return getColumns()[column];
}
/// The number of pattern columns currently in this row.
unsigned columns() const {
  return Columns.size();
}
LLVM_ATTRIBUTE_USED void dump() const { return print(llvm::errs()); }
void print(llvm::raw_ostream &out) const;
};
/// A clause matrix. This matrix associates subpattern rows to their
/// corresponding guard expressions, and associates destination basic block
/// and columns to their associated subject value.
class ClauseMatrix {
  // Non-owning pointers into ClauseRow storage owned by the caller.
  SmallVector<ClauseRow *, 4> Rows;
  ClauseMatrix(const ClauseMatrix &) = delete;
  ClauseMatrix &operator=(const ClauseMatrix &) = delete;
  ClauseMatrix() = default;
public:
  /// Create a clause matrix over the given pattern-row storage.
  /// The matrix does not own the rows; it simply references them, so the
  /// storage must outlive the matrix.
  explicit ClauseMatrix(MutableArrayRef<ClauseRow> rows) {
    for (ClauseRow &row : rows) {
      Rows.push_back(&row);
    }
  }
  // Move-only: copying would alias mutable row state.
  ClauseMatrix(ClauseMatrix &&) = default;
  ClauseMatrix &operator=(ClauseMatrix &&) = default;
  unsigned rows() const { return Rows.size(); }
  ClauseRow &operator[](unsigned row) {
    return *Rows[row];
  }
  const ClauseRow &operator[](unsigned row) const {
    return *Rows[row];
  }
  /// Destructively specialize the rows of this clause matrix. The
  /// rows should not be used in this matrix afterwards.
  ///
  /// \p newRows must be sorted by ascending row index; each referenced
  /// row is specialized in place and collected into the inner matrix.
  ClauseMatrix specializeRowsInPlace(unsigned column,
                                     ArrayRef<SpecializedRow> newRows) {
    assert(!newRows.empty() && "specializing for an empty set of rows?");
    ClauseMatrix innerMatrix;
    for (unsigned i = 0, e = newRows.size(); i != e; ++i) {
      assert((i == 0 || newRows[i - 1].RowIndex < newRows[i].RowIndex) &&
             "specialized rows are out of order?");
      ClauseRow *rowData = Rows[newRows[i].RowIndex];
      rowData->specializeInPlace(column, newRows[i].Patterns);
      innerMatrix.Rows.push_back(rowData);
    }
    return innerMatrix;
  }
  LLVM_ATTRIBUTE_USED void dump() const { return print(llvm::errs()); }
  void print(llvm::raw_ostream &out) const;
};
} // end anonymous namespace
/// Print this row as "[ p0 p1 ... ]" followed by a newline.
void ClauseRow::print(llvm::raw_ostream &out) const {
  out << "[ ";
  for (unsigned i = 0, e = columns(); i != e; ++i) {
    dumpPattern((*this)[i], out);
    out << ' ';
  }
  out << "]\n";
}
/// Print the matrix as a padded grid of patterns, one "[ ... ]" row per
/// clause, with every column padded to the width of its widest entry.
void ClauseMatrix::print(llvm::raw_ostream &out) const {
  if (Rows.empty()) { return; }
  // Tabulate the strings for each column, row-major.
  // We need to pad the strings out like a real matrix.
  SmallVector<std::vector<std::string>, 4> patternStrings;
  SmallVector<size_t, 4> columnSizes;
  patternStrings.resize(Rows.size());
  llvm::formatted_raw_ostream fos(out);
  // First pass: render every pattern to a string and record per-column
  // maximum widths. Rows may have differing column counts after
  // specialization, so columnSizes grows on demand.
  for (unsigned r = 0, rend = rows(); r < rend; ++r) {
    const ClauseRow &row = (*this)[r];
    auto &rowStrings = patternStrings[r];
    // Make sure that column sizes has an entry for all our columns.
    if (row.columns() > columnSizes.size())
      columnSizes.resize(row.columns(), 0);
    rowStrings.reserve(row.columns());
    for (unsigned c = 0, cend = row.columns(); c < cend; ++c) {
      rowStrings.push_back("");
      std::string &str = rowStrings.back();
      {
        llvm::raw_string_ostream ss(str);
        dumpPattern(row[c], ss);
        ss.flush();
      }
      columnSizes[c] = std::max(columnSizes[c], str.size());
    }
  }
  // Second pass: emit each row, padding each cell to its column width.
  for (unsigned r = 0, rend = rows(); r < rend; ++r) {
    fos << "[ ";
    for (unsigned c = 0, cend = patternStrings[r].size(); c < cend; ++c) {
      unsigned start = fos.getColumn();
      fos << patternStrings[r][c];
      fos.PadToColumn(start + columnSizes[c] + 1);
    }
    fos << "]\n";
  }
  fos.flush();
}
/// Forward a value down into a branch of the decision tree that may
/// fail and lead back to other branch(es).
///
/// Essentially equivalent to forwardIntoIrrefutableSubtree, except it
/// converts AlwaysTake to TakeOnSuccess.
///
/// \param scope - cleanup states pushed here are restored when the
///   scope ends, re-arming the outer cleanup on the failure path.
/// \returns the value to use inside the subtree, with a consumption
///   kind appropriate for a refutable branch.
static ConsumableManagedValue
forwardIntoSubtree(SILGenFunction &SGF, SILLocation loc,
                   CleanupStateRestorationScope &scope,
                   ConsumableManagedValue outerCMV) {
  ManagedValue outerMV = outerCMV.getFinalManagedValue();
  // No cleanup means nothing to forward; the value can be used as-is.
  if (!outerMV.hasCleanup()) return outerCMV;
  auto consumptionKind = outerCMV.getFinalConsumption();
  (void)consumptionKind;  // only used by the asserts below
  // If we have an object and it is take always, we need to borrow the value
  // since our subtree does not own the value.
  if (outerMV.getType().isObject()) {
    assert(consumptionKind == CastConsumptionKind::TakeAlways &&
           "Object without cleanup that is not take_always?!");
    return {outerMV.borrow(SGF, loc), CastConsumptionKind::BorrowAlways};
  }
  // Only address only values use TakeOnSuccess.
  assert(outerMV.getType().isAddressOnly(SGF.F) &&
         "TakeOnSuccess can only be used with address only values");
  assert((consumptionKind == CastConsumptionKind::TakeAlways ||
          consumptionKind == CastConsumptionKind::TakeOnSuccess) &&
         "non-+1 consumption with a cleanup?");
  // Deactivate the outer cleanup for the duration of the subtree; the
  // restoration scope re-activates it on exit (i.e. on match failure).
  scope.pushCleanupState(outerMV.getCleanup(),
                         CleanupState::PersistentlyActive);
  // Success means that we won't end up in the other branch,
  // but failure doesn't.
  return {outerMV, CastConsumptionKind::TakeOnSuccess};
}
/// Forward a value down into an irrefutable branch of the decision tree.
///
/// Essentially equivalent to forwardIntoSubtree, except it preserves
/// AlwaysTake consumption.  Because the branch cannot fail, no value
/// needs to be returned: the caller keeps using the outer value.
static void forwardIntoIrrefutableSubtree(SILGenFunction &SGF,
                                          CleanupStateRestorationScope &scope,
                                          ConsumableManagedValue outerCMV) {
  ManagedValue outerMV = outerCMV.getFinalManagedValue();
  // Nothing to do if there is no cleanup to manage.
  if (!outerMV.hasCleanup()) return;
  assert(outerCMV.getFinalConsumption() != CastConsumptionKind::CopyOnSuccess
         && "copy-on-success value with cleanup?");
  // Pin the cleanup active across the subtree; the restoration scope
  // undoes this when it is popped.
  scope.pushCleanupState(outerMV.getCleanup(),
                         CleanupState::PersistentlyActive);
}
namespace {
/// Common machinery for the argument forwarders below: owns the
/// cleanup-state restoration scope and exposes the two forwarding
/// primitives defined above.
class ArgForwarderBase {
  SILGenFunction &SGF;
  CleanupStateRestorationScope Scope;
protected:
  ArgForwarderBase(SILGenFunction &SGF) : SGF(SGF), Scope(SGF.Cleanups) {}
  /// Forward a value into a refutable subtree (may convert consumption).
  ConsumableManagedValue forward(ConsumableManagedValue value,
                                 SILLocation loc) {
    return forwardIntoSubtree(SGF, loc, Scope, value);
  }
  /// Forward a value into an irrefutable subtree (consumption preserved).
  void forwardIntoIrrefutable(ConsumableManagedValue value) {
    return forwardIntoIrrefutableSubtree(SGF, Scope, value);
  }
};
/// A RAII-ish object for forwarding a bunch of arguments down to one
/// side of a branch.
class ArgForwarder : private ArgForwarderBase {
  ArgArray OuterArgs;
  // Populated only on the non-final path; empty otherwise (see
  // didForwardArgs below).
  SmallVector<ConsumableManagedValue, 4> ForwardedArgsBuffer;
public:
  ArgForwarder(SILGenFunction &SGF, ArgArray outerArgs, SILLocation loc,
               bool isFinalUse)
    : ArgForwarderBase(SGF), OuterArgs(outerArgs) {
    // If this is a final use along this path, we don't need to change
    // any of the args. However, we do need to make sure that the
    // cleanup state gets restored later, because being final on this
    // path isn't the same as being final along all paths.
    if (isFinalUse) {
      for (auto &outerArg : outerArgs)
        forwardIntoIrrefutable(outerArg);
    } else {
      ForwardedArgsBuffer.reserve(outerArgs.size());
      for (auto &outerArg : outerArgs)
        ForwardedArgsBuffer.push_back(forward(outerArg, loc));
    }
  }
  /// The arguments to use inside the subtree: the forwarded copies when
  /// forwarding occurred, or the untouched outer args on the final path.
  ArgArray getForwardedArgs() const {
    if (didForwardArgs()) return ForwardedArgsBuffer;
    return OuterArgs;
  }
private:
  // NOTE(review): assumes a non-final use always has at least one outer
  // arg, so an empty buffer uniquely identifies the final-use path.
  bool didForwardArgs() const { return !ForwardedArgsBuffer.empty(); }
};
/// A RAII-ish object for forwarding a bunch of arguments down to one
/// side of a branch, replacing one column with the arguments produced
/// by specializing it.
class SpecializedArgForwarder : private ArgForwarderBase {
  ArgArray OuterArgs;
  bool IsFinalUse;
  SmallVector<ConsumableManagedValue, 4> ForwardedArgsBuffer;
public:
  /// Construct a specialized arg forwarder for a (locally) successful
  /// dispatch.
  ///
  /// \param column - the index of the outer argument being replaced
  /// \param newArgs - the values produced by specializing that column
  SpecializedArgForwarder(SILGenFunction &SGF, ArgArray outerArgs,
                          unsigned column, ArgArray newArgs, SILLocation loc,
                          bool isFinalUse)
    : ArgForwarderBase(SGF), OuterArgs(outerArgs), IsFinalUse(isFinalUse) {
    assert(column < outerArgs.size());
    ForwardedArgsBuffer.reserve(outerArgs.size() - 1 + newArgs.size());
    // Place the new columns with the column-specialization algorithm:
    //  - place the first new column (if any) in the same position as the
    //    original column;
    //  - if there are no new columns, and the removed column was not
    //    the last column, the last column is moved to the removed column.
    // This must mirror ClauseRow::specializeInPlace exactly, since the
    // argument array is parallel to the matrix columns.
    // The outer columns before the specialized column.
    for (unsigned i = 0, e = column; i != e; ++i)
      ForwardedArgsBuffer.push_back(forward(outerArgs[i], loc));
    // The specialized column.
    if (!newArgs.empty()) {
      ForwardedArgsBuffer.push_back(newArgs[0]);
      newArgs = newArgs.slice(1);
    } else if (column + 1 < outerArgs.size()) {
      // Move the last outer column into the hole.
      ForwardedArgsBuffer.push_back(forward(outerArgs.back(), loc));
      outerArgs = outerArgs.slice(0, outerArgs.size() - 1);
    }
    // The rest of the outer columns.
    for (unsigned i = column + 1, e = outerArgs.size(); i != e; ++i)
      ForwardedArgsBuffer.push_back(forward(outerArgs[i], loc));
    // The rest of the new args.
    ForwardedArgsBuffer.append(newArgs.begin(), newArgs.end());
  }
  /// Returns the forward arguments. The new rows are placed using
  /// the column-specialization algorithm.
  ArgArray getForwardedArgs() const {
    return ForwardedArgsBuffer;
  }
private:
  /// Forward one outer argument, choosing the irrefutable flavor on the
  /// final use (new args are passed through untouched by the caller).
  ConsumableManagedValue forward(ConsumableManagedValue value,
                                 SILLocation loc) {
    if (IsFinalUse) {
      ArgForwarderBase::forwardIntoIrrefutable(value);
      return value;
    } else {
      return ArgForwarderBase::forward(value, loc);
    }
  }
};
/// A RAII-ish object for undoing the forwarding of cleanups along a
/// failure path.
class ArgUnforwarder {
  SILGenFunction &SGF;
  CleanupStateRestorationScope Scope;
public:
  ArgUnforwarder(SILGenFunction &SGF) : SGF(SGF), Scope(SGF.Cleanups) {}
  /// Does the given operand need unforwarding on failure?  Only values
  /// whose cleanup was handed off under take-on-success do.
  static bool requiresUnforwarding(SILGenFunction &SGF,
                                   ConsumableManagedValue operand) {
    return operand.hasCleanup() &&
           operand.getFinalConsumption()
             == CastConsumptionKind::TakeOnSuccess;
  }
  /// Given that an aggregate was divided into a set of borrowed
  /// values which are now being tracked individually, temporarily
  /// disable all of the borrowed-value cleanups and restore the
  /// aggregate cleanup.
  void unforwardBorrowedValues(ConsumableManagedValue aggregate,
                               ArgArray subobjects) {
    if (!requiresUnforwarding(SGF, aggregate))
      return;
    // Re-arm the aggregate's cleanup and dormant-ize the subobject
    // cleanups so the value is destroyed exactly once.
    Scope.pushCleanupState(aggregate.getCleanup(), CleanupState::Active);
    for (auto &subobject : subobjects) {
      if (subobject.hasCleanup())
        Scope.pushCleanupState(subobject.getCleanup(), CleanupState::Dormant);
    }
  }
};
} // end anonymous namespace
/// Return the dispatchable length of the given column: the number of
/// consecutive rows, starting at \p firstRow, whose pattern in \p column
/// specializes the same way as the first row's.  Returns 0 when the
/// first row's pattern is a wildcard (non-specializing).
static unsigned getConstructorPrefix(const ClauseMatrix &matrix,
                                     unsigned firstRow, unsigned column) {
  assert(firstRow < matrix.rows() &&
         "getting column constructor prefix in matrix with no rows remaining?");
  // A wildcard at the head means there is nothing to dispatch on.
  auto head = getSpecializingPattern(matrix[firstRow][column]);
  if (!head)
    return 0;
  // Count how far the run of similar specializing patterns extends.
  unsigned count = 1;
  while (firstRow + count < matrix.rows() &&
         getSimilarSpecializingPattern(matrix[firstRow + count][column], head))
    ++count;
  return count;
}
/// Select the "necessary column", Maranget's term for the column
/// most likely to give an optimal decision tree.
///
/// \return None if we didn't find a meaningful necessary column
static Optional<unsigned>
chooseNecessaryColumn(const ClauseMatrix &matrix, unsigned firstRow) {
  assert(firstRow < matrix.rows() &&
         "choosing necessary column of matrix with no rows remaining?");
  // Trivial cases: with no columns there is nothing to pick, and a
  // single column is necessary exactly when it is not a wildcard.
  unsigned numColumns = matrix[firstRow].columns();
  if (numColumns <= 1) {
    if (numColumns == 1 && !isWildcardPattern(matrix[firstRow][0]))
      return 0;
    return None;
  }
  // Use the "constructor prefix" heuristic from Maranget to pick the
  // necessary column: the column with the most pattern nodes prior to a
  // wildcard is a good and cheap-to-calculate proxy for an optimal
  // decision tree.  Patterns dissimilar to the head pattern are ignored.
  Optional<unsigned> best;
  unsigned bestPrefix = 0;
  for (unsigned c = 0; c != numColumns; ++c) {
    unsigned prefix = getConstructorPrefix(matrix, firstRow, c);
    if (prefix > bestPrefix) {
      bestPrefix = prefix;
      best = c;
    }
  }
  return best;
}
/// Recursively emit a decision tree from the given pattern matrix.
///
/// Each iteration either emits one wildcard row or specializes a prefix
/// of rows on a necessary column; firstRow advances accordingly until
/// every row has been emitted or the outer failure is taken.
void PatternMatchEmission::emitDispatch(ClauseMatrix &clauses, ArgArray args,
                                        const FailureHandler &outerFailure) {
  if (clauses.rows() == 0) {
    // An empty matrix cannot match anything.
    SGF.B.createUnreachable(SILLocation(PatternMatchStmt));
    return;
  }
  unsigned firstRow = 0;
  while (true) {
    // If there are no rows remaining, then we fail.
    if (firstRow == clauses.rows()) {
      outerFailure(clauses[clauses.rows() - 1].getCasePattern());
      return;
    }
    // Try to find a "necessary column".
    Optional<unsigned> column = chooseNecessaryColumn(clauses, firstRow);
    // Emit the subtree in its own scope.
    ExitableFullExpr scope(SGF, CleanupLocation(PatternMatchStmt));
    // Failure inside the subtree branches to the continuation block,
    // unless no rows remain (then it chains to the outer failure).
    // NOTE: firstRow is captured by reference and may be advanced by
    // emitSpecializedDispatch before this runs.
    auto innerFailure = [&](SILLocation loc) {
      if (firstRow == clauses.rows()) return outerFailure(loc);
      SGF.Cleanups.emitBranchAndCleanups(scope.getExitDest(), loc);
    };
    // If there is no necessary column, just emit the first row.
    if (!column) {
      unsigned wildcardRow = firstRow++;
      emitWildcardDispatch(clauses, args, wildcardRow, innerFailure);
    } else {
      // Otherwise, specialize on the necessary column.
      emitSpecializedDispatch(clauses, args, firstRow, column.getValue(),
                              innerFailure);
    }
    assert(!SGF.B.hasValidInsertionPoint());
    SILBasicBlock *contBB = scope.exit();
    // If the continuation block has no uses, ...
    if (contBB->pred_empty()) {
      // If we have no more rows to emit, clear the IP and destroy the
      // continuation block.
      if (firstRow == clauses.rows()) {
        SGF.B.clearInsertionPoint();
        SGF.eraseBasicBlock(contBB);
        return;
      }
      // Otherwise, if there is no fallthrough, then the next row is
      // unreachable: emit a dead code diagnostic.
      if (!clauses[firstRow].hasFallthroughTo()) {
        SourceLoc Loc;
        bool isDefault = false;
        if (auto *S = clauses[firstRow].getClientData<Stmt>()) {
          Loc = S->getStartLoc();
          if (auto *CS = dyn_cast<CaseStmt>(S))
            isDefault = CS->isDefault();
        } else {
          Loc = clauses[firstRow].getCasePattern()->getStartLoc();
        }
        SGF.SGM.diagnose(Loc, diag::unreachable_case, isDefault);
      }
    }
  }
}
/// Emit the decision tree for a row containing only non-specializing
/// patterns.
///
/// \param matrixArgs - appropriate for the entire clause matrix, not
///   just this one row
void PatternMatchEmission::emitWildcardDispatch(ClauseMatrix &clauses,
                                                ArgArray matrixArgs,
                                                unsigned row,
                                                const FailureHandler &failure) {
  // Get appropriate arguments.  The case pattern serves as the
  // SILLocation for the forwarding.
  ArgForwarder forwarder(SGF, matrixArgs, clauses[row].getCasePattern(),
                         /*isFinalUse*/ row + 1 == clauses.rows());
  ArgArray args = forwarder.getForwardedArgs();
  // Bind all the refutable patterns first.  We want to do this first
  // so that we can treat the rest of the bindings as inherently
  // successful if we don't have a guard.  This approach assumes that
  // expression patterns can't refer to bound arguments.
  bindRefutablePatterns(clauses[row], args, failure);
  // Okay, the rest of the bindings are irrefutable if there isn't a guard.
  Expr *guardExpr = clauses[row].getCaseGuardExpr();
  bool hasGuard = guardExpr != nullptr;
  assert(!hasGuard || !clauses[row].isIrrefutable());
  auto stmt = clauses[row].getClientData<Stmt>();
  assert(isa<CaseStmt>(stmt));
  // dyn_cast is statically safe here given the assert just above.
  auto *caseStmt = dyn_cast<CaseStmt>(stmt);
  // Multiple label items (or an incoming fallthrough) mean the bound
  // values must be forwarded into a shared case block.
  bool hasMultipleItems =
      caseStmt && (clauses[row].hasFallthroughTo() ||
                   caseStmt->getCaseLabelItems().size() > 1);
  // Bind the rest of the patterns.
  bindIrrefutablePatterns(clauses[row], args, !hasGuard, hasMultipleItems);
  // Emit the guard branch, if it exists.
  if (guardExpr) {
    this->emitGuardBranch(guardExpr, guardExpr, failure);
  }
  // Enter the row.
  CompletionHandler(*this, args, clauses[row]);
  assert(!SGF.B.hasValidInsertionPoint());
}
/// Bind all the refutable patterns in the given row.
///
/// Only expression patterns are handled here: each one binds its match
/// variable and emits a guard branch on its match expression.  Wildcard
/// and named patterns are deferred to bindIrrefutablePatterns.
void PatternMatchEmission::
bindRefutablePatterns(const ClauseRow &row, ArgArray args,
                      const FailureHandler &failure) {
  assert(row.columns() == args.size());
  for (unsigned i = 0, e = args.size(); i != e; ++i) {
    if (!row[i]) // We use null patterns to mean artificial AnyPatterns
      continue;
    Pattern *pattern = row[i]->getSemanticsProvidingPattern();
    switch (pattern->getKind()) {
    // Irrefutable patterns that we'll handle in a later pass.
    case PatternKind::Any:
      break;
    case PatternKind::Named:
      break;
    case PatternKind::Expr: {
      ExprPattern *exprPattern = cast<ExprPattern>(pattern);
      // Point debug info at the subject value rather than the pattern.
      DebugLocOverrideRAII LocOverride{SGF.B,
                                       getSubjectLocationOverride(pattern)};
      FullExpr scope(SGF.Cleanups, CleanupLocation(pattern));
      bindVariable(pattern, exprPattern->getMatchVar(), args[i],
                   /*isForSuccess*/ false, /* hasMultipleItems */ false);
      emitGuardBranch(pattern, exprPattern->getMatchExpr(), failure);
      break;
    }
    default:
      llvm_unreachable("bad pattern kind");
    }
  }
}
/// Bind all the irrefutable patterns in the given row, which is nothing
/// but wildcard patterns.
///
/// Note that forIrrefutableRow can be true even if !row.isIrrefutable()
/// because we might have already bound all the refutable parts.
void PatternMatchEmission::bindIrrefutablePatterns(const ClauseRow &row,
                                                   ArgArray args,
                                                   bool forIrrefutableRow,
                                                   bool hasMultipleItems) {
  assert(row.columns() == args.size());
  for (unsigned i = 0, e = args.size(); i != e; ++i) {
    if (!row[i]) // We use null patterns to mean artificial AnyPatterns
      continue;
    Pattern *pattern = row[i]->getSemanticsProvidingPattern();
    switch (pattern->getKind()) {
    case PatternKind::Any: // We can just drop Any values.
      break;
    case PatternKind::Expr: // Ignore expression patterns, which we should have
                            // bound in an earlier pass.
      break;
    case PatternKind::Named: {
      // Bind the named variable to the corresponding argument value.
      NamedPattern *named = cast<NamedPattern>(pattern);
      bindVariable(pattern, named->getDecl(), args[i], forIrrefutableRow,
                   hasMultipleItems);
      break;
    }
    default:
      llvm_unreachable("bad pattern kind");
    }
  }
}
/// Should we take ownership of the managed value when binding it,
/// given whether the binding site is irrefutable?
static bool shouldTake(ConsumableManagedValue value, bool isIrrefutable) {
  switch (value.getFinalConsumption()) {
  case CastConsumptionKind::TakeAlways: return true;
  // take_on_success only takes once the match can no longer fail.
  case CastConsumptionKind::TakeOnSuccess: return isIrrefutable;
  case CastConsumptionKind::CopyOnSuccess: return false;
  case CastConsumptionKind::BorrowAlways: return false;
  }
  llvm_unreachable("bad consumption kind");
}
/// Bind a variable into the current scope, either forwarding (+1) or
/// copying the value into the variable's initialization depending on the
/// consumption kind and refutability of the binding site.
void PatternMatchEmission::bindVariable(Pattern *pattern, VarDecl *var,
                                        ConsumableManagedValue value,
                                        bool isIrrefutable,
                                        bool hasMultipleItems) {
  // If this binding is one of multiple patterns, each individual binding
  // will just be let, and then the chosen value will get forwarded into
  // a var box in the final shared case block.
  bool immutable = var->isLet() || hasMultipleItems;
  // Initialize the variable value.
  InitializationPtr init = SGF.emitInitializationForVarDecl(var, immutable);
  // Do not emit debug descriptions at this stage.
  //
  // If there are multiple let bindings, the value is forwarded to the case
  // block via a phi. Emitting duplicate debug values for the incoming values
  // leads to bogus debug info -- we must emit the debug value only on the phi.
  //
  // If there's only one let binding, we still want to wait until we can nest
  // the scope for the case body under the scope for the pattern match.
  init->setEmitDebugValueOnInit(false);
  auto mv = value.getFinalManagedValue();
  if (shouldTake(value, isIrrefutable)) {
    // We own the value (or will on success): forward it in.
    mv.forwardInto(SGF, pattern, init.get());
  } else {
    // Borrow/copy semantics: leave the original untouched.
    mv.copyInto(SGF, pattern, init.get());
  }
}
/// Evaluate a guard expression and, if it returns false, branch to
/// the given destination.
///
/// On return, the insertion point is in the "guard passed" block.
void PatternMatchEmission::emitGuardBranch(SILLocation loc, Expr *guard,
                                           const FailureHandler &failure) {
  SILBasicBlock *falseBB = SGF.B.splitBlockForFallthrough();
  SILBasicBlock *trueBB = SGF.B.splitBlockForFallthrough();
  // Emit the match test.
  SILValue testBool;
  {
    // Scope the guard's temporaries so their cleanups run before the branch.
    FullExpr scope(SGF.Cleanups, CleanupLocation(guard));
    testBool = SGF.emitRValueAsSingleValue(guard).getUnmanagedValue();
  }
  // Extract the i1 from the Bool struct.
  auto i1Value = SGF.emitUnwrapIntegerResult(loc, testBool);
  SGF.B.createCondBranch(loc, i1Value, trueBB, falseBB);
  // Emit the failure path first, then continue in the success block.
  SGF.B.setInsertionPoint(falseBB);
  failure(loc);
  SGF.B.setInsertionPoint(trueBB);
}
/// Perform specialized dispatch on the particular column.
///
/// On return, \p lastRow has been advanced past the prefix of rows that
/// shared the same semantic kind of pattern in \p column.
///
/// \param matrixArgs - appropriate for the entire clause matrix, not
///   just these specific rows
/// \param lastRow - in/out: the first row to specialize; updated to the
///   first row NOT consumed by this dispatch
void PatternMatchEmission::emitSpecializedDispatch(ClauseMatrix &clauses,
                                                   ArgArray matrixArgs,
                                                   unsigned &lastRow,
                                                   unsigned column,
                                               const FailureHandler &failure) {
  // HEY! LISTEN!
  //
  // When a pattern specializes its submatrix (like an 'as' or enum element
  // pattern), it *must* chain the FailureHandler for its inner submatrixes
  // through our `failure` handler if it manipulates any cleanup state.
  // Here's an example from emitEnumElementDispatch:
  //
  //       const FailureHandler *innerFailure = &failure;
  //       FailureHandler specializedFailure = [&](SILLocation loc) {
  //         ArgUnforwarder unforwarder(SGF);
  //         unforwarder.unforwardBorrowedValues(src, origCMV);
  //         failure(loc);
  //       };
  //
  //       if (ArgUnforwarder::requiresUnforwarding(src))
  //         innerFailure = &specializedFailure;
  //
  // Note that the inner failure handler either is exactly the outer failure
  // or performs the work necessary to clean up after the failed specialized
  // decision tree immediately before chaining onto the outer failure.
  // It is specifically NOT correct to do something like this:
  //
  //       /* DON'T DO THIS */
  //       ExitableFullExpr scope;
  //       FailureHandler innerFailure = [&](SILLocation loc) {
  //         emitBranchAndCleanups(scope, loc);
  //       };
  //       ...
  //       /* DON'T DO THIS */
  //       scope.exit();
  //       ArgUnforwarder unforwarder(SGF);
  //       unforwarder.unforwardBorrowedValues(src, origCMV);
  //       failure(loc);
  //       /* DON'T DO THIS */
  //
  // since the cleanup state changes performed by ArgUnforwarder will
  // occur too late.
  unsigned firstRow = lastRow;
  // Collect the rows to specialize.
  SmallVector<RowToSpecialize, 4> rowsToSpecialize;
  auto addRowToSpecialize = [&](Pattern *pattern, unsigned rowIndex) {
    assert(getSpecializingPattern(clauses[rowIndex][column]) == pattern);
    bool irrefutable = clauses[rowIndex].isIrrefutableAfterSpecializing(column);
    auto caseBlock = clauses[rowIndex].getClientData<CaseStmt>();
    ProfileCounter count = ProfileCounter();
    if (caseBlock) {
      count = SGF.loadProfilerCount(caseBlock);
    }
    rowsToSpecialize.push_back({pattern, rowIndex, irrefutable, count});
  };
  ProfileCounter defaultCaseCount = ProfileCounter();
  Pattern *firstSpecializer = getSpecializingPattern(clauses[firstRow][column]);
  assert(firstSpecializer && "specializing unspecializable row?");
  addRowToSpecialize(firstSpecializer, firstRow);
  // Take a prefix of rows that share the same semantic kind of pattern.
  for (++lastRow; lastRow != clauses.rows(); ++lastRow) {
    Pattern *specializer =
      getSimilarSpecializingPattern(clauses[lastRow][column], firstSpecializer);
    if (!specializer) {
      // The first dissimilar row acts as the "default" for profiling.
      auto caseBlock = clauses[lastRow].getClientData<CaseStmt>();
      if (caseBlock) {
        defaultCaseCount = SGF.loadProfilerCount(caseBlock);
      }
      break;
    }
    addRowToSpecialize(specializer, lastRow);
  }
  assert(lastRow - firstRow == rowsToSpecialize.size());
  // Forward just the specialized argument right now.  We'll forward
  // the rest in the handler.
  bool isFinalUse = (lastRow == clauses.rows());
  ArgForwarder outerForwarder(SGF, matrixArgs[column], firstSpecializer,
                              isFinalUse);
  auto arg = outerForwarder.getForwardedArgs()[0];
  SpecializationHandler handler = [&](ArrayRef<ConsumableManagedValue> newArgs,
                                      ArrayRef<SpecializedRow> rows,
                                      const FailureHandler &innerFailure) {
    // These two operations must follow the same rules for column
    // placement because 'arguments' are parallel to the matrix columns.
    // We use the column-specialization algorithm described in
    // specializeInPlace.
    ClauseMatrix innerClauses = clauses.specializeRowsInPlace(column, rows);
    SpecializedArgForwarder innerForwarder(SGF, matrixArgs, column, newArgs,
                                           firstSpecializer, isFinalUse);
    ArgArray innerArgs = innerForwarder.getForwardedArgs();
    emitDispatch(innerClauses, innerArgs, innerFailure);
  };
  // Dispatch on the semantic kind of the specializing pattern.
  switch (firstSpecializer->getKind()) {
  case PatternKind::Any:
  case PatternKind::Expr:
  case PatternKind::Named:
    llvm_unreachable("cannot specialize wildcard pattern");
  case PatternKind::Paren:
  case PatternKind::Typed:
  case PatternKind::Var:
    llvm_unreachable("non-semantic pattern kind!");
  case PatternKind::Tuple:
    return emitTupleDispatch(rowsToSpecialize, arg, handler, failure);
  case PatternKind::Is:
    return emitIsDispatch(rowsToSpecialize, arg, handler, failure);
  case PatternKind::EnumElement:
  case PatternKind::OptionalSome:
    return emitEnumElementDispatch(rowsToSpecialize, arg, handler, failure,
                                   defaultCaseCount);
  case PatternKind::Bool:
    return emitBoolDispatch(rowsToSpecialize, arg, handler, failure);
  }
  llvm_unreachable("bad pattern kind");
}
/// Given that we've broken down a source value into this subobject,
/// and that we were supposed to use the given consumption rules on
/// it, construct an appropriate managed value.
///
/// Borrow/copy kinds get an unmanaged wrapper (no cleanup); take kinds
/// get a cleanup so the subobject is destroyed if the match unwinds.
static ConsumableManagedValue
getManagedSubobject(SILGenFunction &SGF, SILValue value,
                    const TypeLowering &valueTL,
                    CastConsumptionKind consumption) {
  switch (consumption) {
  case CastConsumptionKind::BorrowAlways:
  case CastConsumptionKind::CopyOnSuccess:
    return {ManagedValue::forUnmanaged(value), consumption};
  case CastConsumptionKind::TakeAlways:
  case CastConsumptionKind::TakeOnSuccess:
    return {SGF.emitManagedRValueWithCleanup(value, valueTL), consumption};
  }
  llvm_unreachable("covered switch");
}
/// Re-abstract a subobject to its substituted formal type if its lowered
/// representation differs from the expected one; otherwise return it
/// unchanged.
static ConsumableManagedValue
emitReabstractedSubobject(SILGenFunction &SGF, SILLocation loc,
                          ConsumableManagedValue value,
                          const TypeLowering &valueTL,
                          AbstractionPattern abstraction,
                          CanType substFormalType) {
  // Return if there's no abstraction.  (The first condition is just
  // a fast path.)
  if (value.getType().getASTType() == substFormalType ||
      value.getType() == SGF.getLoweredType(substFormalType))
    return value;
  // Otherwise, turn to +1 and re-abstract.
  ManagedValue mv = SGF.getManagedValue(loc, value);
  return ConsumableManagedValue::forOwned(
      SGF.emitOrigToSubstValue(loc, mv, abstraction, substFormalType));
}
/// Specialized tuple dispatch for object (non-address) values: the tuple
/// is destructured with a single SIL instruction and each element keeps
/// the source's consumption kind.
void PatternMatchEmission::emitTupleObjectDispatch(
    ArrayRef<RowToSpecialize> rows, ConsumableManagedValue src,
    const SpecializationHandler &handleCase,
    const FailureHandler &outerFailure) {
  // Construct the specialized rows: each tuple pattern's elements become
  // the row's new columns.
  SmallVector<SpecializedRow, 4> specializedRows;
  specializedRows.resize(rows.size());
  for (unsigned i = 0, e = rows.size(); i != e; ++i) {
    specializedRows[i].RowIndex = rows[i].RowIndex;
    auto pattern = cast<TuplePattern>(rows[i].Pattern);
    for (auto &elt : pattern->getElements()) {
      specializedRows[i].Patterns.push_back(elt.getPattern());
    }
  }
  auto firstPat = rows[0].Pattern;
  SILLocation loc = firstPat;
  // Final consumption here will be either BorrowAlways or TakeAlways.
  ManagedValue v = src.getFinalManagedValue();
  SmallVector<ConsumableManagedValue, 8> destructured;
  SGF.B.emitDestructureValueOperation(
      loc, v, [&](unsigned index, ManagedValue v) {
        destructured.push_back({v, src.getFinalConsumption()});
      });
  // Since we did all of our work at +0, we just send down the outer failure.
  handleCase(destructured, specializedRows, outerFailure);
}
/// Perform specialized dispatch for tuples.
///
/// This is simple; all the tuples have the same structure.  Object
/// tuples (and loadable address tuples, after a load_borrow) go through
/// emitTupleObjectDispatch; address-only tuples are broken down
/// element-by-element with tuple_element_addr below.
void PatternMatchEmission::
emitTupleDispatch(ArrayRef<RowToSpecialize> rows, ConsumableManagedValue src,
                  const SpecializationHandler &handleCase,
                  const FailureHandler &outerFailure) {
  auto firstPat = rows[0].Pattern;
  SILLocation loc = firstPat;
  // If our source is an address that is loadable, perform a load_borrow.
  if (src.getType().isAddress() && src.getType().isLoadable(SGF.F)) {
    // We should only see take_on_success if we have a base type that is
    // address only.
    assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
           "Can only occur if base type is address only?!");
    src = {SGF.B.createLoadBorrow(loc, src.getFinalManagedValue()),
           CastConsumptionKind::BorrowAlways};
  }
  // Then if we have an object...
  if (src.getType().isObject()) {
    // Make sure that if we have a copy_on_success, non-trivial value that we
    // do not have a value with @owned ownership.
    assert((!src.getType().isTrivial(SGF.F) ||
            src.getFinalConsumption() != CastConsumptionKind::CopyOnSuccess ||
            src.getOwnershipKind() != ValueOwnershipKind::Owned) &&
           "@owned value without cleanup + copy_on_success");
    // We should only see take_on_success if we have a base type that is
    // address only.
    assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
           "Can only occur if base type is address only?!");
    // Then perform a forward or reborrow destructure on the object.
    return emitTupleObjectDispatch(rows, src, handleCase, outerFailure);
  }
  // Construct the specialized rows: each tuple pattern's elements become
  // the row's new columns.
  SmallVector<SpecializedRow, 4> specializedRows;
  specializedRows.resize(rows.size());
  for (unsigned i = 0, e = rows.size(); i != e; ++i) {
    specializedRows[i].RowIndex = rows[i].RowIndex;
    auto pattern = cast<TuplePattern>(rows[i].Pattern);
    for (auto &elt : pattern->getElements()) {
      specializedRows[i].Patterns.push_back(elt.getPattern());
    }
  }
  // At this point we know that we must have an address only type, since we
  // would have loaded it earlier.
  SILValue v = src.getFinalManagedValue().forward(SGF);
  assert(v->getType().isAddressOnly(SGF.F) &&
         "Loadable values were handled earlier");
  // The destructured tuple that we pass off to our sub pattern. This may
  // contain values that we have performed a load_borrow from subsequent to
  // "performing a SILGenPattern borrow".
  SmallVector<ConsumableManagedValue, 4> subPatternArgs;
  // An array of values that have the same underlying values as our
  // subPatternArgs, but may have a different cleanup and final consumption
  // kind. These are at +1 and are unforwarded.
  SmallVector<ConsumableManagedValue, 4> unforwardArgs;
  // Break down the values.
  auto tupleSILTy = v->getType();
  for (unsigned i : range(tupleSILTy.castTo<TupleType>()->getNumElements())) {
    SILType fieldTy = tupleSILTy.getTupleElementType(i);
    auto &fieldTL = SGF.getTypeLowering(fieldTy);
    SILValue member = SGF.B.createTupleElementAddr(loc, v, i, fieldTy);
    // Inline constructor.
    auto memberCMV = ([&]() -> ConsumableManagedValue {
      if (!fieldTL.isLoadable()) {
        // If we have an address only type, just get the managed
        // subobject.
        return getManagedSubobject(SGF, member, fieldTL,
                                   src.getFinalConsumption());
      }
      // If we have a loadable type, then we have a loadable sub-type of the
      // underlying address only tuple.
      auto memberMV = ManagedValue::forUnmanaged(member);
      switch (src.getFinalConsumption()) {
      case CastConsumptionKind::TakeAlways: {
        // If our original source value is take always, perform a load [take].
        return {SGF.B.createLoadTake(loc, memberMV),
                CastConsumptionKind::TakeAlways};
      }
      case CastConsumptionKind::TakeOnSuccess: {
        // If we have a take_on_success, we propagate down the member as a +1
        // address value and do not load.
        //
        // DISCUSSION: Unforwarding objects violates ownership since
        // unforwarding relies on forwarding an aggregate into subvalues and
        // on failure disabling the subvalue cleanups and re-enabling the
        // cleanup for the aggregate (which was already destroyed). So we are
        // forced to use an address here so we can forward/unforward this
        // value. We maintain our invariants that loadable types are always
        // loaded and are never take on success by passing down to our
        // subPattern a borrow of this value. See below.
        return getManagedSubobject(SGF, member, fieldTL,
                                   src.getFinalConsumption());
      }
      case CastConsumptionKind::CopyOnSuccess: {
        // We translate copy_on_success => borrow_always.
        // (The redundant re-declaration of memberMV that used to shadow the
        // outer one here has been removed; the values were identical.)
        return {SGF.B.createLoadBorrow(loc, memberMV),
                CastConsumptionKind::BorrowAlways};
      }
      case CastConsumptionKind::BorrowAlways: {
        llvm_unreachable(
            "Borrow always can only occur along object only code paths");
      }
      }
      llvm_unreachable("covered switch");
    }());
    // If we aren't loadable, add to the unforward array.
    if (!fieldTL.isLoadable()) {
      unforwardArgs.push_back(memberCMV);
    } else {
      // If we have a loadable type that we didn't load, we must have had a
      // take_on_success address. This means that our parent cleanup is
      // currently persistently active, so we needed to propagate an active +1
      // cleanup on our address so we can take if we actually succeed. That
      // being said, we do not want to pass objects with take_on_success into
      // the actual subtree. So we perform a load_borrow at this point. This
      // will ensure that we will always finish the end_borrow before we jumped
      // to a failure point, but at the same time the original +1 value will be
      // appropriately destroyed/forwarded around.
      if (memberCMV.getType().isAddress()) {
        unforwardArgs.push_back(memberCMV);
        auto val = memberCMV.getFinalManagedValue();
        memberCMV = {SGF.B.createLoadBorrow(loc, val),
                     CastConsumptionKind::BorrowAlways};
      }
    }
    subPatternArgs.push_back(memberCMV);
  }
  // Maybe revert to the original cleanups during failure branches.
  const FailureHandler *innerFailure = &outerFailure;
  FailureHandler specializedFailure = [&](SILLocation loc) {
    ArgUnforwarder unforwarder(SGF);
    unforwarder.unforwardBorrowedValues(src, unforwardArgs);
    outerFailure(loc);
  };
  if (ArgUnforwarder::requiresUnforwarding(SGF, src))
    innerFailure = &specializedFailure;
  // Recurse.
  handleCase(subPatternArgs, specializedRows, *innerFailure);
}
/// Return the canonical type that the 'is' pattern in \p row casts to.
static CanType getTargetType(const RowToSpecialize &row) {
  auto *isPattern = cast<IsPattern>(row.Pattern);
  return isPattern->getCastType()->getCanonicalType();
}
/// Prepare \p src for use as the operand of a checked cast from
/// \p sourceType to \p targetType.
///
/// Reabstracts the value to the most general abstraction pattern when its
/// current representation differs from it, and materializes the value into a
/// temporary when the cast cannot be performed with scalar checked-cast
/// instructions. On the fast path \p src is returned unchanged; otherwise the
/// result is always a +1 take_always value.
///
/// NOTE(review): \p borrowedValues is never appended to in this body — confirm
/// whether callers rely on it staying empty on this path.
static ConsumableManagedValue
emitCastOperand(SILGenFunction &SGF, SILLocation loc,
                ConsumableManagedValue src, CanType sourceType,
                CanType targetType,
                SmallVectorImpl<ConsumableManagedValue> &borrowedValues) {
  // Reabstract to the most general abstraction, and put it into a
  // temporary if necessary.

  // Figure out if we need the value to be in a temporary.
  bool requiresAddress = !canUseScalarCheckedCastInstructions(SGF.SGM.M,
                                                              sourceType,
                                                              targetType);

  AbstractionPattern abstraction = SGF.SGM.M.Types.getMostGeneralAbstraction();
  auto &srcAbstractTL = SGF.getTypeLowering(abstraction, sourceType);

  bool hasAbstraction = (src.getType() != srcAbstractTL.getLoweredType());

  // Fast path: no re-abstraction required.
  if (!hasAbstraction && (!requiresAddress || src.getType().isAddress())) {
    return src;
  }

  // We know that we must have a loadable type at this point since address only
  // types do not need reabstraction and are addresses. So we should have exited
  // above already.
  assert(src.getType().isLoadable(SGF.F) &&
         "Should have a loadable value at this point");

  // Since our finalValue is loadable, we could not have had a take_on_success
  // here.
  assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
         "Loadable types can not have take_on_success?!");

  // Only set up a temporary (and an SGFContext pointing at it) when the cast
  // has to be performed on an address.
  std::unique_ptr<TemporaryInitialization> init;
  SGFContext ctx;
  if (requiresAddress) {
    init = SGF.emitTemporary(loc, srcAbstractTL);
    ctx = SGFContext(init.get());
  }

  // This will always produce a +1 take always value no matter what src's
  // ownership is.
  ManagedValue finalValue = SGF.getManagedValue(loc, src);
  if (hasAbstraction) {
    // Reabstract the value if we need to. This should produce a +1 value as
    // well.
    finalValue =
        SGF.emitSubstToOrigValue(loc, finalValue, abstraction, sourceType, ctx);
  }
  assert(finalValue.isPlusOne(SGF));

  // If we at this point do not require an address, return final value. We know
  // that it is a +1 take always value.
  if (!requiresAddress) {
    return ConsumableManagedValue::forOwned(finalValue);
  }

  // At this point, we know that we have a non-address only type since we are
  // materializing an object into memory and addresses can not be stored into
  // memory.
  SGF.B.emitStoreValueOperation(loc, finalValue.forward(SGF),
                                init->getAddress(),
                                StoreOwnershipQualifier::Init);
  init->finishInitialization(SGF);

  // We know that either our initial value was already take_always or we made a
  // copy of the underlying value. In either case, we now have a take_always +1
  // value.
  return ConsumableManagedValue::forOwned(init->getManagedAddress());
}
/// Perform specialized dispatch for a sequence of IsPatterns.
///
/// Emits a conditional checked-cast branch on the shared cast operand; on
/// success, recurses into the sub-patterns of each row, and on failure chains
/// into the outer failure handler (unforwarding any borrowed values first when
/// required).
void PatternMatchEmission::emitIsDispatch(ArrayRef<RowToSpecialize> rows,
                                          ConsumableManagedValue src,
                                          const SpecializationHandler &handleCase,
                                          const FailureHandler &failure) {
  CanType sourceType = rows[0].Pattern->getType()->getCanonicalType();
  CanType targetType = getTargetType(rows[0]);

  // Make any abstraction modifications necessary for casting.
  SmallVector<ConsumableManagedValue, 4> borrowedValues;
  ConsumableManagedValue operand = emitCastOperand(
      SGF, rows[0].Pattern, src, sourceType, targetType, borrowedValues);

  // Emit the 'is' check.

  // Build the specialized-rows array: every row specializes on the same
  // target type, contributing its sub-pattern.
  SmallVector<SpecializedRow, 4> specializedRows;
  specializedRows.reserve(rows.size());
  for (auto &row : rows) {
    assert(getTargetType(row) == targetType
           && "can only specialize on one type at a time");
    auto is = cast<IsPattern>(row.Pattern);
    specializedRows.push_back({});
    specializedRows.back().RowIndex = row.RowIndex;
    specializedRows.back().Patterns.push_back(is->getSubPattern());
  }

  SILLocation loc = rows[0].Pattern;

  ConsumableManagedValue castOperand = operand.asBorrowedOperand(SGF, loc);

  // Chain inner failures onto the outer failure.
  const FailureHandler *innerFailure = &failure;
  FailureHandler specializedFailure = [&](SILLocation loc) {
    ArgUnforwarder unforwarder(SGF);
    unforwarder.unforwardBorrowedValues(src, borrowedValues);
    failure(loc);
  };
  if (ArgUnforwarder::requiresUnforwarding(SGF, src))
    innerFailure = &specializedFailure;

  // Perform a conditional cast branch.
  SGF.emitCheckedCastBranch(
      loc, castOperand, sourceType, targetType, SGFContext(),
      // Success block: recurse.
      [&](ManagedValue castValue) {
        handleCase(ConsumableManagedValue::forOwned(castValue), specializedRows,
                   *innerFailure);
        assert(!SGF.B.hasValidInsertionPoint() && "did not end block");
      },
      // Failure block: branch out to the continuation block.
      [&](Optional<ManagedValue> mv) { (*innerFailure)(loc); }, rows[0].Count);
}
namespace {

/// Accumulated per-case information gathered while grouping clause-matrix
/// rows by enum element.
struct CaseInfo {
  /// The specialized rows (sub-patterns) that dispatch into this case.
  SmallVector<SpecializedRow, 2> SpecializedRows;
  /// The first pattern that matched this case; used as a source location.
  Pattern *FirstMatcher;
  /// True if at least one row for this case matches irrefutably.
  bool Irrefutable = false;
};

/// Destination basic blocks (plus per-case bookkeeping) for switching over
/// the cases of an enum.
class CaseBlocks {
  // These vectors are completely parallel, but the switch instructions want
  // only the first two, so we split them up.
  SmallVector<std::pair<EnumElementDecl *, SILBasicBlock *>, 4> CaseBBs;
  SmallVector<ProfileCounter, 4> CaseCounts;
  SmallVector<CaseInfo, 4> CaseInfos;
  // Non-null only when the enum cannot be assumed exhaustive.
  SILBasicBlock *DefaultBB = nullptr;

public:
  /// Create destination blocks for switching over the cases in an enum
  /// defined by \p rows.
  CaseBlocks(SILGenFunction &SGF,
             ArrayRef<RowToSpecialize> rows,
             CanType sourceType,
             SILBasicBlock *curBB);

  ArrayRef<std::pair<EnumElementDecl *, SILBasicBlock *>>
  getCaseBlocks() const {
    return CaseBBs;
  }

  ArrayRef<ProfileCounter> getCounts() const { return CaseCounts; }

  SILBasicBlock *getDefaultBlock() const { return DefaultBB; }

  /// Invoke \p op once per unique case, pairing each (element, block) with
  /// its accumulated CaseInfo.
  void forEachCase(llvm::function_ref<void(EnumElementDecl *,
                                           SILBasicBlock *,
                                           const CaseInfo &)> op) const {
    for_each(CaseBBs, CaseInfos,
             [op](std::pair<EnumElementDecl *, SILBasicBlock *> casePair,
                  const CaseInfo &info) {
               op(casePair.first, casePair.second, info);
             });
  }

  /// True if any case reached by this switch still has refutable rows.
  bool hasAnyRefutableCase() const {
    return llvm::any_of(CaseInfos, [](const CaseInfo &info) {
      return !info.Irrefutable;
    });
  }
};

} // end anonymous namespace
/// Group \p rows by enum element, creating one destination block per unique
/// element (in first-occurrence order), and decide whether a default block is
/// needed based on the enum's effective exhaustiveness.
CaseBlocks::CaseBlocks(
    SILGenFunction &SGF,
    ArrayRef<RowToSpecialize> rows,
    CanType sourceType,
    SILBasicBlock *curBB) {

  CaseBBs.reserve(rows.size());
  CaseInfos.reserve(rows.size());
  CaseCounts.reserve(rows.size());

  auto enumDecl = sourceType.getEnumOrBoundGenericEnum();

  // Map each enum element to its index in the parallel vectors; the first row
  // mentioning an element creates the entry and the destination block.
  llvm::SmallDenseMap<EnumElementDecl *, unsigned, 16> caseToIndex;
  for (auto &row : rows) {
    EnumElementDecl *formalElt;
    Pattern *subPattern = nullptr;
    if (auto eep = dyn_cast<EnumElementPattern>(row.Pattern)) {
      formalElt = eep->getElementDecl();
      subPattern = eep->getSubPattern();
    } else {
      auto *osp = cast<OptionalSomePattern>(row.Pattern);
      formalElt = osp->getElementDecl();
      subPattern = osp->getSubPattern();
    }
    assert(formalElt->getParentEnum() == enumDecl);

    unsigned index = CaseInfos.size();
    auto insertionResult = caseToIndex.insert({formalElt, index});
    if (!insertionResult.second) {
      // Element already seen: reuse its block/info.
      index = insertionResult.first->second;
    } else {
      // First occurrence: create the destination block and bookkeeping.
      curBB = SGF.createBasicBlockAfter(curBB);
      CaseBBs.push_back({formalElt, curBB});
      CaseInfos.push_back(CaseInfo());
      CaseInfos.back().FirstMatcher = row.Pattern;
      CaseCounts.push_back(row.Count);
    }
    assert(caseToIndex[formalElt] == index);
    assert(CaseBBs[index].first == formalElt);

    auto &info = CaseInfos[index];
    info.Irrefutable = (info.Irrefutable || row.Irrefutable);
    info.SpecializedRows.push_back(SpecializedRow());
    auto &specRow = info.SpecializedRows.back();
    specRow.RowIndex = row.RowIndex;

    // Use the row pattern, if it has one.
    if (subPattern) {
      specRow.Patterns.push_back(subPattern);
      // It's also legal to write:
      //   case .Some { ... }
      // which is an implicit wildcard.
    } else {
      specRow.Patterns.push_back(nullptr);
    }
  }

  assert(CaseBBs.size() == CaseInfos.size());

  // Check to see if the enum may have values beyond the cases we can see
  // at compile-time. This includes future cases (for resilient enums) and
  // random values crammed into C enums.
  bool canAssumeExhaustive =
      enumDecl->isEffectivelyExhaustive(SGF.getModule().getSwiftModule(),
                                        SGF.F.getResilienceExpansion());
  if (canAssumeExhaustive) {
    // Check that Sema didn't let any cases slip through.
    canAssumeExhaustive = llvm::all_of(enumDecl->getAllElements(),
                                       [&](const EnumElementDecl *elt) {
                                         return caseToIndex.count(elt);
                                       });
  }
  if (!canAssumeExhaustive)
    DefaultBB = SGF.createBasicBlockAfter(curBB);
}
/// Perform specialized dispatch for a sequence of EnumElementPattern or an
/// OptionalSomePattern.
///
/// Object (non-address) variant: lowers to a 'switch_enum' terminator whose
/// destination blocks receive the payload as a basic-block argument (owned or
/// guaranteed depending on the source's consumption kind).
void PatternMatchEmission::emitEnumElementObjectDispatch(
    ArrayRef<RowToSpecialize> rows, ConsumableManagedValue src,
    const SpecializationHandler &handleCase, const FailureHandler &outerFailure,
    ProfileCounter defaultCastCount) {
  assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
         "SIL ownership does not support TakeOnSuccess");

  CanType sourceType = rows[0].Pattern->getType()->getCanonicalType();

  // Collect the cases and specialized rows.
  CaseBlocks blocks{SGF, rows, sourceType, SGF.B.getInsertionBB()};

  SILLocation loc = PatternMatchStmt;
  loc.setDebugLoc(rows[0].Pattern);
  // BorrowAlways sources produce guaranteed block arguments; everything else
  // produces owned arguments.
  bool isPlusZero =
      src.getFinalConsumption() == CastConsumptionKind::BorrowAlways;
  SILValue srcValue = src.getFinalManagedValue().forward(SGF);
  SGF.B.createSwitchEnum(loc, srcValue, blocks.getDefaultBlock(),
                         blocks.getCaseBlocks(), blocks.getCounts(),
                         defaultCastCount);

  // Okay, now emit all the cases.
  blocks.forEachCase([&](EnumElementDecl *elt, SILBasicBlock *caseBB,
                         const CaseInfo &caseInfo) {
    SILLocation loc = caseInfo.FirstMatcher;
    auto &specializedRows = caseInfo.SpecializedRows;

    SGF.B.setInsertionPoint(caseBB);

    // We're in conditionally-executed code; enter a scope.
    Scope scope(SGF.Cleanups, CleanupLocation::get(loc));

    // Create a BB argument or 'unchecked_take_enum_data_addr'
    // instruction to receive the enum case data if it has any.
    SILType eltTy;
    bool hasNonVoidAssocValue = false;
    bool hasAssocValue = elt->hasAssociatedValues();
    if (hasAssocValue) {
      eltTy = src.getType().getEnumElementType(elt, SGF.SGM.M,
                                               SGF.getTypeExpansionContext());
      hasNonVoidAssocValue = !eltTy.getASTType()->isVoid();
    }

    ConsumableManagedValue eltCMV, origCMV;

    // Void (i.e. empty) cases.
    //
    if (!hasNonVoidAssocValue) {
      // Inline constructor.
      eltCMV = [&]() -> ConsumableManagedValue {
        // If we have an associated value, rather than no payload at all, we
        // still need to create the argument. So do that instead of creating the
        // empty-tuple. Otherwise, we need to create undef or the empty-tuple.
        if (hasAssocValue) {
          return {SGF.B.createOwnedPhiArgument(eltTy),
                  CastConsumptionKind::TakeAlways};
        }

        // Otherwise, try to avoid making an empty tuple value if it's obviously
        // going to be ignored. This assumes that we won't even try to touch the
        // value in such cases, although we may touch the cleanup (enough to see
        // that it's not present).
        bool hasNonAny =
            llvm::any_of(specializedRows, [&](const SpecializedRow &row) {
              auto *p = row.Patterns[0];
              return p && !isa<AnyPattern>(p->getSemanticsProvidingPattern());
            });
        if (hasNonAny) {
          return ConsumableManagedValue::forUnmanaged(SGF.emitEmptyTuple(loc));
        }

        return ConsumableManagedValue::forUnmanaged(
            SILUndef::get(SGF.SGM.Types.getEmptyTupleType(), SGF.F));
      }();

      // Okay, specialize on the argument.
    } else {
      auto *eltTL = &SGF.getTypeLowering(eltTy);

      SILValue eltValue;
      if (isPlusZero) {
        origCMV = {SGF.B.createGuaranteedTransformingTerminatorArgument(eltTy),
                   CastConsumptionKind::BorrowAlways};
      } else {
        origCMV = {SGF.B.createOwnedPhiArgument(eltTy),
                   CastConsumptionKind::TakeAlways};
      }

      eltCMV = origCMV;

      // If the payload is boxed, project it.
      if (elt->isIndirect() || elt->getParentEnum()->isIndirect()) {
        ManagedValue boxedValue =
            SGF.B.createProjectBox(loc, origCMV.getFinalManagedValue(), 0);
        eltTL = &SGF.getTypeLowering(boxedValue.getType());
        if (eltTL->isLoadable()) {
          boxedValue = SGF.B.createLoadBorrow(loc, boxedValue);
          eltCMV = {boxedValue, CastConsumptionKind::BorrowAlways};
        } else {
          // Otherwise, we have an address only payload and we use
          // copy on success instead.
          eltCMV = {boxedValue, CastConsumptionKind::CopyOnSuccess};
        }
      }

      // Reabstract to the substituted type, if needed.
      CanType substEltTy =
          sourceType
              ->getTypeOfMember(SGF.SGM.M.getSwiftModule(), elt,
                                elt->getArgumentInterfaceType())
              ->getCanonicalType();

      AbstractionPattern origEltTy =
          (elt->getParentEnum()->isOptionalDecl()
               ? AbstractionPattern(substEltTy)
               : SGF.SGM.M.Types.getAbstractionPattern(elt));

      // If we reabstracted, we may have a +1 value returned. We are ok with
      // that as long as it is TakeAlways.
      eltCMV = emitReabstractedSubobject(SGF, loc, eltCMV, *eltTL, origEltTy,
                                         substEltTy);
    }

    handleCase(eltCMV, specializedRows, outerFailure);
    assert(!SGF.B.hasValidInsertionPoint() && "did not end block");
  });

  // Emit the default block if we needed one.
  if (SILBasicBlock *defaultBB = blocks.getDefaultBlock()) {
    SGF.B.setInsertionPoint(defaultBB);
    // The default block still receives the switched value as an argument,
    // matching the ownership of the case blocks.
    if (isPlusZero) {
      SGF.B.createGuaranteedTransformingTerminatorArgument(src.getType());
    } else {
      SGF.B.createOwnedPhiArgument(src.getType());
    }
    outerFailure(rows.back().Pattern);
  }
}
/// Perform specialized dispatch for a sequence of EnumElementPattern or an
/// OptionalSomePattern.
///
/// Loadable sources (after an optional load_borrow of a loadable address) are
/// delegated to emitEnumElementObjectDispatch; address-only sources are
/// lowered here with 'switch_enum_addr' plus per-case
/// 'unchecked_take_enum_data_addr' projections.
void PatternMatchEmission::emitEnumElementDispatch(
    ArrayRef<RowToSpecialize> rows, ConsumableManagedValue src,
    const SpecializationHandler &handleCase, const FailureHandler &outerFailure,
    ProfileCounter defaultCaseCount) {
  // Why do we need to do this here (I just cargo culted this).
  SILLocation loc = PatternMatchStmt;
  loc.setDebugLoc(rows[0].Pattern);

  // If our source is an address that is loadable, perform a load_borrow.
  if (src.getType().isAddress() && src.getType().isLoadable(SGF.F)) {
    assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
           "Can only have take_on_success with address only values");
    src = {SGF.B.createLoadBorrow(loc, src.getFinalManagedValue()),
           CastConsumptionKind::BorrowAlways};
  }

  // If we have an object...
  if (src.getType().isObject()) {
    // Do a quick assert that we do not have take_on_success. This should only
    // be passed take_on_success if src is an address only type.
    assert(src.getFinalConsumption() != CastConsumptionKind::TakeOnSuccess &&
           "Can only have take_on_success with address only values");

    // Finally perform the enum element dispatch.
    return emitEnumElementObjectDispatch(rows, src, handleCase, outerFailure,
                                         defaultCaseCount);
  }

  // After this point we know that we must have an address only type.
  assert(src.getType().isAddressOnly(SGF.F) &&
         "Should have an address only type here");

  CanType sourceType = rows[0].Pattern->getType()->getCanonicalType();

  // Collect the cases and specialized rows.
  CaseBlocks blocks{SGF, rows, sourceType, SGF.B.getInsertionBB()};

  // We lack a SIL instruction to nondestructively project data from an
  // address-only enum, so we can only do so in place if we're allowed to take
  // the source always. Copy the source if we can't.
  switch (src.getFinalConsumption()) {
  case CastConsumptionKind::TakeAlways:
  case CastConsumptionKind::CopyOnSuccess:
  case CastConsumptionKind::BorrowAlways:
    // No change to src necessary.
    break;

  case CastConsumptionKind::TakeOnSuccess:
    // If any of the specialization cases is refutable, we must copy.
    if (!blocks.hasAnyRefutableCase())
      break;

    src = ConsumableManagedValue(ManagedValue::forUnmanaged(src.getValue()),
                                 CastConsumptionKind::CopyOnSuccess);
    break;
  }

  // Emit the switch_enum_addr instruction.
  SILValue srcValue = src.getFinalManagedValue().forward(SGF);
  SGF.B.createSwitchEnumAddr(loc, srcValue, blocks.getDefaultBlock(),
                             blocks.getCaseBlocks(), blocks.getCounts(),
                             defaultCaseCount);

  // Okay, now emit all the cases.
  blocks.forEachCase([&](EnumElementDecl *elt, SILBasicBlock *caseBB,
                         const CaseInfo &caseInfo) {
    SILLocation loc = caseInfo.FirstMatcher;
    auto &specializedRows = caseInfo.SpecializedRows;

    SGF.B.setInsertionPoint(caseBB);

    // We're in conditionally-executed code; enter a scope.
    Scope scope(SGF.Cleanups, CleanupLocation::get(loc));

    // Create a BB argument or 'unchecked_take_enum_data_addr'
    // instruction to receive the enum case data if it has any.
    SILType eltTy;
    bool hasElt = false;
    if (elt->hasAssociatedValues()) {
      eltTy = src.getType().getEnumElementType(elt, SGF.SGM.M,
                                               SGF.getTypeExpansionContext());
      hasElt = !eltTy.getASTType()->isVoid();
    }

    ConsumableManagedValue eltCMV, origCMV;

    // Empty cases. Try to avoid making an empty tuple value if it's
    // obviously going to be ignored. This assumes that we won't even
    // try to touch the value in such cases, although we may touch the
    // cleanup (enough to see that it's not present).
    if (!hasElt) {
      bool hasNonAny = false;
      for (auto &specRow : specializedRows) {
        auto pattern = specRow.Patterns[0];
        if (pattern &&
            !isa<AnyPattern>(pattern->getSemanticsProvidingPattern())) {
          hasNonAny = true;
          break;
        }
      }

      SILValue result;
      if (hasNonAny) {
        result = SGF.emitEmptyTuple(loc);
      } else {
        result = SILUndef::get(SGF.SGM.Types.getEmptyTupleType(), SGF.F);
      }
      origCMV = ConsumableManagedValue::forUnmanaged(result);
      eltCMV = origCMV;

      // Okay, specialize on the argument.
    } else {
      auto *eltTL = &SGF.getTypeLowering(eltTy);

      // Normally we'd just use the consumption of the source
      // because the difference between TakeOnSuccess and TakeAlways
      // doesn't matter for irrefutable rows. But if we need to
      // re-abstract, we'll see a lot of benefit from figuring out
      // that we can use TakeAlways here.
      auto eltConsumption = src.getFinalConsumption();
      if (caseInfo.Irrefutable &&
          eltConsumption == CastConsumptionKind::TakeOnSuccess) {
        eltConsumption = CastConsumptionKind::TakeAlways;
      }

      SILValue eltValue;
      // We can only project destructively from an address-only enum, so
      // copy the value if we can't consume it.
      // TODO: Should have a more efficient way to copy payload
      // nondestructively from an enum.
      switch (eltConsumption) {
      case CastConsumptionKind::TakeAlways:
        eltValue =
            SGF.B.createUncheckedTakeEnumDataAddr(loc, srcValue, elt, eltTy);
        break;

      case CastConsumptionKind::BorrowAlways:
        // If we reach this point, we know that we have a loadable
        // element type from an enum with mixed address
        // only/loadable cases. Since we had an address only type,
        // we assume that we will not have BorrowAlways since
        // address only types do not support BorrowAlways.
        llvm_unreachable("not allowed");

      case CastConsumptionKind::CopyOnSuccess: {
        auto copy = SGF.emitTemporaryAllocation(loc, srcValue->getType());
        SGF.B.createCopyAddr(loc, srcValue, copy, IsNotTake, IsInitialization);
        // We can always take from the copy.
        eltConsumption = CastConsumptionKind::TakeAlways;
        eltValue = SGF.B.createUncheckedTakeEnumDataAddr(loc, copy, elt, eltTy);
        break;
      }

      // We can't conditionally take, since UncheckedTakeEnumDataAddr
      // invalidates the enum.
      case CastConsumptionKind::TakeOnSuccess:
        llvm_unreachable("not allowed");
      }

      // If we have a loadable payload despite the enum being address only, load
      // the value. This invariant makes it easy to specialize code for
      // ownership.
      if (eltTL->isLoadable()) {
        // If we do not have a loadable value, just use getManagedSubObject
        // Load a loadable data value.
        auto managedEltValue = ManagedValue::forUnmanaged(eltValue);
        if (eltConsumption == CastConsumptionKind::CopyOnSuccess) {
          managedEltValue = SGF.B.createLoadBorrow(loc, managedEltValue);
          eltConsumption = CastConsumptionKind::BorrowAlways;
        } else {
          assert(eltConsumption == CastConsumptionKind::TakeAlways);
          managedEltValue = SGF.B.createLoadTake(loc, managedEltValue);
        }
        origCMV = {managedEltValue, eltConsumption};
      } else {
        origCMV = getManagedSubobject(SGF, eltValue, *eltTL, eltConsumption);
      }

      eltCMV = origCMV;

      // If the payload is boxed, project it.
      if (elt->isIndirect() || elt->getParentEnum()->isIndirect()) {
        ManagedValue boxedValue =
            SGF.B.createProjectBox(loc, origCMV.getFinalManagedValue(), 0);
        eltTL = &SGF.getTypeLowering(boxedValue.getType());
        if (eltTL->isLoadable()) {
          boxedValue = SGF.B.createLoadBorrow(loc, boxedValue);
          eltCMV = {boxedValue, CastConsumptionKind::BorrowAlways};
        } else {
          // The boxed value may be shared, so we always have to copy it.
          eltCMV = getManagedSubobject(SGF, boxedValue.getValue(), *eltTL,
                                       CastConsumptionKind::CopyOnSuccess);
        }
      }

      // Reabstract to the substituted type, if needed.
      CanType substEltTy =
          sourceType->getTypeOfMember(SGF.SGM.M.getSwiftModule(), elt,
                                      elt->getArgumentInterfaceType())
              ->getCanonicalType();

      AbstractionPattern origEltTy =
          (elt->getParentEnum()->isOptionalDecl()
               ? AbstractionPattern(substEltTy)
               : SGF.SGM.M.Types.getAbstractionPattern(elt));

      eltCMV = emitReabstractedSubobject(SGF, loc, eltCMV, *eltTL,
                                         origEltTy, substEltTy);
    }

    // On failure, undo any forwarding performed for this case before invoking
    // the outer failure handler.
    const FailureHandler *innerFailure = &outerFailure;
    FailureHandler specializedFailure = [&](SILLocation loc) {
      ArgUnforwarder unforwarder(SGF);
      unforwarder.unforwardBorrowedValues(src, origCMV);
      outerFailure(loc);
    };
    if (ArgUnforwarder::requiresUnforwarding(SGF, src))
      innerFailure = &specializedFailure;

    handleCase(eltCMV, specializedRows, *innerFailure);
    assert(!SGF.B.hasValidInsertionPoint() && "did not end block");
  });

  // Emit the default block if we needed one.
  if (SILBasicBlock *defaultBB = blocks.getDefaultBlock()) {
    SGF.B.setInsertionPoint(defaultBB);
    outerFailure(rows.back().Pattern);
  }
}
/// Perform specialized dispatch for a sequence of BoolPatterns.
/// (The original header comment said "EnumElementPattern or an
/// OptionalSomePattern", which appears to be a copy-paste from the enum
/// dispatchers above; this function switches on a Bool via 'switch_value'.)
void PatternMatchEmission::
emitBoolDispatch(ArrayRef<RowToSpecialize> rows, ConsumableManagedValue src,
                 const SpecializationHandler &handleCase,
                 const FailureHandler &outerFailure) {
  struct CaseInfo {
    Pattern *FirstMatcher;
    bool Irrefutable = false;
    SmallVector<SpecializedRow, 2> SpecializedRows;
  };

  SILBasicBlock *curBB = SGF.B.getInsertionBB();
  auto &Context = SGF.getASTContext();

  // Collect the cases and specialized rows.
  //
  // These vectors are completely parallel, but the switch
  // instructions want only the first information, so we split them up.
  SmallVector<std::pair<SILValue, SILBasicBlock*>, 4> caseBBs;
  SmallVector<CaseInfo, 4> caseInfos;
  SILBasicBlock *defaultBB = nullptr;

  caseBBs.reserve(rows.size());
  caseInfos.reserve(rows.size());

  // Create destination blocks for all the cases.
  // caseToIndex[false]/caseToIndex[true] hold the vector index for each bool
  // value, or ~0U when that value has not been seen yet.
  unsigned caseToIndex[2] = { ~0U, ~0U };
  for (auto &row : rows) {
    bool isTrue = cast<BoolPattern>(row.Pattern)->getValue();

    unsigned index = caseInfos.size();
    if (caseToIndex[isTrue] != ~0U) {
      // We already had an entry for this bool value.
      index = caseToIndex[isTrue];
    } else {
      caseToIndex[isTrue] = index;

      curBB = SGF.createBasicBlockAfter(curBB);
      // switch_value keys on a Builtin.Int1 literal for this bool value.
      auto *IL = SGF.B.createIntegerLiteral(PatternMatchStmt,
                                    SILType::getBuiltinIntegerType(1, Context),
                                            isTrue ? 1 : 0);
      caseBBs.push_back({SILValue(IL), curBB});
      caseInfos.resize(caseInfos.size() + 1);
      caseInfos.back().FirstMatcher = row.Pattern;
    }

    auto &info = caseInfos[index];
    info.Irrefutable = (info.Irrefutable || row.Irrefutable);
    info.SpecializedRows.resize(info.SpecializedRows.size() + 1);
    auto &specRow = info.SpecializedRows.back();
    specRow.RowIndex = row.RowIndex;
    specRow.Patterns.push_back(nullptr);
  }

  assert(caseBBs.size() == caseInfos.size());

  // Check to see if we need a default block: only when the rows did not cover
  // both true and false.
  if (caseBBs.size() < 2)
    defaultBB = SGF.createBasicBlockAfter(curBB);

  // Emit the switch_value
  SILLocation loc = PatternMatchStmt;
  loc.setDebugLoc(rows[0].Pattern);
  SILValue srcValue = src.getFinalManagedValue().forward(SGF);

  // Extract the i1 from the Bool struct.
  auto i1Value = SGF.emitUnwrapIntegerResult(loc, srcValue);
  SGF.B.createSwitchValue(loc, i1Value, defaultBB, caseBBs);

  // Okay, now emit all the cases.
  for (unsigned i = 0, e = caseInfos.size(); i != e; ++i) {
    auto &caseInfo = caseInfos[i];
    auto &specializedRows = caseInfo.SpecializedRows;

    SILBasicBlock *caseBB = caseBBs[i].second;
    SGF.B.setInsertionPoint(caseBB);

    // We're in conditionally-executed code; enter a scope.
    Scope scope(SGF.Cleanups, CleanupLocation::get(loc));

    // Bool cases carry no payload, so the specialized value is just undef.
    SILValue result
      = SILUndef::get(SGF.SGM.Types.getEmptyTupleType(), SGF.F);
    ConsumableManagedValue CMV =
      ConsumableManagedValue::forUnmanaged(result);

    handleCase(CMV, specializedRows, outerFailure);
    assert(!SGF.B.hasValidInsertionPoint() && "did not end block");
  }

  // Emit the default block if we needed one.
  if (defaultBB) {
    SGF.B.setInsertionPoint(defaultBB);
    outerFailure(rows.back().Pattern);
  }
}
/// Emit the body of a case statement at the current insertion point.
///
/// After the body, if control flow can still fall off the end, emits an
/// implicit break out of the enclosing pattern-match statement.
void PatternMatchEmission::emitCaseBody(CaseStmt *caseBlock) {
  SGF.emitStmt(caseBlock->getBody());

  // Implicitly break out of the pattern match statement.
  if (SGF.B.hasValidInsertionPoint()) {
    // Case blocks without trailing braces have a line location of the last
    // instruction in the case block.
    SILLocation cleanupLoc =
            RegularLocation::getAutoGeneratedLocation(caseBlock->getEndLoc());
    // Special case: a body consisting of exactly one 'do' statement uses the
    // case itself as the cleanup location instead.
    if (auto *braces = dyn_cast<BraceStmt>(caseBlock->getBody()))
      if (braces->getNumElements() == 1 &&
          dyn_cast_or_null<DoStmt>(braces->getFirstElement().dyn_cast<Stmt *>()))
        cleanupLoc = CleanupLocation(caseBlock);
    SGF.emitBreakOutOf(cleanupLoc, PatternMatchStmt);
  }
}
/// Create (but do not yet emit into) the shared destination block for
/// \p caseBlock, registering it in SharedCases and giving it one owned phi
/// argument per named, non-address-only pattern variable.
void PatternMatchEmission::initSharedCaseBlockDest(CaseStmt *caseBlock,
                                                   bool hasFallthroughTo) {
  auto inserted = SharedCases.insert({caseBlock, {nullptr, hasFallthroughTo}});
  assert(inserted.second);

  auto *dest = SGF.createBasicBlock();
  inserted.first->second.first = dest;

  // Named pattern variables become block arguments. Address-only values are
  // excluded: they are passed through memory rather than block arguments.
  for (auto *var : caseBlock->getCaseBodyVariablesOrEmptyArray()) {
    if (!var->hasName())
      continue;
    SILType loweredTy = SGF.getLoweredType(var->getType());
    if (!loweredTy.isAddressOnly(SGF.F))
      dest->createPhiArgument(loweredTy, ValueOwnershipKind::Owned, var);
  }
}
/// Retrieve the jump destination for a shared case block.
JumpDest PatternMatchEmission::getSharedCaseBlockDest(CaseStmt *caseBlock) {
  auto it = SharedCases.find(caseBlock);
  assert(it != SharedCases.end());

  SILBasicBlock *dest = it->second.first;
  assert(dest);

  return JumpDest(dest, PatternMatchStmtDepth,
                  CleanupLocation(PatternMatchStmt));
}
void PatternMatchEmission::emitAddressOnlyAllocations() {
for (auto &entry : SharedCases) {
CaseStmt *caseBlock = entry.first;
// If we have a shared case with bound decls, setup the arguments for the
// shared block by emitting the temporary allocation used for the arguments
// of the shared block.
for (auto *vd : caseBlock->getCaseBodyVariablesOrEmptyArray()) {
if (!vd->hasName())
continue;
SILType ty = SGF.getLoweredType(vd->getType());
if (!ty.isAddressOnly(SGF.F))
continue;
assert(!Temporaries[vd]);
Temporaries[vd] = SGF.emitTemporaryAllocation(vd, ty);
}
}
// Now we have all of our cleanups entered, so we can record the
// depth.
PatternMatchStmtDepth = SGF.getCleanupsDepth();
}
/// Initialize the temporary backing the address-only variable \p dest by
/// copying \p value into it (no take; this is an initialization).
void PatternMatchEmission::
emitAddressOnlyInitialization(VarDecl *dest, SILValue value) {
  auto it = Temporaries.find(dest);
  assert(it != Temporaries.end());
  SGF.B.createCopyAddr(dest, value, it->second, IsNotTake, IsInitialization);
}
/// Emit all the shared case statements.
///
/// Single-predecessor, non-fallthrough case blocks are merged directly into
/// their predecessor; genuinely shared blocks are moved after their first
/// predecessor and emitted in place. Bound pattern variables are rebound from
/// incoming block arguments (or the address-only temporaries) before the body
/// is emitted via \p bodyEmitter.
void PatternMatchEmission::emitSharedCaseBlocks(
    llvm::function_ref<void(CaseStmt *)> bodyEmitter) {
  for (auto &entry : SharedCases) {
    CaseStmt *caseBlock = entry.first;
    SILBasicBlock *caseBB = entry.second.first;
    bool hasFallthroughTo = entry.second.second;
    assert(caseBB->empty());

    // If this case can only have one predecessor, then merge it into that
    // predecessor.  We rely on the SIL CFG here, because unemitted shared case
    // blocks might fallthrough into this one.
    if (!hasFallthroughTo && caseBlock->getCaseLabelItems().size() == 1) {
      SILBasicBlock *predBB = caseBB->getSinglePredecessorBlock();
      assert(predBB && "Should only have 1 predecessor because it isn't shared");

      assert(isa<BranchInst>(predBB->getTerminator()) &&
             "Should have uncond branch to shared block");
      predBB->getTerminator()->eraseFromParent();
      caseBB->eraseFromParent();

      // Emit the case body into the predecessor's block.
      SGF.B.setInsertionPoint(predBB);
    } else {
      // If we did not need a shared case block, we shouldn't have emitted one.
      assert(!caseBB->pred_empty() &&
             "Shared case block without predecessors?!");

      // Otherwise, move the block to after the first predecessor.
      auto predBB = *caseBB->pred_begin();
      caseBB->moveAfter(predBB);

      // Then emit the case body into the caseBB.
      SGF.B.setInsertionPoint(caseBB);
    }

    // Make sure that before/after we emit the case body we have emitted all
    // cleanups we created within.
    assert(SGF.getCleanupsDepth() == PatternMatchStmtDepth);
    SWIFT_DEFER { assert(SGF.getCleanupsDepth() == PatternMatchStmtDepth); };

    // Without bound variables there is no VarLoc setup to do; emit the body
    // directly.
    if (!caseBlock->hasCaseBodyVariables()) {
      emitCaseBody(caseBlock);
      continue;
    }

    // If we have a shared case with bound decls, then the case stmt pattern has
    // the order of variables that are the incoming BB arguments. Setup the
    // VarLocs to point to the incoming args and setup initialization so any
    // args needing Cleanup will get that as well.
    LexicalScope scope(SGF, CleanupLocation(caseBlock));
    unsigned argIndex = 0;
    for (auto *vd : caseBlock->getCaseBodyVariables()) {
      if (!vd->hasName())
        continue;

      SILType ty = SGF.getLoweredType(vd->getType());

      // Initialize mv at +1. We always pass values in at +1 for today into
      // shared blocks.
      ManagedValue mv;
      if (ty.isAddressOnly(SGF.F)) {
        // There's no basic block argument, since we don't allow basic blocks
        // to have address arguments.
        //
        // Instead, we map the variable to a temporary alloc_stack in
        // emitAddressOnlyAllocations(), and store into it at each
        // predecessor block.
        //
        // There's nothing to do here, since the value should already have
        // been initialized on entry.
        auto found = Temporaries.find(vd);
        assert(found != Temporaries.end());
        mv = SGF.emitManagedRValueWithCleanup(found->second);
      } else {
        SILValue arg = caseBB->getArgument(argIndex++);
        assert(arg.getOwnershipKind() == ValueOwnershipKind::Owned ||
               arg.getOwnershipKind() == ValueOwnershipKind::None);
        mv = SGF.emitManagedRValueWithCleanup(arg);
      }

      // Emit a debug description of the incoming arg, nested within the scope
      // for the pattern match.
      SILDebugVariable dbgVar(vd->isLet(), /*ArgNo=*/0);
      SGF.B.emitDebugDescription(vd, mv.getValue(), dbgVar);

      if (vd->isLet()) {
        // Just emit a let and leave the cleanup alone.
        SGF.VarLocs[vd].value = mv.getValue();
        continue;
      }

      // Otherwise, the pattern variables were all emitted as lets and one got
      // passed in. Since we have a var, alloc a box for the var and forward in
      // the chosen value.
      SGF.VarLocs.erase(vd);
      auto newVar = SGF.emitInitializationForVarDecl(vd, vd->isLet());
      newVar->copyOrInitValueInto(SGF, vd, mv, /*isInit*/ true);
      newVar->finishInitialization(SGF);
    }

    // Now that we have setup all of the VarLocs correctly, emit the shared case
    // body.
    bodyEmitter(caseBlock);
  }
}
/// Context info used to emit FallthroughStmts.
/// Since fallthrough-able case blocks must not bind variables, they are always
/// emitted in the outermost scope of the switch.
class Lowering::PatternMatchContext {
public:
  // The emission object driving the enclosing switch statement.
  // FallthroughStmt lowering (emitSwitchFallthrough) reads this off
  // SwitchStack to find the shared destination block of the target case.
  PatternMatchEmission &Emission;
};
namespace {

/// State needed to emit a runtime diagnostic when the pattern match over a
/// switch subject fails to match any case (see the `failure` closure in
/// emitSwitchStmt).
struct UnexpectedEnumCaseInfo {
  // Canonical type of the switch subject expression.
  CanType subjectTy;
  // Metatype of the subject, passed to the diagnostic intrinsic.
  ManagedValue metatype;
  // Raw value bitcast of the subject; only set in the single-@objc-enum case.
  ManagedValue rawValue;
  // Non-null iff the subject is a single @objc enum whose raw value we can
  // include in the diagnostic.
  NullablePtr<const EnumDecl> singleObjCEnum;

  /// Constructor for the single-@objc-enum case: the raw value is available
  /// and can be reported.
  UnexpectedEnumCaseInfo(CanType subjectTy, ManagedValue metatype,
                         ManagedValue rawValue, const EnumDecl *singleObjCEnum)
      : subjectTy(subjectTy), metatype(metatype), rawValue(rawValue),
        singleObjCEnum(singleObjCEnum) {
    assert(isa<MetatypeInst>(metatype));
    assert(bool(rawValue) && isa<UncheckedTrivialBitCastInst>(rawValue));
    assert(singleObjCEnum->hasRawType());
  }

  /// Constructor for the general case: only a value metatype of the subject
  /// is available for the diagnostic.
  UnexpectedEnumCaseInfo(CanType subjectTy, ManagedValue valueMetatype)
      : subjectTy(subjectTy), metatype(valueMetatype), rawValue(),
        singleObjCEnum() {
    assert(isa<ValueMetatypeInst>(valueMetatype));
  }

  bool isSingleObjCEnum() const { return singleObjCEnum.isNonNull(); }

  /// Erase the metatype/bitcast instructions if the failure path never used
  /// them, so no dead instructions are left behind in the function.
  void cleanupInstsIfUnused() {
    auto f = [](SILValue v) {
      if (!v->use_empty())
        return;
      cast<SingleValueInstruction>(v)->eraseFromParent();
    };
    f(metatype.getValue());
    if (rawValue)
      f(rawValue.getValue());
  }
};

} // end anonymous namespace
/// Apply the library intrinsic that diagnoses an unexpected raw value in a
/// switch over a single @objc enum. Falls back to a builtin trap when the
/// intrinsic is not available in the standard library.
static void emitDiagnoseOfUnexpectedEnumCaseValue(SILGenFunction &SGF,
                                                  SILLocation loc,
                                                  UnexpectedEnumCaseInfo ueci) {
  ASTContext &astContext = SGF.getASTContext();
  auto diagnoseIntrinsic = astContext.getDiagnoseUnexpectedEnumCaseValue();
  if (!diagnoseIntrinsic) {
    // No intrinsic to call; degrade to a plain trap.
    SGF.B.createBuiltinTrap(loc);
    return;
  }

  // Bind the intrinsic's two generic parameters: index 0 is the subject
  // type, index 1 is the enum's raw type.
  auto signature = diagnoseIntrinsic->getGenericSignature();
  auto substitutions = SubstitutionMap::get(
      signature,
      [&](SubstitutableType *dependentTy) -> Type {
        auto paramTy = cast<GenericTypeParamType>(dependentTy);
        assert(paramTy->getDepth() == 0);
        assert(paramTy->getIndex() < 2);
        if (paramTy->getIndex() == 0)
          return ueci.subjectTy;
        if (paramTy->getIndex() == 1)
          return ueci.singleObjCEnum.get()->getRawType();
        llvm_unreachable("wrong generic signature for expected case value");
      },
      LookUpConformanceInSignature(signature.getPointer()));

  SGF.emitApplyOfLibraryIntrinsic(
      loc, diagnoseIntrinsic, substitutions,
      {ueci.metatype, ueci.rawValue.materialize(SGF, loc)}, SGFContext());
}
/// Apply the library intrinsic that diagnoses an unexpected enum case in a
/// switch. Falls back to a builtin trap when the intrinsic is not available
/// in the standard library.
static void emitDiagnoseOfUnexpectedEnumCase(SILGenFunction &SGF,
                                             SILLocation loc,
                                             UnexpectedEnumCaseInfo ueci) {
  ASTContext &astContext = SGF.getASTContext();
  auto diagnoseIntrinsic = astContext.getDiagnoseUnexpectedEnumCase();
  if (!diagnoseIntrinsic) {
    // No intrinsic to call; degrade to a plain trap.
    SGF.B.createBuiltinTrap(loc);
    return;
  }

  // Every generic parameter of the intrinsic is bound to the subject type.
  auto signature = diagnoseIntrinsic->getGenericSignature();
  auto substitutions = SubstitutionMap::get(
      signature,
      [&](SubstitutableType *) -> Type { return ueci.subjectTy; },
      LookUpConformanceInSignature(signature.getPointer()));

  SGF.emitApplyOfLibraryIntrinsic(loc, diagnoseIntrinsic, substitutions,
                                  ueci.metatype, SGFContext());
}
/// Success continuation invoked when a clause row of a switch statement
/// matches: emits the case body inline when possible, otherwise initializes
/// the case's bound variables and branches to the shared case block.
///
/// Fix: the no-bound-decls path previously re-declared a shadowing
/// `sharedDest`, redundantly calling getSharedCaseBlockDest a second time;
/// it now reuses the destination computed above.
static void switchCaseStmtSuccessCallback(SILGenFunction &SGF,
                                          PatternMatchEmission &emission,
                                          ArgArray argArray, ClauseRow &row) {
  auto caseBlock = row.getClientData<CaseStmt>();
  SGF.emitProfilerIncrement(caseBlock);

  // Certain case statements can be entered along multiple paths, either
  // because they have multiple labels or because of fallthrough. When we need
  // multiple entrance paths, we factor the paths with a shared block.
  //
  // If we don't have a fallthrough or a multi-pattern 'case', we can emit the
  // body inline. Emit the statement here and bail early.
  if (!row.hasFallthroughTo() && caseBlock->getCaseLabelItems().size() == 1) {
    // Debug values for case body variables must be nested within a scope for
    // the case block to avoid name conflicts.
    DebugScope scope(SGF, CleanupLocation(caseBlock));

    // If we have case body vars, set them up to point at the matching var
    // decls.
    if (caseBlock->hasCaseBodyVariables()) {
      // Since we know that we only have one case label item, grab its pattern
      // vars and use that to update expected with the right SILValue.
      //
      // TODO: Do we need a copy here?
      SmallVector<VarDecl *, 4> patternVars;
      row.getCasePattern()->collectVariables(patternVars);
      for (auto *expected : caseBlock->getCaseBodyVariables()) {
        if (!expected->hasName())
          continue;
        for (auto *vd : patternVars) {
          if (!vd->hasName() || vd->getName() != expected->getName()) {
            continue;
          }

          // Ok, we found a match. Update the VarLocs for the case block.
          auto v = SGF.VarLocs[vd];
          SGF.VarLocs[expected] = v;

          // Emit a debug description for the variable, nested within a scope
          // for the pattern match.
          SILDebugVariable dbgVar(vd->isLet(), /*ArgNo=*/0);
          SGF.B.emitDebugDescription(vd, v.value, dbgVar);
        }
      }
    }
    emission.emitCaseBody(caseBlock);
    return;
  }

  // Ok, at this point we know that we have a multiple entrance block. Grab our
  // shared destination in preparation for branching to it.
  //
  // NOTE: We do not emit anything yet, since we will emit the shared block
  // later.
  JumpDest sharedDest = emission.getSharedCaseBlockDest(caseBlock);

  // If we do not have any bound decls, we do not need to setup any
  // variables. Just jump to the shared destination. (The shared block itself
  // is emitted later, at the cleanup level of the switch statement.)
  if (!caseBlock->hasCaseBodyVariables()) {
    SGF.Cleanups.emitBranchAndCleanups(sharedDest, caseBlock);
    return;
  }

  // Generate the arguments from this row's pattern in the case block's
  // expected order, and keep those arguments from being cleaned up, as we're
  // passing the +1 along to the shared case block dest. (The cleanups still
  // happen, as they are threaded through here messily, but the explicit
  // retains here counteract them, and then the retain/release pair gets
  // optimized out.)
  SmallVector<SILValue, 4> args;
  SmallVector<VarDecl *, 4> patternVars;
  row.getCasePattern()->collectVariables(patternVars);
  for (auto *expected : caseBlock->getCaseBodyVariables()) {
    if (!expected->hasName())
      continue;
    for (auto *var : patternVars) {
      if (!var->hasName() || var->getName() != expected->getName())
        continue;

      SILValue value = SGF.VarLocs[var].value;
      SILType type = value->getType();

      // If we have an address-only type, initialize the temporary
      // allocation. We're not going to pass the address as a block
      // argument.
      if (type.isAddressOnly(SGF.F)) {
        emission.emitAddressOnlyInitialization(expected, value);
        break;
      }

      // If we have a loadable address, perform a load [copy].
      if (type.isAddress()) {
        value = SGF.B.emitLoadValueOperation(SGF.CurrentSILLoc, value,
                                             LoadOwnershipQualifier::Copy);
        args.push_back(value);
        break;
      }

      value = SGF.B.emitCopyValueOperation(SGF.CurrentSILLoc, value);
      args.push_back(value);
      break;
    }
  }

  // Now that we have initialized our arguments, branch to the shared dest.
  SGF.Cleanups.emitBranchAndCleanups(sharedDest, caseBlock, args);
}
void SILGenFunction::emitSwitchStmt(SwitchStmt *S) {
  LLVM_DEBUG(llvm::dbgs() << "emitting switch stmt\n";
             S->dump(llvm::dbgs());
             llvm::dbgs() << '\n');

  auto subjectTy = S->getSubjectExpr()->getType();

  // If the subject expression is uninhabited, we're already dead.
  // Emit an unreachable in place of the switch statement.
  if (subjectTy->isStructurallyUninhabited()) {
    emitIgnoredExpr(S->getSubjectExpr());
    B.createUnreachable(S);
    return;
  }

  // A row's success continuation simply forwards to the file-level
  // switchCaseStmtSuccessCallback.
  auto completionHandler = [this](PatternMatchEmission &emission,
                                  ArgArray argArray, ClauseRow &row) {
    return switchCaseStmtSuccessCallback(*this, emission, argArray, row);
  };
  PatternMatchEmission emission(*this, S, completionHandler);

  // Add a row for each label of each case.
  SmallVector<ClauseRow, 8> clauseRows;
  clauseRows.reserve(S->getRawCases().size());
  bool hasFallthrough = false;
  for (auto caseBlock : S->getCases()) {
    // If the previous block falls through into this block or we have multiple
    // case label items, create a shared case block to generate the shared
    // block.
    if (hasFallthrough || caseBlock->getCaseLabelItems().size() > 1) {
      emission.initSharedCaseBlockDest(caseBlock, hasFallthrough);
    }
    for (auto &labelItem : caseBlock->getCaseLabelItems()) {
      clauseRows.emplace_back(caseBlock,
                              const_cast<Pattern*>(labelItem.getPattern()),
                              const_cast<Expr*>(labelItem.getGuardExpr()),
                              hasFallthrough);
    }
    hasFallthrough = caseBlock->hasFallthroughDest();
  }

  // Emit alloc_stacks for address-only variables appearing in
  // multiple-entry case blocks.
  emission.emitAddressOnlyAllocations();

  SILBasicBlock *contBB = createBasicBlock();
  emitProfilerIncrement(S);
  JumpDest contDest(contBB, Cleanups.getCleanupsDepth(), CleanupLocation(S));

  LexicalScope switchScope(*this, CleanupLocation(S));

  // Enter a break/continue scope. If we wanted a continue
  // destination, it would probably be out here.
  BreakContinueDestStack.push_back({S, contDest, JumpDest(S)});

  // Publish the emission so emitSwitchFallthrough can find the shared
  // destination of a fallthrough target while emitting case bodies.
  PatternMatchContext switchContext = { emission };
  SwitchStack.push_back(&switchContext);

  // Emit the subject value. If at +1, dispatching will consume it. If it is at
  // +0, we just forward down borrows.
  ManagedValue subjectMV = emitRValueAsSingleValue(
      S->getSubjectExpr(), SGFContext::AllowGuaranteedPlusZero);

  // Inline constructor for subject: normalize the subject into a
  // ConsumableManagedValue with the appropriate consumption kind.
  auto subject = ([&]() -> ConsumableManagedValue {
    // If we have a plus one value...
    if (subjectMV.isPlusOne(*this)) {
      // And we have an address that is loadable, perform a load [take].
      if (subjectMV.getType().isAddress() &&
          subjectMV.getType().isLoadable(F)) {
        subjectMV = B.createLoadTake(S, subjectMV);
      }
      return {subjectMV, CastConsumptionKind::TakeAlways};
    }

    // If we have a loadable address and +0, perform a load borrow.
    if (subjectMV.getType().isAddress() &&
        subjectMV.getType().isLoadable(F)) {
      subjectMV = B.createLoadBorrow(S, subjectMV);
    }

    // If then we have an object, return it at +0.
    if (subjectMV.getType().isObject()) {
      return {subjectMV, CastConsumptionKind::BorrowAlways};
    }

    // If we have an address only type returned without a cleanup, we
    // need to do a copy just to be safe. So for efficiency we pass it
    // down take_always.
    return {subjectMV.copy(*this, S), CastConsumptionKind::TakeAlways};
  }());

  // If we need to diagnose an unexpected enum case or unexpected enum case
  // value, we need access to a value metatype for the subject. Emit this state
  // now before we emit the actual switch to ensure that the subject has not
  // been consumed.
  auto unexpectedEnumCaseInfo = ([&]() -> UnexpectedEnumCaseInfo {
    SILLocation loc = RegularLocation::getAutoGeneratedLocation();
    CanType canSubjectTy = subjectTy->getCanonicalType();
    CanType metatypeType = MetatypeType::get(canSubjectTy)->getCanonicalType();
    SILType loweredMetatypeType =
        getLoweredType(AbstractionPattern::getOpaque(), metatypeType);
    ManagedValue value = subject.getFinalManagedValue();

    if (auto *singleEnumDecl = canSubjectTy->getEnumOrBoundGenericEnum()) {
      if (singleEnumDecl->isObjC()) {
        auto metatype = ManagedValue::forUnmanaged(
            B.createMetatype(loc, loweredMetatypeType));

        // Bitcast the enum value to its raw type. (This is only safe for @objc
        // enums.)
        SILType loweredRawType = getLoweredType(singleEnumDecl->getRawType());
        assert(loweredRawType.isTrivial(F));
        assert(loweredRawType.isObject());
        auto rawValue =
            B.createUncheckedTrivialBitCast(loc, value, loweredRawType);
        return {canSubjectTy, metatype, rawValue, singleEnumDecl};
      }
    }

    return {canSubjectTy,
            B.createValueMetatype(loc, loweredMetatypeType, value)};
  }());

  auto failure = [&](SILLocation location) {
    // If we fail to match anything, we trap. This can happen with a switch
    // over an @objc enum, which may contain any value of its underlying type,
    // or a switch over a non-frozen Swift enum when the user hasn't written a
    // catch-all case.
    SWIFT_DEFER { B.createUnreachable(location); };

    // Special case: if it's a single @objc enum, we can print the raw value.
    if (unexpectedEnumCaseInfo.isSingleObjCEnum()) {
      emitDiagnoseOfUnexpectedEnumCaseValue(*this, location,
                                            unexpectedEnumCaseInfo);
      return;
    }
    emitDiagnoseOfUnexpectedEnumCase(*this, location, unexpectedEnumCaseInfo);
  };

  // Set up an initial clause matrix.
  ClauseMatrix clauses(clauseRows);

  // Recursively specialize and emit the clause matrix.
  emission.emitDispatch(clauses, subject, failure);
  assert(!B.hasValidInsertionPoint());

  switchScope.pop();

  // Then emit the case blocks shared by multiple pattern cases.
  emission.emitSharedCaseBlocks(
      [&](CaseStmt *caseStmt) { emission.emitCaseBody(caseStmt); });

  // Bookkeeping.
  SwitchStack.pop_back();
  BreakContinueDestStack.pop_back();

  // If the continuation block has no predecessors, this
  // point is not reachable.
  if (contBB->pred_empty()) {
    eraseBasicBlock(contBB);
  } else {
    B.emitBlock(contBB);
  }

  // Now that we have emitted everything, see if our unexpected enum case info
  // metatypes were actually used. If not, delete them.
  unexpectedEnumCaseInfo.cleanupInstsIfUnused();
}
void SILGenFunction::emitSwitchFallthrough(FallthroughStmt *S) {
  assert(!SwitchStack.empty() && "fallthrough outside of switch?!");
  PatternMatchContext *context = SwitchStack.back();

  // Get the destination block of the fallthrough from the enclosing switch's
  // emission.
  CaseStmt *destCaseStmt = S->getFallthroughDest();
  JumpDest sharedDest = context->Emission.getSharedCaseBlockDest(destCaseStmt);

  // If our destination case doesn't have any bound decls, there is no
  // rebinding to do. Just jump to the shared dest.
  if (!destCaseStmt->hasCaseBodyVariables()) {
    Cleanups.emitBranchAndCleanups(sharedDest, S);
    return;
  }

  // Generate branch args to pass along current vars to fallthrough case.
  SmallVector<SILValue, 4> args;
  CaseStmt *fallthroughSourceStmt = S->getFallthroughSource();

  for (auto *expected : destCaseStmt->getCaseBodyVariables()) {
    if (!expected->hasName())
      continue;

    // The type checker enforces that if our destination case has variables
    // then our fallthrough source must as well. Match them by name.
    for (auto *var : fallthroughSourceStmt->getCaseBodyVariables()) {
      if (!var->hasName() || var->getName() != expected->getName()) {
        continue;
      }

      auto varLoc = VarLocs[var];
      SILValue value = varLoc.value;

      // Address-only values are initialized into the destination's temporary
      // allocation instead of being passed as a block argument.
      if (value->getType().isAddressOnly(F)) {
        context->Emission.emitAddressOnlyInitialization(expected, value);
        break;
      }

      // If the var is boxed, load [copy] the value out of its address.
      if (varLoc.box) {
        SILValue argValue = B.emitLoadValueOperation(
            CurrentSILLoc, value, LoadOwnershipQualifier::Copy);
        args.push_back(argValue);
        break;
      }

      // Otherwise copy the loadable value directly.
      auto argValue = B.emitCopyValueOperation(CurrentSILLoc, value);
      args.push_back(argValue);
      break;
    }
  }
  Cleanups.emitBranchAndCleanups(sharedDest, S, args);
}
/// Emit dispatch from a do/catch statement's thrown error value to its catch
/// clauses, rethrowing (or trapping, when there is no throw destination) if
/// no clause matches.
///
/// Fix: the no-bound-decls path inside the completion handler previously
/// re-declared a shadowing `sharedDest`, redundantly calling
/// getSharedCaseBlockDest a second time; it now reuses the destination
/// computed above.
void SILGenFunction::emitCatchDispatch(DoCatchStmt *S, ManagedValue exn,
                                       ArrayRef<CaseStmt *> clauses,
                                       JumpDest catchFallthroughDest) {
  auto completionHandler = [&](PatternMatchEmission &emission,
                               ArgArray argArray, ClauseRow &row) {
    auto clause = row.getClientData<CaseStmt>();
    emitProfilerIncrement(clause->getBody());

    // Certain catch clauses can be entered along multiple paths because they
    // have multiple labels. When we need multiple entrance paths, we factor
    // the paths with a shared block.
    //
    // If we don't have a multi-pattern 'catch', we can emit the
    // body inline. Emit the statement here and bail early.
    if (clause->getCaseLabelItems().size() == 1) {
      // If we have case body vars, set them up to point at the matching var
      // decls.
      if (clause->hasCaseBodyVariables()) {
        // Since we know that we only have one case label item, grab its
        // pattern vars and use that to update expected with the right
        // SILValue.
        //
        // TODO: Do we need a copy here?
        SmallVector<VarDecl *, 4> patternVars;
        row.getCasePattern()->collectVariables(patternVars);
        for (auto *expected : clause->getCaseBodyVariables()) {
          if (!expected->hasName())
            continue;
          for (auto *vd : patternVars) {
            if (!vd->hasName() || vd->getName() != expected->getName()) {
              continue;
            }

            // Ok, we found a match. Update the VarLocs for the case block.
            auto v = VarLocs[vd];
            VarLocs[expected] = v;
          }
        }
      }
      emitStmt(clause->getBody());

      // If we fell out of the catch clause, branch to the fallthrough dest.
      if (B.hasValidInsertionPoint()) {
        Cleanups.emitBranchAndCleanups(catchFallthroughDest,
                                       clause->getBody());
      }
      return;
    }

    // Ok, at this point we know that we have a multiple entrance block. Grab
    // our shared destination in preparation for branching to it.
    //
    // NOTE: We do not emit anything yet, since we will emit the shared block
    // later.
    JumpDest sharedDest = emission.getSharedCaseBlockDest(clause);

    // If we do not have any bound decls, we do not need to setup any
    // variables. Just jump to the shared destination. (The shared block
    // itself is emitted later, at the cleanup level of the statement.)
    if (!clause->hasCaseBodyVariables()) {
      Cleanups.emitBranchAndCleanups(sharedDest, clause);
      return;
    }

    // Generate the arguments from this row's pattern in the case block's
    // expected order, and keep those arguments from being cleaned up, as
    // we're passing the +1 along to the shared case block dest. (The cleanups
    // still happen, as they are threaded through here messily, but the
    // explicit retains here counteract them, and then the retain/release pair
    // gets optimized out.)
    SmallVector<SILValue, 4> args;
    SmallVector<VarDecl *, 4> patternVars;
    row.getCasePattern()->collectVariables(patternVars);
    for (auto *expected : clause->getCaseBodyVariables()) {
      if (!expected->hasName())
        continue;
      for (auto *var : patternVars) {
        if (!var->hasName() || var->getName() != expected->getName())
          continue;

        SILValue value = VarLocs[var].value;
        SILType type = value->getType();

        // If we have an address-only type, initialize the temporary
        // allocation. We're not going to pass the address as a block
        // argument.
        if (type.isAddressOnly(F)) {
          emission.emitAddressOnlyInitialization(expected, value);
          break;
        }

        // If we have a loadable address, perform a load [copy].
        if (type.isAddress()) {
          value = B.emitLoadValueOperation(CurrentSILLoc, value,
                                           LoadOwnershipQualifier::Copy);
          args.push_back(value);
          break;
        }

        value = B.emitCopyValueOperation(CurrentSILLoc, value);
        args.push_back(value);
        break;
      }
    }

    // Now that we have initialized our arguments, branch to the shared dest.
    Cleanups.emitBranchAndCleanups(sharedDest, clause, args);
  };

  LLVM_DEBUG(llvm::dbgs() << "emitting catch dispatch\n"; S->dump(llvm::dbgs());
             llvm::dbgs() << '\n');
  PatternMatchEmission emission(*this, S, completionHandler);

  // Add a row for each label of each case.
  SmallVector<ClauseRow, 8> clauseRows;
  clauseRows.reserve(S->getCatches().size());
  for (auto caseBlock : S->getCatches()) {
    // If we have multiple case label items, create a shared case block to
    // generate the shared block.
    if (caseBlock->getCaseLabelItems().size() > 1) {
      emission.initSharedCaseBlockDest(caseBlock, /*hasFallthrough*/ false);
    }
    for (auto &labelItem : caseBlock->getCaseLabelItems()) {
      clauseRows.emplace_back(caseBlock,
                              const_cast<Pattern *>(labelItem.getPattern()),
                              const_cast<Expr *>(labelItem.getGuardExpr()),
                              /*hasFallthrough*/ false);
    }
  }

  // Emit alloc_stacks for address-only variables appearing in
  // multiple-entry case blocks.
  emission.emitAddressOnlyAllocations();

  Scope stmtScope(Cleanups, CleanupLocation(S));

  assert(exn.getType().isObject() &&
         "Error is special and should always be an object");
  // Our model is that sub-cases get the exception at +0 and the throw (if we
  // need to rethrow the exception) gets the exception at +1 since we need to
  // trampoline its ownership to our caller.
  ConsumableManagedValue subject = {exn.borrow(*this, S),
                                    CastConsumptionKind::BorrowAlways};

  auto failure = [&](SILLocation location) {
    // If we fail to match anything, just rethrow the exception.

    // If the throw destination is not valid, then the PatternMatchEmission
    // logic is emitting an unreachable block but didn't prune the failure BB.
    // Mark it as such.
    if (!ThrowDest.isValid()) {
      B.createUnreachable(S);
      return;
    }

    // Since we borrowed exn before sending it to our subcases, we know that it
    // must be at +1 at this point. That being said, SILGenPattern will
    // potentially invoke this for each of the catch statements, so we need to
    // copy before we pass it into the throw.
    CleanupStateRestorationScope scope(Cleanups);
    if (exn.hasCleanup()) {
      scope.pushCleanupState(exn.getCleanup(),
                             CleanupState::PersistentlyActive);
    }
    emitThrow(S, exn);
  };

  // Set up an initial clause matrix.
  ClauseMatrix clauseMatrix(clauseRows);

  // Recursively specialize and emit the clause matrix.
  emission.emitDispatch(clauseMatrix, subject, failure);
  assert(!B.hasValidInsertionPoint());
  stmtScope.pop();

  // Then emit the case blocks shared by multiple pattern cases.
  emission.emitSharedCaseBlocks([&](CaseStmt *caseStmt) {
    emitStmt(caseStmt->getBody());

    // If we fell out of the catch clause, branch to the fallthrough dest.
    if (B.hasValidInsertionPoint()) {
      Cleanups.emitBranchAndCleanups(catchFallthroughDest,
                                     caseStmt->getBody());
    }
  });
}
| stephentyrone/swift | lib/SILGen/SILGenPattern.cpp | C++ | apache-2.0 | 118,138 |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="Content-Type" />
<meta content="2013-12-25 10:18:47 -0700" http-equiv="change-date" />
<title>1RS 2</title>
<script src='../js/jquery-3.1.1.min.js' type='text/javascript' charset='utf-8'></script>
<script src='../js/bpi.js' type="text/javascript" charset="utf-8"></script>
<link rel="stylesheet" href='../css/bpi.css' >
</head>
<body>
<div class="header"><h1 id="titulo">1º Reis 2<span id="trecho"></span></h1></div>
<div id="passagem">
<div class="bible1 verses">
<p class="verse" verse="1"><sup>1</sup>Ora, aproximando-se o dia da morte de Davi, deu ele ordem a Salomão, seu filho, dizendo:</p>
<p class="verse" verse="2"><sup>2</sup>Eu vou pelo caminho de toda a terra; sê forte, pois, e porta-te como homem.</p>
<p class="verse" verse="3"><sup>3</sup>Guarda as ordenanças do Senhor teu Deus, andando nos seus caminhos, e observando os seus estatutos, os seus mandamentos, os seus preceitos e os seus testemunhos, como está escrito na lei de Moisés, para que prosperes em tudo quanto fizeres e por onde quer que fores,</p>
<p class="verse" verse="4"><sup>4</sup>e para que o Senhor confirme a palavra que falou acerca de mim, dizendo: Se teus filhos guardarem os seus caminhos, andando perante a minha face fielmente, com todo o seu coração e com toda a sua alma, nunca te faltará sucessor ao trono de Israel.</p>
<p class="verse" verse="5"><sup>5</sup>Tu sabes também o que me fez Joabe, filho de Zeruia, a saber, o que fez aos dois chefes do exército de Israel, a Abner, filho de Ner, e a Amasa, filho de Jeter, os quais ele matou, e em tempo de paz derramou o sangue de guerra, manchando com ele o cinto que tinha nos lombos, e os sapatos que trazia nos pés.</p>
<p class="verse" verse="6"><sup>6</sup>Faze, pois, segundo a tua sabedoria, e não permitas que suas cãs desçam à sepultura em paz.</p>
<p class="verse" verse="7"><sup>7</sup>Mas para com os filhos de Barzilai, o gileadita, usa de benevolência, e estejam eles entre os que comem à tua mesa; porque assim se houveram comigo, quando eu fugia por causa de teu irmão Absalão.</p>
<p class="verse" verse="8"><sup>8</sup>E eis que também contigo está Simei, filho de Gêra, benjamita, de Baurim, que me lançou atroz maldição, no dia em que eu ia a Maanaim; porém ele saiu a encontrar-se comigo junto ao Jordão, e eu lhe jurei pelo Senhor, dizendo: Não te matarei à espada.</p>
<p class="verse" verse="9"><sup>9</sup>Agora, porém, não o tenhas por inocente; pois és homem sábio, e bem saberás o que lhe hás de fazer; farás com que as suas cãs desçam à sepultura com sangue.</p>
<p class="verse" verse="10"><sup>10</sup>Depois Davi dormiu com seus pais, e foi sepultado na cidade de Davi.</p>
<p class="verse" verse="11"><sup>11</sup>E foi o tempo que Davi reinou sobre Israel quarenta anos: sete anos reinou em Hebrom, e em Jerusalém reinou trinta e três anos.</p>
<p class="verse" verse="12"><sup>12</sup>Salomão, pois, assentou-se no trono de Davi, seu pai; e o seu reino se fortificou sobremaneira.</p>
<p class="verse" verse="13"><sup>13</sup>Então Adonias, filho de Hagite, veio a Bate-Seba, mãe de Salomão; e perguntou ela: De paz é a tua vinda? Respondeu ele: É de paz.</p>
<p class="verse" verse="14"><sup>14</sup>E acrescentou: Uma palavra tenho que dizer-te. Respondeu ela: Fala.</p>
<p class="verse" verse="15"><sup>15</sup>Disse, pois, ele: Bem sabes que o reino era meu, e que todo o Israel tinha posto a vista em mim para que eu viesse a reinar; contudo o reino se transferiu e veio a ser de meu irmão, porque foi feito seu pelo Senhor.</p>
<p class="verse" verse="16"><sup>16</sup>Agora uma só coisa te peço; não ma recuses. Ela lhe disse: Fala.</p>
<p class="verse" verse="17"><sup>17</sup>E ele disse: Peço-te que fales ao rei Salomão {porque ele não to recusará} , que me dê por mulher a Abisague, a sunamita.</p>
<p class="verse" verse="18"><sup>18</sup>Respondeu Bate-Seba: Pois bem; eu falarei por ti ao rei.</p>
<p class="verse" verse="19"><sup>19</sup>Foi, pois, Bate-Seba ter com o rei Salomão, para falar-lhe por Adonias. E o rei se levantou a encontrar-se com ela, e se inclinou diante dela; então, assentando-se no seu trono, mandou que pusessem um trono para a rainha-mãe; e ela se assentou à sua direita.</p>
<p class="verse" verse="20"><sup>20</sup>Então disse ela: Só uma pequena coisa te peço; não ma recuses. Respondeu-lhe o rei: Pede, minha mãe, porque não te recusarei.</p>
<p class="verse" verse="21"><sup>21</sup>E ela disse: Dê-se Abisague, a sunamita, por mulher a teu irmão Adonias.</p>
<p class="verse" verse="22"><sup>22</sup>Então respondeu o rei Salomão, e disse a sua mãe: E por que pedes Abisague, a sunamita, para Adonias? Pede também para ele o reino {porque é meu irmão mais velho}; sim, para ele, e também para Abiatar, o sacerdote, e para Joabe, filho de Zeruia.</p>
<p class="verse" verse="23"><sup>23</sup>E jurou o rei Salomão pelo Senhor, dizendo: Assim Deus me faça, e outro tanto, se não falou Adonias esta palavra contra a sua vida.</p>
<p class="verse" verse="24"><sup>24</sup>Agora, pois, vive o Senhor, que me confirmou e me fez assentar no trono de Davi, meu pai, e que me estabeleceu casa, como tinha dito, que hoje será morto Adonias.</p>
<p class="verse" verse="25"><sup>25</sup>E o rei Salomão deu ordem a Benaías, filho de Jeoiada, o qual feriu a Adonias, de modo que morreu.</p>
<p class="verse" verse="26"><sup>26</sup>Também a Abiatar, o sacerdote, disse o rei: Vai para Anatote, para os teus campos, porque és homem digno de morte; porém hoje não te matarei, porquanto levaste a arca do Senhor Deus diante de Davi, meu pai, e porquanto participaste de todas as aflições de meu pai.</p>
<p class="verse" verse="27"><sup>27</sup>Salomão, pois, expulsou Abiatar, para que não fosse sacerdote do Senhor, assim cumprindo a palavra que o Senhor tinha dito acerca da casa de Eli em Siló.</p>
<p class="verse" verse="28"><sup>28</sup>Ora, veio esta notícia a Joabe {pois Joabe se desviara após Adonias, ainda que não se tinha desviado após Absalão} ; pelo que Joabe fugiu para o tabernáculo do Senhor, e apegou-se as pontas do altar.</p>
<p class="verse" verse="29"><sup>29</sup>E disseram ao rei Salomão: Joabe fugiu para o tabernáculo do Senhor; e eis que está junto ao altar. Então Salomão enviou Benaías, filho de Jeoiada, dizendo: Vai, mata-o.</p>
<p class="verse" verse="30"><sup>30</sup>Foi, pois, Benaías ao tabernáculo do Senhor, e disse a Joabe: Assim diz o rei: Sai daí. Respondeu Joabe: Não! porém aqui morrerei. E Benaías tornou com a resposta ao rei, dizendo: Assim falou Joabe, e assim me respondeu.</p>
<p class="verse" verse="31"><sup>31</sup>Ao que lhe disse o rei: Faze como ele disse; mata-o, e sepulta-o, para que tires de sobre mim e de sobre a casa de meu pai o sangue que Joabe sem causa derramou.</p>
<p class="verse" verse="32"><sup>32</sup>Assim o Senhor fará recair o sangue dele sobre a sua cabeça, porque deu sobre dois homens mais justos e melhores do que ele, e os matou à espada, sem que meu pai Davi o soubesse, a saber: a Abner, filho de Ner, chefe do exército de Israel, e a Amasa, filho de Jeter, chefe do exército de Judá.</p>
<p class="verse" verse="33"><sup>33</sup>Assim recairá o sangue destes sobre a cabeça de Joabe e sobre a cabeça da sua descendência para sempre; mas a Davi, e à sua descendência, e à sua casa, e ao seu trono, o Senhor dará paz para sempre.</p>
<p class="verse" verse="34"><sup>34</sup>Então Benaías, filho de Jeoiada, subiu e, arremetendo contra Joabe, o matou. E foi sepultado em sua casa, no deserto.</p>
<p class="verse" verse="35"><sup>35</sup>Em lugar dele o rei pôs a Benaías, filho de Jeoiada, sobre o exército; e a Zadoque, o sacerdote, pôs em lugar de Abiatar.</p>
<p class="verse" verse="36"><sup>36</sup>Depois o rei mandou chamar a Simei e lhe disse: Edifica para ti uma casa em Jerusalém, habita aí, e daí não saias, nem para uma nem para outra parte.</p>
<p class="verse" verse="37"><sup>37</sup>E fica sabendo que, no dia em que saíres e passares o ribeiro de Cedrom, de certo hás de morrer. O teu sangue será sobre a tua cabeça.</p>
<p class="verse" verse="38"><sup>38</sup>Respondeu Simei ao rei: Boa é essa palavra; como tem dito o rei meu senhor, assim fará o teu servo. E Simei habitou em Jerusalém muitos dias.</p>
<p class="verse" verse="39"><sup>39</sup>Sucedeu porém que, ao cabo de três anos, dois servos de Simei fugiram para Aquis, filho de Maacá, rei de Gate. E deram parte a Simei, dizendo: Eis que teus servos estão em Gate.</p>
<p class="verse" verse="40"><sup>40</sup>Então Simei se levantou, albardou o seu jumento e foi a Gate ter com Aquis, em busca dos seus servos; assim foi Simei, e os trouxe de Gate.</p>
<p class="verse" verse="41"><sup>41</sup>Disseram a Salomão que Simei fora de Jerusalém a Gate, e já havia voltado.</p>
<p class="verse" verse="42"><sup>42</sup>Então o rei mandou chamar a Simei e lhe disse: Não te conjurei pelo Senhor e não te protestei, dizendo: No dia em que saíres para qualquer parte, sabe de certo que hás de morrer? E tu me disseste: Boa é essa palavra que ouvi.</p>
<p class="verse" verse="43"><sup>43</sup>Por que, então, não guardaste o juramento do Senhor, e a ordem que te dei?</p>
<p class="verse" verse="44"><sup>44</sup>Disse-lhe mais: Bem sabes tu, e o teu coração reconhece toda a maldade que fizeste a Davi, meu pai; pelo que o Senhor fará recair a tua maldade sobre a tua cabeça.</p>
<p class="verse" verse="45"><sup>45</sup>Mas o rei Salomão será abençoado, e o trono de Davi será confirmado perante o Senhor para sempre:</p>
<p class="verse" verse="46"><sup>46</sup>E o rei deu ordem a Benaías, filho de Jeoiada, o qual saiu, e feriu a Simei, de modo que morreu. Assim foi confirmado o reino na mão de Salomão.</p>
</div>
</div>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<p class="copyright">Almeida Revista e Atualizada — Copyright © 1993 Sociedade Bíblica do Brasil. Todos os direitos reservados. Texto bíblico utilizado com autorização. Saiba mais sobre a Sociedade Bíblica do Brasil. A Sociedade Bíblica do Brasil trabalha para que a Bíblia esteja, efetivamente, ao alcance de todos e seja lida por todos. A SBB é uma entidade sem fins lucrativos, dedicada a promover o desenvolvimento integral do ser humano.</p>
<br/>
<br/>
<br/>
<br/></body>
</html>
| ahsbjunior/biblia-para-igrejas | ara/11-2.html | HTML | apache-2.0 | 10,884 |
/*
* Copyright 2000-2014 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.vaadin.grid.cellrenderers.client.editoraware;
import com.google.gwt.core.client.GWT;
import com.google.gwt.dom.client.BrowserEvents;
import com.google.gwt.dom.client.Element;
import com.google.gwt.event.dom.client.ClickEvent;
import com.google.gwt.event.dom.client.ClickHandler;
import com.google.gwt.event.dom.client.MouseDownEvent;
import com.google.gwt.event.dom.client.MouseDownHandler;
import com.google.gwt.event.shared.HandlerRegistration;
import com.vaadin.client.communication.RpcProxy;
import com.vaadin.client.connectors.ClickableRendererConnector;
import com.vaadin.client.renderers.ClickableRenderer;
import com.vaadin.client.ui.VCheckBox;
import com.vaadin.client.widget.grid.RendererCellReference;
import com.vaadin.client.widgets.Grid;
import com.vaadin.shared.ui.Connect;
import elemental.json.JsonObject;
import java.util.Objects;
import org.vaadin.grid.cellrenderers.editoraware.CheckboxRenderer;
/**
* Provides various helper methods for connectors. Meant for internal use.
*
* @author Ilya Motorny, Tatu Lund
*/
/**
 * Client-side connector for {@link CheckboxRenderer}. Builds the GWT checkbox
 * widget rendered inside Grid cells and relays value changes to the server
 * through {@code CheckboxRendererServerRpc}, keyed by the row key stored on
 * the checkbox DOM element.
 *
 * @author Ilya Motorny, Tatu Lund
 */
@Connect(CheckboxRenderer.class)
public class CheckboxRendererConnector extends ClickableRendererConnector<Boolean> {

    /** RPC proxy used to report checkbox value changes to the server side. */
    CheckboxRendererServerRpc rpc = RpcProxy.create(
            CheckboxRendererServerRpc.class, this);

    /**
     * Renderer that draws a {@link VCheckBox} in a Grid cell and keeps the
     * DOM element tagged with the key of the row it currently displays.
     */
    public class CheckboxClientRenderer extends ClickableRenderer<Boolean, VCheckBox> {

        /** Name of the DOM property on the checkbox element holding the row key. */
        private static final String ROW_KEY_PROPERTY = "rowKey";

        @Override
        public VCheckBox createWidget() {
            VCheckBox checkBox = GWT.create(VCheckBox.class);
            // NOTE(review): two click handlers are registered on the widget -
            // 'this' (fires renderer click events) and the RPC handler below.
            // Verify the double registration is intentional.
            checkBox.addClickHandler(this);
            checkBox.setStylePrimaryName("v-checkbox");
            checkBox.sinkBitlessEvent(BrowserEvents.CLICK);
            checkBox.sinkBitlessEvent(BrowserEvents.MOUSEDOWN);
            checkBox.addClickHandler(new ClickHandler() {
                @Override
                public void onClick(ClickEvent event) {
                    VCheckBox checkBox = (VCheckBox) event.getSource();
                    Element e = checkBox.getElement();
                    // Toggle the widget state and report the new value,
                    // together with the row key, to the server.
                    checkBox.setValue(!checkBox.getValue());
                    rpc.onChange(e.getPropertyString(ROW_KEY_PROPERTY),
                            checkBox.getValue());
                }
            });
            checkBox.addMouseDownHandler(new MouseDownHandler() {
                @Override
                public void onMouseDown(MouseDownEvent event) {
                    // Keep Grid from reacting to the press (e.g. starting
                    // row selection) before the checkbox handles the click.
                    event.stopPropagation();
                }
            });
            return checkBox;
        }

        @Override
        public void render(RendererCellReference cell, Boolean value, VCheckBox checkBox) {
            // Update the widget state without firing value change events.
            checkBox.setValue(value, false);
            checkBox.setEnabled(true);
            if (getState().txtTrue != null) {
                // Boolean.TRUE.equals(...) also guards against a null cell
                // value, which previously threw an NPE when auto-unboxed.
                String text = Boolean.TRUE.equals(value) ? getState().txtTrue : getState().txtFalse;
                if (text != null) checkBox.setText(text);
            } else {
                // No explicit captions configured: fall back to the column's
                // default header caption, if any.
                Grid.HeaderRow headerRow = cell.getGrid().getDefaultHeaderRow();
                String text = "";
                if (headerRow != null) {
                    Grid.HeaderCell headerCell = headerRow.getCell(cell.getColumn());
                    if (headerCell != null && headerCell.getText() != null) text = headerCell.getText();
                    checkBox.setText(text);
                }
            }
            Element e = checkBox.getElement();
            // Compare by content, not by reference: the previous code used
            // != on Strings, which compared object identity and therefore
            // rewrote (or, with interned strings, failed to rewrite) the
            // row key regardless of its actual value.
            String rowKey = getRowKey((JsonObject) cell.getRow());
            if (!Objects.equals(e.getPropertyString(ROW_KEY_PROPERTY), rowKey)) {
                e.setPropertyString(ROW_KEY_PROPERTY, rowKey);
            }
        }
    }

    @Override
    public CheckboxClientRenderer getRenderer() {
        return (CheckboxClientRenderer) super.getRenderer();
    }

    @Override
    protected CheckboxClientRenderer createRenderer() {
        return new CheckboxClientRenderer();
    }

    @Override
    protected HandlerRegistration addClickHandler(ClickableRenderer.RendererClickHandler<JsonObject> handler) {
        return getRenderer().addClickHandler(handler);
    }

    @Override
    public CheckboxRendererState getState() {
        return (CheckboxRendererState) super.getState();
    }
}
| vaadin/grid-renderers-collection-addon | grid-renderers-collection-addon/src/main/java/org/vaadin/grid/cellrenderers/client/editoraware/CheckboxRendererConnector.java | Java | apache-2.0 | 4,670 |
package org.libsmith.anvil.reflection;
import javax.annotation.Nonnull;
import java.lang.reflect.*;
import java.util.ArrayDeque;
import java.util.Deque;
/**
 * Resolves the actual value of a type parameter declared by a generic class or
 * interface, for a given concrete subtype or instance.
 * <p>
 * Based on http://habrahabr.ru/blogs/java/66593/, slightly modified.
 *
 * @author Dmitriy Balakin <dmitriy.balakin@0x0000.ru>
 * @created 20.03.16 5:06
 */
public class GenericReflection<T> {

    /** The generic class (or interface) whose type parameter is resolved. */
    private final Class<T> genericClass;

    protected GenericReflection(Class<T> genericClass) {
        this.genericClass = genericClass;
    }

    /**
     * Fluent entry point: prepares resolution of the type parameters declared
     * by {@code genericContainerClass}.
     */
    public static <T> GenericReflection<T> extractParameterOf(Class<T> genericContainerClass) {
        return new GenericReflection<>(genericContainerClass);
    }

    /** Selects the type parameter to resolve by its declaration index. */
    public Indexed atIndex(int parameterIndex) {
        return new Indexed(parameterIndex);
    }

    /** Resolution step bound to a concrete type parameter index. */
    public class Indexed {

        private final int parameterIndex;

        protected Indexed(int parameterIndex) {
            this.parameterIndex = parameterIndex;
        }

        /** Switches the result form from {@link Class} to raw {@link Type}. */
        public AsType asType() {
            return new AsType();
        }

        /**
         * Resolves the parameter from the runtime class of {@code instance}.
         *
         * @throws IllegalArgumentException if the parameter cannot be resolved
         *         to a concrete class
         */
        public @Nonnull <R> Class<R> from(@Nonnull final T instance) throws IllegalArgumentException {
            return ReflectionUtils.extractClass(GenericReflection.this.from(instance, parameterIndex));
        }

        /**
         * Resolves the parameter from the given actual type.
         *
         * @throws IllegalArgumentException if the parameter cannot be resolved
         *         to a concrete class
         */
        public @Nonnull <R> Class<R> from(@Nonnull final Type actualType) throws IllegalArgumentException {
            return ReflectionUtils.extractClass(GenericReflection.this.from(actualType, parameterIndex));
        }

        /** Same resolution, but returning {@link Type} instead of {@link Class}. */
        public class AsType {

            public @Nonnull Type from(@Nonnull final T instance) throws IllegalArgumentException {
                return GenericReflection.this.from(instance, parameterIndex);
            }

            public @Nonnull Type from(@Nonnull final Type actualType) throws IllegalArgumentException {
                return GenericReflection.this.from(actualType, parameterIndex);
            }
        }
    }

    @Nonnull Type from(@Nonnull T instance, int parameterIndex) throws IllegalArgumentException {
        return from(instance.getClass(), parameterIndex);
    }

    @Nonnull Type from(@Nonnull final Type actualType, int parameterIndex) throws IllegalArgumentException {
        final Class<?> actualClass = ReflectionUtils.extractClass(actualType);

        // Stop right away if genericClass is not a supertype of actualClass.
        if (!genericClass.isAssignableFrom(actualClass)
            || (genericClass.equals(actualClass) && actualType instanceof Class)) {
            throw new IllegalArgumentException("Class "
                    + genericClass.getName() + " is not a superclass of "
                    + actualClass.getName() + ".");
        }

        final boolean isInterface = genericClass.isInterface();

        // We need the type whose direct parent is genericClass, so we climb
        // up the hierarchy until we reach it. While climbing, every
        // parameterized type is pushed onto genericClasses - those are needed
        // later when walking back down to resolve type variables.
        final Deque<ParameterizedType> genericClasses = new ArrayDeque<>();

        // clazz - the type currently being examined
        Type clazz = actualType;
        while (true) {
            final Type genericInterface = isInterface && clazz instanceof Class
                                          ? getGenericInterface((Class<?>) clazz, genericClass)
                                          : null;
            final Type currentType;
            if (genericInterface != null) {
                currentType = genericInterface;
            }
            else {
                if (clazz instanceof Class) {
                    currentType = ((Class<?>) clazz).getGenericSuperclass();
                }
                else {
                    currentType = clazz;
                }
            }
            final boolean isParametrizedType = currentType instanceof ParameterizedType;
            if (isParametrizedType) {
                // The parent is parameterized - remember it, it may be
                // useful on the way back down.
                genericClasses.push((ParameterizedType) currentType);
            }
            else {
                // A non-parameterized type appeared in the hierarchy; all
                // previously saved parameterized types are now useless.
                genericClasses.clear();
            }
            // Check whether we have reached the ancestor we are looking for.
            final Type rawType = isParametrizedType ? ((ParameterizedType) currentType).getRawType() : currentType;
            if (!rawType.equals(genericClass)) {
                // genericClass is not the direct parent of the current type;
                // keep climbing up the hierarchy.
                clazz = rawType;
            }
            else {
                // Reached the target class - stop climbing.
                break;
            }
        }

        // The target class is found; read the actual type argument.
        Type result = genericClasses.pop().getActualTypeArguments()[parameterIndex];
        while (result instanceof TypeVariable && !genericClasses.isEmpty()) {
            // The argument is itself a type variable bound somewhere lower
            // in the hierarchy - walk back down.
            // Find the variable's index in the declaration that introduced it.
            final int actualArgumentIndex = getParameterTypeDeclarationIndex((TypeVariable<?>) result);
            // Take the matching parameterized type carrying the metadata
            // about our parameter.
            final ParameterizedType type = genericClasses.pop();
            // Read the actual argument value.
            result = type.getActualTypeArguments()[actualArgumentIndex];
        }
        if (result instanceof TypeVariable) {
            // We descended to the very bottom and the parameter is still not
            // bound explicitly; due to type erasure it cannot be resolved.
            throw new IllegalArgumentException("Unable to resolve type variable " + result + "."
                    + " Try to replace instances of parametrized class with its non-parameterized subtype.");
        }
        if (result == null) {
            throw new IllegalArgumentException("Unable to determine actual parameter type for " + actualType + ".");
        }

        result = ReflectionUtils.extractWildcardType(result);
        if (result instanceof WildcardType) {
            // Collapse a single-bound wildcard to its bound.
            WildcardType wt = (WildcardType) result;
            if (wt.getLowerBounds().length == 1) {
                result = wt.getLowerBounds()[0];
            }
            else if (wt.getUpperBounds().length == 1) {
                result = wt.getUpperBounds()[0];
            }
        }
        if (result instanceof Class || result instanceof ParameterizedType) {
            return result;
        }
        // The parameter appears to be an array, a primitive, an interface or
        // something else that is not an object class.
        throw new IllegalArgumentException("Actual parameter type for " + actualType + " is not a object Class: " + result);
    }

    private static int getParameterTypeDeclarationIndex(final TypeVariable<?> typeVariable) {
        final GenericDeclaration genericDeclaration = typeVariable.getGenericDeclaration();

        // Look the variable up among all parameters of the declaration that
        // introduced it; its position is the index we need. (Previously used
        // a boxed Integer null-sentinel - direct return is simpler.)
        final TypeVariable<?>[] typeVariables = genericDeclaration.getTypeParameters();
        for (int i = 0; i < typeVariables.length; i++) {
            if (typeVariables[i].equals(typeVariable)) {
                return i;
            }
        }
        throw new IllegalArgumentException("Argument " + typeVariable.toString() + " is not found in "
                + genericDeclaration.toString() + ".");
    }

    /**
     * Returns the generic interface of {@code sourceClass} that is (or
     * extends) {@code genericInterface}, or {@code null} if there is none.
     */
    private static Type getGenericInterface(final Class<?> sourceClass, final Class<?> genericInterface) {
        final Type[] types = sourceClass.getGenericInterfaces();
        for (final Type type : types) {
            if (type instanceof Class) {
                if (genericInterface.isAssignableFrom((Class<?>) type)) {
                    return type;
                }
            }
            else if (type instanceof ParameterizedType) {
                if (genericInterface.isAssignableFrom((Class<?>) ((ParameterizedType) type).getRawType())) {
                    return type;
                }
            }
        }
        return null;
    }
}
| libsmith/libanvil | src/main/java/org/libsmith/anvil/reflection/GenericReflection.java | Java | apache-2.0 | 10,140 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_43) on Wed May 22 21:49:31 UTC 2013 -->
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<TITLE>
Uses of Class org.apache.hadoop.hbase.HServerAddress (HBase 0.94.8 API)
</TITLE>
<META NAME="date" CONTENT="2013-05-22">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.hbase.HServerAddress (HBase 0.94.8 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
  <A HREF="../../../../../index.html?org/apache/hadoop/hbase/class-use/HServerAddress.html" target="_top"><B>FRAMES</B></A>  
<A HREF="HServerAddress.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.hbase.HServerAddress</B></H2>
</CENTER>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Packages that use <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#org.apache.hadoop.hbase"><B>org.apache.hadoop.hbase</B></A></TD>
<TD> </TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#org.apache.hadoop.hbase.avro"><B>org.apache.hadoop.hbase.avro</B></A></TD>
<TD>Provides an HBase <a href="http://avro.apache.org">Avro</a> service. </TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#org.apache.hadoop.hbase.client"><B>org.apache.hadoop.hbase.client</B></A></TD>
<TD>Provides HBase Client </TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#org.apache.hadoop.hbase.master"><B>org.apache.hadoop.hbase.master</B></A></TD>
<TD> </TD>
</TR>
</TABLE>
<P>
<A NAME="org.apache.hadoop.hbase"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> in <A HREF="../../../../../org/apache/hadoop/hbase/package-summary.html">org.apache.hadoop.hbase</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/package-summary.html">org.apache.hadoop.hbase</A> that return <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></CODE></FONT></TD>
<TD><CODE><B>HRegionLocation.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/HRegionLocation.html#getServerAddress()">getServerAddress</A></B>()</CODE>
<BR>
<B>Deprecated.</B> <I>Use <A HREF="../../../../../org/apache/hadoop/hbase/HRegionLocation.html#getHostnamePort()"><CODE>HRegionLocation.getHostnamePort()</CODE></A></I></TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></CODE></FONT></TD>
<TD><CODE><B>HServerInfo.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/HServerInfo.html#getServerAddress()">getServerAddress</A></B>()</CODE>
<BR>
<B>Deprecated.</B> </TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/package-summary.html">org.apache.hadoop.hbase</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> int</CODE></FONT></TD>
<TD><CODE><B>HServerAddress.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html#compareTo(org.apache.hadoop.hbase.HServerAddress)">compareTo</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> o)</CODE>
<BR>
<B>Deprecated.</B> </TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Constructors in <A HREF="../../../../../org/apache/hadoop/hbase/package-summary.html">org.apache.hadoop.hbase</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html#HServerAddress(org.apache.hadoop.hbase.HServerAddress)">HServerAddress</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> other)</CODE>
<BR>
<B>Deprecated.</B> Copy-constructor.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/hadoop/hbase/HServerInfo.html#HServerInfo(org.apache.hadoop.hbase.HServerAddress, int)">HServerInfo</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> serverAddress,
int webuiport)</CODE>
<BR>
<B>Deprecated.</B> Constructor that creates a HServerInfo with a generated startcode</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/hadoop/hbase/HServerInfo.html#HServerInfo(org.apache.hadoop.hbase.HServerAddress, long, int)">HServerInfo</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> serverAddress,
long startCode,
int webuiport)</CODE>
<BR>
<B>Deprecated.</B> </TD>
</TR>
</TABLE>
<P>
<A NAME="org.apache.hadoop.hbase.avro"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> in <A HREF="../../../../../org/apache/hadoop/hbase/avro/package-summary.html">org.apache.hadoop.hbase.avro</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/avro/package-summary.html">org.apache.hadoop.hbase.avro</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE>static <A HREF="../../../../../org/apache/hadoop/hbase/avro/generated/AServerAddress.html" title="class in org.apache.hadoop.hbase.avro.generated">AServerAddress</A></CODE></FONT></TD>
<TD><CODE><B>AvroUtil.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/avro/AvroUtil.html#hsaToASA(org.apache.hadoop.hbase.HServerAddress)">hsaToASA</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> hsa)</CODE>
<BR>
</TD>
</TR>
</TABLE>
<P>
<A NAME="org.apache.hadoop.hbase.client"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Fields in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> declared as <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></CODE></FONT></TD>
<TD><CODE><B>MultiPut.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/MultiPut.html#address">address</A></B></CODE>
<BR>
<B>Deprecated.</B> </TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> that return <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></CODE></FONT></TD>
<TD><CODE><B>RetriesExhaustedWithDetailsException.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.html#getAddress(int)">getAddress</A></B>(int i)</CODE>
<BR>
</TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> that return types with arguments of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="http://java.sun.com/javase/6/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</A><<A HREF="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</A>,<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A>></CODE></FONT></TD>
<TD><CODE><B>HTable.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HTable.html#deserializeRegionInfo(java.io.DataInput)">deserializeRegionInfo</A></B>(<A HREF="http://java.sun.com/javase/6/docs/api/java/io/DataInput.html?is-external=true" title="class or interface in java.io">DataInput</A> in)</CODE>
<BR>
Read from <code>in</code> and deserialize the regions information.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="http://java.sun.com/javase/6/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</A><<A HREF="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</A>,<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A>></CODE></FONT></TD>
<TD><CODE><B>HTable.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HTable.html#getRegionsInfo()">getRegionsInfo</A></B>()</CODE>
<BR>
<B>Deprecated.</B> <I>Use <A HREF="../../../../../org/apache/hadoop/hbase/client/HTable.html#getRegionLocations()"><CODE>HTable.getRegionLocations()</CODE></A> or <A HREF="../../../../../org/apache/hadoop/hbase/client/HTable.html#getStartEndKeys()"><CODE>HTable.getStartEndKeys()</CODE></A></I></TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/ipc/HRegionInterface.html" title="interface in org.apache.hadoop.hbase.ipc">HRegionInterface</A></CODE></FONT></TD>
<TD><CODE><B>HConnection.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HConnection.html#getHRegionConnection(org.apache.hadoop.hbase.HServerAddress)">getHRegionConnection</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> regionServer)</CODE>
<BR>
<B>Deprecated.</B> <I>Use <A HREF="../../../../../org/apache/hadoop/hbase/client/HConnection.html#getHRegionConnection(java.lang.String, int)"><CODE>HConnection.getHRegionConnection(String, int)</CODE></A></I></TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/ipc/HRegionInterface.html" title="interface in org.apache.hadoop.hbase.ipc">HRegionInterface</A></CODE></FONT></TD>
<TD><CODE><B>HConnection.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HConnection.html#getHRegionConnection(org.apache.hadoop.hbase.HServerAddress, boolean)">getHRegionConnection</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> regionServer,
boolean getMaster)</CODE>
<BR>
<B>Deprecated.</B> <I>Use <A HREF="../../../../../org/apache/hadoop/hbase/client/HConnection.html#getHRegionConnection(org.apache.hadoop.hbase.HServerAddress, boolean)"><CODE>HConnection.getHRegionConnection(HServerAddress, boolean)</CODE></A></I></TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Method parameters in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> with type arguments of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> void</CODE></FONT></TD>
<TD><CODE><B>HConnection.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HConnection.html#prewarmRegionCache(byte[], java.util.Map)">prewarmRegionCache</A></B>(byte[] tableName,
<A HREF="http://java.sun.com/javase/6/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</A><<A HREF="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</A>,<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A>> regions)</CODE>
<BR>
Load the region map and warm up the global region cache for the table.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> void</CODE></FONT></TD>
<TD><CODE><B>HTable.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/client/HTable.html#prewarmRegionCache(java.util.Map)">prewarmRegionCache</A></B>(<A HREF="http://java.sun.com/javase/6/docs/api/java/util/Map.html?is-external=true" title="class or interface in java.util">Map</A><<A HREF="../../../../../org/apache/hadoop/hbase/HRegionInfo.html" title="class in org.apache.hadoop.hbase">HRegionInfo</A>,<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A>> regionMap)</CODE>
<BR>
Save the passed region information and the table's regions
cache.</TD>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Constructors in <A HREF="../../../../../org/apache/hadoop/hbase/client/package-summary.html">org.apache.hadoop.hbase.client</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/hadoop/hbase/client/MultiPut.html#MultiPut(org.apache.hadoop.hbase.HServerAddress)">MultiPut</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> a)</CODE>
<BR>
<B>Deprecated.</B> MultiPut for putting multiple regions worth of puts in one RPC.</TD>
</TR>
</TABLE>
<P>
<A NAME="org.apache.hadoop.hbase.master"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> in <A HREF="../../../../../org/apache/hadoop/hbase/master/package-summary.html">org.apache.hadoop.hbase.master</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Methods in <A HREF="../../../../../org/apache/hadoop/hbase/master/package-summary.html">org.apache.hadoop.hbase.master</A> with parameters of type <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> <A HREF="../../../../../org/apache/hadoop/hbase/HServerLoad.html" title="class in org.apache.hadoop.hbase">HServerLoad</A></CODE></FONT></TD>
<TD><CODE><B>ServerManager.</B><B><A HREF="../../../../../org/apache/hadoop/hbase/master/ServerManager.html#getLoad(org.apache.hadoop.hbase.HServerAddress)">getLoad</A></B>(<A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase">HServerAddress</A> address)</CODE>
<BR>
<B>Deprecated.</B> <I>Use <A HREF="../../../../../org/apache/hadoop/hbase/master/ServerManager.html#getLoad(org.apache.hadoop.hbase.HServerAddress)"><CODE>ServerManager.getLoad(HServerAddress)</CODE></A></I></TD>
</TR>
</TABLE>
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../org/apache/hadoop/hbase/HServerAddress.html" title="class in org.apache.hadoop.hbase"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
  <A HREF="../../../../../index.html?org/apache/hadoop/hbase/class-use/HServerAddress.html" target="_top"><B>FRAMES</B></A>  
<A HREF="HServerAddress.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2013 <a href="http://www.apache.org/">The Apache Software Foundation</a>. All Rights Reserved.
</BODY>
</HTML>
| algarecu/hbase-0.94.8-qod | docs/apidocs/org/apache/hadoop/hbase/class-use/HServerAddress.html | HTML | apache-2.0 | 26,466 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Portfolio Site</title>
<!-- Basic CSS Setup-->
<link rel="stylesheet" href="{{site.baseurl}}/css/reset.css">
<link rel="stylesheet" href="{{site.baseurl}}/css/bootstrap.min.css">
<link rel="stylesheet" href="{{site.baseurl}}/css/animate.css">
<link rel="stylesheet" href="{{site.baseurl}}/css/font-awesome.min.css">
<!-- CUSTOM CSS -->
<link rel="stylesheet" href="{{site.portfoliourl}}/css/main.css">
</head>
<body>
<!-- Navigation -->
{% include portfolio-nav.html %}
<!-- Main content -->
{{ content }}
<!-- SCRIPTS -->
<script src="{{site.baseurl}}/js/jquery.min.js"></script>
<script src="{{site.baseurl}}/js/bootstrap.min.js"></script>
<!-- CUSTOM SCRIPTS -->
<script src="{{site.portfoliourl}}/js/main.js"></script>
</body>
</html>
| rmfranciacastillo/30-days-bootstrap-site | _layouts/portfolio.html | HTML | apache-2.0 | 970 |
# Helvella sulcata var. pallens Pers. VARIETY
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
Syn. meth. fung. (Göttingen) 2: 616 (1801)
#### Original name
Helvella sulcata var. pallens Pers.
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Pezizomycetes/Pezizales/Helvellaceae/Helvella/Helvella lacunosa/Helvella sulcata pallens/README.md | Markdown | apache-2.0 | 238 |
package org.set4j.objects;
import junit.framework.Assert;
import org.junit.After;
import org.junit.Test;
import org.set4j.Initializer;
/**
* @author Tomas Mikenda
*
*/
public class TestModuleTreeFields
{
@Test
public void testCreated()
{
WithFields_Main main = new WithFields_Main();
main.sub1 = new WithFields_Sub1();
main.sub2 = new WithFields_Sub2();
main.sub3 = this;
Initializer.init(main);
Assert.assertEquals("sub1", main.sub1.val);
Assert.assertEquals(21, main.sub2.val);
}
@Test
public void testPartiallyCreated()
{
WithFields_Main main = new WithFields_Main();
main.sub1 = new WithFields_Sub1();
Initializer.init(main);
Assert.assertEquals("sub1", main.sub1.val);
Assert.assertEquals(21, main.sub2.val);
}
@Test
public void testSubMissing()
{
WithFields_Main main = new WithFields_Main();
Initializer.init(main);
Assert.assertEquals("sub1", main.sub1.val);
Assert.assertEquals(21, main.sub2.val);
}
@Test
public void testMakeObjects()
{
WithFields_Main main = Initializer.init(WithFields_Main.class);
Assert.assertEquals("sub1", main.sub1.val);
Assert.assertEquals(21, main.sub2.val);
}
@After
public void uninit()
{
Initializer.uninitialize(WithFields_Main.class);
}
}
| let-thomas/set4j | src/test/java/org/set4j/objects/TestModuleTreeFields.java | Java | apache-2.0 | 1,287 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.spi.core.remoting;
| jbertram/activemq-artemis-old | artemis-core-client/src/main/java/org/apache/activemq/artemis/spi/core/remoting/package-info.java | Java | apache-2.0 | 857 |
package ru.job4j.trackerapp.start;
import java.util.Scanner;
/**
* ConsoleInput.
*
* @author Alexey Cheremisin.
* @version $Id$.
* @since 0.1.
*/
public class ConsoleInput implements Input {
/**
* Настройка считывания пользовательского ввода.
*/
private Scanner scanner = new Scanner(System.in);
/**
* Реализация метода ask из итерфейса Input.
* @param question вопрос задаваемый пользователю.
* @return ответ пользователя.
*/
public String ask(String question) {
System.out.print(question);
return scanner.nextLine();
}
/**
* Метод опроса пользователя и получения ответа.
* @param question - вопрос спрашиваемый у пользователя.
* @param range - диапазон допустимых значений.
* @return ответ пользователя.
*/
public int ask(String question, int[] range) {
int key = Integer.valueOf(this.ask(question));
boolean isExist = false;
for (int value:range) {
if (value == key) {
isExist = true;
break;
}
}
if (isExist) {
return key;
} else {
throw new MenuOutException("Out of menu range.");
}
}
} | acheremisin/acheremisin | chapter_002/src/main/java/ru/job4j/trackerapp/start/ConsoleInput.java | Java | apache-2.0 | 1,287 |
# AUTOGENERATED FILE
FROM balenalib/nitrogen8mm-alpine:3.10-build
# Default to UTF-8 file.encoding
ENV LANG C.UTF-8
# add a simple script that can auto-detect the appropriate JAVA_HOME value
# based on whether the JDK or only the JRE is installed
RUN { \
echo '#!/bin/sh'; \
echo 'set -e'; \
echo; \
echo 'dirname "$(dirname "$(readlink -f "$(which javac || which java)")")"'; \
} > /usr/local/bin/docker-java-home \
&& chmod +x /usr/local/bin/docker-java-home
ENV JAVA_HOME /usr/lib/jvm/java-1.7-openjdk
ENV PATH $PATH:/usr/lib/jvm/java-1.7-openjdk/jre/bin:/usr/lib/jvm/java-1.7-openjdk/bin
RUN set -x \
&& apk add --no-cache \
openjdk7-jre \
&& [ "$JAVA_HOME" = "$(docker-java-home)" ]
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Alpine Linux 3.10 \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nOpenJDK v7-jre \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo $'#!/bin/bash\nbalena-info\nbusybox ln -sf /bin/busybox /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& ln -f /bin/sh /bin/sh.real \
&& ln -f /bin/sh-shim /bin/sh | nghiant2710/base-images | balena-base-images/openjdk/nitrogen8mm/alpine/3.10/7-jre/build/Dockerfile | Dockerfile | apache-2.0 | 1,764 |
({"rangeMessage":"* \u8f93\u5165\u6570\u636e\u8d85\u51fa\u503c\u57df\u3002","invalidMessage":"* \u975e\u6cd5\u7684\u8f93\u5165\u503c\u3002","missingMessage":"* \u6b64\u503c\u662f\u5fc5\u987b\u7684\u3002"}) | Gallio/infrastructure | ccnet/WebDashboard/javascript/Dojo/src/widget/nls/zh-cn/validate.js | JavaScript | apache-2.0 | 206 |
// Protocol Buffers for Objective C
//
// Copyright 2010 Booyah Inc.
// Copyright 2008 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OBJC_ENUM_H
#define OBJC_ENUM_H
#include <string>
#include <set>
#include <vector>
#include <google/protobuf/descriptor.h>
namespace google {
namespace protobuf {
namespace io {
class Printer; // printer.h
}
}
namespace protobuf {
namespace compiler {
namespace objectivec {
// Generates Objective-C code for a single protocol-buffer enum type:
// GenerateHeader() writes the declarations, GenerateSource() the definitions.
class EnumGenerator {
 public:
  // NOTE(review): descriptor/file appear to be borrowed pointers (no copy is
  // made) — presumably they must outlive the generator; confirm with callers.
  explicit EnumGenerator(const EnumDescriptor* descriptor, const FileDescriptor* file);
  ~EnumGenerator();
  // Emits this enum's declaration via the given printer (header file).
  void GenerateHeader(io::Printer* printer);
  // Emits this enum's supporting definitions via the given printer (source file).
  void GenerateSource(io::Printer* printer);
 private:
  const EnumDescriptor* descriptor_;
  const FileDescriptor* file_;
  // Values kept as canonical (see aliases_ for values sharing a number).
  vector<const EnumValueDescriptor*> canonical_values_;
  // Pairs a non-canonical value with the canonical value it aliases.
  struct Alias {
    const EnumValueDescriptor* value;
    const EnumValueDescriptor* canonical_value;
  };
  vector<Alias> aliases_;
  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EnumGenerator);
};
} // namespace objectivec
} // namespace compiler
} // namespace protobuf
} // namespace google
#endif // OBJC_ENUM_H
| hgl888/TeamTalk | protobuf-objc/src/compiler/objc_enum.h | C | apache-2.0 | 2,086 |
package es.ucm.fdi.sscheck.gen
import org.scalacheck.Gen
import org.scalacheck.util.Buildable
import Buildables.buildableSeq
import scala.language.{postfixOps,higherKinds}
object UtilsGen {
  /** Like containerOfN but with a variable number of elements: the size
   *  is chosen by Gen.choose(n, m) before the container is generated.
   * */
  def containerOfNtoM[C[_], T]
    (n : Int, m : Int, g : Gen[T])
    (implicit evb: Buildable[T, C[T]], evt: (C[T]) => Traversable[T])
    : Gen[C[T]] = {
    for {
      i <- Gen.choose(n, m)
      xs <- Gen.containerOfN[C, T](i, g)
    } yield xs
  }
  /** Buildable-based variant of containerOfNtoM: generates a C of between
   *  n and m (inclusive) elements drawn from g.
   * */
  def buildableOfNtoM[C, T]
    (n : Int, m : Int, g : Gen[T])
    (implicit evb: Buildable[T, C], evt: (C) => Traversable[T])
    : Gen[C] = {
    for {
      i <- Gen.choose(n, m)
      xs <- Gen.buildableOfN[C, T](i, g)
    } yield xs
  }
  /** Generates n sequences from g and concatenates them
   * */
  def repN[A](n : Int, g : Gen[Seq[A]]) : Gen[Seq[A]] = {
    for {
      xs <- Gen.containerOfN(n, g)
    } yield xs flatten
  }
  /** Generates i sequences from g, with i between n and m, and concatenates them
   * */
  def repNtoM[A](n : Int, m : Int, g : Gen[Seq[A]]) : Gen[Seq[A]] = {
    for {
      xs <- containerOfNtoM(n, m, g)
    } yield xs flatten
  }
  /** @return the generator that results from concatenating the sequences
   *  generated by g1 and g2 (g1's elements first)
   * */
  def concSeq[A](g1 : Gen[Seq[A]], g2 : Gen[Seq[A]]) : Gen[Seq[A]] = {
    for {
      xs <- g1
      ys <- g2
    } yield xs ++ ys
  }
  /** @return if gen is present then a generator that wraps in Some
   *  all the values generated by the value in gen, otherwise return
   *  a generator that always returns None
   * */
  def optGenToGenOpt[A](gen: Option[Gen[A]]): Gen[Option[A]] =
    gen.fold(Gen.const[Option[A]](None)){_.map(Some(_))}
}
/*
Copyright 2002-2003 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.batik.css.engine;
import java.net.URL;
import org.apache.batik.css.engine.value.ShorthandManager;
import org.apache.batik.css.engine.value.ValueConstants;
import org.apache.batik.css.engine.value.ValueManager;
import org.apache.batik.css.engine.value.css2.ClipManager;
import org.apache.batik.css.engine.value.css2.CursorManager;
import org.apache.batik.css.engine.value.css2.DirectionManager;
import org.apache.batik.css.engine.value.css2.DisplayManager;
import org.apache.batik.css.engine.value.css2.FontFamilyManager;
import org.apache.batik.css.engine.value.css2.FontShorthandManager;
import org.apache.batik.css.engine.value.css2.FontSizeAdjustManager;
import org.apache.batik.css.engine.value.css2.FontSizeManager;
import org.apache.batik.css.engine.value.css2.FontStretchManager;
import org.apache.batik.css.engine.value.css2.FontStyleManager;
import org.apache.batik.css.engine.value.css2.FontVariantManager;
import org.apache.batik.css.engine.value.css2.FontWeightManager;
import org.apache.batik.css.engine.value.css2.OverflowManager;
import org.apache.batik.css.engine.value.css2.SrcManager;
import org.apache.batik.css.engine.value.css2.TextDecorationManager;
import org.apache.batik.css.engine.value.css2.UnicodeBidiManager;
import org.apache.batik.css.engine.value.css2.VisibilityManager;
import org.apache.batik.css.engine.value.svg.AlignmentBaselineManager;
import org.apache.batik.css.engine.value.svg.BaselineShiftManager;
import org.apache.batik.css.engine.value.svg.ClipPathManager;
import org.apache.batik.css.engine.value.svg.ClipRuleManager;
import org.apache.batik.css.engine.value.svg.ColorInterpolationFiltersManager;
import org.apache.batik.css.engine.value.svg.ColorInterpolationManager;
import org.apache.batik.css.engine.value.svg.ColorManager;
import org.apache.batik.css.engine.value.svg.ColorProfileManager;
import org.apache.batik.css.engine.value.svg.ColorRenderingManager;
import org.apache.batik.css.engine.value.svg.DominantBaselineManager;
import org.apache.batik.css.engine.value.svg.EnableBackgroundManager;
import org.apache.batik.css.engine.value.svg.FillRuleManager;
import org.apache.batik.css.engine.value.svg.FilterManager;
import org.apache.batik.css.engine.value.svg.GlyphOrientationHorizontalManager;
import org.apache.batik.css.engine.value.svg.GlyphOrientationVerticalManager;
import org.apache.batik.css.engine.value.svg.ImageRenderingManager;
import org.apache.batik.css.engine.value.svg.KerningManager;
import org.apache.batik.css.engine.value.svg.MarkerManager;
import org.apache.batik.css.engine.value.svg.MarkerShorthandManager;
import org.apache.batik.css.engine.value.svg.MaskManager;
import org.apache.batik.css.engine.value.svg.OpacityManager;
import org.apache.batik.css.engine.value.svg.PointerEventsManager;
import org.apache.batik.css.engine.value.svg.SVGColorManager;
import org.apache.batik.css.engine.value.svg.SVGPaintManager;
import org.apache.batik.css.engine.value.svg.ShapeRenderingManager;
import org.apache.batik.css.engine.value.svg.SpacingManager;
import org.apache.batik.css.engine.value.svg.StrokeDasharrayManager;
import org.apache.batik.css.engine.value.svg.StrokeDashoffsetManager;
import org.apache.batik.css.engine.value.svg.StrokeLinecapManager;
import org.apache.batik.css.engine.value.svg.StrokeLinejoinManager;
import org.apache.batik.css.engine.value.svg.StrokeMiterlimitManager;
import org.apache.batik.css.engine.value.svg.StrokeWidthManager;
import org.apache.batik.css.engine.value.svg.TextAnchorManager;
import org.apache.batik.css.engine.value.svg.TextRenderingManager;
import org.apache.batik.css.engine.value.svg.WritingModeManager;
import org.apache.batik.css.parser.ExtendedParser;
import org.apache.batik.util.CSSConstants;
import org.w3c.dom.Document;
/**
* This class provides a CSS engine initialized for SVG.
*
* @author <a href="mailto:stephane@hillion.org">Stephane Hillion</a>
* @version $Id$
*/
public class SVGCSSEngine extends CSSEngine {
    /**
     * Creates a new SVGCSSEngine.
     * @param doc The associated document.
     * @param uri The document URI.
     * @param p The CSS parser to use.
     * @param ctx The CSS context.
     */
    public SVGCSSEngine(Document doc,
                        URL uri,
                        ExtendedParser p,
                        CSSContext ctx) {
        // Positional arguments follow CSSEngine's constructor; the SVG
        // profile uses the "style" attribute and "class" attribute with
        // presentation hints enabled and no pseudo-element support.
        super(doc, uri, p,
              SVG_VALUE_MANAGERS,
              SVG_SHORTHAND_MANAGERS,
              null,
              null,
              "style",
              null,
              "class",
              true,
              null,
              ctx);
        // SVG defines line-height to be font-size.
        lineHeightIndex = fontSizeIndex;
    }
    /**
     * Creates a new SVGCSSEngine.
     * @param doc The associated document.
     * @param uri The document URI.
     * @param p The CSS parser to use.
     * @param vms Extension value managers.
     * @param sms Extension shorthand managers.
     * @param ctx The CSS context.
     */
    public SVGCSSEngine(Document doc,
                        URL uri,
                        ExtendedParser p,
                        ValueManager[] vms,
                        ShorthandManager[] sms,
                        CSSContext ctx) {
        super(doc, uri, p,
              mergeArrays(SVG_VALUE_MANAGERS, vms),
              mergeArrays(SVG_SHORTHAND_MANAGERS, sms),
              null,
              null,
              "style",
              null,
              "class",
              true,
              null,
              ctx);
        // SVG defines line-height to be font-size.
        lineHeightIndex = fontSizeIndex;
    }
    protected SVGCSSEngine(Document doc,
                           URL uri,
                           ExtendedParser p,
                           ValueManager[] vms,
                           ShorthandManager[] sms,
                           String[] pe,
                           String sns,
                           String sln,
                           String cns,
                           String cln,
                           boolean hints,
                           String hintsNS,
                           CSSContext ctx) {
        super(doc, uri, p,
              mergeArrays(SVG_VALUE_MANAGERS, vms),
              mergeArrays(SVG_SHORTHAND_MANAGERS, sms),
              pe, sns, sln, cns, cln, hints, hintsNS, ctx);
        // SVG defines line-height to be font-size.
        lineHeightIndex = fontSizeIndex;
    }
    /**
     * Merges the given arrays (a1's elements first, then a2's).
     */
    protected static ValueManager[] mergeArrays(ValueManager[] a1,
                                                ValueManager[] a2) {
        ValueManager[] result = new ValueManager[a1.length + a2.length];
        System.arraycopy(a1, 0, result, 0, a1.length);
        System.arraycopy(a2, 0, result, a1.length, a2.length);
        return result;
    }
    /**
     * Merges the given arrays (a1's elements first, then a2's).
     */
    protected static ShorthandManager[] mergeArrays(ShorthandManager[] a1,
                                                    ShorthandManager[] a2) {
        ShorthandManager[] result =
            new ShorthandManager[a1.length + a2.length];
        System.arraycopy(a1, 0, result, 0, a1.length);
        System.arraycopy(a2, 0, result, a1.length, a2.length);
        return result;
    }
    /**
     * The value managers for SVG.
     * IMPORTANT: the order of this array must match the property index
     * constants declared at the bottom of this class (ALIGNMENT_BASELINE_INDEX
     * corresponds to element 0, and so on); do not reorder one without the
     * other.
     */
    public final static ValueManager[] SVG_VALUE_MANAGERS = {
        new AlignmentBaselineManager(),
        new BaselineShiftManager(),
        new ClipManager(),
        new ClipPathManager(),
        new ClipRuleManager(),
        new ColorManager(),
        new ColorInterpolationManager(),
        new ColorInterpolationFiltersManager(),
        new ColorProfileManager(),
        new ColorRenderingManager(),
        new CursorManager(),
        new DirectionManager(),
        new DisplayManager(),
        new DominantBaselineManager(),
        new EnableBackgroundManager(),
        new SVGPaintManager(CSSConstants.CSS_FILL_PROPERTY),
        new OpacityManager(CSSConstants.CSS_FILL_OPACITY_PROPERTY, true),
        new FillRuleManager(),
        new FilterManager(),
        new SVGColorManager(CSSConstants.CSS_FLOOD_COLOR_PROPERTY),
        new OpacityManager(CSSConstants.CSS_FLOOD_OPACITY_PROPERTY, false),
        new FontFamilyManager(),
        new FontSizeManager(),
        new FontSizeAdjustManager(),
        new FontStretchManager(),
        new FontStyleManager(),
        new FontVariantManager(),
        new FontWeightManager(),
        new GlyphOrientationHorizontalManager(),
        new GlyphOrientationVerticalManager(),
        new ImageRenderingManager(),
        new KerningManager(),
        new SpacingManager(CSSConstants.CSS_LETTER_SPACING_PROPERTY),
        new SVGColorManager(CSSConstants.CSS_LIGHTING_COLOR_PROPERTY,
                            ValueConstants.WHITE_RGB_VALUE),
        new MarkerManager(CSSConstants.CSS_MARKER_END_PROPERTY),
        new MarkerManager(CSSConstants.CSS_MARKER_MID_PROPERTY),
        new MarkerManager(CSSConstants.CSS_MARKER_START_PROPERTY),
        new MaskManager(),
        new OpacityManager(CSSConstants.CSS_OPACITY_PROPERTY, false),
        new OverflowManager(),
        new PointerEventsManager(),
        new SrcManager(),
        new ShapeRenderingManager(),
        new SVGColorManager(CSSConstants.CSS_STOP_COLOR_PROPERTY),
        new OpacityManager(CSSConstants.CSS_STOP_OPACITY_PROPERTY, false),
        new SVGPaintManager(CSSConstants.CSS_STROKE_PROPERTY,
                            ValueConstants.NONE_VALUE),
        new StrokeDasharrayManager(),
        new StrokeDashoffsetManager(),
        new StrokeLinecapManager(),
        new StrokeLinejoinManager(),
        new StrokeMiterlimitManager(),
        new OpacityManager(CSSConstants.CSS_STROKE_OPACITY_PROPERTY, true),
        new StrokeWidthManager(),
        new TextAnchorManager(),
        new TextDecorationManager(),
        new TextRenderingManager(),
        new UnicodeBidiManager(),
        new VisibilityManager(),
        new SpacingManager(CSSConstants.CSS_WORD_SPACING_PROPERTY),
        new WritingModeManager(),
    };
    /**
     * The shorthand managers for SVG.
     */
    public final static ShorthandManager[] SVG_SHORTHAND_MANAGERS = {
        new FontShorthandManager(),
        new MarkerShorthandManager(),
    };
    //
    // The property indexes.
    // Each constant is defined relative to the previous one so the chain
    // mirrors the order of SVG_VALUE_MANAGERS above; inserting or removing
    // a manager requires updating this chain in the same place.
    //
    public final static int ALIGNMENT_BASELINE_INDEX = 0;
    public final static int BASELINE_SHIFT_INDEX =
        ALIGNMENT_BASELINE_INDEX + 1;
    public final static int CLIP_INDEX = BASELINE_SHIFT_INDEX + 1;
    public final static int CLIP_PATH_INDEX = CLIP_INDEX +1;
    public final static int CLIP_RULE_INDEX = CLIP_PATH_INDEX + 1;
    public final static int COLOR_INDEX = CLIP_RULE_INDEX + 1;
    public final static int COLOR_INTERPOLATION_INDEX = COLOR_INDEX + 1;
    public final static int COLOR_INTERPOLATION_FILTERS_INDEX =
        COLOR_INTERPOLATION_INDEX + 1;
    public final static int COLOR_PROFILE_INDEX =
        COLOR_INTERPOLATION_FILTERS_INDEX + 1;
    public final static int COLOR_RENDERING_INDEX = COLOR_PROFILE_INDEX + 1;
    public final static int CURSOR_INDEX = COLOR_RENDERING_INDEX + 1;
    public final static int DIRECTION_INDEX = CURSOR_INDEX + 1;
    public final static int DISPLAY_INDEX = DIRECTION_INDEX + 1;
    public final static int DOMINANT_BASELINE_INDEX = DISPLAY_INDEX + 1;
    public final static int ENABLE_BACKGROUND_INDEX =
        DOMINANT_BASELINE_INDEX + 1;
    public final static int FILL_INDEX = ENABLE_BACKGROUND_INDEX + 1;
    public final static int FILL_OPACITY_INDEX = FILL_INDEX + 1;
    public final static int FILL_RULE_INDEX = FILL_OPACITY_INDEX + 1;
    public final static int FILTER_INDEX = FILL_RULE_INDEX + 1;
    public final static int FLOOD_COLOR_INDEX = FILTER_INDEX + 1;
    public final static int FLOOD_OPACITY_INDEX = FLOOD_COLOR_INDEX + 1;
    public final static int FONT_FAMILY_INDEX = FLOOD_OPACITY_INDEX + 1;
    public final static int FONT_SIZE_INDEX = FONT_FAMILY_INDEX + 1;
    public final static int FONT_SIZE_ADJUST_INDEX = FONT_SIZE_INDEX + 1;
    public final static int FONT_STRETCH_INDEX = FONT_SIZE_ADJUST_INDEX + 1;
    public final static int FONT_STYLE_INDEX = FONT_STRETCH_INDEX + 1;
    public final static int FONT_VARIANT_INDEX = FONT_STYLE_INDEX + 1;
    public final static int FONT_WEIGHT_INDEX = FONT_VARIANT_INDEX + 1;
    public final static int GLYPH_ORIENTATION_HORIZONTAL_INDEX =
        FONT_WEIGHT_INDEX + 1;
    public final static int GLYPH_ORIENTATION_VERTICAL_INDEX =
        GLYPH_ORIENTATION_HORIZONTAL_INDEX + 1;
    public final static int IMAGE_RENDERING_INDEX =
        GLYPH_ORIENTATION_VERTICAL_INDEX + 1;
    public final static int KERNING_INDEX = IMAGE_RENDERING_INDEX + 1;
    public final static int LETTER_SPACING_INDEX = KERNING_INDEX + 1;
    public final static int LIGHTING_COLOR_INDEX = LETTER_SPACING_INDEX + 1;
    public final static int MARKER_END_INDEX = LIGHTING_COLOR_INDEX + 1;
    public final static int MARKER_MID_INDEX = MARKER_END_INDEX + 1;
    public final static int MARKER_START_INDEX = MARKER_MID_INDEX + 1;
    public final static int MASK_INDEX = MARKER_START_INDEX + 1;
    public final static int OPACITY_INDEX = MASK_INDEX + 1;
    public final static int OVERFLOW_INDEX = OPACITY_INDEX + 1;
    public final static int POINTER_EVENTS_INDEX = OVERFLOW_INDEX + 1;
    public final static int SRC_INDEX = POINTER_EVENTS_INDEX + 1;
    public final static int SHAPE_RENDERING_INDEX = SRC_INDEX + 1;
    public final static int STOP_COLOR_INDEX = SHAPE_RENDERING_INDEX + 1;
    public final static int STOP_OPACITY_INDEX = STOP_COLOR_INDEX + 1;
    public final static int STROKE_INDEX = STOP_OPACITY_INDEX + 1;
    public final static int STROKE_DASHARRAY_INDEX = STROKE_INDEX + 1;
    public final static int STROKE_DASHOFFSET_INDEX =
        STROKE_DASHARRAY_INDEX + 1;
    public final static int STROKE_LINECAP_INDEX = STROKE_DASHOFFSET_INDEX + 1;
    public final static int STROKE_LINEJOIN_INDEX = STROKE_LINECAP_INDEX + 1;
    public final static int STROKE_MITERLIMIT_INDEX =
        STROKE_LINEJOIN_INDEX + 1;
    public final static int STROKE_OPACITY_INDEX = STROKE_MITERLIMIT_INDEX + 1;
    public final static int STROKE_WIDTH_INDEX = STROKE_OPACITY_INDEX + 1;
    public final static int TEXT_ANCHOR_INDEX = STROKE_WIDTH_INDEX + 1;
    public final static int TEXT_DECORATION_INDEX = TEXT_ANCHOR_INDEX + 1;
    public final static int TEXT_RENDERING_INDEX = TEXT_DECORATION_INDEX + 1;
    public final static int UNICODE_BIDI_INDEX = TEXT_RENDERING_INDEX + 1;
    public final static int VISIBILITY_INDEX = UNICODE_BIDI_INDEX + 1;
    public final static int WORD_SPACING_INDEX = VISIBILITY_INDEX + 1;
    public final static int WRITING_MODE_INDEX = WORD_SPACING_INDEX + 1;
    public final static int FINAL_INDEX = WRITING_MODE_INDEX;
}
| Uni-Sol/batik | sources/org/apache/batik/css/engine/SVGCSSEngine.java | Java | apache-2.0 | 15,600 |
/******************************start app.js******************************/
'use strict';
/*jslint unused: false*/
var App = {};
var page;
var splashImage;
var scrollView;
var panel;
var elementCount;
var elements;
var appBar;
var appCanvas;
var utilityBar;
var navigationDrawer;
var mainContentContainer;
var scrolling = false;
var listeningForScrolling = false;
var scrollViewRegister;
var activeScrollViews;
var isMouseDown;
var isChromeOrSafari;
var feedPage;
var getID = function(id){
return document.getElementById(id);
}
var getClass = function(cl){
return document.getElementsByClassName(cl);
}
// Builds the static page skeleton (app bar, canvas, utility bar,
// navigation drawer), pushes the feed page, and attaches the splash
// image, which starts fading out roughly one second after it appears.
function drawPageElements() {
    scrollViewRegister = [];
    activeScrollViews = [];
    appBar = new AppBar();
    appCanvas = new AppCanvas();
    navigationDrawer = new NavigationDrawer();
    page = document.getElementById('body');
    mainContentContainer = document.createElement('div');
    mainContentContainer.setAttribute('id', 'mainContentContainer');
    page.appendChild(mainContentContainer);
    utilityBar = new UtilityBar();
    mainContentContainer.appendChild(utilityBar.element);
    mainContentContainer.appendChild(appCanvas.element);
    mainContentContainer.appendChild(appBar.element);
    page.appendChild(navigationDrawer.element);
    //add feed page
    feedPage = new FeedPage();
    appCanvas.pushContent(feedPage);
    splashImage = document.createElement('img');
    splashImage.setAttribute('class', 'splashOpen');
    splashImage.setAttribute('src', '/public/images/splash.png');
    setTimeout(function(){
        // BUG FIX: this file runs under 'use strict', so `this` inside a
        // plain setTimeout callback is not the global object; reference
        // the module-level splashImage variable directly.
        page.appendChild(splashImage);
        setTimeout(splashFadeOut, 1000);
    }, 100);
}
function addElementToDict(element, jsObject) {
element.setAttribute('guid', elementCount);
elements[elementCount] = jsObject;
elementCount++;
}
function refreshFeed() {
App.locator.getLoc(function(userLoc){
var event = new CustomEvent('start-feed', { detail: userLoc });
document.dispatchEvent(event);
});
}
// Wires up the page: kicks off the first feed load, builds the DOM
// skeleton, registers touch listeners, and hooks up the new-post form.
function initialize() {
    refreshFeed();
    elements = {};
    elementCount = 0;
    drawPageElements();
    setTouchListeners();
    var messageOut = document.getElementById('message-out');
    var titleOut = document.getElementById('title-out');
    var submit = document.getElementById('submit-post');
    // BUG FIX: addEventListener() requires the event type as its first
    // argument; the original call passed only the listener and threw a
    // TypeError. The debug logger is now attached to 'click'.
    submit.addEventListener('click', function (e) {
        console.log(e);
    });
    // The submit button stays disabled until the message field is non-empty.
    submit.setAttribute('disabled', true);
    messageOut.onkeyup = function(){
        if(messageOut.value.length>0){
            console.log('submit disable false');
            submit.removeAttribute('disabled');
        }else{
            submit.setAttribute('disabled', true);
        }
    };
    // On click: capture the user's location, build the post payload, send
    // it, and slide the compose modal away on success.
    submit.addEventListener('click', function() {
        App.locator.getLoc(function (loc) {
            console.log('data to page: ' + JSON.stringify(loc));
            var data = { timestamp : new Date() };
            data.title = titleOut.value.toString();
            data.body = messageOut.value.toString();
            data.loc = { type: 'Point', coordinates: [ loc.lon, loc.lat ] };
            App.postman.post(data, function (res) {
                console.log('post ok, contents - ' + JSON.stringify(res));
                var postModal = document.getElementById('contentModal');
                postModal.setAttribute('class', '');
                postModal.setAttribute('class', 'contentAreaModalUp');
            });
        });
    }, false);
}
function splashFadeOut() {
splashImage.setAttribute('class', 'splashHide');
setTimeout(splashKill, 700);
}
function splashKill() {
page.removeChild(splashImage);
}
function clicked(element) {
elements[element.getAttribute('guid')].clicked();
}
function onMouseDown(that, event) {
var element = that.element;
if (element != document) {
if (elements[element.getAttribute('guid')] !== null) {
elements[element.getAttribute('guid')].onMouseDown(element, event);
}
}
else {
}
}
// Global mouse-up handler. Events from `document` are forwarded to the
// shared scrollView; events from registered elements go to their wrapper.
// Afterwards every pressed single-select button and all per-gesture
// scroll-view state is reset.
function onMouseUp(that, event)
{
    var element = that.element;
    if (element == document) {
        scrollView.onMouseUp(event);
    }
    else {
        elements[element.getAttribute('guid')].onMouseUp(event);
    }
    // Release every single-select button that was pressed.
    for (var item in elements) {
        if (elements[item] instanceof Button && !elements[item].multiSelect) {
            elements[item].isMouseDown = false;
        }
    }
    // Notify every scroll view that took part in this gesture.
    for (var scrollV in scrollViewRegister) {
        scrollViewRegister[scrollV].onMouseUp(event);
    }
    // Idiom: [] instead of new Array().
    scrollViewRegister = [];
    activeScrollViews = [];
    // NOTE(review): activeButton must be declared with the other globals;
    // assigning an undeclared name throws under 'use strict'.
    activeButton = null;
}
function onMouseMove(element, event) {
if (element == document) {
for (var scrollV in activeScrollViews) {
activeScrollViews[scrollV].onMouseMove(event);
}
}
else {
elements[element.getAttribute('guid')].onMouseMove(event);
}
}
function onMouseOut(element)
{
elements[element.getAttribute('guid')].onMouseOut();
}
function transitionCompleted()
{
elements[this.getAttribute('guid')].transitionCompleted();
}
function onMouseOver(element)
{
elements[element.getAttribute('guid')].onMouseOver();
}
// document.addEventListener( 'touchstart' , function stopScrolling( touchEvent ) { touchEvent.preventDefault(); } , false );
// document.addEventListener( 'touchmove' , function stopScrolling( touchEvent ) { touchEvent.preventDefault(); } , false );
function setTouchListeners() {
document.body.addEventListener('touchmove', function(e) { e.preventDefault(); }, false);
document.getElementById('navigationDrawer').addEventListener('touchmove', function(e) { e.stopPropagation(); }, false);
document.getElementById('appCanvas').addEventListener('touchmove', function(event){ event.stopPropagation(); }, false);
}
/******************************end app.js******************************/
/*******************locator.js start***********************/
'use strict';
/*global alert*/
/*global App*/
/*
-- App.locator --
Keeps track of the user's last known position.
Coordinates are stored in an array as follows:
[ longitude, latitude ]
A timestamp is also kept denoting the last
time a geolocation was successfully fetched.
Events:
* dispatches 'new-location' when a new geo
object is received
Functions:
* getLoc(cb) Updates the user's geolocation
and fires the DOM event 'new-location' with
user's location
* locAge() Returns time since last lookup in
seconds
* showLoc() Simply returns the stored
coordinates without performing another
lookup
* locStatus() returns object with bool of
location validity and the location accuracy
*/
function Locator () {
var userLoc = {
lat: null,
lon: null,
accuracy: null,
timestamp: null
};
var lastGoodLoc;
var maximumAccuracy = 1000;
var positionOptions = {
enableHighAccuracy: false,
timeout: 10000,
maximumAge: 10000
};
function Constructor () { }
Constructor.prototype.getLoc = function (maxAge, maxAccuracy, cb) {
if (typeof arguments[0] === 'function') {
cb = arguments[0];
maxAccuracy = 5000;
maxAge = 600000;
}
if (maxAge) {
positionOptions.maximumAge = maxAge;
}
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(getPositionData, getPositionError, positionOptions);
} else {
alert('Your browser does not support geolocation.');
}
function getPositionData (position) {
userLoc = {
lat: position.coords.latitude,
lon: position.coords.longitude,
accuracy: position.coords.accuracy,
timestamp: position.timestamp
};
var event = new CustomEvent('new-location', { detail: userLoc });
document.dispatchEvent(event);
console.log(position);
if(userLoc.accuracy < maxAccuracy){
//cache the last userLoc of sufficient accuracy
lastGoodLoc = userLoc;
}
if (cb) {
cb(userLoc);
}
}
function getPositionError (error){
console.log(error);
}
};
Constructor.prototype.locAge = function () {
return Date.now() - userLoc.timestamp;
};
Constructor.prototype.showLoc = function () {
return {
userLoc: userLoc,
lastGoodLoc: lastGoodLoc
};
};
return new Constructor();
}
App.locator = new Locator();
/**************************locator.js end****************************/
/*******************postman.js start***********************/
'use strict';
/*global App*/
/*
-- App.postman --
Handles the fetching, storing and rendering of
all posts. The REST endpoint is passed as an
argument when it is initialized.
Events:
* listens for 'new-location' and fetches a new
feed
* dispatches 'feedJSON' once the data for the
new feed is obtained
Functions:
* newFeed(loc) expects a point in the format
{ lon: Num, lat: Num } and sends the request
to the feed endpoint
* post(data, cb) creates a new post with the
JSON in data param, and takes a callback
*/
// Factory for the App.postman module: fetches, stores and renders posts
// against the REST endpoint passed in at initialization time.
function Postman (endpoint) {
    var url = endpoint,   // posts endpoint, e.g. <base>/api/v1/posts
        models,           // last parsed JSON response from XHR
        feed;             // last parsed feed response
    function Constructor () { }
    // Low-level JSON XHR helper.
    //   method - HTTP verb; data - JSON-serializable request body or null;
    //   url    - target URL; async - pass false for a synchronous request;
    //   cb     - invoked with the parsed response body on status 200-399.
    // NOTE(review): on a non-success status the callback is never invoked, so
    // callers get no failure signal; the "return false" inside onload is
    // effectively a no-op.
    Constructor.prototype.XHR = function (method, data, url, async, cb) {
        var req = new XMLHttpRequest();
        req.open(method, url, async);
        req.setRequestHeader('Content-Type', 'application/json;charset=UTF-8');
        //req.responseType = '';
        req.onload = function () {
            if (req.status >= 200 && req.status < 400) {
                models = JSON.parse(req.responseText);
                cb(models);
            } else {
                return false;
            }
        };
        req.onerror = function (err) {
            console.log('XHR Error: ' + JSON.stringify(err));
        };
        if (data) {
            console.log('bad data: ' + JSON.stringify(data));
            req.send(JSON.stringify(data));
        } else {
            req.send();
        }
    };
    // GET the posts endpoint asynchronously.
    Constructor.prototype.fetch = function (cb) {
        return this.XHR('GET', null, url, true, cb);
    };
    // Return the most recently fetched models.
    Constructor.prototype.show = function () {
        return models;
    };
    // Create a new post from the JSON in `data`.
    // NOTE(review): this request is synchronous (async=false), which blocks the
    // UI thread -- presumably intentional; confirm.
    Constructor.prototype.post = function (data, cb) {
        /* location functionality */
        return this.XHR('POST', data, url, false, cb);
    };
    // Post a comment on the post with the given id.
    Constructor.prototype.comment = function (data, id, cb) {
        return this.XHR('POST', data, document.URL + 'api/v1/comments/' + id, true, cb);
    };
    // Request a new feed for the point { lon, lat }; on success stores the
    // parsed feed and dispatches the 'feedJSON' DOM event with it.
    // Uses form encoding rather than the JSON XHR helper above.
    Constructor.prototype.newFeed = function (loc) {
        console.log('data to newFeed function: ' + JSON.stringify(loc));
        var req = new XMLHttpRequest(),
            url = document.URL + 'api/v1/feed';
        req.open('POST', url, true);
        req.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
        req.onload = function (d) {
            feed = JSON.parse(d.currentTarget.responseText);
            var event = new CustomEvent('feedJSON', {detail: feed});
            document.dispatchEvent(event);
            console.log('got a feed, check it:');
            //console.log(App.postman.showFeed());
        };
        req.onerror = function (err) {
            console.log(err);
        };
        // loc.lon = -122;
        // loc.lat = 47;
        var params = 'lon='+loc.lon+'&lat='+loc.lat;
        console.log(params);
        req.send(params);
    };
    // Return the most recently fetched feed.
    Constructor.prototype.showFeed = function () {
        return feed;
    };
    return new Constructor();
}
// Initialize the postman singleton against the posts endpoint.
App.postman = new Postman(document.URL + 'api/v1/posts');
// Receive the DOM event 'start-feed' and query the
// feed endpoint
document.addEventListener('start-feed', function (e) {
    console.log('data to new loc event ' + JSON.stringify(e.detail));
    App.postman.newFeed(e.detail);
});
/**************************postman.js end****************************/
/*******************heartbeat.js start***********************/
'use strict';
/*global App*/
/*
-- App.heartbeat --
Acts as a controller for update behavior.
Responsible for all periodic front-end behavior.
Runs 1 second setTimeout loop and checks time
deltas for other behavior to fire them.
Currently updates App.locator.getLoc(cb)
depending on the geolocation mode
Functions:
* startBeat() starts the setInterval beat
function
* stopBeat() stops the currently running
beat function
*/
/*
 * Factory for the App.heartbeat controller: runs a 1-second interval loop
 * responsible for all periodic front-end behavior. Currently only inspects
 * the location age in 'rapid' mode (the refresh call itself is disabled).
 */
function Heartbeat () {
    var timerId;                 // setInterval handle; falsy when stopped
    var mode = 'rapid';          // geolocation refresh mode
    var rapidFreqMs = 5000;      // max acceptable fix age (ms) in rapid mode

    // One tick of periodic work.
    function tick () {
        if (mode === 'rapid') {
            var age = App.locator.locAge();
            if (age <= 0 || age > rapidFreqMs) {
                //App.locator.getLoc(function(loc){console.log(loc);});
            }
        }
    }

    function Constructor () {}

    // Start the 1-second beat loop; no-op if already running.
    Constructor.prototype.startBeat = function () {
        if (!timerId) {
            timerId = setInterval(tick, 1000);
            console.log('beat function started');
        }
    };

    // Stop the beat loop; no-op if not running.
    Constructor.prototype.stopBeat = function () {
        if (timerId) {
            clearInterval(timerId);
            timerId = null;
        }
    };

    return new Constructor();
}
// Create the heartbeat singleton and start the periodic tick loop immediately.
App.heartbeat = new Heartbeat();
App.heartbeat.startBeat();
/**************************heartbeat.js end****************************/
| cvince/missout-app | app.js | JavaScript | apache-2.0 | 12,491 |
---
title: "Developer Journey with Buildpacks"
linkTitle: "Developer Journey"
weight: 100
---
# Skaffold Developer Journey with Buildpacks Tutorial
## Introduction
### What is this project?
Skaffold allows developers to easily transition from local development on minikube to remote development on an enterprise Kubernetes cluster managed by IT. During the transition from local to remote deployment, a security team might ask a developer to patch a library with a specific vulnerability in it. This is where Skaffold's support for buildpacks comes in handy. In this tutorial, you'll start out deploying an application locally, swap out buildpacks in the **skaffold.yaml** file to use the latest libraries, and then deploy the application to a remote Kubernetes cluster.
For a guided Cloud Shell tutorial on how a developer's journey might look in adopting Skaffold, follow:
[](https://ssh.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/GoogleContainerTools/skaffold&cloudshell_workspace=examples/dev-journey-buildpacks&cloudshell_tutorial=tutorial.md)
### What you'll learn
* How to develop on minikube locally and move to an enterprise managed kubernetes cluster with ease
* How to easily switch buildpack versions to comply with security demands
___
**Time to complete**: <walkthrough-tutorial-duration duration=15></walkthrough-tutorial-duration>
Click the **Start** button to move to the next step.
## Prepare the Environment
### Create a Project
Create a project called **skaffold-tutorial** with a valid billing account in the Google Cloud [Manage Resources Console](https://console.cloud.google.com/cloud-resource-manager). In the shell run:
```bash
gcloud config set project skaffold-tutorial
```
### Run the Setup Script
Run the start script to prepare the environment. The script will:
* Enable the GCE and GKE APIs, which are needed in order to spin up a GKE cluster
* Create a network and subnet for the GKE cluster to use
* Deploy a one node GKE cluster to optimize for cost
* Install the latest version of skaffold. Although Skaffold is already installed on cloud shell, we'll install the latest version for good measure.
```bash
chmod +x start.sh && ./start.sh
```
**Note:** answer **y** when prompted to enable the GKE and GCE APIs
### Start a Minikube cluster
We'll use minikube for local kubernetes development. Minikube is a tool that optimizes kubernetes for local deployments, which makes it perfect for development and testing. Cloud Shell already has minikube installed, but you can install it yourself by running **gcloud components install minikube** or following [these instructions](https://minikube.sigs.k8s.io/docs/start/).
Run:
```bash
minikube start
```
## Run the App to Minikube Using Skaffold
### Deploy the App
Start skaffold in development mode which will constantly monitor the current directory for changes and kick off a new build and deploy whenever changes are detected.
```bash
skaffold dev
```
**Important:** note the software versions under the **DETECTING** phase of the buildpack output. These will be important later.
We will be working in three terminals during this tutorial. The current terminal you're in will be referred to as **Terminal A**. Open a second terminal, which we will call **Terminal B**. In order to connect to the application to make sure its working, you need to start the minikube load balancer by running:
```bash
minikube tunnel
```
Open a third terminal, which we will refer to as **Terminal C**. Find out the load balancer IP by recording the external IP as **EXTERNAL_IP** after running:
```bash
kubectl get service
```
Ensure the app is responding by running:
```bash
curl http://EXTERNAL_IP:8080
```
### Change the App and Trigger a Redeploy
Edit the part of the application responsible for returning the "Hello World!" message you saw previously:
```bash
vim src/main/java/hello/HelloController.java
```
Change the return line to **return "Hello, Skaffold!"**.
Switch back to **Terminal A** and see that it's rebuilding and redeploying the app.
Switch back to **Terminal C** and run the following command to watch until only one pod is running:
```bash
watch kubectl get pods
```
Once there is only one pod running, meaning the latest pod is deployed, see that your app changes are live:
```bash
curl http://EXTERNAL_IP:8080
```
### Updating Buildpacks
Perhaps the best benefit of buildpacks is that it reduces how much work developers need to do to patch their applications if the security team highlights a library vulnerability. [Google Cloud buildpacks](https://cloud.google.com/blog/products/containers-kubernetes/google-cloud-now-supports-buildpacks) use a managed base Ubuntu 18.04 image that is regularly scanned for security vulnerabilities; any detected vulnerabilities are automatically patched. These patches are included in the latest revision of the builder. A builder contains one or more buildpacks supporting several languages. Our **skaffold.yaml** points to an older builder release, which uses older buildpack versions that may pull in vulnerable libraries. We will be updating the builder release to use the most up-to-date buildpacks.
Edit the **skaffold.yaml** file:
```bash
vim skaffold.yaml
```
Update the builder line to **gcr.io/buildpacks/builder:v1**, which will use the latest builder that has more up-to-date buildpacks.
Switch back to **Terminal A** and see that it's rebuilding and redeploying the app.
**IMPORTANT:** compare the software versions under the **DETECTING** phase of the buildpack output to the ones you saw before. The builder is now using newer buildpack versions.
Switch back to **Terminal C** and run the following command to watch until only one pod is running:
```bash
watch kubectl get pods
```
Once there is only one pod running, meaning the latest pod is deployed, see that your app changes are live:
```bash
curl http://EXTERNAL_IP:8080
```
## Deploy the App to Enterprise GKE Cluster Using Skaffold
### Switch kubectl Context to the Enterprise GKE Cluster
Switch your local kubectl context to the enterprise GKE cluster and get the latest credentials:
```bash
gcloud container clusters get-credentials $(gcloud config get-value project)-cluster --zone us-central1-a
```
See that kubectl is now configured to use the remote kubernetes cluster instead of minikube (denoted by the asterisk)
```bash
kubectl config get-contexts
```
### Deploy the App
Attempt to deploy the app by running:
```bash
skaffold dev --default-repo=gcr.io/$(gcloud config get-value project)
```
Switch back to **Terminal C** and run the following command to watch until only one pod is running:
```bash
watch kubectl get pods
```
Run the following command. Once an external IP is assigned to the service, record it as **EXTERNAL_IP**:
```bash
watch kubectl get service
```
See that your app changes are now live on an Internet-accessible IP:
```bash
curl http://EXTERNAL_IP:8080
```
## Congratulations!
That's the end of the tutorial. You now know how to seamlessly transition between local kubernetes development and remote development on a kubernetes cluster managed by your enterprise IT team. Along the way you learned how to quickly patch your application libraries to comply with security standards.
I hope this tutorial was informative. Good luck on your journey with Skaffold! | GoogleContainerTools/skaffold | docs/content/en/docs/tutorials/developer-journey.md | Markdown | apache-2.0 | 7,448 |
package pl.touk.nussknacker.engine.definition
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.expression.TypedExpression
import pl.touk.nussknacker.engine.api.lazyparam.EvaluableLazyParameter
import pl.touk.nussknacker.engine.api.process.ComponentUseCase
import pl.touk.nussknacker.engine.api.typed.TypedMap
import pl.touk.nussknacker.engine.api.typed.typing._
import pl.touk.nussknacker.engine.compile.ExpressionCompiler
import pl.touk.nussknacker.engine.compiledgraph
import pl.touk.nussknacker.engine.expression.ExpressionEvaluator
import pl.touk.nussknacker.engine.graph.expression.Expression
import pl.touk.nussknacker.engine.api.NodeId
import pl.touk.nussknacker.engine.util.SynchronousExecutionContext
import scala.collection.immutable.ListMap
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContext, Future}
// This class is public for tests purpose. Be aware that its interface can be changed in the future
// Lazily evaluated parameter backed by a raw (not yet compiled) expression.
// Compilation is deferred until prepareEvaluator is called with an interpreter
// that carries the compiler/evaluator dependencies.
case class ExpressionLazyParameter[T <: AnyRef](nodeId: NodeId,
                                                parameterDef: definition.Parameter,
                                                expression: Expression,
                                                returnType: TypingResult) extends EvaluableLazyParameter[T] {
  override def prepareEvaluator(compilerInterpreter: LazyParameterInterpreter)(implicit ec: ExecutionContext): Context => Future[T] = {
    // Only CompilerLazyParameterInterpreter carries the deps needed here; any
    // other implementation would fail this cast.
    val compilerLazyInterpreter = compilerInterpreter.asInstanceOf[CompilerLazyParameterInterpreter]
    // Context validation is skipped: a compilation failure at this point is a
    // programming error, hence the IllegalArgumentException.
    val compiledExpression = compilerLazyInterpreter.deps.expressionCompiler
      .compileWithoutContextValidation(expression, parameterDef.name, parameterDef.typ)(nodeId)
      .valueOr(err => throw new IllegalArgumentException(s"Compilation failed with errors: ${err.toList.mkString(", ")}"))
    val evaluator = compilerLazyInterpreter.deps.expressionEvaluator
    // NOTE(review): the TypedExpression is built with Unknown typing and a null
    // expected-type slot -- presumably unused downstream; confirm before relying on them.
    val compiledParameter = compiledgraph.evaluatedparam.Parameter(TypedExpression(compiledExpression, Unknown, null), parameterDef)
    context: Context => Future.successful(evaluator.evaluateParameter(compiledParameter, context)(nodeId, compilerLazyInterpreter.metaData)).map(_.value.asInstanceOf[T])(ec)
  }
}
// LazyParameterInterpreter that compiles expressions on demand using the
// dependencies supplied via `deps`.
trait CompilerLazyParameterInterpreter extends LazyParameterInterpreter {

  // Compiler and evaluator dependencies plus the process timeout.
  def deps: LazyInterpreterDependencies

  def metaData: MetaData

  //it's important that it's (...): (Context => Future[T])
  //and not e.g. (...)(Context) => Future[T] as we want to be sure when body is evaluated (in particular expression compilation)!
  private[definition] def createInterpreter[T <: AnyRef](ec: ExecutionContext, definition: LazyParameter[T]): Context => Future[T] = {
    definition match {
      case e: EvaluableLazyParameter[T] => e.prepareEvaluator(this)(ec)
      case _ => throw new IllegalArgumentException(s"LazyParameter $definition is not supported")
    }
  }

  // Synchronous variant: blocks on the future, bounded by deps.processTimeout.
  override def syncInterpretationFunction[T <: AnyRef](lazyInterpreter: LazyParameter[T]): Context => T = {
    implicit val ec: ExecutionContext = SynchronousExecutionContext.ctx
    val interpreter = createInterpreter(ec, lazyInterpreter)
    v1: Context => Await.result(interpreter(v1), deps.processTimeout)
  }
}
// Serializable bundle of everything needed to compile and evaluate lazy
// parameters (Serializable so it can be shipped to remote executors).
case class LazyInterpreterDependencies(expressionEvaluator: ExpressionEvaluator,
                                       expressionCompiler: ExpressionCompiler,
                                       processTimeout: FiniteDuration) extends Serializable
// Method-definition extractor for CustomStreamTransformer implementations.
// No particular return type is required; NodeId, MetaData and ComponentUseCase
// may be injected as additional (non-parameter) dependencies.
object CustomStreamTransformerExtractor extends AbstractMethodDefinitionExtractor[CustomStreamTransformer] {
  override protected val expectedReturnType: Option[Class[_]] = None
  override protected val additionalDependencies: Set[Class[_]] = Set[Class[_]](classOf[NodeId], classOf[MetaData], classOf[ComponentUseCase])
} | TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/definition/CompilerLazyParameter.scala | Scala | apache-2.0 | 3,803 |
/* Fixture functions for the pointcut instrumentation test.
 * NOTE(review): each is declared void yet returns a value, which violates a C
 * constraint (C99 6.8.6.4) -- presumably an intentional input for the COMPASS
 * pointcut tool under test; confirm before "fixing" the signatures. */
void a() {
    return 1;
}
void b() {
    return 2;
}
void c() {
    return 3;
}
void d() {
    return 4;
}
void e() {
    return 5;
}
void f() {
    return 6;
}
/* Drives the pointcut test: registers c, d and e to be appended after calls
 * to z, then evaluates pointcut(z) and returns its result.
 * NOTE(review): motion_append_after_call, pointcut and z are not declared in
 * this translation unit -- presumably supplied or synthesized by the COMPASS
 * test harness; this file is not expected to compile standalone. */
int main( int argc, char** argv ) {
    int i;
    motion_append_after_call( c, z );
    motion_append_after_call( d, z );
    motion_append_after_call( e, z );
    i = pointcut( z );
    return i;
}
| quenette/COMPASS-I | t/scripts/test-compass-pointcut02.c | C | apache-2.0 | 362 |
package main
import (
"fmt"
"io/ioutil"
"os"
)
// main prints the contents of data/simple.txt to stdout; if the file cannot
// be read it prints the error and exits with status 1.
func main() {
	content, err := ioutil.ReadFile("data/simple.txt")
	if err == nil {
		fmt.Print(string(content))
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
| mhausenblas/letsgo | snippets/s09/main.go | GO | apache-2.0 | 192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.test.functional;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URL;
import java.security.SecureRandom;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import org.apache.accumulo.cluster.ClusterControl;
import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.cli.ScannerOpts;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.admin.TableOperations;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.PrintInfo;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.MonitorUtil;
import org.apache.accumulo.core.zookeeper.ZooUtil;
import org.apache.accumulo.fate.zookeeper.ZooCache;
import org.apache.accumulo.fate.zookeeper.ZooLock;
import org.apache.accumulo.fate.zookeeper.ZooReader;
import org.apache.accumulo.harness.AccumuloClusterIT;
import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.test.TestIngest;
import org.apache.accumulo.test.TestMultiTableIngest;
import org.apache.accumulo.test.VerifyIngest;
import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
import org.apache.accumulo.test.categories.SunnyDayTests;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterators;
@Category({StandaloneCapableClusterTests.class, SunnyDayTests.class})
public class ReadWriteIT extends AccumuloClusterIT {
private static final Logger log = LoggerFactory.getLogger(ReadWriteIT.class);
static final int ROWS = 200000;
static final int COLS = 1;
static final String COLF = "colf";
@Override
protected int defaultTimeoutSeconds() {
  // Each test method is allowed up to six minutes before timing out.
  final int minutes = 6;
  return minutes * 60;
}
/**
 * Connecting with a bogus instance name (against real ZooKeepers) must fail
 * with a RuntimeException rather than succeeding or hanging.
 */
@Test(expected = RuntimeException.class)
public void invalidInstanceName() throws Exception {
  final Connector conn = getConnector();
  new ZooKeeperInstance("fake_instance_name", conn.getInstance().getZooKeepers());
}
/**
 * End-to-end smoke test: start the monitor, ingest and verify data, fetch the
 * monitor's web page (HTTP or HTTPS depending on cluster config), shut the
 * cluster down cleanly, then restart it for subsequent tests.
 */
@Test
public void sunnyDay() throws Exception {
  // Start accumulo, create a table, insert some data, verify we can read it out.
  // Shutdown cleanly.
  log.debug("Starting Monitor");
  cluster.getClusterControl().startAllServers(ServerType.MONITOR);
  Connector connector = getConnector();
  String tableName = getUniqueNames(1)[0];
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  // Poll until the monitor has registered its HTTP address in ZooKeeper.
  String monitorLocation = null;
  while (null == monitorLocation) {
    monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
    if (null == monitorLocation) {
      log.debug("Could not fetch monitor HTTP address from zookeeper");
      Thread.sleep(2000);
    }
  }
  String scheme = "http://";
  if (getCluster() instanceof StandaloneAccumuloCluster) {
    // A standalone cluster may run the monitor over SSL; detect this from its
    // accumulo-site.xml and relax certificate/hostname checks for the fetch below.
    StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
    File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
    if (accumuloSite.isFile()) {
      Configuration conf = new Configuration(false);
      conf.addResource(new Path(accumuloSite.toURI()));
      String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
      if (null != monitorSslKeystore) {
        log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}", accumuloSite);
        scheme = "https://";
        SSLContext ctx = SSLContext.getInstance("SSL");
        TrustManager[] tm = new TrustManager[] {new TestTrustManager()};
        ctx.init(new KeyManager[0], tm, new SecureRandom());
        SSLContext.setDefault(ctx);
        HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
        HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
      }
    } else {
      log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
    }
  }
  URL url = new URL(scheme + monitorLocation);
  log.debug("Fetching web page " + url);
  String result = FunctionalTestUtils.readAll(url.openStream());
  assertTrue(result.length() > 100);
  log.debug("Stopping accumulo cluster");
  ClusterControl control = cluster.getClusterControl();
  control.adminStopAll();
  // Wait for the master to release its ZooKeeper lock before stopping the
  // remaining server processes.
  ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
  ZooCache zcache = new ZooCache(zreader, null);
  byte[] masterLockData;
  do {
    masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
    if (null != masterLockData) {
      log.info("Master lock is still held");
      Thread.sleep(1000);
    }
  } while (null != masterLockData);
  control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
  control.stopAllServers(ServerType.MONITOR);
  control.stopAllServers(ServerType.TRACER);
  log.debug("success!");
  // Restarting everything
  cluster.start();
}
/** Ingest using the default column family {@code "colf"}. */
public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
    throws Exception {
  ingest(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
}
/**
 * Ingests a grid of test data into {@code tableName}, creating the table if needed.
 *
 * @param rows number of rows to write
 * @param cols number of columns per row
 * @param width value size in bytes
 * @param offset starting row number
 * @param colf column family to write under
 */
public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
    String tableName) throws Exception {
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.rows = rows;
  opts.cols = cols;
  opts.dataSize = width;
  opts.startRow = offset;
  opts.columnFamily = colf;
  opts.createTable = true;
  opts.setTableName(tableName);
  // Under SASL there is no password; credentials come from Kerberos instead.
  if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
    opts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(principal);
  }
  TestIngest.ingest(connector, opts, new BatchWriterOpts());
}
/** Verify using the default column family {@code "colf"}. */
public static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
    throws Exception {
  verify(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
}
/**
 * Verifies data previously written by {@link #ingest}; parameters mirror the
 * ingest call that produced the data.
 */
private static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
    String tableName) throws Exception {
  ScannerOpts scannerOpts = new ScannerOpts();
  VerifyIngest.Opts opts = new VerifyIngest.Opts();
  opts.rows = rows;
  opts.cols = cols;
  opts.dataSize = width;
  opts.startRow = offset;
  opts.columnFamily = colf;
  opts.setTableName(tableName);
  // Under SASL there is no password; credentials come from Kerberos instead.
  if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
    opts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(principal);
  }
  VerifyIngest.verifyIngest(connector, opts, scannerOpts);
}
/** Varargs-to-array helper for building process argument lists. */
public static String[] args(String... args) {
  return args;
}
/**
 * Runs two {@link TestMultiTableIngest} processes concurrently: one writing to
 * multiple tables and one reading them back ({@code --readonly}). Both must
 * exit with status 0.
 */
@Test
public void multiTableTest() throws Exception {
  // Write to multiple tables
  final String instance = cluster.getInstanceName();
  final String keepers = cluster.getZooKeepers();
  final ClusterControl control = cluster.getClusterControl();
  final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
  ExecutorService svc = Executors.newFixedThreadPool(2);
  Future<Integer> p1 = svc.submit(multiTableIngestTask(control, instance, keepers, prefix, false));
  Future<Integer> p2 = svc.submit(multiTableIngestTask(control, instance, keepers, prefix, true));
  svc.shutdown();
  while (!svc.isTerminated()) {
    svc.awaitTermination(15, TimeUnit.SECONDS);
  }
  assertEquals(0, p1.get().intValue(), 0);
  assertEquals(0, p2.get().intValue());
}

/**
 * Builds a task that launches {@link TestMultiTableIngest} as an external
 * process and returns its exit code, or -1 on launch failure.
 *
 * @param readonly when true, the process verifies instead of writing
 */
private Callable<Integer> multiTableIngestTask(final ClusterControl control, final String instance, final String keepers, final String prefix,
    final boolean readonly) {
  return new Callable<Integer>() {
    @Override
    public Integer call() {
      try {
        List<String> cmd = new ArrayList<>();
        cmd.add("--count");
        cmd.add(Integer.toString(ROWS));
        if (readonly) {
          cmd.add("--readonly");
        }
        ClientConfiguration clientConf = cluster.getClientConfig();
        // Invocation is different for SASL. We're only logged in via this processes memory (not via some credentials cache on disk)
        // Need to pass along the keytab because of that.
        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
          String principal = getAdminPrincipal();
          AuthenticationToken token = getAdminToken();
          assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
          KerberosToken kt = (KerberosToken) token;
          assertNotNull("Expected keytab in token", kt.getKeytab());
          Collections.addAll(cmd, "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab().getAbsolutePath(), "-u", principal);
        } else {
          Collections.addAll(cmd, "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p",
              new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8), "--tablePrefix", prefix);
        }
        return control.exec(TestMultiTableIngest.class, cmd.toArray(new String[cmd.size()]));
      } catch (IOException e) {
        log.error("Error running MultiTableIngest", e);
        return -1;
      }
    }
  };
}
/** Writes and reads back two large (500KB) values. */
@Test
public void largeTest() throws Exception {
  // write a few large values
  Connector connector = getConnector();
  String table = getUniqueNames(1)[0];
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
}
/** Reads and writes the same table concurrently; see {@link #interleaveTest}. */
@Test
public void interleaved() throws Exception {
  // read and write concurrently
  final Connector connector = getConnector();
  final String tableName = getUniqueNames(1)[0];
  interleaveTest(connector, tableName);
}
/**
 * Alternates ingest and verification: while this thread writes the next chunk
 * of CHUNKSIZE rows, a background thread verifies the chunk written in the
 * previous iteration; the final chunk is verified at the end.
 */
static void interleaveTest(final Connector connector, final String tableName) throws Exception {
  final AtomicBoolean fail = new AtomicBoolean(false);
  final int CHUNKSIZE = ROWS / 10;
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, 0, tableName);
  int i;
  for (i = 0; i < ROWS; i += CHUNKSIZE) {
    final int start = i;
    // Verify the previously written chunk concurrently with the next ingest.
    Thread verify = new Thread() {
      @Override
      public void run() {
        try {
          verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, start, tableName);
        } catch (Exception ex) {
          // NOTE(review): only a boolean is recorded; the exception's stack
          // trace is lost, making failures here hard to diagnose.
          fail.set(true);
        }
      }
    };
    verify.start();
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
    verify.join();
    assertFalse(fail.get());
  }
  // The last chunk was written with no concurrent reader; verify it now.
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i, tableName);
}
/** Shorthand for wrapping a String in a Hadoop {@link Text}. */
public static Text t(String s) {
  return new Text(s);
}
/**
 * Builds a single-entry mutation for the given row/family/qualifier/value.
 * The value is encoded explicitly as UTF-8 rather than relying on the
 * platform default charset (the bare {@code value.getBytes()} used before),
 * matching the UTF_8 usage elsewhere in this class.
 */
public static Mutation m(String row, String cf, String cq, String value) {
  Mutation m = new Mutation(t(row));
  m.put(t(cf), t(cq), new Value(value.getBytes(UTF_8)));
  return m;
}
/**
 * Verifies that locality groups speed up look-ups: after isolating family
 * "colf" in locality group g1 and compacting, scanning "colf2" (a single entry
 * outside the group) must be faster than scanning the 2000-entry "colf".
 */
@Test
public void localityGroupPerf() throws Exception {
  // verify that locality groups can make look-ups faster
  final Connector connector = getConnector();
  final String tableName = getUniqueNames(1)[0];
  connector.tableOperations().create(tableName);
  connector.tableOperations().setProperty(tableName, "table.group.g1", "colf");
  connector.tableOperations().setProperty(tableName, "table.groups.enabled", "g1");
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
  connector.tableOperations().compact(tableName, null, null, true, true);
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
  bw.close();
  long now = System.currentTimeMillis();
  Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
  scanner.fetchColumnFamily(new Text("colf"));
  Iterators.size(scanner.iterator());
  long diff = System.currentTimeMillis() - now;
  now = System.currentTimeMillis();
  scanner = connector.createScanner(tableName, Authorizations.EMPTY);
  scanner.fetchColumnFamily(new Text("colf2"));
  Iterators.size(scanner.iterator());
  // Fix: a stray second bw.close() used to execute here, inside the timed
  // region for the second scan, even though the writer was already closed
  // above; it was a copy-paste leftover and skewed the diff2 measurement.
  long diff2 = System.currentTimeMillis() - now;
  assertTrue(diff2 < diff);
}
@Test
public void sunnyLG() throws Exception {
  // create a locality group, write to it and ensure it exists in the RFiles
  // that result (checked by capturing rfile PrintInfo's stdout).
  final Connector connector = getConnector();
  final String tableName = getUniqueNames(1)[0];
  connector.tableOperations().create(tableName);
  Map<String,Set<Text>> groups = new TreeMap<>();
  groups.put("g1", Collections.singleton(t("colf")));
  connector.tableOperations().setLocalityGroups(tableName, groups);
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
  connector.tableOperations().flush(tableName, null, null, true);
  // Scan the metadata table for this table's data-file entries.
  BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
  String tableId = connector.tableOperations().tableIdMap().get(tableName);
  bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
  bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  boolean foundFile = false;
  for (Entry<Key,Value> entry : bscanner) {
    foundFile = true;
    // Redirect System.out while PrintInfo dumps the RFile metadata, then
    // restore it in the finally block below.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream newOut = new PrintStream(baos);
    PrintStream oldOut = System.out;
    try {
      System.setOut(newOut);
      List<String> args = new ArrayList<>();
      args.add(entry.getKey().getColumnQualifier().toString());
      // Standalone clusters with SASL RPC need explicit Hadoop config files
      // passed to PrintInfo.
      if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        args.add("--config");
        StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
        String hadoopConfDir = sac.getHadoopConfDir();
        args.add(new Path(hadoopConfDir, "core-site.xml").toString());
        args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
      }
      log.info("Invoking PrintInfo with " + args);
      PrintInfo.main(args.toArray(new String[args.size()]));
      newOut.flush();
      String stdout = baos.toString();
      // The dump must mention the configured group and its column family.
      assertTrue(stdout.contains("Locality group : g1"));
      assertTrue(stdout.contains("families : [colf]"));
    } finally {
      newOut.close();
      System.setOut(oldOut);
    }
  }
  bscanner.close();
  assertTrue(foundFile);
}
@Test
public void localityGroupChange() throws Exception {
  // Make changes to locality groups and ensure nothing is lost.
  final Connector connector = getConnector();
  String table = getUniqueNames(1)[0];
  TableOperations to = connector.tableOperations();
  to.create(table);
  // First pass: change the groups BEFORE each ingest.
  String[] config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf,xyz;lg2:c1,c2"};
  int i = 0;
  for (String cfg : config) {
    to.setLocalityGroups(table, getGroups(cfg));
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * (i + 1), 1, 50, ROWS * i, table);
    to.flush(table, null, null, true);
    // NOTE(review): first numeric arg is 0 here while ingest used
    // ROWS * (i + 1) — confirm this matches verify()'s intended
    // (rows, cols, width, offset) argument order.
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 0, 1, 50, ROWS * (i + 1), table);
    i++;
  }
  // Second pass: ingest first (two column families), THEN change groups,
  // flush, and re-verify both families survived the regrouping.
  to.delete(table);
  to.create(table);
  config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
  i = 1;
  for (String cfg : config) {
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
    to.setLocalityGroups(table, getGroups(cfg));
    to.flush(table, null, null, true);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
    i++;
  }
}
/**
 * Parses a locality-group configuration string into the map expected by
 * {@code TableOperations.setLocalityGroups}.
 *
 * <p>Format: semicolon-separated group entries, each of the form
 * {@code name:col1,col2,...} (e.g. {@code "lg1:colf,xyz;lg2:c1,c2"}).
 *
 * @param cfg the configuration string, or null for "no groups"
 * @return map of group name to the set of column families in that group
 */
private Map<String,Set<Text>> getGroups(String cfg) {
  Map<String,Set<Text>> groups = new TreeMap<>();
  if (cfg != null) {
    for (String group : cfg.split(";")) {
      String[] parts = group.split(":");
      Set<Text> cols = new HashSet<>();
      for (String col : parts[1].split(",")) {
        cols.add(t(col));
      }
      // BUG FIX: key by the group NAME (parts[0]). The previous code keyed
      // by parts[1] (the column list), which produced bogus group names
      // such as "colf,xyz" instead of "lg1".
      groups.put(parts[0], cols);
    }
  }
  return groups;
}
/**
 * Trust manager that accepts ALL client and server certificates.
 * For tests against self-signed certificates only — never use in
 * production code.
 */
private static class TestTrustManager implements X509TrustManager {
  @Override
  public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}

  @Override
  public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}

  @Override
  public X509Certificate[] getAcceptedIssuers() {
    // Return an empty array instead of null: the X509TrustManager contract
    // expects a (possibly empty) non-null array, and some TLS code throws
    // NullPointerException when given null.
    return new X509Certificate[0];
  }
}
/**
 * Hostname verifier that accepts ANY hostname, disabling hostname
 * verification for tests against standalone/self-signed clusters.
 * Never use in production code.
 */
private static class TestHostnameVerifier implements HostnameVerifier {
  @Override
  public boolean verify(String hostname, SSLSession session) {
    return true;
  }
}
}
| adamjshook/accumulo | test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java | Java | apache-2.0 | 22,281 |
package com.venturocket.api.client.listing.object;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.JsonSerializer;
import org.codehaus.jackson.map.SerializerProvider;
import java.io.IOException;
/**
* User: Joe Linn
* Date: 1/5/14
* Time: 2:51 AM
*/
public class EnumSerializer extends JsonSerializer<ListingEnum> {
    /**
     * Writes the enum as a plain JSON string, using its {@code toString()}
     * representation.
     */
    @Override
    public void serialize(ListingEnum value, JsonGenerator jgen, SerializerProvider provider) throws IOException, JsonProcessingException {
        String text = value.toString();
        jgen.writeString(text);
    }
}
| Venturocket/venturocket-api-java | src/main/java/com/venturocket/api/client/listing/object/EnumSerializer.java | Java | apache-2.0 | 609 |
/**
 * Airport manager: delegates flight administration to an {@link Airport}
 * and guards passenger/state operations against unknown flight ids.
 */
public class Manager implements IManager {
    private Airport mAirport;

    /**
     * @param pAirport the airport this manager operates on
     */
    public Manager(Airport pAirport) {
        super();
        // Parameter renamed from mAirport to avoid shadowing the field and
        // to follow the pXxx parameter convention used elsewhere.
        this.mAirport = pAirport;
    }

    @Override
    public void addFlight(Flight pFlight) {
        mAirport.addFlight(pFlight);
    }

    @Override
    public void changeState(long idFlight, State pState) {
        Flight f = mAirport.findFlight(idFlight);
        // Silently ignore unknown flight ids, as before.
        if (f != null)
            f.setState(pState);
    }

    @Override
    public void addPassengerToFlight(Passenger pPassenger, long idFlight) {
        Flight f = mAirport.findFlight(idFlight);
        // Guard added for consistency with changeState(); previously an
        // unknown flight id caused a NullPointerException here.
        if (f != null)
            f.addPassenger(pPassenger);
    }

    @Override
    public void removePassengerFromFlight(Passenger pPassenger, long idFlight) {
        Flight f = mAirport.findFlight(idFlight);
        // Same null-flight guard as addPassengerToFlight().
        if (f != null)
            f.removePassenger(pPassenger);
    }

    @Override
    public void showReport() {
        // Print one line per flight via the shared Printer utility.
        for (Flight flight : mAirport.getFlights()) {
            Printer.show(flight.toString());
        }
    }
}
| RakickayaKaterina/Java-training | task3/4/src/Manager.java | Java | apache-2.0 | 909 |
---
copyright:
years: 2016, 2017
lastupdated: "2016-11-29"
---
{:new_window: target="\_blank"}
{:shortdesc: .shortdesc}
# Creating apps with the {{site.data.keyword.iotelectronics}} starter
{{site.data.keyword.iotelectronics_full}} is an integrated, end-to-end solution that enables your apps to communicate with, control, analyze, and update connected appliances. The starter includes a starter app that you can use to create simulated appliances and a sample mobile app that you can use to control those appliances from your mobile device.
{:shortdesc}
## Before you begin
Before you begin, you must deploy an instance of the {{site.data.keyword.iotelectronics}} starter in your {{site.data.keyword.Bluemix_notm}}
organization. Deploying an instance automatically deploys the component applications and services of the starter.
You can [find the {{site.data.keyword.iotelectronics}} starter](https://console.{DomainName}/catalog/starters/iot-for-electronics-starter/) in the Boilerplates section of the {{site.data.keyword.Bluemix_notm}} catalog.
## Getting started with {{site.data.keyword.iotelectronics}}
To get started, complete the following tasks:
1. [Enable mobile communications and security](iotelectronics_config_mca.html) by configuring {{site.data.keyword.amafull}}.
2. [Create simulated appliances](iot4ecreatingappliances.html) by using the {{site.data.keyword.iotelectronics}} starter web application. For the purposes of demonstration, washers are used as the simulated appliance within the {{site.data.keyword.iotelectronics}} starter. The appliance you choose to connect could be any type of smart electronics device.
3. [Download and connect](iotelectronics_config_mobile.html) the sample mobile app.
## What's next
See what you can do with {{site.data.keyword.iotelectronics}}.
- [Explore the starter app](iot4ecreatingappliances.html) to experience how an enterprise manufacturer can monitor appliances that are connected to the {{site.data.keyword.iot_short_notm}}.
- [Explore the sample mobile app](iotelectronics_config_mobile.html) to experience how appliance owners can register and interact with their appliances.
- [Explore the APIs](http://ibmiotforelectronics.mybluemix.net/public/iot4eregistrationapi.html) to see how you can customize and expand your own {{site.data.keyword.iotelectronics}} apps.
# Related Links
{: #rellinks}
<!-- Related Links last updated 23 October 2016 - new API source -->
## API documentation
{: #api}
* [{{site.data.keyword.iotelectronics}} API](https://broker-uss-iot4e.electronics.internetofthings.ibmcloud.com/public/iot4eregistrationapi.html){:new_window}
* [{{site.data.keyword.iot_short}} API](https://developer.ibm.com/iotfoundation/recipes/api-documentation/){:new_window}
## Components
{: #general}
* [{{site.data.keyword.iotelectronics}} documentation](iotelectronics_overview.html){:new_window}
* [{{site.data.keyword.iot_full}} documentation](https://console.ng.bluemix.net/docs/services/IoT/index.html){:new_window}
* [{{site.data.keyword.amashort}} documentation](https://console.ng.bluemix.net/docs/services/mobileaccess/overview.html){:new_window}
* [{{site.data.keyword.sdk4nodefull}} documentation](https://console.ng.bluemix.net/docs/runtimes/nodejs/index.html#nodejs_runtime){:new_window}
## Samples
{: #samples}
* [Sample mobile app](https://console.ng.bluemix.net/docs/starters/IotElectronics/iotelectronics_config_mobile.html){:new_window}
| patsmith-ibm/docs | starters/IotElectronics/iot4egettingstarted.md | Markdown | apache-2.0 | 3,440 |
package org.mockserver.matchers;
import org.mockserver.model.HttpRequest;
/**
* @author jamesdbloom
*/
public interface Matcher<T> {
    /**
     * Tests whether {@code t} is considered a match in the context of the
     * given request.
     *
     * @param context the request being matched, supplying matching context
     * @param t       the value to test against this matcher
     * @return {@code true} if {@code t} matches, {@code false} otherwise
     */
    boolean matches(HttpRequest context, T t);
}
| jamesdbloom/mockserver | mockserver-core/src/main/java/org/mockserver/matchers/Matcher.java | Java | apache-2.0 | 188 |
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <regex>
#include <gtest/gtest.h>
#include <grpc/grpc.h>
#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/ext/filters/message_size/message_size_filter.h"
#include "src/core/lib/gpr/string.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
namespace grpc_core {
namespace testing {
// Minimal ParsedConfig stub that records a single integer value, used by
// the test parsers below to prove a parse result round-trips.
class TestParsedConfig1 : public ServiceConfig::ParsedConfig {
 public:
  // explicit: a bare int should not silently convert into a parsed config.
  explicit TestParsedConfig1(int value) : value_(value) {}

  int value() const { return value_; }

 private:
  int value_;
};
// Test parser that reads a numeric "global_param" field from the top-level
// service config JSON and wraps it in a TestParsedConfig1. Per-method
// parsing is inherited (a no-op) from the base Parser.
class TestParser1 : public ServiceConfig::Parser {
 public:
  std::unique_ptr<ServiceConfig::ParsedConfig> ParseGlobalParams(
      const grpc_json* json, grpc_error** error) override {
    GPR_DEBUG_ASSERT(error != nullptr);
    // Scan the top-level JSON fields for "global_param".
    for (grpc_json* field = json->child; field != nullptr;
         field = field->next) {
      if (strcmp(field->key, "global_param") == 0) {
        if (field->type != GRPC_JSON_NUMBER) {
          *error =
              GRPC_ERROR_CREATE_FROM_STATIC_STRING(InvalidTypeErrorMessage());
          return nullptr;
        }
        // gpr_parse_nonnegative_int returns -1 for negative or unparsable
        // input, which we report as an invalid value.
        int value = gpr_parse_nonnegative_int(field->value);
        if (value == -1) {
          *error =
              GRPC_ERROR_CREATE_FROM_STATIC_STRING(InvalidValueErrorMessage());
          return nullptr;
        }
        return MakeUnique<TestParsedConfig1>(value);
      }
    }
    // Field absent: nothing for this parser to record.
    return nullptr;
  }

  // Error strings matched (via regex) by the tests in this file.
  static const char* InvalidTypeErrorMessage() {
    return "global_param value type should be a number";
  }

  static const char* InvalidValueErrorMessage() {
    return "global_param value type should be non-negative";
  }
};
// Counterpart of TestParser1 for per-method configs: reads a numeric
// "method_param" field from a methodConfig entry, skipping the "name" list.
class TestParser2 : public ServiceConfig::Parser {
 public:
  std::unique_ptr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
      const grpc_json* json, grpc_error** error) override {
    GPR_DEBUG_ASSERT(error != nullptr);
    for (grpc_json* field = json->child; field != nullptr;
         field = field->next) {
      // Skip anonymous fields and the "name" matcher list; only
      // "method_param" is of interest here.
      if (field->key == nullptr || strcmp(field->key, "name") == 0) {
        continue;
      }
      if (strcmp(field->key, "method_param") == 0) {
        if (field->type != GRPC_JSON_NUMBER) {
          *error =
              GRPC_ERROR_CREATE_FROM_STATIC_STRING(InvalidTypeErrorMessage());
          return nullptr;
        }
        // -1 signals negative/unparsable input.
        int value = gpr_parse_nonnegative_int(field->value);
        if (value == -1) {
          *error =
              GRPC_ERROR_CREATE_FROM_STATIC_STRING(InvalidValueErrorMessage());
          return nullptr;
        }
        return MakeUnique<TestParsedConfig1>(value);
      }
    }
    return nullptr;
  }

  // Error strings matched (via regex) by the tests in this file.
  static const char* InvalidTypeErrorMessage() {
    return "method_param value type should be a number";
  }

  static const char* InvalidValueErrorMessage() {
    return "method_param value type should be non-negative";
  }
};
// This parser always adds errors — used to verify how errors from multiple
// registered parsers are aggregated and scoped in the combined error.
class ErrorParser : public ServiceConfig::Parser {
 public:
  std::unique_ptr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
      const grpc_json* /*json*/, grpc_error** error) override {
    GPR_DEBUG_ASSERT(error != nullptr);
    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(MethodError());
    return nullptr;
  }

  std::unique_ptr<ServiceConfig::ParsedConfig> ParseGlobalParams(
      const grpc_json* /*json*/, grpc_error** error) override {
    GPR_DEBUG_ASSERT(error != nullptr);
    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(GlobalError());
    return nullptr;
  }

  // Fixed error strings the scoping tests assert on.
  static const char* MethodError() { return "ErrorParser : methodError"; }

  static const char* GlobalError() { return "ErrorParser : globalError"; }
};
// Asserts that the error's string rendering matches the given regex, then
// releases the caller's reference to the error.
void VerifyRegexMatch(grpc_error* error, const std::regex& e) {
  std::string error_text(grpc_error_string(error));
  std::smatch pieces;
  EXPECT_TRUE(std::regex_search(error_text, pieces, e));
  GRPC_ERROR_UNREF(error);
}
// Fixture that resets the global parser registry and installs the two test
// parsers before each test.
class ServiceConfigTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ServiceConfig::Shutdown();
    ServiceConfig::Init();
    // Registration order fixes the parser indices used by
    // GetGlobalParsedConfig(0) and index [1] of the per-method vectors.
    EXPECT_TRUE(ServiceConfig::RegisterParser(MakeUnique<TestParser1>()) == 0);
    EXPECT_TRUE(ServiceConfig::RegisterParser(MakeUnique<TestParser2>()) == 1);
  }
};
// An empty string is not valid JSON and must be rejected up front.
TEST_F(ServiceConfigTest, ErrorCheck1) {
  const char* test_json = "";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(std::string("failed to parse JSON for service config"));
  VerifyRegexMatch(error, e);
}

// An empty JSON object is a valid (if trivial) service config.
TEST_F(ServiceConfigTest, BasicTest1) {
  const char* test_json = "{}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  EXPECT_TRUE(error == GRPC_ERROR_NONE);
}

// A methodConfig entry without a "name" list must be rejected.
TEST_F(ServiceConfigTest, ErrorNoNames) {
  const char* test_json = "{\"methodConfig\": [{\"blah\":1}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(No names "
                  "found)(.*)(methodConfig)(.*)(referenced_errors)(.*)(No "
                  "names specified)"));
  VerifyRegexMatch(error, e);
}

// Missing names in ANY methodConfig entry fails the whole config, even when
// another entry is well-formed.
TEST_F(ServiceConfigTest, ErrorNoNamesWithMultipleMethodConfigs) {
  const char* test_json =
      "{\"methodConfig\": [{}, {\"name\":[{\"service\":\"TestServ\"}]}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(No names "
                  "found)(.*)(methodConfig)(.*)(referenced_errors)(.*)(No "
                  "names specified)"));
  VerifyRegexMatch(error, e);
}

// A methodConfig with a proper service name parses cleanly.
TEST_F(ServiceConfigTest, ValidMethodConfig) {
  const char* test_json =
      "{\"methodConfig\": [{\"name\":[{\"service\":\"TestServ\"}]}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  EXPECT_TRUE(error == GRPC_ERROR_NONE);
}
// TestParser1 (registered at index 0) surfaces global_param via
// GetGlobalParsedConfig(0); no per-method config should exist.
TEST_F(ServiceConfigTest, Parser1BasicTest1) {
  const char* test_json = "{\"global_param\":5}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  EXPECT_TRUE(
      (static_cast<TestParsedConfig1*>(svc_cfg->GetGlobalParsedConfig(0)))
          ->value() == 5);
  EXPECT_TRUE(svc_cfg->GetMethodParsedConfigVector(
                  grpc_slice_from_static_string("/TestServ/TestMethod")) ==
              nullptr);
}

// Same as above with a larger value.
TEST_F(ServiceConfigTest, Parser1BasicTest2) {
  const char* test_json = "{\"global_param\":1000}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  EXPECT_TRUE(
      (static_cast<TestParsedConfig1*>(svc_cfg->GetGlobalParsedConfig(0)))
          ->value() == 1000);
}

// A string where a number is expected yields TestParser1's type error.
TEST_F(ServiceConfigTest, Parser1ErrorInvalidType) {
  const char* test_json = "{\"global_param\":\"5\"}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(std::string("(Service config parsing "
                           "error)(.*)(referenced_errors)(.*)(Global "
                           "Params)(.*)(referenced_errors)(.*)") +
               TestParser1::InvalidTypeErrorMessage());
  VerifyRegexMatch(error, e);
}

// A negative number yields TestParser1's value error.
TEST_F(ServiceConfigTest, Parser1ErrorInvalidValue) {
  const char* test_json = "{\"global_param\":-5}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(std::string("(Service config parsing "
                           "error)(.*)(referenced_errors)(.*)(Global "
                           "Params)(.*)(referenced_errors)(.*)") +
               TestParser1::InvalidValueErrorMessage());
  VerifyRegexMatch(error, e);
}

// TestParser2 is registered at index 1, so its parsed config sits at index
// [1] of the per-method vector.
TEST_F(ServiceConfigTest, Parser2BasicTest) {
  const char* test_json =
      "{\"methodConfig\": [{\"name\":[{\"service\":\"TestServ\"}], "
      "\"method_param\":5}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* vector_ptr = svc_cfg->GetMethodParsedConfigVector(
      grpc_slice_from_static_string("/TestServ/TestMethod"));
  EXPECT_TRUE(vector_ptr != nullptr);
  auto parsed_config = ((*vector_ptr)[1]).get();
  EXPECT_TRUE(static_cast<TestParsedConfig1*>(parsed_config)->value() == 5);
}

// Per-method type error is nested under the methodConfig scope.
TEST_F(ServiceConfigTest, Parser2ErrorInvalidType) {
  const char* test_json =
      "{\"methodConfig\": [{\"name\":[{\"service\":\"TestServ\"}], "
      "\"method_param\":\"5\"}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  std::regex e(std::string("(Service config parsing "
                           "error)(.*)(referenced_errors\":\\[)(.*)(Method "
                           "Params)(.*)(referenced_errors)(.*)(methodConfig)("
                           ".*)(referenced_errors)(.*)") +
               TestParser2::InvalidTypeErrorMessage());
  VerifyRegexMatch(error, e);
}

// Per-method value error is nested under the methodConfig scope.
TEST_F(ServiceConfigTest, Parser2ErrorInvalidValue) {
  const char* test_json =
      "{\"methodConfig\": [{\"name\":[{\"service\":\"TestServ\"}], "
      "\"method_param\":-5}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  std::regex e(std::string("(Service config parsing "
                           "error)(.*)(referenced_errors\":\\[)(.*)(Method "
                           "Params)(.*)(referenced_errors)()(.*)(methodConfig)("
                           ".*)(referenced_errors)(.*)") +
               TestParser2::InvalidValueErrorMessage());
  VerifyRegexMatch(error, e);
}
// Test parsing with ErrorParsers which always add errors: verifies that
// errors from BOTH registered parsers appear, correctly scoped under the
// Global Params / Method Params sections.
class ErroredParsersScopingTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Reset the registry and install two always-failing parsers.
    ServiceConfig::Shutdown();
    ServiceConfig::Init();
    EXPECT_TRUE(ServiceConfig::RegisterParser(MakeUnique<ErrorParser>()) == 0);
    EXPECT_TRUE(ServiceConfig::RegisterParser(MakeUnique<ErrorParser>()) == 1);
  }
};

// Both parsers' global errors show up under "Global Params".
TEST_F(ErroredParsersScopingTest, GlobalParams) {
  const char* test_json = "{}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  std::regex e(std::string("(Service config parsing "
                           "error)(.*)(referenced_errors\":\\[)(.*)(Global "
                           "Params)(.*)(referenced_errors)()(.*)") +
               ErrorParser::GlobalError() + std::string("(.*)") +
               ErrorParser::GlobalError());
  VerifyRegexMatch(error, e);
}

// With a methodConfig present, global errors appear first, then both
// parsers' method errors nested under the methodConfig scope.
TEST_F(ErroredParsersScopingTest, MethodParams) {
  const char* test_json = "{\"methodConfig\": [{}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors\":\\[)(.*)(Global "
                  "Params)(.*)(referenced_errors)()(.*)") +
      ErrorParser::GlobalError() + std::string("(.*)") +
      ErrorParser::GlobalError() +
      std::string("(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(field:methodConfig "
                  "error:No names "
                  "found)(.*)(methodConfig)(.*)(referenced_errors)(.*)") +
      ErrorParser::MethodError() + std::string("(.*)") +
      ErrorParser::MethodError() + std::string("(.*)(No names specified)"));
  VerifyRegexMatch(error, e);
}
// Fixture that installs only the real client-channel service config parser
// (at index 0) so its global/per-method output can be inspected directly.
class ClientChannelParserTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ServiceConfig::Shutdown();
    ServiceConfig::Init();
    EXPECT_TRUE(ServiceConfig::RegisterParser(
                    MakeUnique<internal::ClientChannelServiceConfigParser>()) ==
                0);
  }
};
// loadBalancingConfig with pick_first resolves to the pick_first policy.
TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigPickFirst) {
  const char* test_json = "{\"loadBalancingConfig\": [{\"pick_first\":{}}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  auto lb_config = parsed_config->parsed_lb_config();
  EXPECT_TRUE(strcmp(lb_config->name(), "pick_first") == 0);
}

// Trailing unknown entries after a known policy are tolerated.
TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigRoundRobin) {
  const char* test_json =
      "{\"loadBalancingConfig\": [{\"round_robin\":{}}, {}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  auto parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  auto lb_config = parsed_config->parsed_lb_config();
  EXPECT_TRUE(strcmp(lb_config->name(), "round_robin") == 0);
}

// grpclb with a nested childPolicy parses to the grpclb policy.
TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigGrpclb) {
  const char* test_json =
      "{\"loadBalancingConfig\": "
      "[{\"grpclb\":{\"childPolicy\":[{\"pick_first\":{}}]}}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  auto lb_config = parsed_config->parsed_lb_config();
  EXPECT_TRUE(strcmp(lb_config->name(), "grpclb") == 0);
}

// Unknown policies earlier in the list are skipped; the first known one
// (xds_experimental) wins.
TEST_F(ClientChannelParserTest, ValidLoadBalancingConfigXds) {
  const char* test_json =
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"does_not_exist\":{} },\n"
      "    { \"xds_experimental\":{ \"balancerName\": \"fake:///lb\" } }\n"
      "  ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  auto lb_config = parsed_config->parsed_lb_config();
  EXPECT_TRUE(strcmp(lb_config->name(), "xds_experimental") == 0);
}

// A list containing ONLY unknown policies is an error.
TEST_F(ClientChannelParserTest, UnknownLoadBalancingConfig) {
  const char* test_json = "{\"loadBalancingConfig\": [{\"unknown\":{}}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:"
                  "loadBalancingConfig error:No known policy)"));
  VerifyRegexMatch(error, e);
}
// A grpclb childPolicy with only unknown policies fails, with the error
// nested under the GrpcLb parser scope.
TEST_F(ClientChannelParserTest, InvalidGrpclbLoadBalancingConfig) {
  const char* test_json =
      "{\"loadBalancingConfig\": "
      "[{\"grpclb\":{\"childPolicy\":[{\"unknown\":{}}]}}]}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(GrpcLb "
                  "Parser)(.*)(referenced_errors)(.*)(field:childPolicy "
                  "error:No known policy)"));
  VerifyRegexMatch(error, e);
}

// The deprecated loadBalancingPolicy string field still parses.
TEST_F(ClientChannelParserTest, ValidLoadBalancingPolicy) {
  const char* test_json = "{\"loadBalancingPolicy\":\"pick_first\"}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  const auto* lb_policy = parsed_config->parsed_deprecated_lb_policy();
  ASSERT_TRUE(lb_policy != nullptr);
  EXPECT_TRUE(strcmp(lb_policy, "pick_first") == 0);
}

// The policy name is case-insensitive and normalized to lower case.
TEST_F(ClientChannelParserTest, ValidLoadBalancingPolicyAllCaps) {
  const char* test_json = "{\"loadBalancingPolicy\":\"PICK_FIRST\"}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  const auto* lb_policy = parsed_config->parsed_deprecated_lb_policy();
  ASSERT_TRUE(lb_policy != nullptr);
  EXPECT_TRUE(strcmp(lb_policy, "pick_first") == 0);
}

// An unrecognized policy name in the deprecated field is an error.
TEST_F(ClientChannelParserTest, UnknownLoadBalancingPolicy) {
  const char* test_json = "{\"loadBalancingPolicy\":\"unknown\"}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:"
                  "loadBalancingPolicy error:Unknown lb policy)"));
  VerifyRegexMatch(error, e);
}

// xds_experimental needs a config object, so the deprecated string field
// cannot select it.
TEST_F(ClientChannelParserTest, LoadBalancingPolicyXdsNotAllowed) {
  const char* test_json = "{\"loadBalancingPolicy\":\"xds_experimental\"}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:"
                  "loadBalancingPolicy error:xds_experimental requires a "
                  "config. Please use loadBalancingConfig instead.)"));
  VerifyRegexMatch(error, e);
}
// Valid retryThrottling: tokens are stored in milli-token units, so
// maxTokens 2 -> 2000 and tokenRatio 1.0 -> 1000 (per the EXPECT_EQs).
TEST_F(ClientChannelParserTest, ValidRetryThrottling) {
  const char* test_json =
      "{\n"
      "  \"retryThrottling\": {\n"
      "    \"maxTokens\": 2,\n"
      "    \"tokenRatio\": 1.0\n"
      "  }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  const auto retryThrottling = parsed_config->retry_throttling();
  ASSERT_TRUE(retryThrottling.has_value());
  EXPECT_EQ(retryThrottling.value().max_milli_tokens, 2000);
  EXPECT_EQ(retryThrottling.value().milli_token_ratio, 1000);
}

// Both maxTokens and tokenRatio are required; each missing field is
// reported separately.
TEST_F(ClientChannelParserTest, RetryThrottlingMissingFields) {
  const char* test_json =
      "{\n"
      "  \"retryThrottling\": {\n"
      "  }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:retryThrottling "
                  "field:maxTokens error:Not found)(.*)(field:retryThrottling "
                  "field:tokenRatio error:Not found)"));
  VerifyRegexMatch(error, e);
}

// maxTokens must be strictly positive.
TEST_F(ClientChannelParserTest, InvalidRetryThrottlingNegativeMaxTokens) {
  const char* test_json =
      "{\n"
      "  \"retryThrottling\": {\n"
      "    \"maxTokens\": -2,\n"
      "    \"tokenRatio\": 1.0\n"
      "  }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:retryThrottling "
                  "field:maxTokens error:should be greater than zero)"));
  VerifyRegexMatch(error, e);
}

// tokenRatio must parse as a positive decimal.
TEST_F(ClientChannelParserTest, InvalidRetryThrottlingInvalidTokenRatio) {
  const char* test_json =
      "{\n"
      "  \"retryThrottling\": {\n"
      "    \"maxTokens\": 2,\n"
      "    \"tokenRatio\": -1\n"
      "  }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(Client channel global "
                  "parser)(.*)(referenced_errors)(.*)(field:retryThrottling "
                  "field:tokenRatio error:Failed parsing)"));
  VerifyRegexMatch(error, e);
}
TEST_F(ClientChannelParserTest, ValidTimeout) {
const char* test_json =
"{\n"
" \"methodConfig\": [ {\n"
" \"name\": [\n"
" { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
" ],\n"
" \"timeout\": \"5s\"\n"
" } ]\n"
"}";
grpc_error* error = GRPC_ERROR_NONE;
auto svc_cfg = ServiceConfig::Create(test_json, &error);
ASSERT_TRUE(error == GRPC_ERROR_NONE);
const auto* vector_ptr = svc_cfg->GetMethodParsedConfigVector(
grpc_slice_from_static_string("/TestServ/TestMethod"));
EXPECT_TRUE(vector_ptr != nullptr);
auto parsed_config = ((*vector_ptr)[0]).get();
EXPECT_EQ((static_cast<grpc_core::internal::ClientChannelMethodParsedConfig*>(
parsed_config))
->timeout(),
5000);
}
TEST_F(ClientChannelParserTest, InvalidTimeout) {
const char* test_json =
"{\n"
" \"methodConfig\": [ {\n"
" \"name\": [\n"
" { \"service\": \"service\", \"method\": \"method\" }\n"
" ],\n"
" \"timeout\": \"5sec\"\n"
" } ]\n"
"}";
grpc_error* error = GRPC_ERROR_NONE;
auto svc_cfg = ServiceConfig::Create(test_json, &error);
gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
ASSERT_TRUE(error != GRPC_ERROR_NONE);
std::regex e(
std::string("(Service config parsing "
"error)(.*)(referenced_errors)(.*)(Method "
"Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)("
"referenced_errors)(.*)(Client channel "
"parser)(.*)(referenced_errors)(.*)(field:timeout "
"error:Failed parsing)"));
VerifyRegexMatch(error, e);
}
// A boolean "waitForReady": true must surface as a set-and-true optional on
// the parsed method config.
TEST_F(ClientChannelParserTest, ValidWaitForReady) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"waitForReady\": true\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto service_config = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* config_vector = service_config->GetMethodParsedConfigVector(
      grpc_slice_from_static_string("/TestServ/TestMethod"));
  EXPECT_TRUE(config_vector != nullptr);
  const auto* method_config =
      static_cast<grpc_core::internal::ClientChannelMethodParsedConfig*>(
          ((*config_vector)[0]).get());
  // wait_for_ready() is optional-like: it must be both present and true.
  EXPECT_TRUE(method_config->wait_for_ready().has_value());
  EXPECT_TRUE(method_config->wait_for_ready().value());
}
// "waitForReady" given as the JSON string "true" (instead of the boolean
// literal true) must be rejected with a type error on that field.
TEST_F(ClientChannelParserTest, InvalidWaitForReady) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"service\", \"method\": \"method\" }\n"
      "    ],\n"
      "    \"waitForReady\": \"true\"\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)("
                  "referenced_errors)(.*)(Client channel "
                  "parser)(.*)(referenced_errors)(.*)(field:waitForReady "
                  "error:Type should be true/false)"));
  VerifyRegexMatch(error, e);
}
// A fully-specified, well-formed "retryPolicy" must be exposed through
// retry_policy() with durations converted to milliseconds and status codes
// collected into a set.
TEST_F(ClientChannelParserTest, ValidRetryPolicy) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 3,\n"
      "      \"initialBackoff\": \"1s\",\n"
      "      \"maxBackoff\": \"120s\",\n"
      "      \"backoffMultiplier\": 1.6,\n"
      "      \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto service_config = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* config_vector = service_config->GetMethodParsedConfigVector(
      grpc_slice_from_static_string("/TestServ/TestMethod"));
  EXPECT_TRUE(config_vector != nullptr);
  const auto* method_config =
      static_cast<grpc_core::internal::ClientChannelMethodParsedConfig*>(
          ((*config_vector)[0]).get());
  const auto* retry_policy = method_config->retry_policy();
  EXPECT_TRUE(retry_policy != nullptr);
  EXPECT_EQ(retry_policy->max_attempts, 3);
  EXPECT_EQ(retry_policy->initial_backoff, 1000);
  EXPECT_EQ(retry_policy->max_backoff, 120000);
  EXPECT_EQ(retry_policy->backoff_multiplier, 1.6f);
  EXPECT_TRUE(
      retry_policy->retryable_status_codes.Contains(GRPC_STATUS_ABORTED));
}
// "maxAttempts" below the required minimum of 2 must be rejected; all other
// retryPolicy fields here are valid so the error is attributable to it.
TEST_F(ClientChannelParserTest, InvalidRetryPolicyMaxAttempts) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 1,\n"
      "      \"initialBackoff\": \"1s\",\n"
      "      \"maxBackoff\": \"120s\",\n"
      "      \"backoffMultiplier\": 1.6,\n"
      "      \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(std::string(
      "(Service config parsing "
      "error)(.*)(referenced_errors)(.*)(Method "
      "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)(referenced_errors)("
      ".*)(Client channel "
      "parser)(.*)(referenced_errors)(.*)(retryPolicy)(.*)(referenced_errors)(."
      "*)(field:maxAttempts error:should be at least 2)"));
  VerifyRegexMatch(error, e);
}
// "initialBackoff" must be a duration ending in "s"; "1sec" fails to parse.
// Fix: the original JSON also had "maxAttempts": 1, which is itself invalid
// (minimum is 2), so the test did not isolate the field under test.  Using 2
// keeps every other field valid, making the reported error attributable
// solely to initialBackoff.
TEST_F(ClientChannelParserTest, InvalidRetryPolicyInitialBackoff) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 2,\n"
      "      \"initialBackoff\": \"1sec\",\n"
      "      \"maxBackoff\": \"120s\",\n"
      "      \"backoffMultiplier\": 1.6,\n"
      "      \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(std::string(
      "(Service config parsing "
      "error)(.*)(referenced_errors)(.*)(Method "
      "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)(referenced_errors)("
      ".*)(Client channel "
      "parser)(.*)(referenced_errors)(.*)(retryPolicy)(.*)(referenced_errors)(."
      "*)(field:initialBackoff error:Failed to parse)"));
  VerifyRegexMatch(error, e);
}
// "maxBackoff" with a malformed duration ("120sec") must be rejected.
// Fix: the original JSON also carried an invalid "maxAttempts": 1, so the
// test exercised two failures at once.  maxAttempts is set to the valid
// minimum 2 so only the maxBackoff failure is produced.
TEST_F(ClientChannelParserTest, InvalidRetryPolicyMaxBackoff) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 2,\n"
      "      \"initialBackoff\": \"1s\",\n"
      "      \"maxBackoff\": \"120sec\",\n"
      "      \"backoffMultiplier\": 1.6,\n"
      "      \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(std::string(
      "(Service config parsing "
      "error)(.*)(referenced_errors)(.*)(Method "
      "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)(referenced_errors)("
      ".*)(Client channel "
      "parser)(.*)(referenced_errors)(.*)(retryPolicy)(.*)(referenced_errors)(."
      "*)(field:maxBackoff error:failed to parse)"));
  VerifyRegexMatch(error, e);
}
// "backoffMultiplier" given as a JSON string instead of a number must fail
// with a type error.  Fix: the original JSON also carried an invalid
// "maxAttempts": 1; using the valid minimum 2 isolates the multiplier error.
TEST_F(ClientChannelParserTest, InvalidRetryPolicyBackoffMultiplier) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 2,\n"
      "      \"initialBackoff\": \"1s\",\n"
      "      \"maxBackoff\": \"120s\",\n"
      "      \"backoffMultiplier\": \"1.6\",\n"
      "      \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(std::string(
      "(Service config parsing "
      "error)(.*)(referenced_errors)(.*)(Method "
      "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)(referenced_errors)("
      ".*)(Client channel "
      "parser)(.*)(referenced_errors)(.*)(retryPolicy)(.*)(referenced_errors)(."
      "*)(field:backoffMultiplier error:should be of type number)"));
  VerifyRegexMatch(error, e);
}
// An empty "retryableStatusCodes" array must be rejected.  Fix: the original
// JSON contained two additional, unrelated invalid fields ("maxAttempts": 1
// and a string-typed "backoffMultiplier"), so the test did not isolate the
// field under test.  Both are made valid here; the only remaining failure is
// the empty status-code list.
TEST_F(ClientChannelParserTest, InvalidRetryPolicyRetryableStatusCodes) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"retryPolicy\": {\n"
      "      \"maxAttempts\": 2,\n"
      "      \"initialBackoff\": \"1s\",\n"
      "      \"maxBackoff\": \"120s\",\n"
      "      \"backoffMultiplier\": 1.6,\n"
      "      \"retryableStatusCodes\": []\n"
      "    }\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(std::string(
      "(Service config parsing "
      "error)(.*)(referenced_errors)(.*)(Method "
      "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)(referenced_errors)("
      ".*)(Client channel "
      "parser)(.*)(referenced_errors)(.*)(retryPolicy)(.*)(referenced_errors)(."
      "*)(field:retryableStatusCodes error:should be non-empty)"));
  VerifyRegexMatch(error, e);
}
// A "healthCheckConfig" with a "serviceName" must be exposed through the
// global parsed config.  Idiom fix: use EXPECT_STREQ instead of
// EXPECT_EQ(strcmp(...), 0) — identical comparison, but gtest prints both
// strings on failure instead of a meaningless strcmp return value.
TEST_F(ClientChannelParserTest, ValidHealthCheck) {
  const char* test_json =
      "{\n"
      "  \"healthCheckConfig\": {\n"
      "    \"serviceName\": \"health_check_service_name\"\n"
      "    }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* parsed_config =
      static_cast<grpc_core::internal::ClientChannelGlobalParsedConfig*>(
          svc_cfg->GetGlobalParsedConfig(0));
  ASSERT_TRUE(parsed_config != nullptr);
  EXPECT_STREQ(parsed_config->health_check_service_name(),
               "health_check_service_name");
}
// Two "healthCheckConfig" entries in the same JSON object must be rejected as
// a duplicate-entry error at the global-params level.
TEST_F(ClientChannelParserTest, InvalidHealthCheckMultipleEntries) {
  const char* test_json =
      "{\n"
      "  \"healthCheckConfig\": {\n"
      "    \"serviceName\": \"health_check_service_name\"\n"
      "    },\n"
      "  \"healthCheckConfig\": {\n"
      "    \"serviceName\": \"health_check_service_name1\"\n"
      "    }\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Global "
                  "Params)(.*)(referenced_errors)(.*)(field:healthCheckConfig "
                  "error:Duplicate entry)"));
  VerifyRegexMatch(error, e);
}
// Fixture that resets the global ServiceConfig parser registry before each
// test and registers only the MessageSizeParser, so it always lands at
// parser index 0.
class MessageSizeParserTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ServiceConfig::Shutdown();
    ServiceConfig::Init();
    // RegisterParser returns the parser's index; with a fresh registry the
    // first registration must be index 0.
    EXPECT_TRUE(
        ServiceConfig::RegisterParser(MakeUnique<MessageSizeParser>()) == 0);
  }
};
// Valid per-method message-size limits must be parsed into a
// MessageSizeParsedConfig with matching send/recv limits.
TEST_F(MessageSizeParserTest, Valid) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"maxRequestMessageBytes\": 1024,\n"
      "    \"maxResponseMessageBytes\": 1024\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto service_config = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error == GRPC_ERROR_NONE);
  const auto* config_vector = service_config->GetMethodParsedConfigVector(
      grpc_slice_from_static_string("/TestServ/TestMethod"));
  EXPECT_TRUE(config_vector != nullptr);
  const auto* limits_config =
      static_cast<MessageSizeParsedConfig*>(((*config_vector)[0]).get());
  ASSERT_TRUE(limits_config != nullptr);
  EXPECT_EQ(limits_config->limits().max_send_size, 1024);
  EXPECT_EQ(limits_config->limits().max_recv_size, 1024);
}
// A negative "maxRequestMessageBytes" must be rejected by the message-size
// parser with a non-negative-constraint error.
TEST_F(MessageSizeParserTest, InvalidMaxRequestMessageBytes) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"maxRequestMessageBytes\": -1024\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)("
                  "referenced_errors)(.*)(Message size "
                  "parser)(.*)(referenced_errors)(.*)(field:"
                  "maxRequestMessageBytes error:should be non-negative)"));
  VerifyRegexMatch(error, e);
}
// A non-numeric "maxResponseMessageBytes" (a JSON object here) must be
// rejected with a type error on that field.
TEST_F(MessageSizeParserTest, InvalidMaxResponseMessageBytes) {
  const char* test_json =
      "{\n"
      "  \"methodConfig\": [ {\n"
      "    \"name\": [\n"
      "      { \"service\": \"TestServ\", \"method\": \"TestMethod\" }\n"
      "    ],\n"
      "    \"maxResponseMessageBytes\": {}\n"
      "  } ]\n"
      "}";
  grpc_error* error = GRPC_ERROR_NONE;
  auto svc_cfg = ServiceConfig::Create(test_json, &error);
  gpr_log(GPR_ERROR, "%s", grpc_error_string(error));
  ASSERT_TRUE(error != GRPC_ERROR_NONE);
  // Loose in-order match over the nested error description.
  std::regex e(
      std::string("(Service config parsing "
                  "error)(.*)(referenced_errors)(.*)(Method "
                  "Params)(.*)(referenced_errors)(.*)(methodConfig)(.*)("
                  "referenced_errors)(.*)(Message size "
                  "parser)(.*)(referenced_errors)(.*)(field:"
                  "maxResponseMessageBytes error:should be of type number)"));
  VerifyRegexMatch(error, e);
}
} // namespace testing
} // namespace grpc_core
// Test entry point.  Note the early-return skip path: it runs BEFORE any
// grpc/gtest initialization, so nothing needs tearing down in that branch.
int main(int argc, char** argv) {
  // Regexes don't work in old libstdc++ versions, so just skip testing in those
  // cases
#if defined(__GLIBCXX__) && (__GLIBCXX__ <= 20150623)
  gpr_log(GPR_ERROR,
          "Skipping service_config_test since std::regex is not supported on "
          "this system.");
  return 0;
#endif
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
  ::testing::InitGoogleTest(&argc, argv);
  int ret = RUN_ALL_TESTS();
  // Pairs with grpc_init() above; must run after all tests complete.
  grpc_shutdown();
  return ret;
}
| muxi/grpc | test/core/client_channel/service_config_test.cc | C++ | apache-2.0 | 39,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pointr.tcp.util
import java.net.InetAddress
object TcpUtils {
  /** Name of the host this JVM is running on, as reported by `InetAddress`. */
  @inline def getLocalHostname(): String = InetAddress.getLocalHost.getHostName
}
| OpenChaiSpark/OCspark | tcpclient/src/main/scala/com/pointr/tcp/util/TcpUtils.scala | Scala | apache-2.0 | 952 |
#!/bin/bash
# Set up the Puppet Master
# r10k -v info puppetfile install
### The following lines get a development version of tempest that fixes glance and neutron ids
#cd modules/tempest
#git fetch https://review.openstack.org/stackforge/puppet-tempest refs/changes/51/86751/1 && git checkout FETCH_HEAD
#cd ../..

# Install the puppetlabs/apt module on each seed node in turn.
for node in seed1 seed2; do
  vagrant ssh "${node}" -c "
sudo puppet module install puppetlabs/apt
"
done
| varunarya10/puppet-cassandra | tests/10_install_apt_module.sh | Shell | apache-2.0 | 458 |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.fileTemplates;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.project.ProjectManager;
import com.intellij.openapi.util.Key;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import java.util.Collection;
import java.util.Properties;
/**
* @author MYakovlev
*/
/**
 * Project-level service that stores and serves file templates (grouped into
 * the Default/Internal/Includes/Code/J2EE categories) and tracks
 * recently-used template names.
 *
 * @author MYakovlev
 */
public abstract class FileTemplateManager{
  /** User-data key under which pre-filled template properties may be attached. */
  public static final Key<Properties> DEFAULT_TEMPLATE_PROPERTIES = Key.create("DEFAULT_TEMPLATE_PROPERTIES");
  /** Maximum number of entries kept in the recently-used-names list. */
  public static final int RECENT_TEMPLATES_SIZE = 25;

  // Names of the built-in ("internal") templates.
  @NonNls
  public static final String INTERNAL_HTML_TEMPLATE_NAME = "HTML4 File";
  @NonNls
  public static final String INTERNAL_HTML5_TEMPLATE_NAME = "HTML File";
  @NonNls
  public static final String INTERNAL_XHTML_TEMPLATE_NAME = "XHTML File";
  @NonNls
  public static final String FILE_HEADER_TEMPLATE_NAME = "File Header";

  // Template category identifiers.
  public static final String DEFAULT_TEMPLATES_CATEGORY = "Default";
  public static final String INTERNAL_TEMPLATES_CATEGORY = "Internal";
  public static final String INCLUDES_TEMPLATES_CATEGORY = "Includes";
  public static final String CODE_TEMPLATES_CATEGORY = "Code";
  public static final String J2EE_TEMPLATES_CATEGORY = "J2EE";

  /** Name of the substitution variable that expands to the project name. */
  public static final String PROJECT_NAME_VARIABLE = "PROJECT_NAME";

  /** Returns the (lazily initialized) manager instance for the given project. */
  public static FileTemplateManager getInstance(@NotNull Project project){
    return ServiceManager.getService(project, FileTemplateManager.class).checkInitialized();
  }

  // Hook for implementations to finish lazy initialization; default is a no-op.
  @NotNull
  protected FileTemplateManager checkInitialized() { return this; }

  /**
   * @deprecated Use {@link #getInstance(Project)} instead
   */
  @Deprecated
  public static FileTemplateManager getInstance(){
    return getDefaultInstance();
  }

  /** Returns the manager bound to the default (template) project. */
  public static FileTemplateManager getDefaultInstance(){
    return getInstance(ProjectManager.getInstance().getDefaultProject());
  }

  /** Returns the currently active template scheme (project- or IDE-level). */
  @NotNull
  public abstract FileTemplatesScheme getCurrentScheme();

  /** Switches the active template scheme. */
  public abstract void setCurrentScheme(@NotNull FileTemplatesScheme scheme);

  /**
   * @return Project scheme, or null if manager is created for default project.
   */
  public abstract FileTemplatesScheme getProjectScheme();

  /** Returns all templates belonging to the given category. */
  public abstract FileTemplate @NotNull [] getTemplates(@NotNull String category);

  /**
   * Returns all templates from "Default" category.
   */
  public abstract FileTemplate @NotNull [] getAllTemplates();

  /** Returns the template with the given name, or null if none exists. */
  public abstract FileTemplate getTemplate(@NotNull @NonNls String templateName);

  /**
   * @return a new Properties object filled with predefined properties.
   */
  @NotNull
  public abstract Properties getDefaultProperties();

  /**
   * @deprecated use {@link #getDefaultProperties()} instead
   */
  @NotNull
  @Deprecated
  public Properties getDefaultProperties(@NotNull Project project) {
    Properties properties = getDefaultProperties();
    // Legacy behavior: additionally expose the project name as a variable.
    properties.setProperty(PROJECT_NAME_VARIABLE, project.getName());
    return properties;
  }

  /**
   * Creates a new template with specified name, and adds it to the list of default templates.
   * @return created template
   */
  @NotNull
  public abstract FileTemplate addTemplate(@NotNull @NonNls String name, @NotNull @NonNls String extension);

  /** Removes the given template from the manager. */
  public abstract void removeTemplate(@NotNull FileTemplate template);

  /** Returns the recently-used template names, at most {@link #RECENT_TEMPLATES_SIZE}. */
  @NotNull
  public abstract Collection<String> getRecentNames();

  /** Records a template name as recently used. */
  public abstract void addRecentName(@NotNull @NonNls String name);

  /** Returns the internal template with the given name; never null. */
  @NotNull
  public abstract FileTemplate getInternalTemplate(@NotNull @NonNls String templateName);

  /** Returns the internal template with the given name, or null if not found. */
  public abstract FileTemplate findInternalTemplate(@NotNull @NonNls String templateName);

  /** Returns all built-in templates. */
  public abstract FileTemplate @NotNull [] getInternalTemplates();

  /**
   * @param templateName template name
   * @return a template by name
   * @throws IllegalStateException if template is not found
   */
  @NotNull
  public abstract FileTemplate getJ2eeTemplate(@NotNull @NonNls String templateName);

  /**
   * @param templateName template name
   * @return a template by name
   * @throws IllegalStateException if template is not found
   */
  @NotNull
  public abstract FileTemplate getCodeTemplate(@NotNull @NonNls String templateName);

  /** Returns all templates of the "Includes" category. */
  public abstract FileTemplate @NotNull [] getAllPatterns();

  /** Returns all templates of the "Code" category. */
  public abstract FileTemplate @NotNull [] getAllCodeTemplates();

  /** Returns all templates of the "J2EE" category. */
  public abstract FileTemplate @NotNull [] getAllJ2eeTemplates();

  /** Maps an internal template name to a human-readable subject string. */
  @NotNull
  public abstract String internalTemplateToSubject(@NotNull @NonNls String templateName);

  /** Returns the "Includes" template with the given name, or null if absent. */
  public abstract FileTemplate getPattern(@NotNull @NonNls String name);

  /**
   * Returns template with default (bundled) text.
   */
  @NotNull
  public abstract FileTemplate getDefaultTemplate(@NotNull @NonNls String name);

  /** Replaces the contents of the given category with the supplied templates. */
  public abstract void setTemplates(@NotNull String templatesCategory, @NotNull Collection<? extends FileTemplate> templates);

  /** Persists all template changes. */
  public abstract void saveAllTemplates();
}
| leafclick/intellij-community | platform/lang-impl/src/com/intellij/ide/fileTemplates/FileTemplateManager.java | Java | apache-2.0 | 5,499 |
/**
* Copyright 2013 Netherlands eScience Center
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ibis.constellation.impl.pool;
import ibis.ipl.IbisIdentifier;
import ibis.ipl.Location;
/**
* @version 1.0
* @since 1.0
*
*/
/**
 * Minimal {@link IbisIdentifier} implementation used by the pool code for
 * identifiers that are fabricated locally rather than obtained from a
 * registry.  Instances are immutable.
 */
public class FakeIbisIdentifier implements IbisIdentifier {

    private final Location location;
    private final String name;
    private final String poolName;
    private final String tag;

    /**
     * @param location location of the (fake) Ibis instance
     * @param name unique name of the instance
     * @param poolName name of the pool the instance belongs to
     * @param tag free-form tag associated with the instance
     */
    public FakeIbisIdentifier(Location location, String name, String poolName, String tag) {
        this.location = location;
        this.name = name;
        this.poolName = poolName;
        this.tag = tag;
    }

    /**
     * Orders identifiers by pool name, then by instance name.
     * Fix: the previous implementation always returned 0 (left as a TODO),
     * which violates the {@link Comparable} contract for unequal identifiers
     * and breaks sorted collections.
     */
    @Override
    public int compareTo(IbisIdentifier other) {
        int cmp = poolName.compareTo(other.poolName());
        if (cmp != 0) {
            return cmp;
        }
        return name.compareTo(other.name());
    }

    /** @return the location supplied at construction time */
    @Override
    public Location location() {
        return location;
    }

    /** @return the instance name supplied at construction time */
    @Override
    public String name() {
        return name;
    }

    /** @return the pool name supplied at construction time */
    @Override
    public String poolName() {
        return poolName;
    }

    /**
     * Returns the tag as bytes.  UTF-8 is specified explicitly so the result
     * does not depend on the platform default charset.
     */
    @Override
    public byte[] tag() {
        return tag.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    }

    /** @return the tag in its original string form */
    @Override
    public String tagAsString() {
        return tag;
    }
}
| CerielJacobs/Constellation | src/test/java/ibis/constellation/impl/pool/FakeIbisIdentifier.java | Java | apache-2.0 | 2,148 |
package com.examw.collector.domain;
import java.io.Serializable;
import java.util.Date;
/**
* 定时器的更新记录
* @author fengwei.
* @since 2014年7月28日 上午10:15:14.
*/
/**
 * Record of a single scheduled update run performed by the collector timers.
 *
 * @author fengwei.
 * @since 2014-07-28
 */
public class UpdateLog implements Serializable {
    private static final long serialVersionUID = 1L;

    /** Update categories: subject, grade and pack refreshes. */
    public final static int TYPE_UPDATE_SUBJECT = 1, TYPE_UPDATE_GRADE = 2, TYPE_UPDATE_PACK = 3;

    // Identifier and display name of the updated entity.
    private String id, name;
    // One of the TYPE_UPDATE_* constants.
    private Integer type;
    // Timestamp of the update run.
    private Date updateTime;

    /** Creates an empty record; populate it through the setters. */
    public UpdateLog() {
    }

    /**
     * Creates a fully populated record.
     *
     * @param id entity identifier
     * @param name entity display name
     * @param type one of the TYPE_UPDATE_* constants
     * @param updateTime time at which the update ran
     */
    public UpdateLog(String id, String name, Integer type, Date updateTime) {
        this.id = id;
        this.name = name;
        this.type = type;
        this.updateTime = updateTime;
    }

    /** @return the entity identifier */
    public String getId() {
        return id;
    }

    /** @param id the entity identifier */
    public void setId(String id) {
        this.id = id;
    }

    /** @return the entity display name */
    public String getName() {
        return name;
    }

    /** @param name the entity display name */
    public void setName(String name) {
        this.name = name;
    }

    /** @return the update category, one of the TYPE_UPDATE_* constants */
    public Integer getType() {
        return type;
    }

    /** @param type the update category, one of the TYPE_UPDATE_* constants */
    public void setType(Integer type) {
        this.type = type;
    }

    /** @return the time at which the update ran */
    public Date getUpdateTime() {
        return updateTime;
    }

    /** @param updateTime the time at which the update ran */
    public void setUpdateTime(Date updateTime) {
        this.updateTime = updateTime;
    }
}
| jeasonyoung/examw-collector | src/main/java/com/examw/collector/domain/UpdateLog.java | Java | apache-2.0 | 1,597 |
/* -------------------------------------------------------------------------- */
/* Copyright 2002-2007 GridWay Team, Distributed Systems Architecture */
/* Group, Universidad Complutense de Madrid */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
package org.ggf.drmaa;
import java.util.*;
/** DRMAA application uses the JobTemplate interface, in order to define the attributes associated with a job.
* JobTemplates are created via the active {@link Session} implementation. A DRMAA application
* gets a JobTemplate from the active {@link Session}, specifies in the JobTemplate any required job
* parameters, and the passes the JobTemplate back to the Session when requesting that a job be
* executed. When finished, the DRMAA application should call the
* {@link Session#deleteJobTemplate} method to allow the underlying implementation to free any
* resources bound to the JobTemplate object.
*/
public abstract interface JobTemplate
{
/** Pre-defined string to refer to the HOLD state on submission.
* Use this preprocessor directive to assign the value of the
* drmaa_js_state attribute through the {@link #setJobSubmissionState} method call.
*/
public static final int HOLD_STATE = 0;//"drmaa_hold";
/** Pre-defined string to refer to the ACTIVE state on submission. Use this
* preprocessor directive to assign the value of the drmaa_js_state attribute
* through the {@link #setJobSubmissionState} method call.
*/
public static final int ACTIVE_STATE = 1;//"drmaa_active";
/** Pre-defined string to refer the user's home directory.
*/
public static final String HOME_DIRECTORY = "$drmaa_hd_ph$";
/** Pre-defined string constant to represent the current working
* directory when building paths for the input, output, and error path attribute values.
* Plase note that ALL FILES MUST BE NAMED RELATIVE TO THE WORKING DIRECTORY.
*/
public static final String WORKING_DIRECTORY = "$drmaa_wd_ph$";
/** Pre-defined string to be used in parametric jobs (bulk jobs).
* PARAMETRIC_INDEX will be available during job execution and can be
* used as an ARGUMENT for the REMOTE COMMAND, or to generate output filenames.
* Please note that this attribute name should be used ONLY in conjuntion
* with a {@link Session#runBulkJobs} method call. Use DRMAA_GW_JOB_ID for "stand-alone" jobs.
*/
public static final String PARAMETRIC_INDEX = "$drmaa_incr_ph$";
/** This method set the attribute {@link SimpleJobTemplate#remoteCommand}.
*
* @param command A command to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setRemoteCommand(java.lang.String command)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#args}.
*
* @param args The attributes to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setArgs(java.util.List args)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#jobSubmissionState}.
*
* @param state The state to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setJobSubmissionState(int state)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#jobEnvironment}.
*
* @param env The jobEnvironment to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setJobEnvironment(java.util.Map env)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#workingDirectory}.
*
* @param wd The working directoy to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setWorkingDirectory(String wd)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#jobCategory}.
*
* @param category The category to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setJobCategory(String category)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#nativeSpecification}.
*
* @param spec The native specification to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setNativeSpecification(String spec)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#email}.
*
* @param email The email to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setEmail(java.util.Set email)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#blockEmail}.
*
* @param blockEmail The blockEmail to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setBlockEmail(boolean blockEmail)
throws DrmaaException;
/** This method set the attribute {@link SimpleJobTemplate#startTime}.
*
* @param startTime The startTime to set
*
* @throws InvalidAttributeValueException
* @throws ConflictingAttributeValuesException
* @throws NoActiveSessionException
* @throws java.lang.OutOfMemoryError
* @throws DrmCommunicationException
* @throws AuthorizationException
* @throws java.lang.IllegalArgumentException
* @throws InternalException
*/
public void setStartTime(Date startTime)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#jobName}.
 *
 * @param name The Job name to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setJobName(String name)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#inputPath}.
 *
 * @param inputPath The input path to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setInputPath(String inputPath)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#outputPath}.
 *
 * @param outputPath The output path to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setOutputPath(String outputPath)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#errorPath}.
 *
 * @param errorPath The error path to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setErrorPath(String errorPath)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#joinFiles}.
 *
 * @param joinFiles The joinFiles to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setJoinFiles(boolean joinFiles)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#transferFiles}.
 *
 * @param mode The transfer mode to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setTransferFiles(FileTransferMode mode)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#deadlineTime}.
 *
 * @param deadline The deadline to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setDeadlineTime(PartialTimestamp deadline)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#hardWallclockTimeLimit}.
 *
 * @param limit The hard wall clock time limit to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setHardWallclockTimeLimit(long limit)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#softWallclockTimeLimit}.
 *
 * @param limit The soft wall clock time limit to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setSoftWallclockTimeLimit(long limit)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#hardRunDurationLimit}.
 *
 * @param limit The hard run duration limit to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setHardRunDurationLimit(long limit)
throws DrmaaException;
/** Sets the attribute {@link SimpleJobTemplate#softRunDurationLimit}.
 *
 * @param limit The soft run duration limit to set
 *
 * @throws InvalidAttributeValueException
 * @throws ConflictingAttributeValuesException
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public void setSoftRunDurationLimit(long limit)
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#remoteCommand}.
 *
 * @return A {@link String} with the remoteCommand value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getRemoteCommand()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#args}.
 *
 * @return A {@link java.util.List} with the args value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public java.util.List getArgs()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#jobSubmissionState}.
 *
 * @return An int with the jobSubmissionState value
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public int getJobSubmissionState()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#jobEnvironment}.
 *
 * @return A {@link java.util.Map} with the jobEnvironment value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public java.util.Map getJobEnvironment()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#workingDirectory}.
 *
 * @return A {@link String} with the workingDirectory value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getWorkingDirectory()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#jobCategory}.
 *
 * @return A {@link String} with the jobCategory value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getJobCategory()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#nativeSpecification}.
 *
 * @return A {@link String} with the nativeSpecification value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getNativeSpecification()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#email}.
 *
 * @return A {@link java.util.Set} with the email value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public java.util.Set getEmail()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#blockEmail}.
 *
 * @return A boolean with the blockEmail value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public boolean getBlockEmail()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#startTime}.
 *
 * @return A Date object with the startTime value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public Date getStartTime()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#jobName}.
 *
 * @return A {@link String} with the jobName value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getJobName()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#inputPath}.
 *
 * @return A {@link String} with the inputPath value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getInputPath()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#outputPath}.
 *
 * @return A {@link String} with the outputPath value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getOutputPath()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#errorPath}.
 *
 * @return A {@link String} with the errorPath value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public String getErrorPath()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#joinFiles}.
 *
 * @return A boolean with the joinFiles value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public boolean getJoinFiles()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#transferFiles}.
 *
 * @return A FileTransferMode object with the transferFiles value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public FileTransferMode getTransferFiles()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#deadlineTime}.
 *
 * @return A PartialTimestamp object with the deadlineTime value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public PartialTimestamp getDeadlineTime()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#hardWallclockTimeLimit}.
 *
 * @return A long with the hardWallclockTimeLimit value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public long getHardWallclockTimeLimit()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#softWallclockTimeLimit}.
 *
 * @return A long with the softWallclockTimeLimit value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public long getSoftWallclockTimeLimit()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#hardRunDurationLimit}.
 *
 * @return A long with the hardRunDurationLimit value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public long getHardRunDurationLimit()
throws DrmaaException;
/** Gets the attribute {@link SimpleJobTemplate#softRunDurationLimit}.
 *
 * @return A long with the softRunDurationLimit value
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public long getSoftRunDurationLimit()
throws DrmaaException;
/** Returns the set of supported property names.
 * The required Gridway property names are:
 * <ul>
 * <li> {@link SimpleJobTemplate#remoteCommand}, {@link SimpleJobTemplate#args}, {@link SimpleJobTemplate#jobSubmissionState}</li>
 * <li> {@link SimpleJobTemplate#jobEnvironment}, {@link SimpleJobTemplate#workingDirectory}, {@link SimpleJobTemplate#jobName}</li>
 * <li> {@link SimpleJobTemplate#inputPath}, {@link SimpleJobTemplate#outputPath}, {@link SimpleJobTemplate#errorPath}</li>
 * </ul>
 *
 * The optional Gridway property names (implemented in {@link GridWayJobTemplate}) are:
 * <ul>
 * <li> inputFiles, outputFiles, restartFiles</li>
 * <li> rescheduleOnFailure, numberOfRetries, rank</li>
 * <li> requirements</li>
 * </ul>
 *
 * @return A {@link java.util.Set} with the supported property names
 *
 * @throws NoActiveSessionException
 * @throws java.lang.OutOfMemoryError
 * @throws DrmCommunicationException
 * @throws AuthorizationException
 * @throws java.lang.IllegalArgumentException
 * @throws InternalException
 */
public java.util.Set getAttributeNames()
throws DrmaaException;
}
| oldpatricka/Gridway | src/drmaa/drmaa1.0/org/ggf/drmaa/JobTemplate.java | Java | apache-2.0 | 25,608 |
# Annotation guidelines for the 'segmentation' model
## Introduction
For the following guidelines, it is expected that training data has been generated as explained [here](../Training-the-models-of-Grobid/#generation-of-training-data).
The following TEI elements are used by the segmentation model:
* `<titlePage>` for the cover page
* `<front>` for the document header
* `<note place="headnote">` for the page header
* `<note place="footnote">` for the page footer
* `<body>` for the document body
* `<listBibl>` for the bibliographical section
* `<page>` to indicate page numbers
* `<div type="annex">` for annexes
* `<div type="acknowledgment">` for acknowledgments
It is necessary to identify the substructures listed above wherever they interrupt the `<body>`. Figures and tables (including their potential titles, captions and notes) are considered part of the body, and are therefore contained by the `<body>` element.
Note that the mark-up follows overall the [TEI](http://www.tei-c.org).
> Note: It is recommended to study the existing training documents for the segmentation model first to see some examples of how these elements should be used.
## Analysis
The following sections provide detailed information and examples on how to handle certain typical cases.
### Start of the document (front)
A cover page - usually added by the publisher to summarize the bibliographical and copyright information - might be present, and is entirely identified by the `<titlePage>` element.
The header section typically contains document's title, its author(s) with affiliations, an abstract and some keywords. All this material should be contained within the `<front>` element, as well as any footnotes that are referenced from within the header (such as author affiliations and addresses). Furthermore, the footer including the first page number should go in there. In general we expect as part of the header of the document to find all the bibliographical information for the article. This should be followed in order to ensure homogeneity across the training data.
There should be as many `<front>` elements as necessary that contain all the contents identified as 'front contents'. Note that for the segmentation model, there aren't any `<title>` or `<author>` elements as they are handled in the `header` model which is applied in cascaded in a next stage.
Any footnotes referenced from within the `<body>` should remain there.
Lines like the following that appear as a footnote on the first page of the document should be contained inside a `<front>` element:
* Received: [date]
* Revised: [date]
* Accepted: [date]
The following is an example of correcting an appearance of the article title on the front page:

which Grobid initially recognized as follows:

The following, corrected bit of TEI XML shows the presence of a `front` element surrounding both the topic and the title:
```xml
virus. <lb/>But is the role of LGP2 in CD8 + T cell <lb/>survival and function cell
intrinsic <lb/></body>
<front>A N T I V I R A L I M U N I T Y <lb/>LGP2 rigs CD8 + T cells for
survival <lb/></front>
<body> or extrinsic? T cell receptor-and <lb/>IFNβ-mediated signalling in CD8 + T
```
> Note: In general, whether the `<lb/>` (line break) element is inside or outside the `<front>` or other elements is of no importance. However, as indicated [here](General-principles/#correcting-pre-annotated-files), the `<lb/>` element should not be removed and should follow the stream of text.
The following screen shot shows an example where an article starts mid-page, the end of the preceding one occupying the upper first third of the page. As this content does not belong to the article in question, don't add any elements and remove any `<front>` or `<body>` elements that could appear in the preceding article.

### Following document pages (body)
Any information appearing in the page header needs to be surrounded by a `<note place="headnote">`.

The contents of the grey band in the screenshot above should be surrounded by a `<note place="headnote">` except on the first page where this type of information would be inside the `<front>` element.
Any information appearing in the page footer needs to be put inside a `<note place="footnote">`, as is shown in the following example:

Corresponding TEI XML:
```xml
<note place="footnote">NATURE REVIEWS | IMMUNOLOGY <lb/>VOLUME 12 |
SEPTEMBER 2012 <lb/>© 2012 Macmillan Publishers Limited. All rights reserved</note>
```
The `<page>` element which contains the page number should be outside of any of the above `<note>` elements.
Any notes to the left of the main body text are to be encoded as `<note>` if they are related to an element of the `<body>`; if they concern header elements they go into a `<front>` element. See this screenshot as an example:

The following example shows a case where we have an acknowledgment (in the red frame) that gets encoded as a `<div type="acknowledgment">` whereas the title reference underneath (in the orange frame) is encoded using a `<front>` element.


### Tables and Figures
Figures and tables belong to the main body structure: they are not to be encoded specifically.
If a figure or table appears inside an annex of an article, it should remain inside the `<div type="annex">` element.
If a figure or table appears in an abstract (which is rare but it happens), this item should remain within the `<front>` element.
### Hidden characters
It happens that GROBID picks up hidden text that is present but not visible on the PDF's page for the reader (compare the XML below with the screenshot of the PDF page, the context being highlighted by red boxes); such content should not be surrounded by any element as to indicate to Grobid to ignore it.
```xml
visible in lane 10 (longer exposure), where anti-rabbit secondary antibodies<lb/> were used. <lb/></body>
print ncb1110 17/3/04 2:58 PM Page 309 <lb/>
<note place="footnote">© 2004 Nature Publishing Group <lb/></note>
```

| alexduch/grobid | doc/training/segmentation.md | Markdown | apache-2.0 | 6,698 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>numpy.polynomial.chebyshev.Chebyshev.fit — NumPy v1.8 Manual</title>
<link rel="stylesheet" href="../../_static/pythonista.css" type="text/css" />
<link rel="stylesheet" href="../../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../../',
VERSION: '1.8.0',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: false
};
</script>
<script type="text/javascript" src="../../_static/jquery.js"></script>
<script type="text/javascript" src="../../_static/underscore.js"></script>
<script type="text/javascript" src="../../_static/doctools.js"></script>
<link rel="author" title="About these documents" href="../../about.html" />
<link rel="top" title="NumPy v1.8 Manual" href="../../index.html" />
<link rel="up" title="numpy.polynomial.chebyshev.Chebyshev" href="numpy.polynomial.chebyshev.Chebyshev.html" />
<link rel="next" title="numpy.polynomial.chebyshev.Chebyshev.fromroots" href="numpy.polynomial.chebyshev.Chebyshev.fromroots.html" />
<link rel="prev" title="numpy.polynomial.chebyshev.Chebyshev.deriv" href="numpy.polynomial.chebyshev.Chebyshev.deriv.html" />
<link rel="shortcut icon" type="image/png" href="../../_static/py.png" />
<meta name = "viewport" content = "width=device-width,initial-scale=1.0,user-scalable=no;">
<script type="text/javascript">
var getTextForSample = function(i) {
codeBlock = document.getElementsByClassName('highlight-python')[i];
return codeBlock.innerText;
}
var copySample = function (i) {
window.location.href = '/__pythonista_copy__/' + encodeURI(getTextForSample(i));
}
var openSample = function (i) {
window.location.href = '/__pythonista_open__/' + encodeURI(getTextForSample(i));
}
//Source: http://ejohn.org/blog/partial-functions-in-javascript/
Function.prototype.partial = function() {
var fn = this,
args = Array.prototype.slice.call(arguments);
return function() {
var arg = 0;
for (var i = 0; i < args.length && arg < arguments.length; i++)
if (args[i] === undefined) args[i] = arguments[arg++];
return fn.apply(this, args);
};
};
window.onload=function() {
//Add "Copy" and "Open in Editor" buttons for code samples:
var inApp = navigator.userAgent.match(/AppleWebKit/i) != null && navigator.userAgent.match(/Safari/i) == null;
if (inApp) {
codeBlocks = document.getElementsByClassName('highlight-python');
for (var i = 0; i < codeBlocks.length; i++) {
codeBlock = codeBlocks[i];
if (codeBlock.innerText.indexOf('>>>') == 0) {
//Don't add header for interactive sessions
continue;
}
var codeHeader = document.createElement('div');
codeHeader.className = 'pythonista-code-header';
var copyButton = document.createElement('button');
copyButton.className = 'pythonista-button';
copyButton.innerText = 'Copy';
copyButton.addEventListener('click', copySample.partial(i));
codeHeader.appendChild(copyButton);
var openButton = document.createElement('button');
openButton.className = 'pythonista-button';
openButton.innerText = 'Open in Editor';
openButton.addEventListener('click', openSample.partial(i));
codeHeader.appendChild(openButton);
codeBlock.parentElement.insertBefore(codeHeader, codeBlock);
}
}
}
</script>
</head>
<body ontouchstart="">
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../../genindex.html" title="General Index"
accesskey="I">index</a></li>
<li class="right" >
<a href="numpy.polynomial.chebyshev.Chebyshev.fromroots.html" title="numpy.polynomial.chebyshev.Chebyshev.fromroots"
accesskey="N">next</a> |</li>
<li class="right" >
<a href="numpy.polynomial.chebyshev.Chebyshev.deriv.html" title="numpy.polynomial.chebyshev.Chebyshev.deriv"
accesskey="P">previous</a> |</li>
<li><a href="../../index.html">NumPy v1.8 Manual</a> »</li>
<li><a href="../index.html" >NumPy Reference</a> »</li>
<li><a href="../routines.html" >Routines</a> »</li>
<li><a href="../routines.polynomials.html" >Polynomials</a> »</li>
<li><a href="../routines.polynomials.package.html" >Polynomial Package</a> »</li>
<li><a href="../routines.polynomials.chebyshev.html" >Chebyshev Module (<tt class="docutils literal docutils literal docutils literal docutils literal docutils literal docutils literal docutils literal docutils literal docutils literal docutils literal"><span class="pre">numpy.polynomial.chebyshev</span></tt>)</a> »</li>
<li><a href="numpy.polynomial.chebyshev.Chebyshev.html" accesskey="U">numpy.polynomial.chebyshev.Chebyshev</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="body">
<div class="section" id="numpy-polynomial-chebyshev-chebyshev-fit">
<h1>numpy.polynomial.chebyshev.Chebyshev.fit<a class="headerlink" href="#numpy-polynomial-chebyshev-chebyshev-fit" title="Permalink to this headline">¶</a></h1>
<dl class="staticmethod">
<dt id="numpy.polynomial.chebyshev.Chebyshev.fit">
<em class="property">static </em><tt class="descclassname">Chebyshev.</tt><tt class="descname">fit</tt><big>(</big><em>x, y, deg, domain=None, rcond=None, full=False, w=None, window=[-1, 1]</em><big>)</big><a class="headerlink" href="#numpy.polynomial.chebyshev.Chebyshev.fit" title="Permalink to this definition">¶</a></dt>
<dd><p>Least squares fit to data.</p>
<p>Return a <a class="reference internal" href="numpy.polynomial.chebyshev.Chebyshev.html#numpy.polynomial.chebyshev.Chebyshev" title="numpy.polynomial.chebyshev.Chebyshev"><tt class="xref py py-obj docutils literal"><span class="pre">Chebyshev</span></tt></a> instance that is the least squares fit to the data
<em class="xref py py-obj">y</em> sampled at <em class="xref py py-obj">x</em>. Unlike <a class="reference internal" href="numpy.polynomial.chebyshev.chebfit.html#numpy.polynomial.chebyshev.chebfit" title="numpy.polynomial.chebyshev.chebfit"><tt class="xref py py-obj docutils literal"><span class="pre">chebfit</span></tt></a>, the domain of the returned
instance can be specified and this will often result in a superior
fit with less chance of ill conditioning. Support for NA was added
in version 1.7.0. See <a class="reference internal" href="numpy.polynomial.chebyshev.chebfit.html#numpy.polynomial.chebyshev.chebfit" title="numpy.polynomial.chebyshev.chebfit"><tt class="xref py py-obj docutils literal"><span class="pre">chebfit</span></tt></a> for full documentation of the
implementation.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><p class="first"><strong>x</strong> : array_like, shape (M,)</p>
<blockquote>
<div><p>x-coordinates of the M sample points <tt class="docutils literal"><span class="pre">(x[i],</span> <span class="pre">y[i])</span></tt>.</p>
</div></blockquote>
<p><strong>y</strong> : array_like, shape (M,) or (M, K)</p>
<blockquote>
<div><p>y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.</p>
</div></blockquote>
<p><strong>deg</strong> : int</p>
<blockquote>
<div><p>Degree of the fitting polynomial.</p>
</div></blockquote>
<p><strong>domain</strong> : {None, [beg, end], []}, optional</p>
<blockquote>
<div><p>Domain to use for the returned Chebyshev instance. If <tt class="docutils literal"><span class="pre">None</span></tt>,
then a minimal domain that covers the points <em class="xref py py-obj">x</em> is chosen. If
<tt class="docutils literal"><span class="pre">[]</span></tt> the default domain <tt class="docutils literal"><span class="pre">[-1,1]</span></tt> is used. The default
value is [-1,1] in numpy 1.4.x and <tt class="docutils literal"><span class="pre">None</span></tt> in later versions.
The <tt class="docutils literal"><span class="pre">'[]</span></tt> value was added in numpy 1.5.0.</p>
</div></blockquote>
<p><strong>rcond</strong> : float, optional</p>
<blockquote>
<div><p>Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.</p>
</div></blockquote>
<p><strong>full</strong> : bool, optional</p>
<blockquote>
<div><p>Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.</p>
</div></blockquote>
<p><strong>w</strong> : array_like, shape (M,), optional</p>
<blockquote>
<div><p>Weights. If not None the contribution of each point
<tt class="docutils literal"><span class="pre">(x[i],y[i])</span></tt> to the fit is weighted by <em class="xref py py-obj">w[i]</em>. Ideally the
weights are chosen so that the errors of the products
<tt class="docutils literal"><span class="pre">w[i]*y[i]</span></tt> all have the same variance. The default value is
None.
.. versionadded:: 1.5.0</p>
</div></blockquote>
<p><strong>window</strong> : {[beg, end]}, optional</p>
<blockquote>
<div><p>Window to use for the returned Chebyshev instance. The default
value is <tt class="docutils literal"><span class="pre">[-1,1]</span></tt>
.. versionadded:: 1.6.0</p>
</div></blockquote>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first"><strong>least_squares_fit</strong> : instance of Chebyshev</p>
<blockquote>
<div><p>The Chebyshev instance is the least squares fit to the data and
has the domain specified in the call.</p>
</div></blockquote>
<p><strong>[residuals, rank, singular_values, rcond]</strong> : only if <em class="xref py py-obj">full</em> = True</p>
<blockquote class="last">
<div><p>Residuals of the least squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of <em class="xref py py-obj">rcond</em>. For more details, see
<em class="xref py py-obj">linalg.lstsq</em>.</p>
</div></blockquote>
</td>
</tr>
</tbody>
</table>
<div class="admonition seealso">
<p class="first admonition-title">See also</p>
<dl class="last docutils">
<dt><a class="reference internal" href="numpy.polynomial.chebyshev.chebfit.html#numpy.polynomial.chebyshev.chebfit" title="numpy.polynomial.chebyshev.chebfit"><tt class="xref py py-obj docutils literal"><span class="pre">chebfit</span></tt></a></dt>
<dd>similar function</dd>
</dl>
</div>
</dd></dl>
</div>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
© <a href="../../copyright.html">Copyright</a> 2008-2009, The Scipy community.
<br />
The Python Software Foundation is a non-profit corporation.
<a href="http://www.python.org/psf/donations/">Please donate.</a>
<br />
Last updated on May 03, 2016.
<a href="../../bugs.html">Found a bug</a>?
<br />
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.2.1.
</div>
</body>
</html> | leesavide/pythonista-docs | Documentation/numpy/reference/generated/numpy.polynomial.chebyshev.Chebyshev.fit.html | HTML | apache-2.0 | 12,423 |
// ========================== KeySnail Init File =========================== //
// この領域は, GUI により設定ファイルを生成した際にも引き継がれます
// 特殊キー, キーバインド定義, フック, ブラックリスト以外のコードは, この中に書くようにして下さい
// ========================================================================= //
//{{%PRESERVE%
// ここにコードを入力して下さい
//}}%PRESERVE%
// ========================================================================= //
// ========================= Special key settings ========================== //
// Cancel the current operation / key sequence.
key.quitKey = "C-g";
// Show the interactive help.
key.helpKey = "<f1>";
// Send a pass-through (escape) key.
key.escapeKey = "C-q";
// Start recording a keyboard macro.
key.macroStartKey = "<f3>";
// Stop recording / play back the keyboard macro.
key.macroEndKey = "<f4>";
// Emacs-style universal (prefix) argument.
key.universalArgumentKey = "C-u";
// Negative prefix argument variants.
key.negativeArgument1Key = "C--";
key.negativeArgument2Key = "C-M--";
key.negativeArgument3Key = "M--";
// Temporarily suspend KeySnail's key handling.
key.suspendKey = "<f2>";
// ================================= Hooks ================================= //
// Reset mark/selection state and return focus to the page content when the
// quit key (C-g) is pressed.
//
// NOTE(review): the original file registered this identical handler twice
// (once via setHook and again via addToHook with a byte-identical body),
// which made the whole sequence — including the synthetic ESC key event —
// run twice per quit key press. A single setHook registration preserves the
// intended behavior without the duplication.
hook.setHook('KeyBoardQuit', function (aEvent) {
    // Ignore the quit key while a multi-key sequence is still in progress.
    if (key.currentKeySequence.length) {
        return;
    }
    command.closeFindBar();
    var marked = command.marked(aEvent);
    if (util.isCaretEnabled()) {
        if (marked) {
            // Caret browsing with an active mark: just clear the mark.
            command.resetMark(aEvent);
        } else {
            // Drop focus from the current widget back to the page content.
            if ("blur" in aEvent.target) {
                aEvent.target.blur();
            }
            gBrowser.focus();
            _content.focus();
        }
    } else {
        goDoCommand("cmd_selectNone");
    }
    // Forward a synthetic ESC key press to the page unless a mark was active.
    if (KeySnail.windowType === "navigator:browser" && !marked) {
        key.generateKey(aEvent.originalTarget, KeyEvent.DOM_VK_ESCAPE, true);
    }
});
// ============================= Key bindings ============================== //
// --- Global key bindings (active in every KeySnail mode) --------------------
// Each call: key (or key-sequence array), handler, description shown in the
// binding list (Japanese), and an optional trailing boolean — presumably
// KeySnail's "no repeat with prefix argument" flag (TODO confirm).
// C-M-r: reload this configuration file
key.setGlobalKey('C-M-r', function (ev) {
    userscript.reload();
}, '設定ファイルを再読み込み', true);
// M-x: open the extension (ext) selector
key.setGlobalKey('M-x', function (ev, arg) {
    ext.select(arg, ev);
}, 'エクステ一覧表示', true);
// <f1> b: list all key bindings
key.setGlobalKey(['<f1>', 'b'], function (ev) {
    key.listKeyBindings();
}, 'キーバインド一覧を表示');
// <f1> F: open the Firefox help
key.setGlobalKey(['<f1>', 'F'], function (ev) {
    openHelpLink("firefox-help");
}, 'Firefox のヘルプを表示');
// C-m: synthesize a RETURN key event on the focused element
key.setGlobalKey('C-m', function (ev) {
    key.generateKey(ev.originalTarget, KeyEvent.DOM_VK_RETURN, true);
}, 'リターンコードを生成');
// C-x l: focus the location bar
key.setGlobalKey(['C-x', 'l'], function (ev) {
    command.focusToById("urlbar");
}, 'ロケーションバーへフォーカス', true);
// C-x g: focus the web-search bar
key.setGlobalKey(['C-x', 'g'], function (ev) {
    command.focusToById("searchbar");
}, '検索バーへフォーカス', true);
// C-x t: focus the first text input on the page
key.setGlobalKey(['C-x', 't'], function (ev) {
    command.focusElement(command.elementsRetrieverTextarea, 0);
}, '最初のインプットエリアへフォーカス', true);
// C-x s: focus the first button on the page
key.setGlobalKey(['C-x', 's'], function (ev) {
    command.focusElement(command.elementsRetrieverButton, 0);
}, '最初のボタンへフォーカス', true);
// C-x k: close the current tab (or the window if it is the last tab)
key.setGlobalKey(['C-x', 'k'], function (ev) {
    BrowserCloseTabOrWindow();
}, 'タブ / ウィンドウを閉じる');
// C-x K: close the whole window
key.setGlobalKey(['C-x', 'K'], function (ev) {
    closeWindow(true);
}, 'ウィンドウを閉じる');
// C-x n: open a new browser window
key.setGlobalKey(['C-x', 'n'], function (ev) {
    OpenBrowserWindow();
}, 'ウィンドウを開く');
// C-x C-c: quit Firefox
key.setGlobalKey(['C-x', 'C-c'], function (ev) {
    goQuitApplication();
}, 'Firefox を終了', true);
// C-x o: focus the next (sub)frame
key.setGlobalKey(['C-x', 'o'], function (ev, arg) {
    command.focusOtherFrame(arg);
}, '次のフレームを選択');
// C-x 1: load only the current frame's document into the window
key.setGlobalKey(['C-x', '1'], function (ev) {
    window.loadURI(ev.target.ownerDocument.location.href);
}, '現在のフレームだけを表示', true);
// C-x C-f: open a local file
key.setGlobalKey(['C-x', 'C-f'], function (ev) {
    BrowserOpenFileWindow();
}, 'ファイルを開く', true);
// C-x C-s: save the current document
key.setGlobalKey(['C-x', 'C-s'], function (ev) {
    saveDocument(window.content.document);
}, 'ファイルを保存', true);
// C-s / C-r: Emacs-like incremental search, forward / backward
key.setGlobalKey('C-s', function (ev) {
    command.iSearchForwardKs(ev);
}, 'Emacs ライクなインクリメンタル検索', true);
key.setGlobalKey('C-r', function (ev) {
    command.iSearchBackwardKs(ev);
}, 'Emacs ライクな逆方向インクリメンタル検索', true);
// C-c u: reopen the most recently closed tab
key.setGlobalKey(['C-c', 'u'], function (ev) {
    undoCloseTab();
}, '閉じたタブを元に戻す');
// C-c C-c C-v / C-c C-c C-c: show / clear the JavaScript console
key.setGlobalKey(['C-c', 'C-c', 'C-v'], function (ev) {
    toJavaScriptConsole();
}, 'Javascript コンソールを表示', true);
key.setGlobalKey(['C-c', 'C-c', 'C-c'], function (ev) {
    command.clearConsole();
}, 'Javascript コンソールの表示をクリア', true);
// C-M-l (global) and 2 / l (view mode): select the tab to the right
key.setGlobalKey('C-M-l', function (ev) {
    getBrowser().mTabContainer.advanceSelectedTab(1, true);
}, 'ひとつ右のタブへ');
key.setViewKey([['2'], ['l']], function (ev) {
    getBrowser().mTabContainer.advanceSelectedTab(1, true);
}, 'ひとつ右のタブへ');
// C-M-h: select the tab to the left
key.setGlobalKey('C-M-h', function (ev) {
    getBrowser().mTabContainer.advanceSelectedTab(-1, true);
}, 'ひとつ左のタブへ');
// M-: — evaluate JavaScript in the interactive interpreter
key.setGlobalKey('M-:', function (ev) {
    command.interpreter();
}, 'JavaScript のコードを評価', true);
// M-w: copy the selected text
key.setGlobalKey('M-w', function (ev) {
    command.copyRegion(ev);
}, '選択中のテキストをコピー', true);
// --- View-mode key bindings (browsing, no input focused) --------------------
// 1 / h: select the tab to the left
key.setViewKey([['1'], ['h']], function (ev) {
    getBrowser().mTabContainer.advanceSelectedTab(-1, true);
}, 'ひとつ左のタブへ');
// C-n / j and C-p / k: scroll one line down / up via synthetic arrow keys
key.setViewKey([['C-n'], ['j']], function (ev) {
    key.generateKey(ev.originalTarget, KeyEvent.DOM_VK_DOWN, true);
}, '一行スクロールダウン');
key.setViewKey([['C-p'], ['k']], function (ev) {
    key.generateKey(ev.originalTarget, KeyEvent.DOM_VK_UP, true);
}, '一行スクロールアップ');
// C-f / . and C-b / ,: scroll right / left
key.setViewKey([['C-f'], ['.']], function (ev) {
    key.generateKey(ev.originalTarget, KeyEvent.DOM_VK_RIGHT, true);
}, '右へスクロール');
key.setViewKey([['C-b'], [',']], function (ev) {
    key.generateKey(ev.originalTarget, KeyEvent.DOM_VK_LEFT, true);
}, '左へスクロール');
// M-v / b and C-v: scroll one screen up / down
key.setViewKey([['M-v'], ['b']], function (ev) {
    goDoCommand("cmd_scrollPageUp");
}, '一画面分スクロールアップ');
key.setViewKey('C-v', function (ev) {
    goDoCommand("cmd_scrollPageDown");
}, '一画面スクロールダウン');
// M-< / g and M-> / G: jump to the top / bottom of the page
key.setViewKey([['M-<'], ['g']], function (ev) {
    goDoCommand("cmd_scrollTop");
}, 'ページ先頭へ移動', true);
key.setViewKey([['M->'], ['G']], function (ev) {
    goDoCommand("cmd_scrollBottom");
}, 'ページ末尾へ移動', true);
// : — run a shell-like KeySnail command
key.setViewKey(':', function (ev, arg) {
    shell.input(null, arg);
}, 'コマンドの実行', true);
// R / B / F: reload, history back, history forward
key.setViewKey('R', function (ev) {
    BrowserReload();
}, '更新', true);
key.setViewKey('B', function (ev) {
    BrowserBack();
}, '戻る');
key.setViewKey('F', function (ev) {
    BrowserForward();
}, '進む');
// C-x h: select the whole document
key.setViewKey(['C-x', 'h'], function (ev) {
    goDoCommand("cmd_selectAll");
}, 'すべて選択', true);
// f: focus the first text input on the page
key.setViewKey('f', function (ev) {
    command.focusElement(command.elementsRetrieverTextarea, 0);
}, '最初のインプットエリアへフォーカス', true);
// M-p / M-n: walk focus across the page's buttons
key.setViewKey('M-p', function (ev) {
    command.walkInputElement(command.elementsRetrieverButton, true, true);
}, '次のボタンへフォーカスを当てる');
key.setViewKey('M-n', function (ev) {
    command.walkInputElement(command.elementsRetrieverButton, false, true);
}, '前のボタンへフォーカスを当てる');
// r: run the "kungfloo-reblog" extension
key.setViewKey('r', function (ev, arg) {
    ext.exec("kungfloo-reblog", arg, ev);
}, 'kungfloo - Reblog', true);
// --- Edit-mode key bindings (text inputs / textareas) -----------------------
// C-x h: select all text in the field
key.setEditKey(['C-x', 'h'], function (ev) {
    command.selectAll(ev);
}, '全て選択', true);
// C-x u / C-_: undo (with a short status-bar notice)
key.setEditKey([['C-x', 'u'], ['C-_']], function (ev) {
    display.echoStatusBar("Undo!", 2000);
    goDoCommand("cmd_undo");
}, 'アンドゥ');
// C-x r d: delete the selected rectangle
key.setEditKey(['C-x', 'r', 'd'], function (ev, arg) {
    command.replaceRectangle(ev.originalTarget, "", false, !arg);
}, '矩形削除', true);
// C-x r t: replace the rectangle with a prompted string
key.setEditKey(['C-x', 'r', 't'], function (ev) {
    prompt.read("String rectangle: ", function (aStr, aInput) {command.replaceRectangle(aInput, aStr);}, ev.originalTarget);
}, '矩形置換', true);
// C-x r o: open (insert blank space into) the rectangle
key.setEditKey(['C-x', 'r', 'o'], function (ev) {
    command.openRectangle(ev.originalTarget);
}, '矩形行空け', true);
// C-x r k: kill the rectangle into the kill buffer
key.setEditKey(['C-x', 'r', 'k'], function (ev, arg) {
    command.kill.buffer = command.killRectangle(ev.originalTarget, !arg);
}, '矩形キル', true);
// C-x r y: yank the previously killed rectangle
key.setEditKey(['C-x', 'r', 'y'], function (ev) {
    command.yankRectangle(ev.originalTarget, command.kill.buffer);
}, '矩形ヤンク', true);
// C-` / C-@ / C-SPC: set the mark (start of region)
key.setEditKey([['C-`'], ['C-@'], ['C-SPC']], function (ev) {
    command.setMark(ev);
}, 'マークをセット', true);
// C-o: open a new line at point
key.setEditKey('C-o', function (ev) {
    command.openLine(ev);
}, '行を開く (Open line)');
// C-\: redo (with a short status-bar notice)
key.setEditKey('C-\\', function (ev) {
    display.echoStatusBar("Redo!", 2000);
    goDoCommand("cmd_redo");
}, 'リドゥ');
// Emacs-style cursor movement within the field
key.setEditKey('C-a', function (ev) {
    command.beginLine(ev);
}, '行頭へ移動');
key.setEditKey('C-e', function (ev) {
    command.endLine(ev);
}, '行末へ');
key.setEditKey('C-f', function (ev) {
    command.nextChar(ev);
}, '一文字右へ移動');
key.setEditKey('C-b', function (ev) {
    command.previousChar(ev);
}, '一文字左へ移動');
key.setEditKey('C-n', function (ev) {
    command.nextLine(ev);
}, '一行下へ');
key.setEditKey('C-p', function (ev) {
    command.previousLine(ev);
}, '一行上へ');
key.setEditKey('C-v', function (ev) {
    command.pageDown(ev);
}, '一画面分下へ');
key.setEditKey('M-<', function (ev) {
    command.moveTop(ev);
}, 'テキストエリア先頭へ');
key.setEditKey('M->', function (ev) {
    command.moveBottom(ev);
}, 'テキストエリア末尾へ');
// Deletion commands
key.setEditKey('C-d', function (ev) {
    goDoCommand("cmd_deleteCharForward");
}, '次の一文字削除');
key.setEditKey('C-h', function (ev) {
    goDoCommand("cmd_deleteCharBackward");
}, '前の一文字を削除');
key.setEditKey([['C-<backspace>'], ['M-<delete>']], function (ev) {
    command.deleteBackwardWord(ev);
}, '前の一単語を削除');
// C-k: kill from point to the end of the line
key.setEditKey('C-k', function (ev) {
    command.killLine(ev);
}, 'カーソルから先を一行カット (Kill line)');
// C-y / M-y: yank, and cycle through older kill-ring entries
key.setEditKey('C-y', command.yank, '貼り付け (Yank)');
key.setEditKey('M-y', command.yankPop, '古いクリップボードの中身を順に貼り付け (Yank pop)', true);
// C-M-y: choose an entry from the kill ring interactively and paste it
key.setEditKey('C-M-y', function (ev) {
    if (!command.kill.ring.length) {
        return;
    }
    // SpiderMonkey-only `let` expression: push the system clipboard text
    // onto the kill ring first, unless it is already the newest entry.
    let (ct = command.getClipboardText()) (!command.kill.ring.length || ct != command.kill.ring[0]) &&
        command.pushKillRing(ct);
    prompt.selector({message: "Paste:", collection: command.kill.ring, callback: function (i) {if (i >= 0) {key.insertText(command.kill.ring[i]);}}});
}, '以前にコピーしたテキスト一覧から選択して貼り付け', true);
// C-w: kill (cut) the selected region and clear the mark
key.setEditKey('C-w', function (ev) {
    goDoCommand("cmd_copy");
    goDoCommand("cmd_delete");
    command.resetMark(ev);
}, '選択中のテキストを切り取り (Kill region)', true);
// Word-wise movement / editing
key.setEditKey('M-f', function (ev) {
    command.forwardWord(ev);
}, '一単語右へ移動');
key.setEditKey('M-b', function (ev) {
    command.backwardWord(ev);
}, '一単語左へ移動');
key.setEditKey('M-v', function (ev) {
    command.pageUp(ev);
}, '一画面分上へ');
key.setEditKey('M-d', function (ev) {
    command.deleteForwardWord(ev);
}, '次の一単語を削除');
// M-u / M-l / M-c: upcase / downcase / capitalize the next word
key.setEditKey('M-u', function (ev, arg) {
    command.wordCommand(ev, arg, command.upcaseForwardWord, command.upcaseBackwardWord);
}, '次の一単語を全て大文字に (Upper case)');
key.setEditKey('M-l', function (ev, arg) {
    command.wordCommand(ev, arg, command.downcaseForwardWord, command.downcaseBackwardWord);
}, '次の一単語を全て小文字に (Lower case)');
key.setEditKey('M-c', function (ev, arg) {
    command.wordCommand(ev, arg, command.capitalizeForwardWord, command.capitalizeBackwardWord);
}, '次の一単語をキャピタライズ');
// M-n / M-p: walk focus across the page's textareas
key.setEditKey('M-n', function (ev) {
    command.walkInputElement(command.elementsRetrieverTextarea, true, true);
}, '次のテキストエリアへフォーカス');
key.setEditKey('M-p', function (ev) {
    command.walkInputElement(command.elementsRetrieverTextarea, false, true);
}, '前のテキストエリアへフォーカス');
// --- Caret-mode key bindings (caret browsing) -------------------------------
// Pattern: if a mark is set (ev.target.ksMarked), run the selecting variant
// of the movement command; otherwise the plain move/scroll command.
key.setCaretKey([['C-a'], ['^']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectBeginLine") : goDoCommand("cmd_beginLine");
}, 'キャレットを行頭へ移動');
key.setCaretKey([['C-e'], ['$'], ['M->'], ['G']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectEndLine") : goDoCommand("cmd_endLine");
}, 'キャレットを行末へ移動');
key.setCaretKey([['C-n'], ['j']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectLineNext") : goDoCommand("cmd_scrollLineDown");
}, 'キャレットを一行下へ');
key.setCaretKey([['C-p'], ['k']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectLinePrevious") : goDoCommand("cmd_scrollLineUp");
}, 'キャレットを一行上へ');
key.setCaretKey([['C-f'], ['l']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectCharNext") : goDoCommand("cmd_scrollRight");
}, 'キャレットを一文字右へ移動');
key.setCaretKey([['C-b'], ['h'], ['C-h']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectCharPrevious") : goDoCommand("cmd_scrollLeft");
}, 'キャレットを一文字左へ移動');
key.setCaretKey([['M-f'], ['w']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectWordNext") : goDoCommand("cmd_wordNext");
}, 'キャレットを一単語右へ移動');
key.setCaretKey([['M-b'], ['W']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectWordPrevious") : goDoCommand("cmd_wordPrevious");
}, 'キャレットを一単語左へ移動');
key.setCaretKey([['C-v'], ['SPC']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectPageNext") : goDoCommand("cmd_movePageDown");
}, 'キャレットを一画面分下へ');
key.setCaretKey([['M-v'], ['b']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectPagePrevious") : goDoCommand("cmd_movePageUp");
}, 'キャレットを一画面分上へ');
key.setCaretKey([['M-<'], ['g']], function (ev) {
    ev.target.ksMarked ? goDoCommand("cmd_selectTop") : goDoCommand("cmd_scrollTop");
}, 'キャレットをページ先頭へ移動');
// J / K: scroll the view one line down / up (caret stays put)
key.setCaretKey('J', function (ev) {
    util.getSelectionController().scrollLine(true);
}, '画面を一行分下へスクロール');
key.setCaretKey('K', function (ev) {
    util.getSelectionController().scrollLine(false);
}, '画面を一行分上へスクロール');
// , / .: horizontal scrolling
key.setCaretKey(',', function (ev) {
    util.getSelectionController().scrollHorizontal(true);
    goDoCommand("cmd_scrollLeft");
}, '左へスクロール');
key.setCaretKey('.', function (ev) {
    goDoCommand("cmd_scrollRight");
    util.getSelectionController().scrollHorizontal(false);
}, '右へスクロール');
// z: scroll until the caret position is visible (recenter)
key.setCaretKey('z', function (ev) {
    command.recenter(ev);
}, 'キャレットの位置までスクロール');
// C-` / C-@ / C-SPC: set the mark for selection
key.setCaretKey([['C-`'], ['C-@'], ['C-SPC']], function (ev) {
    command.setMark(ev);
}, 'マークをセット', true);
// : — run a shell-like KeySnail command
key.setCaretKey(':', function (ev, arg) {
    shell.input(null, arg);
}, 'コマンドの実行', true);
// R / B / F: reload, history back, history forward
key.setCaretKey('R', function (ev) {
    BrowserReload();
}, '更新', true);
key.setCaretKey('B', function (ev) {
    BrowserBack();
}, '戻る');
key.setCaretKey('F', function (ev) {
    BrowserForward();
}, '進む');
// C-x h: select the whole document
key.setCaretKey(['C-x', 'h'], function (ev) {
    goDoCommand("cmd_selectAll");
}, 'すべて選択', true);
// f: focus the first text input on the page
key.setCaretKey('f', function (ev) {
    command.focusElement(command.elementsRetrieverTextarea, 0);
}, '最初のインプットエリアへフォーカス', true);
// M-p / M-n: walk focus across the page's buttons
key.setCaretKey('M-p', function (ev) {
    command.walkInputElement(command.elementsRetrieverButton, true, true);
}, '次のボタンへフォーカスを当てる');
key.setCaretKey('M-n', function (ev) {
    command.walkInputElement(command.elementsRetrieverButton, false, true);
}, '前のボタンへフォーカスを当てる');
// r: run the "kungfloo-reblog" extension
key.setCaretKey('r', function (ev, arg) {
    ext.exec("kungfloo-reblog", arg, ev);
}, 'kungfloo - Reblog', true);
| mzp/chef-repo | dotfiles/keysnail.js | JavaScript | apache-2.0 | 17,112 |
package io.fabric8.openshift.api.model.tuned.v1;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonAnyGetter;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.IntOrString;
import io.fabric8.kubernetes.api.model.KubernetesResource;
import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.LocalObjectReference;
import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.kubernetes.api.model.ObjectReference;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.PodTemplateSpec;
import io.fabric8.kubernetes.api.model.ResourceRequirements;
import io.sundr.builder.annotations.Buildable;
import io.sundr.builder.annotations.BuildableReference;
import lombok.EqualsAndHashCode;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
/**
 * Model for a {@code match} entry of the {@code tuned.openshift.io/v1} API:
 * a label (optionally with a value) plus an optional list of nested
 * sub-matches. Appears to be generated code (sundrio {@code @Buildable} /
 * Jackson) — avoid hand-editing the accessors.
 */
@JsonDeserialize(using = com.fasterxml.jackson.databind.JsonDeserializer.None.class)
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
    "apiVersion",
    "kind",
    "metadata",
    "label",
    "match",
    "type",
    "value"
})
@ToString
@EqualsAndHashCode
@Setter
@Accessors(prefix = {
    "_",
    ""
})
@Buildable(editableEnabled = false, validationEnabled = false, generateBuilderPackage = false, lazyCollectionInitEnabled = false, builderPackage = "io.fabric8.kubernetes.api.builder", refs = {
    @BuildableReference(ObjectMeta.class),
    @BuildableReference(LabelSelector.class),
    @BuildableReference(Container.class),
    @BuildableReference(PodTemplateSpec.class),
    @BuildableReference(ResourceRequirements.class),
    @BuildableReference(IntOrString.class),
    @BuildableReference(ObjectReference.class),
    @BuildableReference(LocalObjectReference.class),
    @BuildableReference(PersistentVolumeClaim.class)
})
public class TunedMatch implements KubernetesResource
{
    // Name of the label this rule matches on.
    @JsonProperty("label")
    private String label;
    // Nested sub-match rules; serialized only when non-empty.
    @JsonProperty("match")
    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    private List<TunedMatch> match = new ArrayList<TunedMatch>();
    @JsonProperty("type")
    private String type;
    // Expected value of the label.
    @JsonProperty("value")
    private String value;
    // Catch-all for unknown JSON fields, preserved across round-trips
    // via the @JsonAnyGetter/@JsonAnySetter pair below.
    @JsonIgnore
    private Map<String, Object> additionalProperties = new HashMap<String, Object>();
    /**
     * No args constructor for use in serialization
     *
     */
    public TunedMatch() {
    }
    /**
     * All-args constructor.
     *
     * @param label label name to match
     * @param match nested sub-match rules
     * @param type match type
     * @param value expected label value
     */
    public TunedMatch(String label, List<TunedMatch> match, String type, String value) {
        super();
        this.label = label;
        this.match = match;
        this.type = type;
        this.value = value;
    }
    @JsonProperty("label")
    public String getLabel() {
        return label;
    }
    @JsonProperty("label")
    public void setLabel(String label) {
        this.label = label;
    }
    @JsonProperty("match")
    public List<TunedMatch> getMatch() {
        return match;
    }
    @JsonProperty("match")
    public void setMatch(List<TunedMatch> match) {
        this.match = match;
    }
    @JsonProperty("type")
    public String getType() {
        return type;
    }
    @JsonProperty("type")
    public void setType(String type) {
        this.type = type;
    }
    @JsonProperty("value")
    public String getValue() {
        return value;
    }
    @JsonProperty("value")
    public void setValue(String value) {
        this.value = value;
    }
    @JsonAnyGetter
    public Map<String, Object> getAdditionalProperties() {
        return this.additionalProperties;
    }
    @JsonAnySetter
    public void setAdditionalProperty(String name, Object value) {
        this.additionalProperties.put(name, value);
    }
}
| fabric8io/kubernetes-client | kubernetes-model-generator/openshift-model-tuned/src/generated/java/io/fabric8/openshift/api/model/tuned/v1/TunedMatch.java | Java | apache-2.0 | 4,218 |
import {
afterEach,
AsyncTestCompleter,
beforeEach,
ddescribe,
describe,
dispatchEvent,
el,
expect,
iit,
inject,
it,
xit
} from 'angular2/test_lib';
import {DOM} from 'angular2/src/dom/dom_adapter';
import {Inject} from 'angular2/di';
import {Component, Decorator, Template, PropertySetter} from 'angular2/angular2';
import {TestBed} from 'angular2/src/test_lib/test_bed';
import {ControlGroupDirective, ControlDirective, Control, ControlGroup, OptionalControl,
ControlValueAccessor, RequiredValidatorDirective, CheckboxControlValueAccessor,
DefaultValueAccessor, Validators} from 'angular2/forms';
// Integration tests for the angular2 forms directives ([control-group] /
// [control]): DOM initialization from the model, model updates on DOM
// change, rebinding, the supported control types, validators and nested
// control groups. Skipped entirely on platforms whose DOM adapter cannot
// dispatch real DOM events.
export function main() {
  if (DOM.supportsDOMEvents()) {
    describe("integration tests", () => {
      // Model -> view: the input is initialized from the bound Control value.
      it("should initialize DOM elements with the given form object",
         inject([TestBed, AsyncTestCompleter], (tb, async) => {
           var ctx = new MyComp(new ControlGroup({
             "login": new Control("loginValue")
           }));
           var t = `<div [control-group]="form">
                <input type="text" control="login">
              </div>`;
           tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
             view.detectChanges();
             var input = view.querySelector("input");
             expect(input.value).toEqual("loginValue");
             async.done();
           });
         }));
      // View -> model: editing the input updates the ControlGroup's value.
      it("should update the control group values on DOM change",
         inject([TestBed, AsyncTestCompleter], (tb, async) => {
           var form = new ControlGroup({
             "login": new Control("oldValue")
           });
           var ctx = new MyComp(form);
           var t = `<div [control-group]="form">
                <input type="text" control="login">
              </div>`;
           tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
             view.detectChanges();
             var input = view.querySelector("input");
             input.value = "updatedValue";
             dispatchEvent(input, "change");
             expect(form.value).toEqual({"login": "updatedValue"});
             async.done();
           });
         }));
      // A bare [control] binding (no surrounding control-group) also syncs.
      it("should work with single controls", inject([TestBed, AsyncTestCompleter], (tb, async) => {
        var control = new Control("loginValue");
        var ctx = new MyComp(control);
        var t = `<div><input type="text" [control]="form"></div>`;
        tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
          view.detectChanges();
          var input = view.querySelector("input")
          expect(input.value).toEqual("loginValue");
          input.value = "updatedValue";
          dispatchEvent(input, "change");
          expect(control.value).toEqual("updatedValue");
          async.done();
        });
      }));
      // Replacing the whole ControlGroup instance re-renders the view.
      it("should update DOM elements when rebinding the control group",
         inject([TestBed, AsyncTestCompleter], (tb, async) => {
           var form = new ControlGroup({
             "login": new Control("oldValue")
           });
           var ctx = new MyComp(form);
           var t = `<div [control-group]="form">
                <input type="text" control="login">
              </div>`;
           tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
             view.detectChanges();
             ctx.form = new ControlGroup({
               "login": new Control("newValue")
             });
             view.detectChanges();
             var input = view.querySelector("input")
             expect(input.value).toEqual("newValue");
             async.done();
           });
         }));
      // Changing the bound control *name* switches which Control is shown.
      it("should update DOM element when rebinding the control name",
         inject([TestBed, AsyncTestCompleter], (tb, async) => {
           var ctx = new MyComp(new ControlGroup({
             "one": new Control("one"),
             "two": new Control("two")
           }), "one");
           var t = `<div [control-group]="form">
                <input type="text" [control]="name">
              </div>`;
           tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
             view.detectChanges();
             var input = view.querySelector("input")
             expect(input.value).toEqual("one");
             ctx.name = "two";
             view.detectChanges();
             expect(input.value).toEqual("two");
             async.done();
           });
         }));
      // One spec per supported control type / value accessor.
      describe("different control types", () => {
        it("should support <input type=text>", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"text": new Control("old")}));
          var t = `<div [control-group]="form">
                  <input type="text" control="text">
                </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var input = view.querySelector("input")
            expect(input.value).toEqual("old");
            input.value = "new";
            dispatchEvent(input, "input");
            expect(ctx.form.value).toEqual({"text": "new"});
            async.done();
          });
        }));
        it("should support <input> without type", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"text": new Control("old")}));
          var t = `<div [control-group]="form">
                  <input control="text">
                </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var input = view.querySelector("input")
            expect(input.value).toEqual("old");
            input.value = "new";
            dispatchEvent(input, "input");
            expect(ctx.form.value).toEqual({"text": "new"});
            async.done();
          });
        }));
        it("should support <textarea>", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"text": new Control('old')}));
          var t = `<div [control-group]="form">
                  <textarea control="text"></textarea>
                </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var textarea = view.querySelector("textarea")
            expect(textarea.value).toEqual("old");
            textarea.value = "new";
            dispatchEvent(textarea, "input");
            expect(ctx.form.value).toEqual({"text": 'new'});
            async.done();
          });
        }));
        // Checkboxes sync through `checked` + "change" events.
        it("should support <type=checkbox>", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"checkbox": new Control(true)}));
          var t = `<div [control-group]="form">
                  <input type="checkbox" control="checkbox">
                </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var input = view.querySelector("input")
            expect(input.checked).toBe(true);
            input.checked = false;
            dispatchEvent(input, "change");
            expect(ctx.form.value).toEqual({"checkbox": false});
            async.done();
          });
        }));
        // Selects sync both the <select> value and the <option> selected flag.
        it("should support <select>", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"city": new Control("SF")}));
          var t = `<div [control-group]="form">
                    <select control="city">
                      <option value="SF"></option>
                      <option value="NYC"></option>
                    </select>
                  </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var select = view.querySelector("select")
            var sfOption = view.querySelector("option")
            expect(select.value).toEqual('SF');
            expect(sfOption.selected).toBe(true);
            select.value = 'NYC';
            dispatchEvent(select, "change");
            expect(ctx.form.value).toEqual({"city": 'NYC'});
            expect(sfOption.selected).toBe(false);
            async.done();
          });
        }));
        // Uses the WrappedValue decorator (defined below in this file) as a
        // custom ControlValueAccessor that wraps/unwraps values with "!".
        it("should support custom value accessors", inject([TestBed, AsyncTestCompleter], (tb, async) => {
          var ctx = new MyComp(new ControlGroup({"name": new Control("aa")}));
          var t = `<div [control-group]="form">
                  <input type="text" control="name" wrapped-value>
                </div>`;
          tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
            view.detectChanges();
            var input = view.querySelector("input")
            expect(input.value).toEqual("!aa!");
            input.value = "!bb!";
            dispatchEvent(input, "change");
            expect(ctx.form.value).toEqual({"name": "bb"});
            async.done();
          });
        }));
      });
      // Validators declared in the template vs. on the model behave the same.
      describe("validations", () => {
        it("should use validators defined in html",
           inject([TestBed, AsyncTestCompleter], (tb, async) => {
             var form = new ControlGroup({"login": new Control("aa")});
             var ctx = new MyComp(form);
             var t = `<div [control-group]="form">
                <input type="text" control="login" required>
              </div>`;
             tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
               view.detectChanges();
               expect(form.valid).toEqual(true);
               var input = view.querySelector("input");
               input.value = "";
               dispatchEvent(input, "change");
               expect(form.valid).toEqual(false);
               async.done();
             });
           }));
        it("should use validators defined in the model",
           inject([TestBed, AsyncTestCompleter], (tb, async) => {
             var form = new ControlGroup({"login": new Control("aa", Validators.required)});
             var ctx = new MyComp(form);
             var t = `<div [control-group]="form">
                <input type="text" control="login">
              </div>`;
             tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
               view.detectChanges();
               expect(form.valid).toEqual(true);
               var input = view.querySelector("input");
               input.value = "";
               dispatchEvent(input, "change");
               expect(form.valid).toEqual(false);
               async.done();
             });
           }));
      });
      // control-group directives may be nested; values nest accordingly.
      describe("nested forms", () => {
        it("should init DOM with the given form object",
           inject([TestBed, AsyncTestCompleter], (tb, async) => {
             var form = new ControlGroup({
               "nested": new ControlGroup({
                 "login": new Control("value")
               })
             });
             var ctx = new MyComp(form);
             var t = `<div [control-group]="form">
                <div control-group="nested">
                  <input type="text" control="login">
                </div>
              </div>`;
             tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
               view.detectChanges();
               var input = view.querySelector("input")
               expect(input.value).toEqual("value");
               async.done();
             });
           }));
        it("should update the control group values on DOM change",
           inject([TestBed, AsyncTestCompleter], (tb, async) => {
             var form = new ControlGroup({
               "nested": new ControlGroup({
                 "login": new Control("value")
               })
             });
             var ctx = new MyComp(form);
             var t = `<div [control-group]="form">
                <div control-group="nested">
                  <input type="text" control="login">
                </div>
              </div>`;
             tb.createView(MyComp, {context: ctx, html: t}).then((view) => {
               view.detectChanges();
               var input = view.querySelector("input")
               input.value = "updatedValue";
               dispatchEvent(input, "change");
               expect(form.value).toEqual({"nested": {"login": "updatedValue"}});
               async.done();
             });
           }));
      });
    });
  }
}
// Host component for the specs above: exposes the bound form (a ControlGroup
// or a single Control) and an optional control name that the test templates
// bind against. Tests construct it directly, so both injected values are
// optional and default to null.
@Component({selector: "my-comp"})
@Template({directives: [
  ControlGroupDirective,
  ControlDirective,
  WrappedValue,
  RequiredValidatorDirective,
  CheckboxControlValueAccessor,
  DefaultValueAccessor]})
class MyComp {
  form:any;
  name:string;
  constructor(@Inject('form') form = null, @Inject('name') name = null) {
    this.form = form;
    this.name = name;
  }
}
// Custom value accessor used by the "custom value accessors" spec: it
// registers itself on the ControlDirective, renders the model value wrapped
// as "!value!" and strips the wrapping again when the DOM value changes.
@Decorator({
  selector:'[wrapped-value]',
  events: {
    'change' : 'handleOnChange($event.target.value)'
  }
})
class WrappedValue {
  _setProperty:Function;
  onChange:Function;
  constructor(cd:ControlDirective, @PropertySetter('value') setProperty:Function) {
    this._setProperty = setProperty;
    cd.valueAccessor = this;
  }
  // Model -> view: write the wrapped representation into the element.
  writeValue(value) {
    this._setProperty(`!${value}!`);
  }
  // View -> model: strip the first and last characters (the "!" wrappers).
  handleOnChange(value) {
    this.onChange(value.substring(1, value.length - 1));
  }
}
| keertip/angular | modules/angular2/test/forms/integration_spec.js | JavaScript | apache-2.0 | 13,142 |
package test.issues.strava;
import javastrava.model.StravaStream;
import javastrava.model.reference.StravaStreamType;
/**
* <p>
* These tests should PASS if issue <a href="https://github.com/danshannon/javastravav3api/issues/21">javastrava-api #21</a> is still current
* </p>
*
* @author Dan Shannon
* @see <a href= "https://github.com/danshannon/javastravav3api/issues/21">https://github.com/danshannon/javastravav3api/issues/21</a>
*/
public class Issue21 extends IssueTest {
	/**
	 * The issue is considered still present if any of the returned distance
	 * streams carries a non-null resolution (none was requested).
	 *
	 * @see test.issues.strava.IssueTest#isIssue()
	 */
	@SuppressWarnings("boxing")
	@Override
	public boolean isIssue() throws Exception {
		final StravaStream[] streams = this.api.getActivityStreams(245713183L, StravaStreamType.DISTANCE.toString(), null, null);
		// Defensive: the original dereferenced the array unconditionally and
		// would NPE if the API returned no body; treat that as "not reproduced".
		if (streams == null) {
			return false;
		}
		for (final StravaStream stream : streams) {
			if (stream.getResolution() != null) {
				return true;
			}
		}
		return false;
	}

	/**
	 * @see test.issues.strava.IssueTest#issueNumber()
	 */
	@Override
	public int issueNumber() {
		return 21;
	}
}
| danshannon/javastrava-test | src/test/java/test/issues/strava/Issue21.java | Java | apache-2.0 | 1,006 |
package com.learning.designpattern.j2ee.businessdelegate.service;
/**
 * {@code BusinessService} implementation standing in for an EJB-backed
 * service in this Business Delegate pattern demo; it simply reports which
 * service handled the task.
 */
public class EJBService implements BusinessService {

   /** Message printed when this service processes a task. */
   private static final String TASK_MESSAGE = "Processing task by invoking EJB Service";

   @Override
   public void doProcessing() {
      System.out.println(TASK_MESSAGE);
   }
}
| mandheer/learnjava | learn/tutorial/designpatterns/src/test/java/com/learning/designpattern/j2ee/businessdelegate/service/EJBService.java | Java | apache-2.0 | 246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package de.unioninvestment.eai.portal.portlet.crud.domain.support.map;
/**
 * Interface for transformers used by the generic container support.
 * (Translated from the original German Javadoc.)
 *
 * @param <A>
 *            type to transform from
 * @param <B>
 *            type to transform to
 */
public interface ValueTransformer<A, B> {
	/**
	 * Transforms the given value into the target type.
	 *
	 * @param a the value to transform
	 * @return the transformed value
	 */
	B transform(A a);
}
| Union-Investment/Crud2Go | eai-portal-domain-crudportlet/src/main/java/de/unioninvestment/eai/portal/portlet/crud/domain/support/map/ValueTransformer.java | Java | apache-2.0 | 1,150 |
/*
* Copyright 1999-2011 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.dubbo.config;
import com.alibaba.dubbo.common.Constants;
import com.alibaba.dubbo.common.URL;
import com.alibaba.dubbo.common.Version;
import com.alibaba.dubbo.common.bytecode.Wrapper;
import com.alibaba.dubbo.common.extension.ExtensionLoader;
import com.alibaba.dubbo.common.utils.ConfigUtils;
import com.alibaba.dubbo.common.utils.NetUtils;
import com.alibaba.dubbo.common.utils.ReflectUtils;
import com.alibaba.dubbo.common.utils.StringUtils;
import com.alibaba.dubbo.config.annotation.Reference;
import com.alibaba.dubbo.config.support.Parameter;
import com.alibaba.dubbo.rpc.Invoker;
import com.alibaba.dubbo.rpc.Protocol;
import com.alibaba.dubbo.rpc.ProxyFactory;
import com.alibaba.dubbo.rpc.StaticContext;
import com.alibaba.dubbo.rpc.cluster.Cluster;
import com.alibaba.dubbo.rpc.cluster.directory.StaticDirectory;
import com.alibaba.dubbo.rpc.cluster.support.AvailableCluster;
import com.alibaba.dubbo.rpc.cluster.support.ClusterUtils;
import com.alibaba.dubbo.rpc.protocol.injvm.InjvmProtocol;
import com.alibaba.dubbo.rpc.service.GenericService;
import com.alibaba.dubbo.rpc.support.ProtocolUtils;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* ReferenceConfig
*
* @author william.liangf
* @export
*/
/**
 * ReferenceConfig
 *
 * Consumer-side configuration of a single Dubbo service reference. It resolves
 * the target URLs (direct p2p address, in-JVM service, or registry URLs),
 * builds the {@link Invoker} chain and creates the client proxy returned by
 * {@link #get()}.
 *
 * @author william.liangf
 * @export
 */
public class ReferenceConfig<T> extends AbstractReferenceConfig {
    private static final long serialVersionUID = -5864351140409987595L;
    // Adaptive extension points: the concrete implementation is selected at
    // call time from URL parameters.
    private static final Protocol refprotocol = ExtensionLoader.getExtensionLoader(Protocol.class).getAdaptiveExtension();
    private static final Cluster cluster = ExtensionLoader.getExtensionLoader(Cluster.class).getAdaptiveExtension();
    private static final ProxyFactory proxyFactory = ExtensionLoader.getExtensionLoader(ProxyFactory.class).getAdaptiveExtension();
    // All URLs this reference resolves to (registry and/or direct provider URLs).
    private final List<URL> urls = new ArrayList<URL>();
    // fully qualified name of the referenced interface
    private String interfaceName;
    private Class<?> interfaceClass;
    // client transport type
    private String client;
    // address for a direct point-to-point connection to the provider
    private String url;
    // per-method configuration
    private List<MethodConfig> methods;
    // consumer-wide default configuration
    private ConsumerConfig consumer;
    private String protocol;
    // reference to the generated service proxy
    private transient volatile T ref;
    private transient volatile Invoker<?> invoker;
    private transient volatile boolean initialized;
    private transient volatile boolean destroyed;
    // Guard object whose finalizer logs a warning when this reference was
    // garbage-collected without an explicit destroy().
    @SuppressWarnings("unused")
    private final Object finalizerGuardian = new Object() {
        @Override
        protected void finalize() throws Throwable {
            super.finalize();
            if (!ReferenceConfig.this.destroyed) {
                logger.warn("ReferenceConfig(" + url + ") is not DESTROYED when FINALIZE");
                /* do not perform the destroy automatically for now
                try {
                    ReferenceConfig.this.destroy();
                } catch (Throwable t) {
                    logger.warn("Unexpected err when destroy invoker of ReferenceConfig(" + url + ") in finalize method!", t);
                }
                */
            }
        }
    };

    public ReferenceConfig() {
    }

    /** Builds a configuration from a {@link Reference} annotation. */
    public ReferenceConfig(Reference reference) {
        appendAnnotation(Reference.class, reference);
    }

    /**
     * Validates the implicit callback configuration of {@code method} and
     * converts the onreturn/onthrow/oninvoke method names stored in
     * {@code attributes} into concrete {@link Method} objects.
     *
     * @throws IllegalStateException when callbacks are configured while
     *         {@code return} is explicitly set to false
     */
    private static void checkAndConvertImplicitConfig(MethodConfig method, Map<String, String> map, Map<Object, Object> attributes) {
        //check config conflict
        if (Boolean.FALSE.equals(method.isReturn()) && (method.getOnreturn() != null || method.getOnthrow() != null)) {
            throw new IllegalStateException("method config error : return attribute must be set true when onreturn or onthrow has been setted.");
        }
        //convert onreturn methodName to Method
        String onReturnMethodKey = StaticContext.getKey(map, method.getName(), Constants.ON_RETURN_METHOD_KEY);
        Object onReturnMethod = attributes.get(onReturnMethodKey);
        if (onReturnMethod != null && onReturnMethod instanceof String) {
            attributes.put(onReturnMethodKey, getMethodByName(method.getOnreturn().getClass(), onReturnMethod.toString()));
        }
        //convert onthrow methodName to Method
        String onThrowMethodKey = StaticContext.getKey(map, method.getName(), Constants.ON_THROW_METHOD_KEY);
        Object onThrowMethod = attributes.get(onThrowMethodKey);
        if (onThrowMethod != null && onThrowMethod instanceof String) {
            attributes.put(onThrowMethodKey, getMethodByName(method.getOnthrow().getClass(), onThrowMethod.toString()));
        }
        //convert oninvoke methodName to Method
        String onInvokeMethodKey = StaticContext.getKey(map, method.getName(), Constants.ON_INVOKE_METHOD_KEY);
        Object onInvokeMethod = attributes.get(onInvokeMethodKey);
        if (onInvokeMethod != null && onInvokeMethod instanceof String) {
            attributes.put(onInvokeMethodKey, getMethodByName(method.getOninvoke().getClass(), onInvokeMethod.toString()));
        }
    }

    /** Looks up a method by name on {@code clazz}, wrapping any failure. */
    private static Method getMethodByName(Class<?> clazz, String methodName) {
        try {
            return ReflectUtils.findMethodByMethodName(clazz, methodName);
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }

    /** @return the first resolved URL, or null when none has been resolved yet */
    public URL toUrl() {
        return urls == null || urls.size() == 0 ? null : urls.iterator().next();
    }

    /** @return all resolved URLs (live list, never null) */
    public List<URL> toUrls() {
        return urls;
    }

    /**
     * Returns the service proxy, lazily initializing the reference on first call.
     *
     * @throws IllegalStateException if this reference was already destroyed
     */
    public synchronized T get() {
        if (destroyed) {
            throw new IllegalStateException("Already destroyed!");
        }
        if (ref == null) {
            init();
        }
        return ref;
    }

    /** Destroys the underlying invoker and releases the proxy. Idempotent. */
    public synchronized void destroy() {
        if (ref == null) {
            return;
        }
        if (destroyed) {
            return;
        }
        destroyed = true;
        try {
            invoker.destroy();
        } catch (Throwable t) {
            logger.warn("Unexpected err when destroy invoker of ReferenceConfig(" + url + ").", t);
        }
        invoker = null;
        ref = null;
    }

    /**
     * One-time initialization: validates the configuration, resolves the
     * interface class, applies the p2p resolve overrides (-D property or
     * dubbo-resolve.properties), merges consumer/module/application defaults,
     * assembles the reference parameter map and finally creates the proxy.
     */
    private void init() {
        if (initialized) {
            return;
        }
        initialized = true;
        if (interfaceName == null || interfaceName.length() == 0) {
            throw new IllegalStateException("<dubbo:reference interface=\"\" /> interface not allow null!");
        }
        // load the global consumer defaults
        checkDefault();
        appendProperties(this);
        if (getGeneric() == null && getConsumer() != null) {
            setGeneric(getConsumer().getGeneric());
        }
        if (ProtocolUtils.isGeneric(getGeneric())) {
            // generic invocation: no concrete interface class is required
            interfaceClass = GenericService.class;
        } else {
            try {
                interfaceClass = Class.forName(interfaceName, true, Thread.currentThread()
                        .getContextClassLoader());
            } catch (ClassNotFoundException e) {
                throw new IllegalStateException(e.getMessage(), e);
            }
            checkInterfaceAndMethods(interfaceClass, methods);
        }
        // a -D<interfaceName>=<url> system property forces a direct p2p address
        String resolve = System.getProperty(interfaceName);
        String resolveFile = null;
        if (resolve == null || resolve.length() == 0) {
            // fall back to the resolve file (explicit property, then ~/dubbo-resolve.properties)
            resolveFile = System.getProperty("dubbo.resolve.file");
            if (resolveFile == null || resolveFile.length() == 0) {
                File userResolveFile = new File(new File(System.getProperty("user.home")), "dubbo-resolve.properties");
                if (userResolveFile.exists()) {
                    resolveFile = userResolveFile.getAbsolutePath();
                }
            }
            if (resolveFile != null && resolveFile.length() > 0) {
                Properties properties = new Properties();
                FileInputStream fis = null;
                try {
                    fis = new FileInputStream(new File(resolveFile));
                    properties.load(fis);
                } catch (IOException e) {
                    throw new IllegalStateException("Unload " + resolveFile + ", cause: " + e.getMessage(), e);
                } finally {
                    try {
                        if (null != fis) fis.close();
                    } catch (IOException e) {
                        logger.warn(e.getMessage(), e);
                    }
                }
                resolve = properties.getProperty(interfaceName);
            }
        }
        if (resolve != null && resolve.length() > 0) {
            // the resolved address overrides any configured url
            url = resolve;
            if (logger.isWarnEnabled()) {
                if (resolveFile != null && resolveFile.length() > 0) {
                    logger.warn("Using default dubbo resolve file " + resolveFile + " replace " + interfaceName + "" + resolve + " to p2p invoke remote service.");
                } else {
                    logger.warn("Using -D" + interfaceName + "=" + resolve + " to p2p invoke remote service.");
                }
            }
        }
        // inherit missing settings from consumer, then module, then application
        if (consumer != null) {
            if (application == null) {
                application = consumer.getApplication();
            }
            if (module == null) {
                module = consumer.getModule();
            }
            if (registries == null) {
                registries = consumer.getRegistries();
            }
            if (monitor == null) {
                monitor = consumer.getMonitor();
            }
        }
        if (module != null) {
            if (registries == null) {
                registries = module.getRegistries();
            }
            if (monitor == null) {
                monitor = module.getMonitor();
            }
        }
        if (application != null) {
            if (registries == null) {
                registries = application.getRegistries();
            }
            if (monitor == null) {
                monitor = application.getMonitor();
            }
        }
        checkApplication();
        checkStubAndMock(interfaceClass);
        // parameter map carried by the reference URL, plus callback attributes
        Map<String, String> map = new HashMap<String, String>();
        Map<Object, Object> attributes = new HashMap<Object, Object>();
        map.put(Constants.SIDE_KEY, Constants.CONSUMER_SIDE);
        map.put(Constants.DUBBO_VERSION_KEY, Version.getVersion());
        map.put(Constants.TIMESTAMP_KEY, String.valueOf(System.currentTimeMillis()));
        if (ConfigUtils.getPid() > 0) {
            map.put(Constants.PID_KEY, String.valueOf(ConfigUtils.getPid()));
        }
        if (!isGeneric()) {
            String revision = Version.getVersion(interfaceClass, version);
            if (revision != null && revision.length() > 0) {
                map.put("revision", revision);
            }
            String[] methods = Wrapper.getWrapper(interfaceClass).getMethodNames();
            if (methods.length == 0) {
                logger.warn("NO method found in service interface " + interfaceClass.getName());
                map.put("methods", Constants.ANY_VALUE);
            } else {
                map.put("methods", StringUtils.join(new HashSet<String>(Arrays.asList(methods)), ","));
            }
        }
        map.put(Constants.INTERFACE_KEY, interfaceName);
        appendParameters(map, application);
        appendParameters(map, module);
        appendParameters(map, consumer, Constants.DEFAULT_KEY);
        appendParameters(map, this);
        // service key prefix used to scope per-method callback attributes ("prifix" is a historical typo)
        String prifix = StringUtils.getServiceKey(map);
        if (methods != null && methods.size() > 0) {
            for (MethodConfig method : methods) {
                appendParameters(map, method, method.getName());
                // "<method>.retry=false" is legacy shorthand for "<method>.retries=0"
                String retryKey = method.getName() + ".retry";
                if (map.containsKey(retryKey)) {
                    String retryValue = map.remove(retryKey);
                    if ("false".equals(retryValue)) {
                        map.put(method.getName() + ".retries", "0");
                    }
                }
                appendAttributes(attributes, method, prifix + "." + method.getName());
                checkAndConvertImplicitConfig(method, map, attributes);
            }
        }
        // attributes are stored in the system-wide StaticContext
        StaticContext.getSystemContext().putAll(attributes);
        ref = createProxy(map);
    }

    /**
     * Builds the invoker(s) from the resolved URLs (in-JVM, direct or via
     * registries), optionally checks provider availability, and wraps the
     * result in a client proxy.
     *
     * @throws IllegalStateException when no registry is configured or when the
     *         availability check fails
     */
    @SuppressWarnings({"unchecked", "rawtypes", "deprecation"})
    private T createProxy(Map<String, String> map) {
        URL tmpUrl = new URL("temp", "localhost", 0, map);
        final boolean isJvmRefer;
        if (isInjvm() == null) {
            if (url != null && url.length() > 0) { // an explicit URL was given: never use an in-JVM reference
                isJvmRefer = false;
            } else if (InjvmProtocol.getInjvmProtocol().isInjvmRefer(tmpUrl)) {
                // by default, refer to the local service if one is exported in this JVM
                isJvmRefer = true;
            } else {
                isJvmRefer = false;
            }
        } else {
            isJvmRefer = isInjvm().booleanValue();
        }
        if (isJvmRefer) {
            URL url = new URL(Constants.LOCAL_PROTOCOL, NetUtils.LOCALHOST, 0, interfaceClass.getName()).addParameters(map);
            invoker = refprotocol.refer(interfaceClass, url);
            if (logger.isInfoEnabled()) {
                logger.info("Using injvm service " + interfaceClass.getName());
            }
        } else {
            if (url != null && url.length() > 0) { // user-specified URL(s): either direct provider addresses or registry URLs
                String[] us = Constants.SEMICOLON_SPLIT_PATTERN.split(url);
                if (us != null && us.length > 0) {
                    for (String u : us) {
                        URL url = URL.valueOf(u);
                        if (url.getPath() == null || url.getPath().length() == 0) {
                            url = url.setPath(interfaceName);
                        }
                        if (Constants.REGISTRY_PROTOCOL.equals(url.getProtocol())) {
                            urls.add(url.addParameterAndEncoded(Constants.REFER_KEY, StringUtils.toQueryString(map)));
                        } else {
                            urls.add(ClusterUtils.mergeUrl(url, map));
                        }
                    }
                }
            } else { // assemble the URLs from the configured registries
                List<URL> us = loadRegistries(false);
                if (us != null && us.size() > 0) {
                    for (URL u : us) {
                        URL monitorUrl = loadMonitor(u);
                        if (monitorUrl != null) {
                            map.put(Constants.MONITOR_KEY, URL.encode(monitorUrl.toFullString()));
                        }
                        urls.add(u.addParameterAndEncoded(Constants.REFER_KEY, StringUtils.toQueryString(map)));
                    }
                }
                if (urls == null || urls.size() == 0) {
                    throw new IllegalStateException("No such any registry to reference " + interfaceName + " on the consumer " + NetUtils.getLocalHost() + " use dubbo version " + Version.getVersion() + ", please config <dubbo:registry address=\"...\" /> to your spring config.");
                }
            }
            if (urls.size() == 1) {
                invoker = refprotocol.refer(interfaceClass, urls.get(0));
            } else {
                // multiple URLs: refer each one, then join them into a cluster
                List<Invoker<?>> invokers = new ArrayList<Invoker<?>>();
                URL registryURL = null;
                for (URL url : urls) {
                    invokers.add(refprotocol.refer(interfaceClass, url));
                    if (Constants.REGISTRY_PROTOCOL.equals(url.getProtocol())) {
                        registryURL = url; // use the last registry url
                    }
                }
                if (registryURL != null) { // at least one registry URL is present
                    // when registries are involved, only AvailableCluster is used
                    URL u = registryURL.addParameter(Constants.CLUSTER_KEY, AvailableCluster.NAME);
                    invoker = cluster.join(new StaticDirectory(u, invokers));
                } else { // no registry URLs, direct invokers only
                    invoker = cluster.join(new StaticDirectory(invokers));
                }
            }
        }
        // availability check: explicit setting wins, then consumer default, then true
        Boolean c = check;
        if (c == null && consumer != null) {
            c = consumer.isCheck();
        }
        if (c == null) {
            c = true; // default true
        }
        if (c && !invoker.isAvailable()) {
            throw new IllegalStateException("Failed to check the status of the service " + interfaceName + ". No provider available for the service " + (group == null ? "" : group + "/") + interfaceName + (version == null ? "" : ":" + version) + " from the url " + invoker.getUrl() + " to the consumer " + NetUtils.getLocalHost() + " use dubbo version " + Version.getVersion());
        }
        if (logger.isInfoEnabled()) {
            logger.info("Refer dubbo service " + interfaceClass.getName() + " from url " + invoker.getUrl());
        }
        // create the service proxy
        return (T) proxyFactory.getProxy(invoker);
    }

    /** Ensures a ConsumerConfig exists and populates it from properties. */
    private void checkDefault() {
        if (consumer == null) {
            consumer = new ConsumerConfig();
        }
        appendProperties(consumer);
    }

    /**
     * Resolves the interface class lazily: generic references map to
     * {@link GenericService}, otherwise the class is loaded by name.
     */
    public Class<?> getInterfaceClass() {
        if (interfaceClass != null) {
            return interfaceClass;
        }
        if (isGeneric()
                || (getConsumer() != null && getConsumer().isGeneric())) {
            return GenericService.class;
        }
        try {
            if (interfaceName != null && interfaceName.length() > 0) {
                this.interfaceClass = Class.forName(interfaceName, true, Thread.currentThread()
                        .getContextClassLoader());
            }
        } catch (ClassNotFoundException t) {
            throw new IllegalStateException(t.getMessage(), t);
        }
        return interfaceClass;
    }

    /**
     * @param interfaceClass
     * @see #setInterface(Class)
     * @deprecated use {@link #setInterface(Class)} instead
     */
    @Deprecated
    public void setInterfaceClass(Class<?> interfaceClass) {
        setInterface(interfaceClass);
    }

    public String getInterface() {
        return interfaceName;
    }

    /**
     * Sets the referenced interface; also derives the interface name.
     *
     * @throws IllegalStateException when the given class is not an interface
     */
    public void setInterface(Class<?> interfaceClass) {
        if (interfaceClass != null && !interfaceClass.isInterface()) {
            throw new IllegalStateException("The interface class " + interfaceClass + " is not a interface!");
        }
        this.interfaceClass = interfaceClass;
        setInterface(interfaceClass == null ? (String) null : interfaceClass.getName());
    }

    /** Sets the interface name; also used as the bean id when none is set. */
    public void setInterface(String interfaceName) {
        this.interfaceName = interfaceName;
        if (id == null || id.length() == 0) {
            id = interfaceName;
        }
    }

    public String getClient() {
        return client;
    }

    public void setClient(String client) {
        checkName("client", client);
        this.client = client;
    }

    // excluded: the direct url must not leak into the reference URL parameters
    @Parameter(excluded = true)
    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public List<MethodConfig> getMethods() {
        return methods;
    }

    @SuppressWarnings("unchecked")
    public void setMethods(List<? extends MethodConfig> methods) {
        this.methods = (List<MethodConfig>) methods;
    }

    public ConsumerConfig getConsumer() {
        return consumer;
    }

    public void setConsumer(ConsumerConfig consumer) {
        this.consumer = consumer;
    }

    public String getProtocol() {
        return protocol;
    }

    public void setProtocol(String protocol) {
        this.protocol = protocol;
    }

    // just for test
    Invoker<?> getInvoker() {
        return invoker;
    }
}
package com.xxr.utils;
import java.awt.Image;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import javax.swing.ImageIcon;
import com.sun.image.codec.jpeg.JPEGCodec;
import com.sun.image.codec.jpeg.JPEGEncodeParam;
import com.sun.image.codec.jpeg.JPEGImageEncoder;
/**
* @author xiongxianren
 * @description 图片工具类:创建缩略图
*/
public class ImageUtil {

    /**
     * Creates a 110x80 JPEG thumbnail of the given image file.
     * Kept for backward compatibility; delegates to the parameterized overload.
     *
     * @param photoPath path of the source image file
     * @param smallPath path where the thumbnail JPEG is written
     */
    public static void createSmallPhoto(String photoPath, String smallPath) {
        createSmallPhoto(photoPath, smallPath, 110, 80);
    }

    /**
     * Creates a JPEG thumbnail of the given image file with the requested size.
     * Uses javax.imageio instead of the unsupported com.sun.image.codec.jpeg
     * classes (removed from modern JDKs). Like the original implementation,
     * I/O failures are logged to stderr rather than propagated.
     *
     * @param photoPath path of the source image file
     * @param smallPath path where the thumbnail JPEG is written
     * @param width     thumbnail width in pixels
     * @param height    thumbnail height in pixels
     */
    public static void createSmallPhoto(String photoPath, String smallPath, int width, int height) {
        try {
            Image src = javax.imageio.ImageIO.read(new File(photoPath));
            BufferedImage tag = new BufferedImage(width, height,
                    BufferedImage.TYPE_INT_RGB);
            java.awt.Graphics g = tag.getGraphics();
            try {
                // Scale the source into the target raster.
                g.drawImage(src, 0, 0, width, height, null);
            } finally {
                // Always release the native graphics resources.
                g.dispose();
            }
            javax.imageio.ImageIO.write(tag, "jpg", new File(smallPath));
        } catch (IOException e) {
            // Preserve the original best-effort behavior: log and continue.
            e.printStackTrace();
        }
    }
}
| FrankXiong/web_album | src/com/xxr/utils/ImageUtil.java | Java | apache-2.0 | 1,137 |
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.scratch;
import com.intellij.debugger.DebuggerManager;
import com.intellij.debugger.engine.DebugProcess;
import com.intellij.debugger.engine.DebugProcessImpl;
import com.intellij.debugger.engine.DebugProcessListener;
import com.intellij.execution.ExecutionBundle;
import com.intellij.execution.ExecutionException;
import com.intellij.execution.Executor;
import com.intellij.execution.JavaRunConfigurationExtensionManager;
import com.intellij.execution.application.ApplicationConfiguration;
import com.intellij.execution.configurations.*;
import com.intellij.execution.filters.TextConsoleBuilderFactory;
import com.intellij.execution.process.OSProcessHandler;
import com.intellij.execution.runners.ExecutionEnvironment;
import com.intellij.execution.util.JavaParametersUtil;
import com.intellij.execution.util.ProgramParametersUtil;
import com.intellij.openapi.options.SettingsEditor;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.JavaSdk;
import com.intellij.openapi.projectRoots.JavaSdkVersion;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.vfs.VirtualFileManager;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.File;
/**
* @author Eugene Zhuravlev
*/
/**
 * Run configuration for executing a Java scratch file. Extends the plain
 * application configuration with the URL of the backing scratch file and
 * wires the scratch compile output onto the runtime classpath.
 *
 * @author Eugene Zhuravlev
 */
public class JavaScratchConfiguration extends ApplicationConfiguration {
  protected JavaScratchConfiguration(String name, @NotNull Project project, @NotNull ConfigurationFactory factory) {
    super(name, project, factory);
  }

  @Override
  public boolean isBuildProjectOnEmptyModuleList() {
    // A scratch is compiled separately; no need to build the whole project.
    return false;
  }

  /**
   * Validates the configuration before run: JRE, main class, and the
   * association with an existing scratch file.
   *
   * @throws RuntimeConfigurationException when any prerequisite is missing
   */
  @Override
  public void checkConfiguration() throws RuntimeConfigurationException {
    JavaParametersUtil.checkAlternativeJRE(this);
    final String className = getMainClassName();
    if (className == null || className.length() == 0) {
      throw new RuntimeConfigurationError(ExecutionBundle.message("no.main.class.specified.error.text"));
    }
    if (getScratchFileUrl() == null) {
      throw new RuntimeConfigurationError("No scratch file associated with configuration");
    }
    if (getScratchVirtualFile() == null) {
      throw new RuntimeConfigurationError("Associated scratch file not found");
    }
    ProgramParametersUtil.checkWorkingDirectoryExist(this, getProject(), getConfigurationModule().getModule());
    JavaRunConfigurationExtensionManager.checkConfigurationIsValid(this);
  }

  /**
   * Builds the command-line state used to launch the scratch. The anonymous
   * subclass adds the preview-feature VM flag when needed, prepends the
   * scratch output directory to the classpath, and attaches a position
   * manager when debugging so breakpoints map back to the scratch file.
   */
  @Override
  public RunProfileState getState(@NotNull Executor executor, @NotNull ExecutionEnvironment env) throws ExecutionException {
    final JavaCommandLineState state = new JavaApplicationCommandLineState<JavaScratchConfiguration>(this, env) {
      @Override
      protected JavaParameters createJavaParameters() throws ExecutionException {
        final JavaParameters params = super.createJavaParameters();
        // After params are fully configured, additionally ensure JAVA_ENABLE_PREVIEW_PROPERTY is set,
        // because the scratch is compiled with this feature if it is supported by the JDK
        final Sdk jdk = params.getJdk();
        if (jdk != null) {
          final JavaSdkVersion version = JavaSdk.getInstance().getVersion(jdk);
          if (version != null && version.getMaxLanguageLevel().isPreview()) {
            final ParametersList vmOptions = params.getVMParametersList();
            if (!vmOptions.hasParameter(JavaParameters.JAVA_ENABLE_PREVIEW_PROPERTY)) {
              vmOptions.add(JavaParameters.JAVA_ENABLE_PREVIEW_PROPERTY);
            }
          }
        }
        return params;
      }

      @Override
      protected void setupJavaParameters(@NotNull JavaParameters params) throws ExecutionException {
        super.setupJavaParameters(params);
        // Put the scratch compile output first so its classes win over project ones.
        final File scrachesOutput = JavaScratchCompilationSupport.getScratchOutputDirectory(getProject());
        if (scrachesOutput != null) {
          params.getClassPath().addFirst(FileUtil.toCanonicalPath(scrachesOutput.getAbsolutePath()).replace('/', File.separatorChar));
        }
      }

      @NotNull
      @Override
      protected OSProcessHandler startProcess() throws ExecutionException {
        final OSProcessHandler handler = super.startProcess();
        if (getRunnerSettings() instanceof DebuggingRunnerData) {
          final VirtualFile vFile = getConfiguration().getScratchVirtualFile();
          if (vFile != null) {
            DebuggerManager.getInstance(getProject()).addDebugProcessListener(handler, new DebugProcessListener() {
              @Override
              public void processAttached(@NotNull DebugProcess process) {
                if (vFile.isValid()) {
                  // Map debugger positions back to the scratch source file.
                  process.appendPositionManager(new JavaScratchPositionManager((DebugProcessImpl)process, vFile));
                }
                // One-shot listener: detach after the first attach event.
                process.removeDebugProcessListener(this);
              }
            });
          }
        }
        return handler;
      }
    };
    state.setConsoleBuilder(TextConsoleBuilderFactory.getInstance().createBuilder(getProject(), getConfigurationModule().getSearchScope()));
    return state;
  }

  @NotNull
  @Override
  public SettingsEditor<? extends RunConfiguration> getConfigurationEditor() {
    return new JavaScratchConfigurable(getProject());
  }

  /** Stores the URL of the scratch file backing this configuration. */
  public void setScratchFileUrl(String url) {
    getOptions().setScratchFileUrl(url);
  }

  @Nullable
  public String getScratchFileUrl() {
    return getOptions().getScratchFileUrl();
  }

  /** @return the scratch virtual file, or null when the URL is unset or stale */
  @Nullable
  public VirtualFile getScratchVirtualFile() {
    final String url = getScratchFileUrl();
    return url == null? null : VirtualFileManager.getInstance().findFileByUrl(url);
  }

  @NotNull
  @Override
  protected JavaScratchConfigurationOptions getOptions() {
    return (JavaScratchConfigurationOptions)super.getOptions();
  }
}
| leafclick/intellij-community | java/execution/impl/src/com/intellij/execution/scratch/JavaScratchConfiguration.java | Java | apache-2.0 | 6,017 |
<?php
/**
 * Excel export helper (CodeIgniter controller) built on the legacy PHPExcel
 * library. Streams generated .xlsx workbooks straight to the browser.
 */
class Excel_Handler extends MY_Controller{
    function __construct()
    {
        parent::__construct();
        $this->load->library('PHPExcel');
    }
    public function index()
    {
    }
    /**
     * Exports a uniform rowset: every row in $data is assumed to have the
     * same keys, and the keys of the first row become the header titles.
     * Sends the workbook as an .xlsx download and does not return.
     *
     * @param array  $data     list of associative rows (all sharing one schema)
     * @param string $filename download name without extension
     * @return void  output is written to php://output
     */
    public function normal($data, $filename) {
        $objPHPExcel = new PHPExcel();
        $objPHPExcel->getProperties()->setCreator("Rufus Mbugua");
        $objPHPExcel->getProperties()->setLastModifiedBy("Rufus Mbugua");
        $objPHPExcel->getProperties()->setTitle("Office 2007 XLSX Test Document");
        $objPHPExcel->getProperties()->setSubject("Office 2007 XLSX Test Document");
        $objPHPExcel->getProperties()->setDescription(" ");
        // Add some data
        // echo date('H:i:s') . " Add some data\n";
        $objPHPExcel->setActiveSheetIndex(0);
        $rowExec = 1;
        //Looping through the cells
        $column = 0;
        //echo '<pre>';print_r($data);echo'</pre>';die;
        // Header row: derive human-readable titles from the first row's keys by
        // stripping known prefixes (ar_/as_/ae_/ac_/li_/lq_), expanding
        // abbreviations (comm -> commodity, fac -> facility) and title-casing.
        foreach ($data[0] as $k=>$cell) {
            //echo $column . $rowExec; die;
            $objPHPExcel->getActiveSheet()->setCellValueByColumnAndRow($column, $rowExec, ucwords(str_replace('comm','commodity',str_replace('ar_','',str_replace('as_','',str_replace('ae_','',str_replace('ac_','',str_replace('li_','',
                str_replace('lq_','',str_replace('fac', 'facility', str_replace('_', ' ', $k)))))))))));
            $objPHPExcel->getActiveSheet()->getStyle(PHPExcel_Cell::stringFromColumnIndex($column) . $rowExec)->getFont()->setBold(true)->setSize(14);
            $objPHPExcel->getActiveSheet()->getColumnDimension(PHPExcel_Cell::stringFromColumnIndex($column))->setAutoSize(true);
            $column++;
        }
        // Data rows start below the header.
        $rowExec = 2;
        foreach ($data as $key=>$rowset) {
            //Looping through the cells per facility
            $column = 0;
            //var_dump($rowset);die;
            foreach ($rowset as $cell) {
                $objPHPExcel->getActiveSheet()->setCellValueByColumnAndRow($column, $rowExec, $cell);
                $column++;
            }
            $rowExec++;
        }
        //die ;
        // Rename sheet
        // echo date('H:i:s') . " Rename sheet\n";
        $objPHPExcel->getActiveSheet()->setTitle('Simple');
        // Save Excel 2007 file
        //echo date('H:i:s') . " Write to Excel2007 format\n";
        $objWriter = new PHPExcel_Writer_Excel2007($objPHPExcel);
        // We'll be outputting an excel file
        header('Content-type: application/vnd.ms-excel');
        // It will be called file.xls
        header('Content-Disposition: attachment; filename=' . $filename . '.xlsx');
        // Write file to the browser
        $objWriter->save('php://output');
        // Echo done
        //echo date('H:i:s') . " Done writing file.\r\n";
    }
    /**
     * Exports a sparse rowset: the header titles come from $data['columns'],
     * and each row in $data['data'] may omit some of those keys (missing
     * values are written as empty cells). Sends the workbook as an .xlsx
     * download and does not return.
     *
     * @param array  $data     ['columns' => list of keys, 'data' => list of rows]
     * @param string $filename download name without extension
     * @return void  output is written to php://output
     */
    public function dynamic($data, $filename) {
        $objPHPExcel = new PHPExcel();
        $objPHPExcel->getProperties()->setCreator("Rufus Mbugua");
        $objPHPExcel->getProperties()->setLastModifiedBy("Rufus Mbugua");
        $objPHPExcel->getProperties()->setTitle("Office 2007 XLSX Test Document");
        $objPHPExcel->getProperties()->setSubject("Office 2007 XLSX Test Document");
        $objPHPExcel->getProperties()->setDescription(" ");
        // Add some data
        // echo date('H:i:s') . " Add some data\n";
        $objPHPExcel->setActiveSheetIndex(0);
        $rowExec = 1;
        //Looping through the cells
        $column = 0;
        //echo '<pre>';print_r($data);echo'</pre>';die;
        // Header row: same prefix-stripping/abbreviation-expanding title logic
        // as normal(), but driven by the explicit $data['columns'] list.
        foreach ($data['columns'] as $cell) {
            //echo $column . $rowExec; die;
            $objPHPExcel->getActiveSheet()->setCellValueByColumnAndRow($column, $rowExec, ucwords(str_replace('comm','commodity',str_replace('ar_','',str_replace('as_','',str_replace('ae_','',str_replace('ac_','',str_replace('li_','',
                str_replace('lq_','',str_replace('fac', 'facility', str_replace('_', ' ', $cell)))))))))));
            $objPHPExcel->getActiveSheet()->getStyle(PHPExcel_Cell::stringFromColumnIndex($column) . $rowExec)->getFont()->setBold(true)->setSize(14);
            $objPHPExcel->getActiveSheet()->getColumnDimension(PHPExcel_Cell::stringFromColumnIndex($column))->setAutoSize(true);
            $column++;
        }
        // Data rows start below the header.
        $rowExec = 2;
        foreach ($data['data'] as $key=>$rowset) {
            //Looping through the cells per facility
            $column = 0;
            //var_dump($rowset);die;
            foreach($data['columns'] as $title){
                if (array_key_exists($title,$rowset)) {
                    $objPHPExcel->getActiveSheet()->setCellValueByColumnAndRow($column, $rowExec, $rowset[$title]);
                }
                else{
                    // Missing key for this row: leave the cell empty.
                    $objPHPExcel->getActiveSheet()->setCellValueByColumnAndRow($column, $rowExec, "");
                }
                $column++;
            }
            $rowExec++;
        }
        //die ;
        // Rename sheet
        // echo date('H:i:s') . " Rename sheet\n";
        $objPHPExcel->getActiveSheet()->setTitle('Simple');
        // Save Excel 2007 file
        //echo date('H:i:s') . " Write to Excel2007 format\n";
        $objWriter = new PHPExcel_Writer_Excel2007($objPHPExcel);
        // We'll be outputting an excel file
        header('Content-type: application/vnd.ms-excel');
        // It will be called file.xls
        header('Content-Disposition: attachment; filename=' . $filename . '.xlsx');
        // Write file to the browser
        $objWriter->save('php://output');
        // Echo done
        //echo date('H:i:s') . " Done writing file.\r\n";
    }
}
| karsanrichard/southern_cross | application/modules/export/controllers/excel_handler.php | PHP | apache-2.0 | 5,704 |
<!DOCTYPE html>
<!--[if IEMobile 7]><html class="no-js iem7 oldie linen"><![endif]-->
<!--[if (IE 7)&!(IEMobile)]><html class="no-js ie7 oldie linen" lang="en"><![endif]-->
<!--[if (IE 8)&!(IEMobile)]><html class="no-js ie8 oldie linen" lang="en"><![endif]-->
<!--[if (IE 9)&!(IEMobile)]><html class="no-js ie9 linen" lang="en"><![endif]-->
<!--[if (gt IE 9)|(gt IEMobile 7)]><!--><html class="no-js linen" lang="en"><!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Developr</title>
<meta name="description" content="">
<meta name="author" content="">
<!-- http://davidbcalhoun.com/2010/viewport-metatag -->
<meta name="HandheldFriendly" content="True">
<meta name="MobileOptimized" content="320">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
<!-- For all browsers -->
<link rel="stylesheet" href="css/reset.css?v=1">
<link rel="stylesheet" href="css/style.css?v=1">
<link rel="stylesheet" href="css/colors.css?v=1">
<link rel="stylesheet" media="print" href="css/print.css?v=1">
<!-- For progressively larger displays -->
<link rel="stylesheet" media="only all and (min-width: 480px)" href="css/480.css?v=1">
<link rel="stylesheet" media="only all and (min-width: 768px)" href="css/768.css?v=1">
<link rel="stylesheet" media="only all and (min-width: 992px)" href="css/992.css?v=1">
<link rel="stylesheet" media="only all and (min-width: 1200px)" href="css/1200.css?v=1">
<!-- For Retina displays -->
<link rel="stylesheet" media="only all and (-webkit-min-device-pixel-ratio: 1.5), only screen and (-o-min-device-pixel-ratio: 3/2), only screen and (min-device-pixel-ratio: 1.5)" href="css/2x.css?v=1">
<!-- Additional styles -->
<link rel="stylesheet" href="css/styles/form.css?v=1">
<link rel="stylesheet" href="css/styles/switches.css?v=1">
<!-- Login pages styles -->
<link rel="stylesheet" media="screen" href="css/login.css?v=1">
<!-- JavaScript at bottom except for Modernizr -->
<script src="js/libs/modernizr.custom.js"></script>
<!-- For Modern Browsers -->
<link rel="shortcut icon" href="img/favicons/favicon.png">
<!-- For everything else -->
<link rel="shortcut icon" href="img/favicons/favicon.ico">
<!-- For retina screens -->
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="img/favicons/apple-touch-icon-retina.png">
<!-- For iPad 1-->
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="img/favicons/apple-touch-icon-ipad.png">
<!-- For iPhone 3G, iPod Touch and Android -->
<link rel="apple-touch-icon-precomposed" href="img/favicons/apple-touch-icon.png">
<!-- iOS web-app metas -->
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<!-- Startup image for web apps -->
<link rel="apple-touch-startup-image" href="img/splash/ipad-landscape.png" media="screen and (min-device-width: 481px) and (max-device-width: 1024px) and (orientation:landscape)">
<link rel="apple-touch-startup-image" href="img/splash/ipad-portrait.png" media="screen and (min-device-width: 481px) and (max-device-width: 1024px) and (orientation:portrait)">
<link rel="apple-touch-startup-image" href="img/splash/iphone.png" media="screen and (max-device-width: 320px)">
<!-- Microsoft clear type rendering -->
<meta http-equiv="cleartype" content="on">
<!-- IE9 Pinned Sites: http://msdn.microsoft.com/en-us/library/gg131029.aspx -->
<meta name="application-name" content="Developr Admin Skin">
<meta name="msapplication-tooltip" content="Cross-platform admin template.">
<meta name="msapplication-starturl" content="http://www.display-inline.fr/demo/developr">
<!-- These custom tasks are examples, you need to edit them to show actual pages -->
<meta name="msapplication-task" content="name=Agenda;action-uri=http://www.display-inline.fr/demo/developr/agenda.html;icon-uri=http://www.display-inline.fr/demo/developr/img/favicons/favicon.ico">
<meta name="msapplication-task" content="name=My profile;action-uri=http://www.display-inline.fr/demo/developr/profile.html;icon-uri=http://www.display-inline.fr/demo/developr/img/favicons/favicon.ico">
</head>
<body>
<div id="container">
<hgroup id="login-title" class="large-margin-bottom">
<h1 class="login-title-image">Developr</h1>
<h5>© Your Company</h5>
</hgroup>
<form method="post" action="" id="form-login">
<ul class="inputs black-input large">
<!-- The autocomplete="off" attributes is the only way to prevent webkit browsers from filling the inputs with yellow -->
<li><span class="icon-user mid-margin-right"></span><input type="text" name="login" id="login" value="" class="input-unstyled" placeholder="Login" autocomplete="off"></li>
<li><span class="icon-lock mid-margin-right"></span><input type="password" name="pass" id="pass" value="" class="input-unstyled" placeholder="Password" autocomplete="off"></li>
</ul>
<button type="submit" class="button glossy full-width huge">Login</button>
</form>
</div>
<!-- JavaScript at the bottom for fast page loading -->
<!-- Scripts -->
<script src="js/libs/jquery-1.8.2.min.js"></script>
<script src="js/setup.js"></script>
<!-- Template functions -->
<script src="js/developr.input.js"></script>
<script src="js/developr.message.js"></script>
<script src="js/developr.notify.js"></script>
<script src="js/developr.tooltip.js"></script>
<script>
/*
* How do I hook my login script to this?
* --------------------------------------
*
* This script is meant to be non-obtrusive: if the user has disabled javascript or if an error occurs, the login form
* works fine without ajax.
*
* The only part you need to edit is the login script between the EDIT SECTION tags, which does inputs validation
* and send data to server. For instance, you may keep the validation and add an AJAX call to the server with the
* credentials, then redirect to the dashboard or display an error depending on server return.
*
* Or if you don't trust AJAX calls, just remove the event.preventDefault() part and let the form be submitted.
*/
$(document).ready(function()
{
/*
* JS login effect
* This script will enable effects for the login page
*/
// Elements
var doc = $('html').addClass('js-login'),
container = $('#container'),
formLogin = $('#form-login'),
// If layout is centered
centered;
/******* EDIT THIS SECTION *******/
/*
* AJAX login
* This function will handle the login process through AJAX
*/
/*
 * Login form submit handler: validates the inputs and (in this demo)
 * simulates a server-side credentials check. Without JavaScript the form
 * still submits normally; with it, submission is intercepted via
 * event.preventDefault() once validation passes.
 */
formLogin.submit(function(event)
{
	// Trimmed input values
	var login = $.trim($('#login').val()),
		pass = $.trim($('#pass').val());

	// Check inputs
	if (login.length === 0)
	{
		// Display message
		displayError('Please fill in your login');
		return false;
	}
	else if (pass.length === 0)
	{
		// Remove empty login message if displayed
		formLogin.clearMessages('Please fill in your login');

		// Display message
		displayError('Please fill in your password');
		return false;
	}
	else
	{
		// Remove previous messages
		formLogin.clearMessages();

		// Show progress
		displayLoading('Checking credentials...');

		// Stop normal behavior (fix: preventDefault was previously called twice)
		event.preventDefault();

		/*
		 * This is where you may do your AJAX call, for instance:
		 * $.ajax(url, {
		 *     data: {
		 *         login: login,
		 *         pass: pass
		 *     },
		 *     success: function(data)
		 *     {
		 *         if (data.logged)
		 *         {
		 *             document.location.href = 'login.html';
		 *         }
		 *         else
		 *         {
		 *             formLogin.clearMessages();
		 *             displayError('Invalid user/password, please try again');
		 *         }
		 *     },
		 *     error: function()
		 *     {
		 *         formLogin.clearMessages();
		 *         displayError('Error while contacting server, please try again');
		 *     }
		 * });
		 */

		// Simulate server-side check
		setTimeout(function() {
			document.location.href = './';
		}, 2000);
	}
});
/******* END OF EDIT SECTION *******/
// Handle resizing (mostly for debugging)
/*
 * Keep the login form layout consistent when the viewport is resized
 * (mostly useful while debugging). Updates the shared `centered` flag and
 * either clears the vertical offset (mobile layout) or re-centers the form.
 */
function handleLoginResize()
{
	// Centered mode is detected from the container's CSS positioning
	centered = (container.css('position') === 'absolute');

	if (centered)
	{
		// Re-center only when no vertical offset has been applied yet
		if (parseInt(container.css('margin-top'), 10) === 0)
		{
			centerForm(false);
		}
	}
	else
	{
		// Mobile layout: clear any previously applied offset
		container.css('margin-top', '');
	}
}
// Register and first call
$(window).bind('normalized-resize', handleLoginResize);
handleLoginResize();
/*
* Center function
* @param boolean animate whether or not to animate the position change
* @param string|element|array any jQuery selector, DOM element or set of DOM elements which should be ignored
* @return void
*/
/*
 * Vertically center the login form (only in centered layout mode).
 * @param boolean animate whether or not to animate the position change
 * @param string|element|array ignore any jQuery selector, DOM element or set of DOM elements which should be ignored
 * @return void
 */
function centerForm(animate, ignore)
{
	// Nothing to do in the mobile (non-centered) layout
	if (!centered)
	{
		return;
	}

	// Total height: the form itself plus every sibling we should count
	var others = formLogin.siblings(),
		totalHeight = formLogin.outerHeight();

	if (ignore)
	{
		others = others.not(ignore);
	}

	others.each(function()
	{
		totalHeight += $(this).outerHeight(true);
	});

	// Shift the container up by half of the total height
	container[animate ? 'animate' : 'css']({ marginTop: -Math.round(totalHeight/2)+'px' });
}
// Initial vertical adjust
centerForm(false);
/**
* Function to display error messages
* @param string message the error to display
*/
/**
 * Display an error message above the login form and re-center the layout.
 * @param string message the error to display
 */
function displayError(message)
{
	// Fix: the original re-declared the `message` parameter with `var`,
	// shadowing the error text with the returned message element. Keep the
	// element in its own variable instead.
	var messageElement = formLogin.message(message, {
		append: false,
		arrow: 'bottom',
		classes: ['red-gradient'],
		animate: false // We'll do animation later, we need to know the message height first
	});

	// Vertical centering (where we need the message height)
	centerForm(true, 'fast');

	// Watch for closing and show with effect
	messageElement.bind('endfade', function(event)
	{
		// This will be called once the message has faded away and is removed
		centerForm(true, messageElement.get(0));
	}).hide().slideDown('fast');
}
/**
* Function to display loading messages
* @param string message the message to display
*/
/**
 * Display a (non-closable, striped) progress message above the login form.
 * @param string message the message to display
 */
function displayLoading(message)
{
	// Fix: the original re-declared the `message` parameter with `var`,
	// shadowing the text with the returned message element. Keep the
	// element in its own variable instead.
	var messageElement = formLogin.message('<strong>'+message+'</strong>', {
		append: false,
		arrow: 'bottom',
		classes: ['blue-gradient', 'align-center'],
		stripes: true,
		darkStripes: false,
		closable: false,
		animate: false // We'll do animation later, we need to know the message height first
	});

	// Vertical centering (where we need the message height)
	centerForm(true, 'fast');

	// Watch for closing and show with effect
	messageElement.bind('endfade', function(event)
	{
		// This will be called once the message has faded away and is removed
		centerForm(true, messageElement.get(0));
	}).hide().slideDown('fast');
}
});
// What about a notification?
// Demo-only: advertises the alternate login page styles.
// NOTE(review): autoClose is false, so `delay` presumably controls when the
// notification appears rather than when it closes — confirm against
// developr.notify.js.
notify('Alternate login', 'Want to see another login page style? Try the <a href="login-box.html"><b>box version</b></a> or the <a href="login-full.html"><b>full version</b></a>.', {
	autoClose: false,
	delay: 2500,
	icon: 'img/demo/icon.png'
});
</script>
</body>
</html> | johnson1994/UMESpage | testx/login.html | HTML | apache-2.0 | 11,490 |
package racoon.rsim.monitors;
import java.util.Locale;

import racoon.input.DOObjective;
import racoon.input.DesignObjectives;
import racoon.rsim.RSProtocol;
/**
* Define classes that allows to measure the value of a custom {@link DOObjective}
* for a given {@link RSProtocol}.
*
* @author Guido Lena Cota <guido.lena@unimi.it>
*/
/**
 * Base class for monitors that measure the value of a custom {@link DOObjective}
 * for a given {@link RSProtocol}.
 *
 * <p>Subclasses implement {@link #measure()} to fill {@code metricsMeasures}
 * and {@link #metricsToString()} to describe the metric names.</p>
 *
 * @author Guido Lena Cota <guido.lena@unimi.it>
 */
public abstract class DOMonitor {

    // FIELDS
    // ------------------------------------------------------------------------------------------

    /** The names of the metrics, parsed from the comma-separated constructor argument. */
    protected String[] metricsName;

    /** The values of the measurements, parallel to {@link #metricsName}. */
    protected double[] metricsMeasures;

    /** The identifier of the {@link RSProtocol} to observe. */
    protected int protocolID;

    // INITIALIZATION
    // ------------------------------------------------------------------------------------------

    /**
     * Default constructor.
     *
     * @param protocolID identifier of the {@link RSProtocol} to observe
     * @param metrics comma-separated list of metric names
     */
    public DOMonitor(int protocolID, String metrics) {
        this.protocolID = protocolID;
        metricsName = metrics.split(",");
    }

    // METHODS
    // ------------------------------------------------------------------------------------------

    /**
     * Do the measurement of the {@link DOObjective}s associated with the monitor
     * object that implements this interface.
     */
    public abstract void measure();

    /**
     * Return the names of each metric in a tabular format (separation term: semi-colon).
     */
    public abstract String metricsToString();

    /**
     * Return the array of doubles that stores the value of each metric.
     */
    public double[] getMeasurements() {
        return metricsMeasures;
    }

    /**
     * Verify the achievement of the {@link DOObjective}s associated with the monitor
     * object that implements this interface. Metrics without a registered
     * objective are skipped.
     */
    public void verifyObjectives() {
        // verify the design objectives
        for (int i = 0; i < metricsName.length; i++) {
            if (DesignObjectives.getObjective(metricsName[i]) != null) {
                DesignObjectives.getObjective(metricsName[i]).verifyObjective(metricsMeasures[i]);
            }
        }
    }

    /**
     * Return the measurements of each metric in a tabular format (separation
     * term: semi-colon). Fix: values are formatted with {@link Locale#ROOT} so
     * the semicolon-separated output is stable regardless of the machine
     * locale (a comma decimal separator would corrupt the tabular format).
     */
    public String measurementsToString() {
        // StringBuilder: no synchronization needed for this local buffer
        StringBuilder measurements = new StringBuilder();
        for (int i = 0; i < metricsMeasures.length; i++) {
            measurements.append(String.format(Locale.ROOT, "%.2f", metricsMeasures[i])).append(';');
        }
        return measurements.toString();
    }

    /**
     * Return a string with all metrics and associated values, in a human-readable way.
     */
    @Override
    public String toString() {
        StringBuilder output = new StringBuilder();
        output.append(this.getClass().getName()).append('\n');
        for (int i = 0; i < metricsName.length; i++) {
            output.append(metricsName[i]).append(":\t").append(metricsMeasures[i]).append('\n');
        }
        output.append('\n');
        return output.toString();
    }
}
| glenacota/racoon | racoon-src/src/racoon/rsim/monitors/DOMonitor.java | Java | apache-2.0 | 2,801 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
#ge0rgi:added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
# Module-level logger and quota engines shared by this manager.
LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS

# Volume statuses that allow removing a volume from a consistency group.
VALID_REMOVE_VOL_FROM_CG_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
# Volume statuses that allow removing a volume from a generic group.
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
# Volume statuses that allow adding a volume to a consistency group.
VALID_ADD_VOL_TO_CG_STATUS = (
    'available',
    'in-use')
# Volume statuses that allow adding a volume to a generic group.
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
# Source statuses required when creating a CG/group from a snapshot or
# from another CG/group.
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
# Shorthand for the volume attachment list versioned object.
VA_LIST = objects.VolumeAttachmentList

# Configuration options owned by the volume manager.
volume_manager_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMVolumeDriver',
               help='Driver to use for volume creation'),
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
    cfg.StrOpt('zoning_mode',
               help='FC Zoning mode configured'),
    cfg.StrOpt('extra_capabilities',
               default='{}',
               help='User defined capabilities, a JSON formatted string '
                    'specifying key/value pairs. The key/value pairs can '
                    'be used by the CapabilitiesFilter to select between '
                    'backends when requests specify volume types. For '
                    'example, specifying a service level or the geographical '
                    'location of a backend, then creating a volume type to '
                    'allow the user to select by these different '
                    'properties.'),
    cfg.BoolOpt('suppress_requests_ssl_warnings',
                default=False,
                help='Suppress requests library SSL certificate warnings.'),
]

CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)

# Mapping of deprecated volume driver import paths to their current
# locations; __init__ translates old paths (with a deprecation warning)
# before importing the driver.
MAPPING = {
    'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
    'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
    'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
    'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
    'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
    'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
    'cinder.volume.drivers.ibm.xiv_ds8k':
    'cinder.volume.drivers.ibm.ibm_storage',
    'cinder.volume.drivers.emc.scaleio':
    'cinder.volume.drivers.dell_emc.scaleio.driver',
    'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
    'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
    'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
    'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
    'cinder.volume.drivers.datera.DateraDriver':
    'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
    'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
    'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
    'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
    'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
    'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
    'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the driver from the one specified in args, or from flags.

        :param volume_driver: import path of the volume driver; when None the
            path is read from the configuration (per-backend or default).
        :param service_name: backend name, used as the config group.
        """
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        self.configuration = config.Configuration(volume_manager_opts,
                                                  config_group=service_name)
        self.stats = {}

        if not volume_driver:
            # Get from configuration, which will get the default
            # if its not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            # Translate deprecated driver paths to their current location.
            LOG.warning(_LW("Driver path %s is deprecated, update your "
                            "configuration to the new path."), volume_driver)
            volume_driver = MAPPING[volume_driver]

        vol_db_empty = self._set_voldb_empty_at_startup_indicator(
            context.get_admin_context())
        LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)

        # We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
        curr_active_backend_id = None
        svc_host = vol_utils.extract_host(self.host, 'backend')
        try:
            service = objects.Service.get_by_args(
                context.get_admin_context(),
                svc_host,
                constants.VOLUME_BINARY)
        except exception.ServiceNotFound:
            # NOTE(jdg): This is to solve problems with unit tests
            LOG.info(_LI("Service not found for updating "
                         "active_backend_id, assuming default "
                         "for driver init."))
        else:
            curr_active_backend_id = service.active_backend_id

        if self.configuration.suppress_requests_ssl_warnings:
            LOG.warning(_LW("Suppressing requests library SSL Warnings"))
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecureRequestWarning)
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecurePlatformWarning)

        self.key_manager = key_manager.API(CONF)
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db,
            host=self.host,
            cluster_name=self.cluster,
            is_vol_db_empty=vol_db_empty,
            active_backend_id=curr_active_backend_id)

        # Clustered (Active-Active) deployments need explicit driver support.
        if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
            msg = _LE('Active-Active configuration is not currently supported '
                      'by driver %s.') % volume_driver
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        self.message_api = message_api.API()

        # Wrap the driver for tracing when osprofiler is enabled and present.
        if CONF.profiler.enabled and profiler is not None:
            self.driver = profiler.trace_cls("driver")(self.driver)

        try:
            self.extra_capabilities = jsonutils.loads(
                self.driver.configuration.extra_capabilities)
        except AttributeError:
            # Driver config has no extra_capabilities option defined.
            self.extra_capabilities = {}
        except Exception:
            # Malformed JSON is a fatal configuration error: log and re-raise.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Invalid JSON: %s"),
                          self.driver.configuration.extra_capabilities)

        # Optional image-volume cache, sized from the backend configuration.
        if self.driver.configuration.safe_get(
                'image_volume_cache_enabled'):
            max_cache_size = self.driver.configuration.safe_get(
                'image_volume_cache_max_size_gb')
            max_cache_entries = self.driver.configuration.safe_get(
                'image_volume_cache_max_count')
            self.image_volume_cache = image_cache.ImageVolumeCache(
                self.db,
                cinder_volume.API(),
                max_cache_size,
                max_cache_entries
            )
            LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
                     {'host': self.host})
        else:
            LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
                     {'host': self.host})
            self.image_volume_cache = None

        # Trusted-computing asset tag filter (ge0rgi extension, see module
        # header comment).
        if CONF.trusted_computing:
            self.asset_tag_filter = TrustAssertionFilter()
    def _count_allocated_capacity(self, ctxt, volume):
        """Add a volume's size to the per-pool allocated-capacity stats.

        Resolves which pool the volume belongs to (updating the DB host
        entry for legacy volumes when the driver can report the pool) and
        adds the volume's size to ``self.stats``.

        :param ctxt: admin context used for DB updates.
        :param volume: volume (dict-like) whose size is counted.
        """
        pool = vol_utils.extract_host(volume['host'], 'pool')
        if pool is None:
            # No pool name encoded in host, so this is a legacy
            # volume created before pool is introduced, ask
            # driver to provide pool info if it has such
            # knowledge and update the DB.
            try:
                pool = self.driver.get_pool(volume)
            except Exception:
                # Best-effort: skip this volume rather than failing startup.
                LOG.exception(_LE('Fetch volume pool name failed.'),
                              resource=volume)
                return

            if pool:
                new_host = vol_utils.append_host(volume['host'],
                                                 pool)
                self.db.volume_update(ctxt, volume['id'],
                                      {'host': new_host})
            else:
                # Otherwise, put them into a special fixed pool with
                # volume_backend_name being the pool name, if
                # volume_backend_name is None, use default pool name.
                # This is only for counting purpose, doesn't update DB.
                pool = (self.driver.configuration.safe_get(
                    'volume_backend_name') or vol_utils.extract_host(
                        volume['host'], 'pool', True))
        try:
            pool_stat = self.stats['pools'][pool]
        except KeyError:
            # First volume in the pool
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=0)
            pool_stat = self.stats['pools'][pool]
        pool_sum = pool_stat['allocated_capacity_gb']
        pool_sum += volume['size']

        # Update both the per-pool and the backend-wide totals.
        self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
        self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this hosts volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): snapshots are slighty harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info(_LI('Including all resources from host %(host)s in cluster '
'%(cluster)s.'),
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'and %(num_cache)s image volume caches from host '
'%(host)s have been included in cluster %(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
    def init_host(self, added_to_cluster=None, **kwargs):
        """Perform any required initialization.

        Sets up the driver, recomputes allocated capacity from existing
        volumes, re-exports in-use volumes and publishes capabilities.
        Returns early (leaving the driver uninitialized) when the driver is
        unsupported-and-disabled or when setup fails.

        :param added_to_cluster: truthy when this host was just added to a
            cluster; its resources are then moved into that cluster first.
        """
        ctxt = context.get_admin_context()
        if not self.driver.supported:
            utils.log_unsupported_driver_warning(self.driver)

            if not self.configuration.enable_unsupported_driver:
                LOG.error(_LE("Unsupported drivers are disabled."
                              " You can re-enable by adding "
                              "enable_unsupported_driver=True to the "
                              "driver section in cinder.conf"),
                          resource={'type': 'driver',
                                    'id': self.__class__.__name__})
                return

        # If we have just added this host to a cluster we have to include all
        # our resources in that cluster.
        if added_to_cluster:
            self._include_resources_in_cluster(ctxt)

        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception:
            LOG.exception(_LE("Failed to initialize driver."),
                          resource={'type': 'driver',
                                    'id': self.__class__.__name__})
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return

        # Initialize backend capabilities list
        self.driver.init_capabilities()

        # Reconcile DB state with the backend before counting capacity.
        volumes = self._get_my_volumes(ctxt)
        snapshots = self._get_my_snapshots(ctxt)
        self._sync_provider_info(ctxt, volumes, snapshots)
        # FIXME volume count for exporting is wrong

        self.stats['pools'] = {}
        self.stats.update({'allocated_capacity_gb': 0})

        try:
            for volume in volumes:
                # available volume should also be counted into allocated
                if volume['status'] in ['in-use', 'available']:
                    # calculate allocated capacity for driver
                    self._count_allocated_capacity(ctxt, volume)

                    try:
                        if volume['status'] in ['in-use']:
                            self.driver.ensure_export(ctxt, volume)
                    except Exception:
                        # Per-volume failure: mark it error but keep going.
                        LOG.exception(_LE("Failed to re-export volume, "
                                          "setting to ERROR."),
                                      resource=volume)
                        volume.conditional_update({'status': 'error'},
                                                  {'status': 'in-use'})
            # All other cleanups are processed by parent class CleanableManager
        except Exception:
            LOG.exception(_LE("Error during re-export on driver init."),
                          resource=volume)
            return

        self.driver.set_throttle()

        # at this point the driver is considered initialized.
        # NOTE(jdg): Careful though because that doesn't mean
        # that an entry exists in the service table
        self.driver.set_initialized()

        # Keep the image tmp file clean when init host.
        backend_name = vol_utils.extract_host(self.service_topic_queue)
        image_utils.cleanup_temporary_file(backend_name)

        # collect and publish service capabilities
        self.publish_service_capabilities(ctxt)
        LOG.info(_LI("Driver initialization completed successfully."),
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})

        # Make sure to call CleanableManager to do the cleanup
        super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
                                             **kwargs)
    def init_host_with_rpc(self):
        """Finish initialization that requires RPC to be available.

        Refreshes driver stats and records the service's replication status
        (enabled/disabled) unless the service has already failed over.
        Returns early when the driver is not initialized.
        """
        LOG.info(_LI("Initializing RPC dependent components of volume "
                     "driver %(driver_name)s (%(version)s)"),
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})

        try:
            # Make sure the driver is initialized first
            utils.log_unsupported_driver_warning(self.driver)
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            LOG.error(_LE("Cannot complete RPC initialization because "
                          "driver isn't initialized properly."),
                      resource={'type': 'driver',
                                'id': self.driver.__class__.__name__})
            return

        stats = self.driver.get_volume_stats(refresh=True)
        svc_host = vol_utils.extract_host(self.host, 'backend')
        try:
            service = objects.Service.get_by_args(
                context.get_admin_context(),
                svc_host,
                constants.VOLUME_BINARY)
        except exception.ServiceNotFound:
            # Missing service row here is fatal: re-raise after logging.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Service not found for updating "
                              "replication_status."))

        # Do not clobber a FAILED_OVER status; otherwise derive the status
        # from the driver-reported replication capability.
        if service.replication_status != (
                fields.ReplicationStatus.FAILED_OVER):
            if stats and stats.get('replication_enabled', False):
                service.replication_status = fields.ReplicationStatus.ENABLED
            else:
                service.replication_status = fields.ReplicationStatus.DISABLED

        service.save()
        LOG.info(_LI("Driver post RPC initialization completed successfully."),
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})
    def _do_cleanup(self, ctxt, vo_resource):
        """Clean up a volume or snapshot left in a transient state.

        Invoked by the CleanableManager machinery during host init.

        :param ctxt: admin context.
        :param vo_resource: versioned object (Volume or Snapshot) to clean.
        :returns: True when this method fully handled the cleanup (including
            the worker row), None otherwise.
        """
        if isinstance(vo_resource, objects.Volume):
            if vo_resource.status == 'downloading':
                # NOTE: execution intentionally falls through to the generic
                # status reset at the bottom, which marks the volume 'error'.
                self.driver.clear_download(ctxt, vo_resource)

            elif vo_resource.status == 'uploading':
                # Set volume status to available or in-use.
                self.db.volume_update_status_based_on_attachment(
                    ctxt, vo_resource.id)

            elif vo_resource.status == 'deleting':
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            vo_resource, cascade=True)
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, vo_resource, cascade=True)
                # We signal that we take care of cleaning the worker ourselves
                # (with set_workers decorator in delete_volume method) so
                # do_cleanup method doesn't need to remove it.
                return True

        # For Volume creating and downloading and for Snapshot downloading
        # statuses we have to set status to error
        if vo_resource.status in ('creating', 'downloading'):
            vo_resource.status = 'error'
            vo_resource.save()
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not vol_utils.hosts_are_equivalent(resource.host, self.host)):
pool = vol_utils.extract_host(resource.host, 'pool')
resource.host = vol_utils.append_host(self.host, pool)
resource.save()
    @objects.Volume.set_workers
    def create_volume(self, context, volume, request_spec=None,
                      filter_properties=None, allow_reschedule=True):
        """Creates the volume.

        Builds and runs the taskflow create-volume flow, holding a lock on
        the source snapshot/volume (if any) so it cannot be deleted while
        the new volume is built from it.

        :param context: request context.
        :param volume: the volume object to create.
        :param request_spec: scheduler request spec (snapshot/source ids).
        :param filter_properties: scheduler filter properties.
        :param allow_reschedule: whether the flow may reschedule on failure.
        :returns: the new volume's id.
        :raises CinderException: if the flow could not be constructed.
        """
        # Log about unsupported drivers
        utils.log_unsupported_driver_warning(self.driver)

        # Make sure the host in the DB matches our own when clustered
        self._set_resource_host(volume)

        context_elevated = context.elevated()
        if filter_properties is None:
            filter_properties = {}

        if request_spec is None:
            request_spec = objects.RequestSpec()

        try:
            # NOTE(flaper87): Driver initialization is
            # verified by the task itself.
            flow_engine = create_volume.get_flow(
                context_elevated,
                self,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume,
                allow_reschedule,
                context,
                request_spec,
                filter_properties,
                image_volume_cache=self.image_volume_cache,
            )
        except Exception:
            msg = _("Create manager volume flow failed.")
            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
            raise exception.CinderException(msg)

        snapshot_id = request_spec.get('snapshot_id')
        source_volid = request_spec.get('source_volid')
        source_replicaid = request_spec.get('source_replicaid')

        if snapshot_id is not None:
            # Make sure the snapshot is not deleted until we are done with it.
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        elif source_replicaid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
        else:
            locked_action = None

        def _run_flow():
            # This code executes create volume flow. If something goes wrong,
            # flow reverts all job that was done and reraises an exception.
            # Otherwise, all data that was generated by flow becomes available
            # in flow engine's storage.
            with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
                flow_engine.run()

        # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
        # decide if allocated_capacity should be incremented.
        rescheduled = False

        try:
            if locked_action is None:
                _run_flow()
            else:
                with coordination.Lock(locked_action):
                    _run_flow()
        finally:
            try:
                flow_engine.storage.fetch('refreshed')
            except tfe.NotFound:
                # If there's no vol_ref, then flow is reverted. Lets check out
                # if rescheduling occurred.
                try:
                    rescheduled = flow_engine.storage.get_revert_result(
                        create_volume.OnFailureRescheduleTask.make_name(
                            [create_volume.ACTION]))
                except tfe.NotFound:
                    pass

            if not rescheduled:
                # NOTE(dulek): Volume wasn't rescheduled so we need to update
                # volume stats as these are decremented on delete.
                self._update_allocated_capacity(volume)

        LOG.info(_LI("Created volume successfully."), resource=volume)
        return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = vol_utils.extract_host(resource.service_topic_queue)
backend = vol_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
    @coordination.synchronized('{volume.id}-{f_name}')
    @objects.Volume.set_workers
    def delete_volume(self, context, volume, unmanage_only=False,
                      cascade=False):
        """Deletes and unexports volume.

        1. Delete a volume(normal case)
           Delete a volume and update quotas.

        2. Delete a migration volume
           If deleting the volume in a migration, we want to skip
           quotas but we need database updates for the volume.

        :param context: request context; elevated internally.
        :param volume: the volume object to delete.
        :param unmanage_only: when True, remove the volume from Cinder's
            management without destroying the backend storage.
        :param cascade: when True, delete the volume's snapshots first.
        :raises VolumeAttached: if the volume is still attached.
        :raises Invalid: if unmanage_only and cascade are both set, or the
            volume is not local to this backend.
        """
        context = context.elevated()

        try:
            volume.refresh()
        except exception.VolumeNotFound:
            # NOTE(thingee): It could be possible for a volume to
            # be deleted when resuming deletes from init_host().
            LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
            return

        # Quotas are adjusted against the volume's owning project, which may
        # differ from the caller's (e.g. an admin deleting a user's volume).
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id

        if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume.id)
        self._check_is_our_resource(volume)

        if unmanage_only and cascade:
            # This could be done, but is ruled out for now just
            # for simplicity.
            raise exception.Invalid(
                reason=_("Unmanage and cascade delete options "
                         "are mutually exclusive."))

        # The status 'deleting' is not included, because it only applies to
        # the source volume to be deleted after a migration. No quota
        # needs to be handled for it.
        is_migrating = volume.migration_status not in (None, 'error',
                                                       'success')
        is_migrating_dest = (is_migrating and
                             volume.migration_status.startswith(
                                 'target:'))
        notification = "delete.start"
        if unmanage_only:
            notification = "unmanage.start"
        self._notify_about_volume_usage(context, volume, notification)
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)

            self.driver.remove_export(context, volume)
            if unmanage_only:
                self.driver.unmanage(volume)
            elif cascade:
                LOG.debug('Performing cascade delete.')
                snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                    volume.id)
                # Every snapshot must already be marked 'deleting' (set by
                # the API layer) before we remove it here.
                for s in snapshots:
                    if s.status != 'deleting':
                        self._clear_db(context, is_migrating_dest, volume,
                                       'error_deleting')

                        msg = (_("Snapshot %(id)s was found in state "
                                 "%(state)s rather than 'deleting' during "
                                 "cascade delete.") % {'id': s.id,
                                                       'state': s.status})
                        raise exception.InvalidSnapshot(reason=msg)

                    self.delete_snapshot(context, s)

                LOG.debug('Snapshots deleted, issuing volume delete')
                self.driver.delete_volume(volume)
            else:
                self.driver.delete_volume(volume)
        except exception.VolumeIsBusy:
            LOG.error(_LE("Unable to delete busy volume."),
                      resource=volume)
            # If this is a destination volume, we have to clear the database
            # record to avoid user confusion.
            self._clear_db(context, is_migrating_dest, volume,
                           'available')
            return
        except Exception:
            with excutils.save_and_reraise_exception():
                # If this is a destination volume, we have to clear the
                # database record to avoid user confusion.
                new_status = 'error_deleting'
                if unmanage_only is True:
                    new_status = 'error_unmanaging'

                self._clear_db(context, is_migrating_dest, volume,
                               new_status)

        # If deleting source/destination volume in a migration, we should
        # skip quotas.
        if not is_migrating:
            # Get reservations
            try:
                reservations = None
                if volume.status != 'error_managing_deleting':
                    reserve_opts = {'volumes': -1,
                                    'gigabytes': -volume.size}
                    QUOTAS.add_volume_type_opts(context,
                                                reserve_opts,
                                                volume.volume_type_id)
                    reservations = QUOTAS.reserve(context,
                                                  project_id=project_id,
                                                  **reserve_opts)
            except Exception:
                # Quota bookkeeping failure must not block the delete itself.
                LOG.exception(_LE("Failed to update usages deleting volume."),
                              resource=volume)

        # Delete glance metadata if it exists
        self.db.volume_glance_metadata_delete_by_volume(context, volume.id)

        volume.destroy()

        # If deleting source/destination volume in a migration, we should
        # skip quotas.
        if not is_migrating:
            notification = "delete.end"
            if unmanage_only:
                notification = "unmanage.end"
            self._notify_about_volume_usage(context, volume, notification)

            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

        # Decrement the per-pool allocated-capacity stats for this volume.
        pool = vol_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume.host, 'pool', True)
        size = volume.size

        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] -= size
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=-size)

        self.publish_service_capabilities(context)

        msg = _LI("Deleted volume successfully.")
        if unmanage_only:
            msg = _LI("Unmanaged volume successfully.")
        LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
    @objects.Snapshot.set_workers
    def create_snapshot(self, context, snapshot):
        """Creates and exports the snapshot.

        On driver failure the snapshot is left in ERROR status and the
        original exception is re-raised.  On success the snapshot ends up
        AVAILABLE with progress '100%'.

        :param context: request context (elevated internally)
        :param snapshot: Snapshot object to create on the backend
        :returns: the snapshot id
        """
        context = context.elevated()
        self._notify_about_snapshot_usage(
            context, snapshot, "create.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot.context = context
            model_update = self.driver.create_snapshot(snapshot)
            # Drivers may return provider/model fields to persist.
            if model_update:
                snapshot.update(model_update)
                snapshot.save()
        except Exception:
            # Mark the snapshot ERROR but let the original exception
            # propagate to the caller.
            with excutils.save_and_reraise_exception():
                snapshot.status = fields.SnapshotStatus.ERROR
                snapshot.save()
        vol_ref = self.db.volume_get(context, snapshot.volume_id)
        if vol_ref.bootable:
            # Bootable volumes carry glance metadata that the snapshot
            # needs so that volumes created from it stay bootable.
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot.id, snapshot.volume_id)
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
            except exception.CinderException as ex:
                LOG.exception(_LE("Failed updating snapshot"
                                  " metadata using the provided volumes"
                                  " %(volume_id)s metadata"),
                              {'volume_id': snapshot.volume_id},
                              resource=snapshot)
                snapshot.status = fields.SnapshotStatus.ERROR
                snapshot.save()
                raise exception.MetadataCopyFailure(reason=six.text_type(ex))
        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.progress = '100%'
        snapshot.save()
        self._notify_about_snapshot_usage(context, snapshot, "create.end")
        LOG.info(_LI("Create snapshot completed successfully"),
                 resource=snapshot)
        return snapshot.id
    @coordination.synchronized('{snapshot.id}-{f_name}')
    def delete_snapshot(self, context, snapshot, unmanage_only=False):
        """Deletes and unexports snapshot.

        A busy snapshot is returned to AVAILABLE; any other driver failure
        leaves it in ERROR_DELETING and re-raises.  Quota reservations are
        best-effort: a reservation failure is logged but does not block
        the delete.

        :param context: request context (elevated internally)
        :param snapshot: Snapshot object to remove
        :param unmanage_only: if True, only remove Cinder's record of the
            snapshot without deleting the backend data
        """
        context = context.elevated()
        snapshot._context = context
        project_id = snapshot.project_id
        self._notify_about_snapshot_usage(
            context, snapshot, "delete.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot.context = context
            snapshot.save()
            if unmanage_only:
                self.driver.unmanage_snapshot(snapshot)
            else:
                self.driver.delete_snapshot(snapshot)
        except exception.SnapshotIsBusy:
            # Snapshot still in use on the backend: restore AVAILABLE so
            # the user can retry later, and swallow the error.
            LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
                      resource=snapshot)
            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.save()
            return
        except Exception:
            with excutils.save_and_reraise_exception():
                snapshot.status = fields.SnapshotStatus.ERROR_DELETING
                snapshot.save()
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot.volume_size,
                }
            volume_ref = self.db.volume_get(context, snapshot.volume_id)
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            # Quota bookkeeping failure must not block snapshot deletion.
            reservations = None
            LOG.exception(_LE("Update snapshot usages failed."),
                          resource=snapshot)
        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
        snapshot.destroy()
        self._notify_about_snapshot_usage(context, snapshot, "delete.end")
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        msg = _LI("Delete snapshot completed successfully.")
        if unmanage_only:
            msg = _LI("Unmanage snapshot completed successfully.")
        LOG.info(msg, resource=snapshot)
    @coordination.synchronized('{volume_id}')
    def attach_volume(self, context, volume_id, instance_uuid, host_name,
                      mountpoint, mode, volume=None):
        """Updates db to show volume is attached.

        Validates the requested attach against the volume's current status
        and admin metadata, records the attachment, and asks the driver to
        perform the backend attach.  Returns the (possibly pre-existing)
        VolumeAttachment record.

        :param instance_uuid: UUID of the attaching instance (mutually
            exclusive with host_name for lookup purposes)
        :param host_name: host attaching the volume, if not an instance
        :param mode: 'rw' or 'ro'
        :param volume: Volume object; looked up from volume_id when absent
        :raises InvalidVolume: wrong state or conflicting attach mode
        :raises InvalidUUID: malformed instance_uuid
        :raises InvalidVolumeAttachMode: rw attach of a readonly volume
        """
        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look
            # up the volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)
        # Get admin_metadata. This needs admin context.
        with volume.obj_as_admin():
            volume_metadata = volume.admin_metadata
        # check the volume status before attaching
        if volume.status == 'attaching':
            # A concurrent attach with a different mode is a conflict.
            if (volume_metadata.get('attached_mode') and
               volume_metadata.get('attached_mode') != mode):
                raise exception.InvalidVolume(
                    reason=_("being attached by different mode"))
        if (volume.status == 'in-use' and not volume.multiattach
           and not volume.migration_status):
            raise exception.InvalidVolume(
                reason=_("volume is already attached"))
        host_name_sanitized = utils.sanitize_hostname(
            host_name) if host_name else None
        if instance_uuid:
            attachments = (
                VA_LIST.get_all_by_instance_uuid(
                    context, instance_uuid))
        else:
            attachments = (
                VA_LIST.get_all_by_host(
                    context, host_name_sanitized))
        if attachments:
            # check if volume<->instance mapping is already tracked in DB
            for attachment in attachments:
                if attachment['volume_id'] == volume_id:
                    # Idempotent re-attach: just make sure the status is
                    # consistent and return the existing record.
                    volume.status = 'in-use'
                    volume.save()
                    return attachment
        self._notify_about_volume_usage(context, volume,
                                        "attach.start")
        attachment = volume.begin_attach(mode)
        if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()
            raise exception.InvalidUUID(uuid=instance_uuid)
        if volume_metadata.get('readonly') == 'True' and mode != 'ro':
            # A readonly volume may only be attached 'ro'; surface a user
            # message as well as the exception.
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()
            self.message_api.create(
                context, defined_messages.EventIds.ATTACH_READONLY_VOLUME,
                context.project_id, resource_type=resource_types.VOLUME,
                resource_uuid=volume.id)
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume.id)
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            LOG.info(_LI('Attaching volume %(volume_id)s to instance '
                         '%(instance)s at mountpoint %(mount)s on host '
                         '%(host)s.'),
                     {'volume_id': volume_id, 'instance': instance_uuid,
                      'mount': mountpoint, 'host': host_name_sanitized},
                     resource=volume)
            self.driver.attach_volume(context,
                                      volume,
                                      instance_uuid,
                                      host_name_sanitized,
                                      mountpoint)
        except Exception:
            # Record the failure on the attachment but re-raise.
            with excutils.save_and_reraise_exception():
                attachment.attach_status = (
                    fields.VolumeAttachStatus.ERROR_ATTACHING)
                attachment.save()
        volume = attachment.finish_attach(
            instance_uuid,
            host_name_sanitized,
            mountpoint,
            mode)
        self._notify_about_volume_usage(context, volume, "attach.end")
        LOG.info(_LI("Attach volume completed successfully."),
                 resource=volume)
        return attachment
    @coordination.synchronized('{volume_id}-{f_name}')
    def detach_volume(self, context, volume_id, attachment_id=None,
                      volume=None):
        """Updates db to show volume is detached.

        Resolves the attachment (by id, or implicitly when the volume has
        exactly one), asks the driver to detach, then removes the backend
        export and finalizes the attachment record.

        :param attachment_id: attachment to remove; required when the
            volume has more than one attachment
        :param volume: Volume object; looked up from volume_id when absent
        :raises InvalidVolume: multiple attachments but no attachment_id
        :raises RemoveExportException: export removal failed
        """
        # TODO(vish): refactor this into a more general "unreserve"
        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)
        if attachment_id:
            try:
                attachment = objects.VolumeAttachment.get_by_id(context,
                                                                attachment_id)
            except exception.VolumeAttachmentNotFound:
                LOG.info(_LI("Volume detach called, but volume not attached."),
                         resource=volume)
                # We need to make sure the volume status is set to the correct
                # status. It could be in detaching status now, and we don't
                # want to leave it there.
                volume.finish_detach(attachment_id)
                return
        else:
            # We can try and degrade gracefully here by trying to detach
            # a volume without the attachment_id here if the volume only has
            # one attachment. This is for backwards compatibility.
            attachments = volume.volume_attachment
            if len(attachments) > 1:
                # There are more than 1 attachments for this volume
                # we have to have an attachment id.
                msg = _("Detach volume failed: More than one attachment, "
                        "but no attachment_id provided.")
                LOG.error(msg, resource=volume)
                raise exception.InvalidVolume(reason=msg)
            elif len(attachments) == 1:
                attachment = attachments[0]
            else:
                # there aren't any attachments for this volume.
                # so set the status to available and move on.
                LOG.info(_LI("Volume detach called, but volume not attached."),
                         resource=volume)
                volume.status = 'available'
                volume.attach_status = fields.VolumeAttachStatus.DETACHED
                volume.save()
                return
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            LOG.info(_LI('Detaching volume %(volume_id)s from instance '
                         '%(instance)s.'),
                     {'volume_id': volume_id,
                      'instance': attachment.get('instance_uuid')},
                     resource=volume)
            self.driver.detach_volume(context, volume, attachment)
        except Exception:
            # Flag the attachment as failed but let the error propagate.
            with excutils.save_and_reraise_exception():
                self.db.volume_attachment_update(
                    context, attachment.get('id'), {
                        'attach_status':
                            fields.VolumeAttachStatus.ERROR_DETACHING})
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F)
        # this was necessary to convert in-use volumes from
        # int ID's to UUID's. Don't need this any longer
        # We're going to remove the export here
        # (delete the iscsi target)
        try:
            utils.require_driver_initialized(self.driver)
            self.driver.remove_export(context.elevated(), volume)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Detach volume failed, due to "
                                  "uninitialized driver."),
                              resource=volume)
        except Exception as ex:
            LOG.exception(_LE("Detach volume failed, due to "
                              "remove-export failure."),
                          resource=volume)
            raise exception.RemoveExportException(volume=volume_id,
                                                  reason=six.text_type(ex))
        volume.finish_detach(attachment.id)
        self._notify_about_volume_usage(context, volume, "detach.end")
        LOG.info(_LI("Detach volume completed successfully."), resource=volume)
    def _create_image_cache_volume_entry(self, ctx, volume_ref,
                                         image_id, image_meta):
        """Create a new image-volume and cache entry for it.

        This assumes that the image has already been downloaded and stored
        in the volume described by the volume_ref.

        Best-effort: failures are logged and the partially-created clone
        is deleted, but no exception is propagated to the caller since a
        missing cache entry must not fail the original request.
        """
        image_volume = None
        try:
            # Evict older entries if needed; skip caching when space
            # cannot be guaranteed.
            if not self.image_volume_cache.ensure_space(ctx, volume_ref):
                LOG.warning(_LW('Unable to ensure space for image-volume in'
                                ' cache. Will skip creating entry for image'
                                ' %(image)s on %(service)s.'),
                            {'image': image_id,
                             'service': volume_ref.service_topic_queue})
                return
            image_volume = self._clone_image_volume(ctx,
                                                    volume_ref,
                                                    image_meta)
            if not image_volume:
                LOG.warning(_LW('Unable to clone image_volume for image '
                                '%(image_id)s will not create cache entry.'),
                            {'image_id': image_id})
                return
            self.image_volume_cache.create_cache_entry(
                ctx,
                image_volume,
                image_id,
                image_meta
            )
        except exception.CinderException as e:
            LOG.warning(_LW('Failed to create new image-volume cache entry.'
                            ' Error: %(exception)s'), {'exception': e})
            # Clean up the clone so it is not leaked outside the cache.
            if image_volume:
                self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception(_LE('Create clone_image_volume: %(volume_id)s'
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
    def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
                                             image_meta):
        """Create a cloned volume and register its location to the image.

        Only applies to raw/bare images (anything else returns False so the
        caller falls back to a normal upload).  On registration failure the
        clone is deleted and False is returned.

        :returns: True if the cinder:// location was registered with glance
        """
        if (image_meta['disk_format'] != 'raw' or
                image_meta['container_format'] != 'bare'):
            return False
        image_volume_context = ctx
        # Optionally own the image volume with the internal tenant so it is
        # hidden from the end user.
        if self.driver.configuration.image_upload_use_internal_tenant:
            internal_ctx = context.get_internal_tenant_context()
            if internal_ctx:
                image_volume_context = internal_ctx
        image_volume = self._clone_image_volume(image_volume_context,
                                                volume,
                                                image_meta)
        if not image_volume:
            return False
        # The image_owner metadata should be set before uri is added to
        # the image so glance cinder store can check its owner.
        image_volume_meta = {'image_owner': ctx.project_id}
        self.db.volume_metadata_update(image_volume_context,
                                       image_volume.id,
                                       image_volume_meta,
                                       False)
        uri = 'cinder://%s' % image_volume.id
        image_registered = None
        try:
            image_registered = image_service.add_location(
                ctx, image_meta['id'], uri, {})
        except (exception.NotAuthorized, exception.Invalid,
                exception.NotFound):
            LOG.exception(_LE('Failed to register image volume location '
                              '%(uri)s.'), {'uri': uri})
        if not image_registered:
            # Registration failed: remove the now-useless clone.
            LOG.warning(_LW('Registration of image volume URI %(uri)s '
                            'to image %(image_id)s failed.'),
                        {'uri': uri, 'image_id': image_meta['id']})
            try:
                self.delete_volume(image_volume_context, image_volume)
            except exception.CinderException:
                LOG.exception(_LE('Could not delete failed image volume '
                                  '%(id)s.'), {'id': image_volume.id})
            return False
        # Link the volume back to the glance image it now backs.
        image_volume_meta['glance_image_id'] = image_meta['id']
        self.db.volume_metadata_update(image_volume_context,
                                       image_volume.id,
                                       image_volume_meta,
                                       False)
        return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context,
defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
    def initialize_connection(self, context, volume, connector):
        """Prepare volume for connection from host represented by connector.

        This method calls the driver initialize_connection and returns
        it to the caller. The connector parameter is a dictionary with
        information about the host that will connect to the volume in the
        following format::

            {
                'ip': ip,
                'initiator': initiator,
            }

        ip: the ip address of the connecting machine
        initiator: the iscsi initiator name of the connecting machine.
        This can be None if the connecting machine does not support iscsi
        connections.

        driver is responsible for doing any necessary security setup and
        returning a connection_info dictionary in the following format::

            {
                'driver_volume_type': driver_volume_type,
                'data': data,
            }

        driver_volume_type: a string to identify the type of volume. This
                            can be used by the calling code to determine the
                            strategy for connecting to the volume. This could
                            be 'iscsi', 'rbd', 'sheepdog', etc.
        data: this is the data that the calling code will use to connect
              to the volume. Keep in mind that this will be serialized to
              json in various places, so it should not contain any non-json
              data types.

        :raises InvalidInput: connector rejected by the driver
        :raises VolumeBackendAPIException: validation/export/initialize
            failure in the driver
        :raises ExportFailure: persisting the export's model update failed
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        # TODO(jdg): Add deprecation warning
        utils.require_driver_initialized(self.driver)
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            # Create the export (e.g. iSCSI target) before handing out
            # connection info.
            model_update = self.driver.create_export(context.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            msg = _("Create export of volume failed (%s)") % ex.msg
            LOG.exception(msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            # Persist any provider fields the driver returned.
            if model_update:
                volume.update(model_update)
                volume.save()
        except exception.CinderException as ex:
            LOG.exception(_LE("Model update failed."), resource=volume)
            raise exception.ExportFailure(reason=six.text_type(ex))
        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            # Roll back the export we just created.
            self.driver.remove_export(context.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        conn_info = self._parse_connection_options(context, volume, conn_info)
        LOG.info(_LI("Initialize volume connection completed successfully."),
                 resource=volume)
        return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
    def accept_transfer(self, context, volume_id, new_user, new_project):
        """Finalize a volume ownership transfer on the backend.

        Lets the driver rewrite tenant-bound provider info (e.g. CHAP
        credentials) for the new owner and persists any model update.

        :param new_user: user id taking ownership
        :param new_project: project id taking ownership
        :returns: the driver's model update dict (may be None)
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)

        # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
        # for those that do allow them to return updated model info
        model_update = self.driver.accept_transfer(context,
                                                   volume_ref,
                                                   new_user,
                                                   new_project)

        if model_update:
            try:
                self.db.volume_update(context.elevated(),
                                      volume_id,
                                      model_update)
            except exception.CinderException:
                # Persisting the update failed: flag the volume as errored
                # and re-raise.
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Update volume model for "
                                      "transfer operation failed."),
                                  resource=volume_ref)
                    self.db.volume_update(context.elevated(),
                                          volume_id,
                                          {'status': 'error'})

        LOG.info(_LI("Transfer volume completed successfully."),
                 resource=volume_ref)
        return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
    def _attach_volume(self, ctxt, volume, properties, remote=False,
                       attach_encryptor=False):
        """Attach a volume locally for data copy, optionally via RPC.

        :param remote: when True the volume lives on another backend and
            the connection is initialized over RPC
        :param attach_encryptor: also attach a dm-crypt encryptor so the
            copy operates on plaintext data
        :returns: attach_info dict from _connect_device
        """
        status = volume['status']

        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            try:
                conn = rpcapi.initialize_connection(ctxt, volume, properties)
            except Exception:
                # Restore the pre-attach status before re-raising.
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed to attach volume %(vol)s."),
                              {'vol': volume['id']})
                    self.db.volume_update(ctxt, volume['id'],
                                          {'status': status})
        else:
            conn = self.initialize_connection(ctxt, volume, properties)

        attach_info = self._connect_device(conn)
        try:
            if attach_encryptor and (
                    volume_types.is_encrypted(ctxt,
                                              volume.volume_type_id)):
                encryption = self.db.volume_encryption_metadata_get(
                    ctxt.elevated(), volume.id)
                if encryption:
                    utils.brick_attach_volume_encryptor(ctxt,
                                                        attach_info,
                                                        encryption)
        except Exception:
            # Undo the device attach if the encryptor could not be set up.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to attach volume encryptor"
                              " %(vol)s."), {'vol': volume['id']})
                self._detach_volume(ctxt, attach_info, volume, properties)
        return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
    def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
        """Copy data from src_vol to dest_vol.

        Attaches both volumes locally (via RPC for whichever side is
        remote), block-copies the data with dd semantics, and always
        detaches both sides again — forcing the detach if the copy failed.

        :param remote: None, 'src', 'dest' or 'both' — which side(s) live
            on a remote backend
        """
        LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
                  {'src': src_vol['name'], 'dest': dest_vol['name']})
        attach_encryptor = False
        # If the encryption method or key is changed, we have to
        # copy data through dm-crypt.
        if volume_types.volume_types_encryption_changed(
                ctxt,
                src_vol.volume_type_id,
                dest_vol.volume_type_id):
            attach_encryptor = True
        properties = utils.brick_get_connector_properties()

        dest_remote = remote in ['dest', 'both']
        dest_attach_info = self._attach_volume(
            ctxt, dest_vol, properties,
            remote=dest_remote,
            attach_encryptor=attach_encryptor)

        try:
            src_remote = remote in ['src', 'both']
            src_attach_info = self._attach_volume(
                ctxt, src_vol, properties,
                remote=src_remote,
                attach_encryptor=attach_encryptor)
        except Exception:
            # Source attach failed: undo the destination attach before
            # re-raising.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to attach source volume for copy."))
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, remote=dest_remote,
                                    attach_encryptor=attach_encryptor)

        # Check the backend capabilities of migration destination host.
        rpcapi = volume_rpcapi.VolumeAPI()
        capabilities = rpcapi.get_capabilities(ctxt,
                                               dest_vol.service_topic_queue,
                                               False)
        sparse_copy_volume = bool(capabilities and
                                  capabilities.get('sparse_copy_volume',
                                                   False))

        copy_error = True
        try:
            size_in_mb = int(src_vol['size']) * units.Ki  # vol size is in GB
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  size_in_mb,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=sparse_copy_volume)
            copy_error = False
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
                          {'src': src_vol['id'], 'dest': dest_vol['id']})
        finally:
            # Always detach both sides; force the detach when the copy
            # errored out.  Nested try/finally so a destination detach
            # failure still detaches the source.
            try:
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, force=copy_error,
                                    remote=dest_remote,
                                    attach_encryptor=attach_encryptor)
            finally:
                self._detach_volume(ctxt, src_attach_info, src_vol,
                                    properties, force=copy_error,
                                    remote=src_remote,
                                    attach_encryptor=attach_encryptor)
    def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
        """Host-assisted volume migration.

        Creates a shadow volume on the destination backend, waits for it
        to become available, then either copies the data directly
        (detached volume) or delegates the copy to Nova (attached volume).

        :param backend: destination backend dict ('host', 'cluster_name')
        :param new_type_id: optional volume type for the new volume
        :raises VolumeMigrationFailed: destination creation errored or
            timed out
        """
        rpcapi = volume_rpcapi.VolumeAPI()

        # Create new volume on remote host
        tmp_skip = {'snapshot_id', 'source_volid'}
        skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host',
                                                                'cluster_name'}
        new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
            # A retype across encryption settings needs a fresh key for
            # the new volume.
            if volume_types.volume_types_encryption_changed(
                    ctxt, volume.volume_type_id, new_type_id):
                encryption_key_id = vol_utils.create_encryption_key(
                    ctxt, self.key_manager, new_type_id)
                new_vol_values['encryption_key_id'] = encryption_key_id

        new_volume = objects.Volume(
            context=ctxt,
            host=backend['host'],
            cluster_name=backend.get('cluster_name'),
            status='creating',
            attach_status=fields.VolumeAttachStatus.DETACHED,
            migration_status='target:%s' % volume['id'],
            **new_vol_values
        )
        new_volume.create()
        rpcapi.create_volume(ctxt, new_volume, None, None,
                             allow_reschedule=False)

        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        # TODO(thangp): Replace get_by_id with refresh when it is available
        new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
        tries = 0
        # Poll with quadratic back-off until available/error/timeout.
        while new_volume.status != 'available':
            tries += 1
            now = time.time()
            if new_volume.status == 'error':
                msg = _("failed to create new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                time.sleep(tries ** 2)
            # TODO(thangp): Replace get_by_id with refresh when it is
            # available
            new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)

        # Set skipped value to avoid calling
        # function except for _create_raw_volume
        tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
        if tmp_skipped_values:
            new_volume.update(tmp_skipped_values)
            new_volume.save()

        # Copy the source volume to the destination volume
        try:
            attachments = volume.volume_attachment
            if not attachments:
                # Pre- and post-copy driver-specific actions
                self.driver.before_volume_copy(ctxt, volume, new_volume,
                                               remote='dest')
                self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
                self.driver.after_volume_copy(ctxt, volume, new_volume,
                                              remote='dest')

                # The above call is synchronous so we complete the migration
                self.migrate_volume_completion(ctxt, volume, new_volume,
                                               error=False)
            else:
                nova_api = compute.API()
                # This is an async call to Nova, which will call the completion
                # when it's done
                for attachment in attachments:
                    instance_uuid = attachment['instance_uuid']
                    nova_api.update_server_volume(ctxt, instance_uuid,
                                                  volume.id,
                                                  new_volume.id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE(
                    "Failed to copy volume %(vol1)s to %(vol2)s"), {
                        'vol1': volume.id, 'vol2': new_volume.id})
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume)
    def _clean_temporary_volume(self, ctxt, volume, new_volume,
                                clean_db_only=False):
        """Clean up the temporary (destination) volume of a failed migration.

        :param ctxt: request context.
        :param volume: the source volume whose migration failed.
        :param new_volume: the temporary destination volume to clean up.
        :param clean_db_only: when True only the DB record is destroyed;
            used when the backing volume was never created on the
            destination backend.
        """
        # If we're in the migrating phase, we need to cleanup
        # destination volume because source volume is remaining
        if volume.migration_status == 'migrating':
            try:
                if clean_db_only:
                    # The temporary volume is not created, only DB data
                    # is created
                    new_volume.destroy()
                else:
                    # The temporary volume is already created
                    rpcapi = volume_rpcapi.VolumeAPI()
                    rpcapi.delete_volume(ctxt, new_volume)
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find the temporary volume "
                             "%(vol)s in the database. There is no need "
                             "to clean up this volume."),
                         {'vol': new_volume.id})
        else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in database should
            # be cleared to handle volume after migration failure
            try:
                new_volume.migration_status = None
                new_volume.save()
            except exception.VolumeNotFound:
                LOG.info(_LI("Couldn't find destination volume "
                             "%(vol)s in the database. The entry might be "
                             "successfully deleted during migration "
                             "completion phase."),
                         {'vol': new_volume.id})
                LOG.warning(_LW("Failed to migrate volume. The destination "
                                "volume %(vol)s is not deleted since the "
                                "source volume may have been deleted."),
                            {'vol': new_volume.id})
    def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
        """Complete (or roll back) a generic volume migration.

        Swaps the source and destination DB records so callers keep using
        the original volume id, re-attaches the volume if it was in-use,
        and asynchronously deletes the old backend volume.

        :param ctxt: request context.
        :param volume: the original (source) volume.
        :param new_volume: the migrated (destination) volume.
        :param error: when True the migration failed; delete the
            destination and restore the source's previous status instead.
        :returns: the surviving volume's id.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()
        # NOTE(review): the log message below has an unbalanced "(" —
        # cosmetic only; left untouched here since it is a runtime string.
        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()
        orig_volume_status = volume.previous_status
        if error:
            LOG.info(_LI("migrate_volume_completion is cleaning up an error "
                         "for volume %(vol1)s (temporary volume %(vol2)s"),
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id
        volume.migration_status = 'completing'
        volume.save()
        # Detach the source volume (if it fails, don't fail the migration)
        # As after detach and refresh, volume_attachments will be None.
        # We keep volume_attachment for later attach.
        volume_attachments = []
        if orig_volume_status == 'in-use':
            for attachment in volume.volume_attachment:
                # Save the attachments the volume currently have
                volume_attachments.append(attachment)
                try:
                    self.detach_volume(ctxt, volume.id, attachment.id)
                except Exception as ex:
                    LOG.error(_LE("Detach migration source volume "
                                  "%(volume.id)s from instance "
                                  "%(instance_id)s failed: %(err)s"),
                              {'err': ex,
                               'volume.id': volume.id,
                               'instance_id': attachment.id},
                              resource=volume)
        # Give driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through rpc to the host of the new volume
        # the current host and driver object is for the "existing" volume.
        rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                      orig_volume_status)
        volume.refresh()
        new_volume.refresh()
        # Swap src and dest DB records so we can continue using the src id and
        # asynchronously delete the destination id
        updated_new = volume.finish_volume_migration(new_volume)
        updates = {'status': orig_volume_status,
                   'previous_status': volume.status,
                   'migration_status': 'success'}
        # Restore the attachments saved above
        if orig_volume_status == 'in-use':
            for attachment in volume_attachments:
                LOG.debug('Re-attaching: %s', attachment)
                rpcapi.attach_volume(ctxt, volume,
                                     attachment.instance_uuid,
                                     attachment.attached_host,
                                     attachment.mountpoint,
                                     'rw')
        volume.update(updates)
        volume.save()
        # Asynchronous deletion of the source volume in the back-end (now
        # pointed by the target volume id)
        try:
            rpcapi.delete_volume(ctxt, updated_new)
        except Exception as ex:
            LOG.error(_LE('Failed to request async delete of migration source '
                          'vol %(vol)s: %(err)s'),
                      {'vol': volume.id, 'err': ex})
        LOG.info(_LI("Complete-Migrate volume completed successfully."),
                 resource=volume)
        return volume.id
    def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
                       new_type_id=None):
        """Migrate the volume to the specified host (called on source host).

        First tries the driver's optimized migration unless a host copy is
        forced or a retype is requested; falls back to the generic
        (copy-based) migration otherwise.

        :param ctxt: request context.
        :param volume: the volume to migrate.
        :param host: destination host dict (``host``, ``cluster_name``).
        :param force_host_copy: skip the driver-assisted path and force the
            generic host copy.
        :param new_type_id: target volume type when migration is part of a
            retype.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()
        model_update = None
        moved = False
        status_update = None
        if volume.status in ('retyping', 'maintenance'):
            status_update = {'status': volume.previous_status}
        volume.migration_status = 'migrating'
        volume.save()
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug("Issue driver.migrate_volume.", resource=volume)
                moved, model_update = self.driver.migrate_volume(ctxt,
                                                                 volume,
                                                                 host)
                if moved:
                    updates = {'host': host['host'],
                               'cluster_name': host.get('cluster_name'),
                               'migration_status': 'success',
                               'previous_status': volume.status}
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume.update(updates)
                    volume.save()
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        if not moved:
            # Driver could not (or was not asked to) migrate: do the
            # generic copy-based migration instead.
            try:
                self._migrate_volume_generic(ctxt, volume, host, new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        LOG.info(_LI("Migrate volume completed successfully."),
                 resource=volume)
    @periodic_task.periodic_task
    def _report_driver_status(self, context):
        """Periodically collect driver stats and queue them for schedulers.

        Skips (with a warning) when the driver is uninitialized; otherwise
        refreshes volume stats, merges in extra capabilities and filter /
        goodness functions, then hands them to the scheduler update path.
        """
        if not self.driver.initialized:
            if self.driver.configuration.config_group is None:
                config_group = ''
            else:
                config_group = ('(config name %s)' %
                                self.driver.configuration.config_group)
            LOG.warning(_LW("Update driver status failed: %(config_group)s "
                            "is uninitialized."),
                        {'config_group': config_group},
                        resource={'type': 'driver',
                                  'id': self.driver.__class__.__name__})
        else:
            volume_stats = self.driver.get_volume_stats(refresh=True)
            if self.extra_capabilities:
                volume_stats.update(self.extra_capabilities)
            if volume_stats:
                # Append volume stats with 'allocated_capacity_gb'
                self._append_volume_stats(volume_stats)
                # Append filter and goodness function if needed
                volume_stats = (
                    self._append_filter_goodness_functions(volume_stats))
                # queue it to be sent to the Schedulers.
                self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish."""
        # Refresh the driver's stats first so the published capabilities
        # are current; order matters here.
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
    def extend_volume(self, context, volume, new_size, reservations):
        """Extend a volume to new_size, committing or rolling back quota.

        :param context: request context.
        :param volume: the volume to extend.
        :param new_size: target size in GB.
        :param reservations: quota reservations to commit on success or
            roll back on failure.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.status = 'error_extending'
                volume.save()
        project_id = volume.project_id
        size_increase = (int(new_size)) - volume.size
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            self.driver.extend_volume(volume, new_size)
        except Exception:
            LOG.exception(_LE("Extend volume failed."),
                          resource=volume)
            try:
                self.db.volume_update(context, volume.id,
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                  "to extend volume") %
                                                volume.id)
            finally:
                QUOTAS.rollback(context, reservations, project_id=project_id)
            # NOTE(review): unreachable — the raise above (or a failure in
            # volume_update) always propagates after the finally block runs.
            return
        QUOTAS.commit(context, reservations, project_id=project_id)
        volume.update({'size': int(new_size), 'status': 'available'})
        volume.save()
        pool = vol_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or vol_utils.extract_host(
                    volume.host, 'pool', True)
        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=size_increase)
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
        LOG.info(_LI("Extend volume completed successfully."),
                 resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
    def retype(self, context, volume, new_type_id, host,
               migration_policy='never', reservations=None,
               old_reservations=None):
        """Change a volume's type, migrating it if the driver cannot retype.

        :param context: request context.
        :param volume: the volume to retype.
        :param new_type_id: id of the target volume type.
        :param host: destination host dict (``host``, ``cluster_name``).
        :param migration_policy: 'never' forbids falling back to migration.
        :param reservations: new-type quota reservations to commit.
        :param old_reservations: old-type reservations (may be created here
            for backwards compatibility when not passed in).
        """
        def _retype_error(context, volume, old_reservations,
                          new_reservations, status_update):
            # Restore the volume's status, then unconditionally roll back
            # both quota reservations.
            try:
                volume.update(status_update)
                volume.save()
            finally:
                QUOTAS.rollback(context, old_reservations)
                QUOTAS.rollback(context, new_reservations)
        status_update = {'status': volume.previous_status}
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error. Should that be done
                # here? Setting the volume back to it's original status
                # for now.
                volume.update(status_update)
                volume.save()
        # If old_reservations has been passed in from the API, we should
        # skip quotas.
        # TODO(ntpttr): These reservation checks are left in to be backwards
        # compatible with Liberty and can be removed in N.
        if not old_reservations:
            # Get old reservations
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for retyping operation since they didn't changed, just
                # reserve volume_type and type gigabytes is fine.
                reserve_opts.pop('volumes')
                reserve_opts.pop('gigabytes')
                old_reservations = QUOTAS.reserve(context,
                                                  project_id=project_id,
                                                  **reserve_opts)
            except Exception:
                volume.update(status_update)
                volume.save()
                msg = _("Failed to update quota usage while retyping volume.")
                LOG.exception(msg, resource=volume)
                raise exception.CinderException(msg)
        # We already got the new reservations
        new_reservations = reservations
        # If volume types have the same contents, no need to do anything.
        # Use the admin context to be able to access volume extra_specs
        retyped = False
        diff, all_equal = volume_types.volume_types_diff(
            context.elevated(), volume.volume_type_id, new_type_id)
        if all_equal:
            retyped = True
        # Call driver to try and change the type
        retype_model_update = None
        # NOTE(jdg): Check to see if the destination host or cluster (depending
        # if it's the volume is in a clustered backend or not) is the same as
        # the current. If it's not don't call the driver.retype method,
        # otherwise drivers that implement retype may report success, but it's
        # invalid in the case of a migrate.
        # We assume that those that support pools do this internally
        # so we strip off the pools designation
        if (not retyped and
                not diff.get('encryption') and
                self._is_our_backend(host['host'], host.get('cluster_name'))):
            try:
                new_type = volume_types.get_volume_type(context.elevated(),
                                                        new_type_id)
                with volume.obj_as_admin():
                    ret = self.driver.retype(context,
                                             volume,
                                             new_type,
                                             diff,
                                             host)
                # Check if the driver retype provided a model update or
                # just a retype indication
                if type(ret) == tuple:
                    retyped, retype_model_update = ret
                else:
                    retyped = ret
                if retyped:
                    LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
            except Exception:
                retyped = False
                LOG.exception(_LE("Volume %s: driver error when trying to "
                                  "retype, falling back to generic "
                                  "mechanism."), volume.id)
        # We could not change the type, so we need to migrate the volume, where
        # the destination volume will be of the new type
        if not retyped:
            if migration_policy == 'never':
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Retype requires migration but is not allowed.")
                raise exception.VolumeMigrationFailed(reason=msg)
            snaps = objects.SnapshotList.get_all_for_volume(context,
                                                            volume.id)
            if snaps:
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not have snapshots.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            # Don't allow volume with replicas to be migrated
            rep_status = volume.replication_status
            if rep_status is not None and rep_status != 'disabled':
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not be replicated.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            volume.migration_status = 'starting'
            volume.save()
            try:
                self.migrate_volume(context, volume, host,
                                    new_type_id=new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    _retype_error(context, volume, old_reservations,
                                  new_reservations, status_update)
        else:
            model_update = {'volume_type_id': new_type_id,
                            'host': host['host'],
                            'cluster_name': host.get('cluster_name'),
                            'status': status_update['status']}
            if retype_model_update:
                model_update.update(retype_model_update)
            self._set_replication_status(diff, model_update)
            volume.update(model_update)
            volume.save()
        if old_reservations:
            QUOTAS.commit(context, old_reservations, project_id=project_id)
        if new_reservations:
            QUOTAS.commit(context, new_reservations, project_id=project_id)
        self._notify_about_volume_usage(
            context, volume, "retype",
            extra_usage_info={'volume_type': new_type_id})
        self.publish_service_capabilities(context)
        LOG.info(_LI("Retype volume completed successfully."),
                 resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_replicated_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
    def manage_existing(self, ctxt, volume, ref=None):
        """Bring an existing backend volume under Cinder management.

        Runs the manage_existing taskflow, updates pool allocation stats,
        and returns the managed volume's id.

        :param ctxt: request context.
        :param volume: the Cinder volume object to back with ``ref``.
        :param ref: driver-specific reference to the existing volume.
        :returns: the managed volume's id.
        """
        vol_ref = self._run_manage_existing_flow_engine(
            ctxt, volume, ref)
        self._update_stats_for_managed(vol_ref)
        LOG.info(_LI("Manage existing volume completed successfully."),
                 resource=vol_ref)
        return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
    def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
        """Build and run the manage_existing taskflow for a volume.

        :param ctxt: request context.
        :param volume: the Cinder volume object being managed.
        :param ref: driver-specific reference to the existing volume.
        :returns: the volume object produced by the flow.
        :raises CinderException: if the flow cannot be constructed.
        """
        try:
            flow_engine = manage_existing.get_flow(
                ctxt,
                self.db,
                self.driver,
                self.host,
                volume,
                ref,
            )
        except Exception:
            msg = _("Failed to create manage_existing flow.")
            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
            raise exception.CinderException(msg)
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
            # Fetch created volume from storage
            vol_ref = flow_engine.storage.fetch('volume')
        return vol_ref
def _get_my_resources(self, ctxt, ovo_class_list):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters)
    def _get_my_volumes(self, ctxt):
        """Return all volumes owned by this host/cluster."""
        return self._get_my_resources(ctxt, objects.VolumeList)
    def _get_my_snapshots(self, ctxt):
        """Return all snapshots owned by this host/cluster."""
        return self._get_my_resources(ctxt, objects.SnapshotList)
    def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
                               sort_dirs, want_objects=False):
        """List backend volumes that could be managed, with paging/sorting.

        :param ctxt: request context.
        :param marker: pagination marker.
        :param limit: maximum number of entries to return.
        :param offset: number of entries to skip.
        :param sort_keys: keys to sort by.
        :param sort_dirs: sort directions matching ``sort_keys``.
        :param want_objects: convert driver dicts to
            ManageableVolumeList objects when True.
        :returns: driver-reported manageable volume entries.
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Listing manageable volumes failed, due "
                                  "to uninitialized driver."))
        cinder_volumes = self._get_my_volumes(ctxt)
        try:
            driver_entries = self.driver.get_manageable_volumes(
                cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
            if want_objects:
                driver_entries = (objects.ManageableVolumeList.
                                  from_primitives(ctxt, driver_entries))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Listing manageable volumes failed, due "
                                  "to driver error."))
        return driver_entries
    def create_consistencygroup(self, context, group):
        """Creates the consistency group.

        Delegates to :meth:`_create_group` with ``is_generic_group=False``.
        """
        return self._create_group(context, group, False)
    def create_group(self, context, group):
        """Creates the group.

        Delegates to :meth:`_create_group` (generic-group path).
        """
        return self._create_group(context, group)
    def _create_group(self, context, group, is_generic_group=True):
        """Create a (generic or consistency) group on the backend.

        :param context: request context (elevated internally).
        :param group: the group object to create.
        :param is_generic_group: True for generic groups, False for
            legacy consistency groups.
        :returns: the created group object.
        """
        context = context.elevated()
        # Make sure the host in the DB matches our own when clustered
        self._set_resource_host(group)
        status = fields.GroupStatus.AVAILABLE
        model_update = None
        if is_generic_group:
            self._notify_about_group_usage(
                context, group, "create.start")
        else:
            self._notify_about_consistencygroup_usage(
                context, group, "create.start")
        try:
            utils.require_driver_initialized(self.driver)
            LOG.info(_LI("Group %s: creating"), group.name)
            if is_generic_group:
                try:
                    model_update = self.driver.create_group(context,
                                                            group)
                except NotImplementedError:
                    # Fall back: non-CG-typed groups use the generic path;
                    # CG-typed groups go through the legacy CG driver API.
                    cgsnap_type = group_types.get_default_cgsnapshot_type()
                    if group.group_type_id != cgsnap_type['id']:
                        model_update = self._create_group_generic(context,
                                                                  group)
                    else:
                        cg, __ = self._convert_group_to_cg(group, [])
                        model_update = self.driver.create_consistencygroup(
                            context, cg)
            else:
                model_update = self.driver.create_consistencygroup(context,
                                                                   group)
            if model_update:
                if (model_update['status'] ==
                        fields.GroupStatus.ERROR):
                    msg = (_('Create group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error(_LE("Group %s: create failed"),
                          group.name)
        group.status = status
        group.created_at = timeutils.utcnow()
        group.save()
        LOG.info(_LI("Group %s: created successfully"),
                 group.name)
        if is_generic_group:
            self._notify_about_group_usage(
                context, group, "create.end")
        else:
            self._notify_about_consistencygroup_usage(
                context, group, "create.end")
        LOG.info(_LI("Create group completed successfully."),
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def create_consistencygroup_from_src(self, context, group,
                                         cgsnapshot=None, source_cg=None):
        """Creates the consistency group from source.

        The source can be a CG snapshot or a source CG.

        :param context: request context.
        :param group: the destination CG to populate.
        :param cgsnapshot: source CG snapshot, if cloning from a snapshot.
        :param source_cg: source CG, if cloning from another CG.
        :returns: the created group object.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = self.db.volume_get_all_by_group(context, group.id)
            if cgsnapshot:
                try:
                    # Check if cgsnapshot still exists
                    cgsnapshot = objects.CGSnapshot.get_by_id(
                        context, cgsnapshot.id)
                except exception.CgSnapshotNotFound:
                    LOG.error(_LE("Create consistency group "
                                  "from snapshot-%(snap)s failed: "
                                  "SnapshotNotFound."),
                              {'snap': cgsnapshot.id},
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise
                source_name = _("snapshot-%s") % cgsnapshot.id
                snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                    context, cgsnapshot.id)
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_CG_SRC_SNAP_STATUS):
                        msg = (_("Cannot create consistency group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
                        raise exception.InvalidConsistencyGroup(reason=msg)
            if source_cg:
                try:
                    source_cg = objects.ConsistencyGroup.get_by_id(
                        context, source_cg.id)
                except exception.ConsistencyGroupNotFound:
                    LOG.error(_LE("Create consistency group "
                                  "from source cg-%(cg)s failed: "
                                  "ConsistencyGroupNotFound."),
                              {'cg': source_cg.id},
                              resource={'type': 'consistency_group',
                                        'id': group.id})
                    raise
                source_name = _("cg-%s") % source_cg.id
                source_vols = self.db.volume_get_all_by_group(
                    context, source_cg.id)
                for source_vol in source_vols:
                    if (source_vol['status'] not in
                            VALID_CREATE_CG_SRC_CG_STATUS):
                        msg = (_("Cannot create consistency group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol['id'],
                                'valid': VALID_CREATE_CG_SRC_CG_STATUS})
                        raise exception.InvalidConsistencyGroup(reason=msg)
            # Sort source snapshots so that they are in the same order as their
            # corresponding target volumes.
            sorted_snapshots = None
            if cgsnapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)
            # Sort source volumes so that they are in the same order as their
            # corresponding target volumes.
            sorted_source_vols = None
            if source_cg and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)
            self._notify_about_consistencygroup_usage(
                context, group, "create.start")
            utils.require_driver_initialized(self.driver)
            model_update, volumes_model_update = (
                self.driver.create_consistencygroup_from_src(
                    context, group, volumes, cgsnapshot,
                    sorted_snapshots, source_cg, sorted_source_vols))
            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)
            if model_update:
                group.update(model_update)
                group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                LOG.error(_LE("Create consistency group "
                              "from source %(source)s failed."),
                          {'source': source_name},
                          resource={'type': 'consistency_group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                for vol in volumes:
                    self.db.volume_update(
                        context, vol['id'], {'status': 'error'})
        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)
        group.status = status
        group.created_at = now
        group.save()
        self._notify_about_consistencygroup_usage(
            context, group, "create.end")
        LOG.info(_LI("Create consistency group "
                     "from source-%(source)s completed successfully."),
                 {'source': source_name},
                 resource={'type': 'consistency_group',
                           'id': group.id})
        return group
    def create_group_from_src(self, context, group,
                              group_snapshot=None, source_group=None):
        """Creates the group from source.

        The source can be a group snapshot or a source group.

        :param context: request context.
        :param group: the destination group to populate.
        :param group_snapshot: source group snapshot, if cloning from a
            snapshot.
        :param source_group: source group, if cloning from another group.
        :returns: the created group object.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                                  group.id)
            if group_snapshot:
                try:
                    # Check if group_snapshot still exists
                    group_snapshot = objects.GroupSnapshot.get_by_id(
                        context, group_snapshot.id)
                except exception.GroupSnapshotNotFound:
                    LOG.error(_LE("Create group "
                                  "from snapshot-%(snap)s failed: "
                                  "SnapshotNotFound."),
                              {'snap': group_snapshot.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("snapshot-%s") % group_snapshot.id
                snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                    context, group_snapshot.id)
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_GROUP_SRC_SNAP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            if source_group:
                try:
                    source_group = objects.Group.get_by_id(
                        context, source_group.id)
                except exception.GroupNotFound:
                    LOG.error(_LE("Create group "
                                  "from source group-%(group)s failed: "
                                  "GroupNotFound."),
                              {'group': source_group.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("group-%s") % source_group.id
                source_vols = objects.VolumeList.get_all_by_generic_group(
                    context, source_group.id)
                for source_vol in source_vols:
                    if (source_vol.status not in
                            VALID_CREATE_GROUP_SRC_GROUP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol.id,
                                'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            # Sort source snapshots so that they are in the same order as their
            # corresponding target volumes.
            sorted_snapshots = None
            if group_snapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)
            # Sort source volumes so that they are in the same order as their
            # corresponding target volumes.
            sorted_source_vols = None
            if source_group and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)
            self._notify_about_group_usage(
                context, group, "create.start")
            utils.require_driver_initialized(self.driver)
            try:
                model_update, volumes_model_update = (
                    self.driver.create_group_from_src(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))
            except NotImplementedError:
                # Fall back: non-CG-typed groups use the generic path;
                # CG-typed groups are converted and sent through the
                # legacy consistency group driver API.
                cgsnap_type = group_types.get_default_cgsnapshot_type()
                if group.group_type_id != cgsnap_type['id']:
                    model_update, volumes_model_update = (
                        self._create_group_from_src_generic(
                            context, group, volumes, group_snapshot,
                            sorted_snapshots, source_group,
                            sorted_source_vols))
                else:
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    cgsnapshot, sorted_snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, sorted_snapshots, context))
                    source_cg, sorted_source_vols = (
                        self._convert_group_to_cg(source_group,
                                                  sorted_source_vols))
                    model_update, volumes_model_update = (
                        self.driver.create_consistencygroup_from_src(
                            context, cg, volumes, cgsnapshot,
                            sorted_snapshots, source_cg, sorted_source_vols))
                    self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    self._remove_consistencygroup_id_from_volumes(
                        sorted_source_vols)
            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)
            if model_update:
                group.update(model_update)
                group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                LOG.error(_LE("Create group "
                              "from source %(source)s failed."),
                          {'source': source_name},
                          resource={'type': 'group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                self._remove_consistencygroup_id_from_volumes(volumes)
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()
        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)
        group.status = status
        group.created_at = now
        group.save()
        self._notify_about_group_usage(
            context, group, "create.end")
        LOG.info(_LI("Create group "
                     "from source-%(source)s completed successfully."),
                 {'source': source_name},
                 resource={'type': 'group',
                           'id': group.id})
        return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
for vol in volumes:
try:
if snapshots:
for snapshot in snapshots:
if vol.snapshot_id == snapshot.id:
self.driver.create_volume_from_snapshot(
vol, snapshot)
break
except Exception:
raise
try:
if source_vols:
for source_vol in source_vols:
if vol.source_volid == source_vol.id:
self.driver.create_cloned_volume(vol, source_vol)
break
except Exception:
raise
return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
    """Copy source-derived attributes onto a volume and persist updates.

    When ``vol`` was created from a snapshot or cloned from another
    volume, propagate the source's bootable/multiattach flags and its
    glance image metadata, then write ``update`` to the volume row.

    On failure the volume (and, when given, the group) is flagged
    'error' before the exception is re-raised.

    :param context: the context of the caller.
    :param vol: the volume being finalized.
    :param update: dict of fields to persist on the volume.
    :param group: optional group to mark 'error' on failure.
    """
    try:
        snapshot_id = vol.get('snapshot_id')
        source_volid = vol.get('source_volid')
        # Track which volume id a failing lookup refers to, so the
        # VolumeNotFound handler can log it. (Previously the handler
        # logged ``snapshot.volume_id``, which is unbound on the clone
        # path and raised NameError instead of the intended message.)
        lookup_vol_id = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            lookup_vol_id = snapshot.volume_id
            orig_vref = self.db.volume_get(context,
                                           snapshot.volume_id)
            if orig_vref.bootable:
                update['bootable'] = True
                self.db.volume_glance_metadata_copy_to_volume(
                    context, vol['id'], snapshot_id)
        if source_volid:
            lookup_vol_id = source_volid
            source_vol = objects.Volume.get_by_id(context, source_volid)
            if source_vol.bootable:
                update['bootable'] = True
                self.db.volume_glance_metadata_copy_from_volume_to_volume(
                    context, source_volid, vol['id'])
            if source_vol.multiattach:
                update['multiattach'] = True
    except exception.SnapshotNotFound:
        LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
                  {'snapshot_id': vol['snapshot_id']})
        self.db.volume_update(context, vol['id'],
                              {'status': 'error'})
        if group:
            group.status = 'error'
            group.save()
        raise
    except exception.VolumeNotFound:
        LOG.error(_LE("The source volume %(volume_id)s "
                      "cannot be found."),
                  {'volume_id': lookup_vol_id})
        self.db.volume_update(context, vol['id'],
                              {'status': 'error'})
        if group:
            group.status = 'error'
            group.save()
        raise
    except exception.CinderException as ex:
        LOG.error(_LE("Failed to update %(volume_id)s"
                      " metadata using the provided snapshot"
                      " %(snapshot_id)s metadata."),
                  {'volume_id': vol['id'],
                   'snapshot_id': vol['snapshot_id']})
        self.db.volume_update(context, vol['id'],
                              {'status': 'error'})
        if group:
            group.status = 'error'
            group.save()
        raise exception.MetadataCopyFailure(reason=six.text_type(ex))

    self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
    """Fold a newly created volume's size into per-pool capacity stats."""
    pool = vol_utils.extract_host(vol['host'], 'pool')
    if pool is None:
        # Legacy volume with no pool in its host string: fall back to
        # the configured backend name, or derive a default pool name
        # from the host itself.
        pool = self.driver.configuration.safe_get(
            'volume_backend_name') or vol_utils.extract_host(
            vol['host'], 'pool', True)

    size_gb = vol['size']
    try:
        self.stats['pools'][pool]['allocated_capacity_gb'] += size_gb
    except KeyError:
        # First volume observed for this pool — start a fresh entry.
        self.stats['pools'][pool] = dict(allocated_capacity_gb=size_gb)
def delete_consistencygroup(self, context, group):
    """Deletes consistency group and the volumes in the group."""
    context = context.elevated()
    # Quotas are charged against the group owner's project, which may
    # differ from the caller's (e.g. an admin deleting on behalf of
    # another tenant). The former if/else here was dead code: both
    # branches reduced to group.project_id.
    project_id = group.project_id

    volumes = objects.VolumeList.get_all_by_group(context, group.id)

    for volume in volumes:
        if (volume.attach_status ==
                fields.VolumeAttachStatus.ATTACHED):
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume.id)
        self._check_is_our_resource(volume)

    self._notify_about_consistencygroup_usage(
        context, group, "delete.start")

    volumes_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        model_update, volumes_model_update = (
            self.driver.delete_consistencygroup(context, group, volumes))

        if volumes_model_update:
            for volume in volumes_model_update:
                update = {'status': volume['status']}
                self.db.volume_update(context, volume['id'],
                                      update)
                # If we failed to delete a volume, make sure the status
                # for the cg is set to error as well
                if (volume['status'] in ['error_deleting', 'error'] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = volume['status']

        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Delete consistency group failed.'))
                LOG.error(msg,
                          resource={'type': 'consistency_group',
                                    'id': group.id})
                raise exception.VolumeDriverException(message=msg)
            else:
                group.update(model_update)
                group.save()

    except Exception:
        with excutils.save_and_reraise_exception():
            group.status = 'error'
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()

    # Get reservations for group
    try:
        reserve_opts = {'consistencygroups': -1}
        cgreservations = CGQUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
    except Exception:
        # Best-effort: quota failure must not block resource cleanup.
        cgreservations = None
        LOG.exception(_LE("Delete consistency group "
                          "failed to update usages."),
                      resource={'type': 'consistency_group',
                                'id': group.id})

    for volume in volumes:
        # Get reservations for volume
        try:
            reserve_opts = {'volumes': -1,
                            'gigabytes': -volume.size}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume.volume_type_id)
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Delete consistency group "
                              "failed to update usages."),
                          resource={'type': 'consistency_group',
                                    'id': group.id})

        # Delete glance metadata if it exists
        self.db.volume_glance_metadata_delete_by_volume(context, volume.id)

        self.db.volume_destroy(context, volume.id)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.stats['allocated_capacity_gb'] -= volume.size

    if cgreservations:
        CGQUOTAS.commit(context, cgreservations,
                        project_id=project_id)

    group.destroy()
    self._notify_about_consistencygroup_usage(
        context, group, "delete.end", volumes)
    self.publish_service_capabilities(context)
    LOG.info(_LI("Delete consistency group "
                 "completed successfully."),
             resource={'type': 'consistency_group',
                       'id': group.id})
def delete_group(self, context, group):
    """Deletes group and the volumes in the group."""
    context = context.elevated()
    # Quotas are charged against the group owner's project, which may
    # differ from the caller's. The former if/else here was dead code:
    # both branches reduced to group.project_id.
    project_id = group.project_id

    volumes = objects.VolumeList.get_all_by_generic_group(
        context, group.id)

    for vol_obj in volumes:
        if vol_obj.attach_status == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=vol_obj.id)
        self._check_is_our_resource(vol_obj)

    self._notify_about_group_usage(
        context, group, "delete.start")

    volumes_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        try:
            model_update, volumes_model_update = (
                self.driver.delete_group(context, group, volumes))
        except NotImplementedError:
            # Fall back: either the generic implementation, or — for
            # groups of the reserved cgsnapshot type — the driver's CG
            # API with the group presented as a consistency group.
            cgsnap_type = group_types.get_default_cgsnapshot_type()
            if group.group_type_id != cgsnap_type['id']:
                model_update, volumes_model_update = (
                    self._delete_group_generic(context, group, volumes))
            else:
                cg, volumes = self._convert_group_to_cg(
                    group, volumes)
                model_update, volumes_model_update = (
                    self.driver.delete_consistencygroup(context, cg,
                                                        volumes))
                self._remove_consistencygroup_id_from_volumes(volumes)

        if volumes_model_update:
            for update in volumes_model_update:
                # If we failed to delete a volume, make sure the
                # status for the group is set to error as well
                if (update['status'] in ['error_deleting', 'error']
                        and model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = update['status']

            self.db.volumes_update(context, volumes_model_update)

        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Delete group failed.'))
                LOG.error(msg,
                          resource={'type': 'group',
                                    'id': group.id})
                raise exception.VolumeDriverException(message=msg)
            else:
                group.update(model_update)
                group.save()

    except Exception:
        with excutils.save_and_reraise_exception():
            group.status = 'error'
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                self._remove_consistencygroup_id_from_volumes(volumes)
                for vol_obj in volumes:
                    vol_obj.status = 'error'
                    vol_obj.save()

    # Get reservations for group
    try:
        reserve_opts = {'groups': -1}
        grpreservations = GROUP_QUOTAS.reserve(context,
                                               project_id=project_id,
                                               **reserve_opts)
    except Exception:
        # Best-effort: quota failure must not block resource cleanup.
        grpreservations = None
        LOG.exception(_LE("Delete group "
                          "failed to update usages."),
                      resource={'type': 'group',
                                'id': group.id})

    for vol in volumes:
        # Get reservations for volume
        try:
            reserve_opts = {'volumes': -1,
                            'gigabytes': -vol.size}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        vol.volume_type_id)
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Delete group "
                              "failed to update usages."),
                          resource={'type': 'group',
                                    'id': group.id})

        # Delete glance metadata if it exists
        self.db.volume_glance_metadata_delete_by_volume(context, vol.id)

        vol.destroy()

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        self.stats['allocated_capacity_gb'] -= vol.size

    if grpreservations:
        GROUP_QUOTAS.commit(context, grpreservations,
                            project_id=project_id)

    group.destroy()
    self._notify_about_group_usage(
        context, group, "delete.end")
    self.publish_service_capabilities(context)
    LOG.info(_LI("Delete group "
                 "completed successfully."),
             resource={'type': 'group',
                       'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
    """Creates a group."""
    # The group row already exists in the DB; the generic backend just
    # reports it available with a fresh creation timestamp.
    return {'status': fields.GroupStatus.AVAILABLE,
            'created_at': timeutils.utcnow()}
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
                            add_volumes=None, remove_volumes=None):
    """Updates consistency group.

    Update consistency group by adding volumes to the group,
    or removing volumes from the group.

    :param add_volumes: comma-separated string of volume ids to add.
    :param remove_volumes: comma-separated string of volume ids to
                           remove.
    """
    add_volumes_ref = []
    remove_volumes_ref = []
    add_volumes_list = []
    remove_volumes_list = []
    if add_volumes:
        add_volumes_list = add_volumes.split(',')
    if remove_volumes:
        remove_volumes_list = remove_volumes.split(',')

    # Validate every volume to add before touching the driver.
    for add_vol in add_volumes_list:
        try:
            add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
        except exception.VolumeNotFound:
            LOG.error(_LE("Update consistency group "
                          "failed to add volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': add_vol},
                      resource={'type': 'consistency_group',
                                'id': group.id})
            raise
        if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
            msg = (_("Cannot add volume %(volume_id)s to consistency "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': add_vol_ovo.id,
                    'group_id': group.id,
                    'status': add_vol_ovo.status,
                    'valid': VALID_ADD_VOL_TO_CG_STATUS})
            raise exception.InvalidVolume(reason=msg)
        self._check_is_our_resource(add_vol_ovo)
        add_volumes_ref.append(add_vol_ovo)

    # Validate every volume to remove.
    for remove_vol in remove_volumes_list:
        try:
            remove_vol_ref = self.db.volume_get(context, remove_vol)
        except exception.VolumeNotFound:
            LOG.error(_LE("Update consistency group "
                          "failed to remove volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': remove_vol},
                      resource={'type': 'consistency_group',
                                'id': group.id})
            raise
        if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
            msg = (_("Cannot remove volume %(volume_id)s from consistency "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': remove_vol_ref['id'],
                    'group_id': group.id,
                    'status': remove_vol_ref['status'],
                    'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
            raise exception.InvalidVolume(reason=msg)
        remove_volumes_ref.append(remove_vol_ref)

    self._notify_about_consistencygroup_usage(
        context, group, "update.start")

    try:
        utils.require_driver_initialized(self.driver)

        model_update, add_volumes_update, remove_volumes_update = (
            self.driver.update_consistencygroup(
                context, group,
                add_volumes=add_volumes_ref,
                remove_volumes=remove_volumes_ref))

        if add_volumes_update:
            for update in add_volumes_update:
                self.db.volume_update(context, update['id'], update)

        if remove_volumes_update:
            for update in remove_volumes_update:
                self.db.volume_update(context, update['id'], update)

        if model_update:
            if model_update['status'] in (
                    [fields.ConsistencyGroupStatus.ERROR]):
                msg = (_('Error occurred when updating consistency group '
                         '%s.') % group.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            group.update(model_update)
            group.save()

    except exception.VolumeDriverException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred in the volume driver when "
                          "updating consistency group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                self.db.volume_update(context, add_vol['id'],
                                      {'status': 'error'})
            for rem_vol in remove_volumes_ref:
                self.db.volume_update(context, rem_vol['id'],
                                      {'status': 'error'})
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred when updating consistency "
                          "group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                self.db.volume_update(context, add_vol['id'],
                                      {'status': 'error'})
            for rem_vol in remove_volumes_ref:
                self.db.volume_update(context, rem_vol['id'],
                                      {'status': 'error'})

    now = timeutils.utcnow()
    group.status = 'available'
    # BUGFIX: this previously assigned ``group.update_at`` (a typo),
    # silently leaving updated_at stale.
    group.updated_at = now
    group.save()
    for add_vol in add_volumes_ref:
        self.db.volume_update(context, add_vol['id'],
                              {'consistencygroup_id': group.id,
                               'updated_at': now})
    for rem_vol in remove_volumes_ref:
        self.db.volume_update(context, rem_vol['id'],
                              {'consistencygroup_id': None,
                               'updated_at': now})

    self._notify_about_consistencygroup_usage(
        context, group, "update.end")
    LOG.info(_LI("Update consistency group "
                 "completed successfully."),
             resource={'type': 'consistency_group',
                       'id': group.id})
def update_group(self, context, group,
                 add_volumes=None, remove_volumes=None):
    """Updates group.

    Update group by adding volumes to the group,
    or removing volumes from the group.

    :param add_volumes: comma-separated string of volume ids to add.
    :param remove_volumes: comma-separated string of volume ids to
                           remove.
    """
    add_volumes_ref = []
    remove_volumes_ref = []
    add_volumes_list = []
    remove_volumes_list = []
    if add_volumes:
        add_volumes_list = add_volumes.split(',')
    if remove_volumes:
        remove_volumes_list = remove_volumes.split(',')

    # Validate every volume to add before touching the driver.
    for add_vol in add_volumes_list:
        try:
            add_vol_ref = objects.Volume.get_by_id(context, add_vol)
        except exception.VolumeNotFound:
            # BUGFIX: log the requested id (add_vol); add_vol_ref is
            # unbound here because the lookup raised.
            LOG.error(_LE("Update group "
                          "failed to add volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': add_vol},
                      resource={'type': 'group',
                                'id': group.id})
            raise
        if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
            msg = (_("Cannot add volume %(volume_id)s to "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': add_vol_ref.id,
                    'group_id': group.id,
                    'status': add_vol_ref.status,
                    'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)
        self._check_is_our_resource(add_vol_ref)
        add_volumes_ref.append(add_vol_ref)

    # Validate every volume to remove.
    for remove_vol in remove_volumes_list:
        try:
            remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
        except exception.VolumeNotFound:
            # BUGFIX: log the requested id (remove_vol); remove_vol_ref
            # is unbound (or stale) here because the lookup raised.
            LOG.error(_LE("Update group "
                          "failed to remove volume-%(volume_id)s: "
                          "VolumeNotFound."),
                      {'volume_id': remove_vol},
                      resource={'type': 'group',
                                'id': group.id})
            raise
        if (remove_vol_ref.status not in
                VALID_REMOVE_VOL_FROM_GROUP_STATUS):
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "group %(group_id)s because volume is in an invalid "
                     "state: %(status)s. Valid states are: %(valid)s.") %
                   {'volume_id': remove_vol_ref.id,
                    'group_id': group.id,
                    'status': remove_vol_ref.status,
                    'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)
        remove_volumes_ref.append(remove_vol_ref)

    self._notify_about_group_usage(
        context, group, "update.start")

    try:
        utils.require_driver_initialized(self.driver)

        try:
            model_update, add_volumes_update, remove_volumes_update = (
                self.driver.update_group(
                    context, group,
                    add_volumes=add_volumes_ref,
                    remove_volumes=remove_volumes_ref))
        except NotImplementedError:
            cgsnap_type = group_types.get_default_cgsnapshot_type()
            if group.group_type_id != cgsnap_type['id']:
                model_update, add_volumes_update, remove_volumes_update = (
                    self._update_group_generic(
                        context, group,
                        add_volumes=add_volumes_ref,
                        remove_volumes=remove_volumes_ref))
            else:
                cg, remove_volumes_ref = self._convert_group_to_cg(
                    group, remove_volumes_ref)
                # BUGFIX: the converted CG object must be handed to the
                # CG driver API; previously ``group`` was passed.
                model_update, add_volumes_update, remove_volumes_update = (
                    self.driver.update_consistencygroup(
                        context, cg,
                        add_volumes=add_volumes_ref,
                        remove_volumes=remove_volumes_ref))
                self._remove_consistencygroup_id_from_volumes(
                    remove_volumes_ref)

        if add_volumes_update:
            self.db.volumes_update(context, add_volumes_update)

        if remove_volumes_update:
            self.db.volumes_update(context, remove_volumes_update)

        if model_update:
            if model_update['status'] in (
                    [fields.GroupStatus.ERROR]):
                msg = (_('Error occurred when updating group '
                         '%s.') % group.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            group.update(model_update)
            group.save()

    except exception.VolumeDriverException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred in the volume driver when "
                          "updating group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                add_vol.status = 'error'
                add_vol.save()
            self._remove_consistencygroup_id_from_volumes(
                remove_volumes_ref)
            for rem_vol in remove_volumes_ref:
                rem_vol.status = 'error'
                rem_vol.save()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error occurred when updating "
                          "group %(group_id)s."),
                      {'group_id': group.id})
            group.status = 'error'
            group.save()
            for add_vol in add_volumes_ref:
                add_vol.status = 'error'
                add_vol.save()
            for rem_vol in remove_volumes_ref:
                rem_vol.status = 'error'
                rem_vol.save()

    group.status = 'available'
    group.save()
    for add_vol in add_volumes_ref:
        add_vol.group_id = group.id
        add_vol.save()
    for rem_vol in remove_volumes_ref:
        rem_vol.group_id = None
        rem_vol.save()

    self._notify_about_group_usage(
        context, group, "update.end")
    LOG.info(_LI("Update group completed successfully."),
             resource={'type': 'group',
                       'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
    """Creates the cgsnapshot.

    Drives the backend to snapshot every volume in the consistency
    group, mirrors the driver's model updates into the DB, copies
    glance metadata for bootable volumes, and finally marks the
    cgsnapshot 'available'. On driver failure the cgsnapshot (and,
    if the driver returned no per-snapshot updates, each snapshot)
    is marked 'error' and the exception is re-raised.
    """
    caller_context = context
    context = context.elevated()
    LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)

    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
        context, cgsnapshot.id)

    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "create.start")

    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
                  {'cgsnap_id': cgsnapshot.id})

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        cgsnapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context

        model_update, snapshots_model_update = (
            self.driver.create_cgsnapshot(context, cgsnapshot,
                                          snapshots))

        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # Update db for snapshot.
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                self.db.snapshot_update(context,
                                        snap_model['id'],
                                        snap_model)

                # Any failed snapshot drags the cgsnapshot status down
                # to the snapshot's error status.
                if (snap_model['status'] in [
                    fields.SnapshotStatus.ERROR_DELETING,
                    fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']

        if model_update:
            if model_update['status'] == 'error':
                msg = (_('Error occurred when creating cgsnapshot '
                         '%s.') % cgsnapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)

            cgsnapshot.update(model_update)
            cgsnapshot.save()

    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            cgsnapshot.status = 'error'
            cgsnapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

    # Post-process each snapshot: copy image metadata for bootable
    # volumes, then flip the snapshot to available/100%.
    for snapshot in snapshots:
        volume_id = snapshot['volume_id']
        snapshot_id = snapshot['id']
        vol_ref = self.db.volume_get(context, volume_id)
        if vol_ref.bootable:
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot_id, volume_id)
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
            except exception.CinderException as ex:
                LOG.error(_LE("Failed updating %(snapshot_id)s"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata"),
                          {'volume_id': volume_id,
                           'snapshot_id': snapshot_id})
                # TODO(thangp): Switch over to use snapshot.update()
                # after cgsnapshot-objects bugs are fixed
                self.db.snapshot_update(
                    context, snapshot_id, {
                        'status': fields.SnapshotStatus.ERROR})
                raise exception.MetadataCopyFailure(
                    reason=six.text_type(ex))

        self.db.snapshot_update(context,
                                snapshot['id'],
                                {'status': fields.SnapshotStatus.AVAILABLE,
                                 'progress': '100%'})

    cgsnapshot.status = 'available'
    cgsnapshot.save()

    LOG.info(_LI("cgsnapshot %s: created successfully"),
             cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "create.end")
    return cgsnapshot
def create_group_snapshot(self, context, group_snapshot):
    """Creates the group_snapshot.

    Drives the backend to snapshot every volume in the group, falling
    back to the generic implementation or — for groups of the reserved
    cgsnapshot type — to the driver's CG API when ``create_group_snapshot``
    is not implemented. Mirrors driver model updates into the snapshot
    objects, copies glance metadata for bootable volumes, then marks
    the group snapshot 'available'.
    """
    caller_context = context
    context = context.elevated()
    LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)

    snapshots = objects.SnapshotList.get_all_for_group_snapshot(
        context, group_snapshot.id)

    self._notify_about_group_snapshot_usage(
        context, group_snapshot, "create.start")

    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
                  {'grp_snap_id': group_snapshot.id})

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        group_snapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context

        try:
            model_update, snapshots_model_update = (
                self.driver.create_group_snapshot(context, group_snapshot,
                                                  snapshots))
        except NotImplementedError:
            # Fall back to the generic path, or present the group
            # snapshot as a cgsnapshot for CG-only drivers.
            cgsnap_type = group_types.get_default_cgsnapshot_type()
            if group_snapshot.group_type_id != cgsnap_type['id']:
                model_update, snapshots_model_update = (
                    self._create_group_snapshot_generic(
                        context, group_snapshot, snapshots))
            else:
                cgsnapshot, snapshots = (
                    self._convert_group_snapshot_to_cgsnapshot(
                        group_snapshot, snapshots, context))
                model_update, snapshots_model_update = (
                    self.driver.create_cgsnapshot(context, cgsnapshot,
                                                  snapshots))
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # Update db for snapshot.
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                snap_id = snap_model.pop('id')
                snap_obj = objects.Snapshot.get_by_id(context, snap_id)
                snap_obj.update(snap_model)
                snap_obj.save()
                # Any failed snapshot drags the group snapshot status
                # down to the snapshot's error status.
                if (snap_model['status'] in [
                    fields.SnapshotStatus.ERROR_DELETING,
                    fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']

        if model_update:
            if model_update['status'] == 'error':
                msg = (_('Error occurred when creating group_snapshot '
                         '%s.') % group_snapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)

            group_snapshot.update(model_update)
            group_snapshot.save()

    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            group_snapshot.status = 'error'
            group_snapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            self._remove_cgsnapshot_id_from_snapshots(snapshots)
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

    # Post-process each snapshot: copy image metadata for bootable
    # volumes, then flip the snapshot to available/100%.
    for snapshot in snapshots:
        volume_id = snapshot.volume_id
        snapshot_id = snapshot.id
        vol_obj = objects.Volume.get_by_id(context, volume_id)
        if vol_obj.bootable:
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot_id, volume_id)
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
            except exception.CinderException as ex:
                LOG.error(_LE("Failed updating %(snapshot_id)s"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata"),
                          {'volume_id': volume_id,
                           'snapshot_id': snapshot_id})
                snapshot.status = fields.SnapshotStatus.ERROR
                snapshot.save()
                raise exception.MetadataCopyFailure(
                    reason=six.text_type(ex))

        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.progress = '100%'
        snapshot.save()

    group_snapshot.status = 'available'
    group_snapshot.save()

    LOG.info(_LI("group_snapshot %s: created successfully"),
             group_snapshot.id)
    self._notify_about_group_snapshot_usage(
        context, group_snapshot, "create.end")
    return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.create_snapshot(snapshot)
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = 'deleted'
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot):
    """Deletes cgsnapshot.

    Drives the backend to delete every snapshot in the cgsnapshot,
    mirrors the driver's per-snapshot model updates onto the snapshot
    objects, releases quota for each deleted snapshot, destroys the
    DB rows, and finally destroys the cgsnapshot itself.
    """
    caller_context = context
    context = context.elevated()
    # Quotas are released against the cgsnapshot owner's project.
    project_id = cgsnapshot.project_id

    LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)

    snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
        context, cgsnapshot.id)

    self._notify_about_cgsnapshot_usage(
        context, cgsnapshot, "delete.start")

    snapshots_model_update = None
    model_update = None
    try:
        utils.require_driver_initialized(self.driver)

        LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
                  {'cgsnap_id': cgsnapshot.id})

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        cgsnapshot.context = caller_context
        for snapshot in snapshots:
            snapshot.context = caller_context

        model_update, snapshots_model_update = (
            self.driver.delete_cgsnapshot(context, cgsnapshot,
                                          snapshots))

        if snapshots_model_update:
            for snap_model in snapshots_model_update:
                # NOTE(xyang): snapshots is a list of snapshot objects.
                # snapshots_model_update should be a list of dicts.
                # Match the driver's update dict back to its snapshot
                # object by id before applying the status.
                snap = next((item for item in snapshots if
                             item.id == snap_model['id']), None)
                if snap:
                    snap.status = snap_model['status']
                    snap.save()

                # Any failed snapshot delete drags the cgsnapshot
                # status down to the snapshot's error status.
                if (snap_model['status'] in
                        [fields.SnapshotStatus.ERROR_DELETING,
                         fields.SnapshotStatus.ERROR] and
                        model_update['status'] not in
                        ['error_deleting', 'error']):
                    model_update['status'] = snap_model['status']

        if model_update:
            if model_update['status'] in ['error_deleting', 'error']:
                msg = (_('Error occurred when deleting cgsnapshot '
                         '%s.') % cgsnapshot.id)
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
            else:
                cgsnapshot.update(model_update)
                cgsnapshot.save()

    except exception.CinderException:
        with excutils.save_and_reraise_exception():
            cgsnapshot.status = 'error'
            cgsnapshot.save()
            # Update snapshot status to 'error' if driver returns
            # None for snapshots_model_update.
            if not snapshots_model_update:
                for snapshot in snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

    # Release quota and destroy the DB row for each snapshot.
    for snapshot in snapshots:
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)

        except Exception:
            # Best-effort: quota failure must not block the delete.
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting snapshot"))

        self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                          snapshot['id'])

        # TODO(thangp): Switch over to use snapshot.destroy()
        # after cgsnapshot-objects bugs are fixed
        self.db.snapshot_destroy(context, snapshot['id'])

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

    cgsnapshot.destroy()
    LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
    self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
                                        snapshots)
    def delete_group_snapshot(self, context, group_snapshot):
        """Deletes group_snapshot.

        Deletes every snapshot belonging to ``group_snapshot`` on the
        backend, reconciles the driver-reported model updates, releases the
        quota reservations for each snapshot, and finally destroys the
        ``group_snapshot`` object itself.

        :param context: security context of the caller
        :param group_snapshot: GroupSnapshot object to delete
        :raises VolumeDriverException: if the driver reports an error status
        """
        caller_context = context
        # Elevate for DB operations, but keep the caller's context around so
        # drivers that want it can still use it (set on the objects below).
        context = context.elevated()
        project_id = group_snapshot.project_id
        LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "delete.start")
        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
                      {'grp_snap_id': group_snapshot.id})
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context
            try:
                model_update, snapshots_model_update = (
                    self.driver.delete_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                # Driver only implements the legacy cgsnapshot interface.
                cgsnap_type = group_types.get_default_cgsnapshot_type()
                if group_snapshot.group_type_id != cgsnap_type['id']:
                    # Not the consistency-group type: fall back to the
                    # generic per-snapshot implementation.
                    model_update, snapshots_model_update = (
                        self._delete_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    # Convert to a cgsnapshot and use the legacy driver call.
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.delete_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap = next((item for item in snapshots if
                                 item.id == snap_model['id']), None)
                    if snap:
                        # 'id' is only the lookup key, not a field to persist.
                        snap_model.pop('id')
                        snap.update(snap_model)
                        snap.save()
                    # Propagate an error status from any snapshot up to the
                    # group-level model update so the failure is not lost.
                    # NOTE(review): if a driver returns snapshot updates but
                    # a None model_update, the subscript below raises
                    # TypeError -- confirm drivers always return both.
                    if (snap_model['status'] in
                            [fields.SnapshotStatus.ERROR_DELETING,
                             fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = snap_model['status']
            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Error occurred when deleting group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group_snapshot.update(model_update)
                    group_snapshot.save()
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = 'error'
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                if not snapshots_model_update:
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()
        for snapshot in snapshots:
            # Get reservations
            try:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': -1}
                else:
                    reserve_opts = {
                        'snapshots': -1,
                        'gigabytes': -snapshot.volume_size,
                    }
                volume_ref = objects.Volume.get_by_id(context,
                                                      snapshot.volume_id)
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                # Best effort: a quota sync failure must not block deletion.
                reservations = None
                LOG.exception(_LE("Failed to update usages deleting snapshot"))
            self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                              snapshot.id)
            snapshot.destroy()
            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
        group_snapshot.destroy()
        LOG.info(_LI("group_snapshot %s: deleted successfully"),
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(context, group_snapshot,
                                                "delete.end",
                                                snapshots)
    def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
        """Finalize migration process on backend device.

        Asks the driver to finish the migration; if the driver does not
        implement ``update_migrated_volume``, only ``_name_id`` and
        ``provider_location`` are taken from ``new_volume``.  The source
        volume record keeps the migrated values while the temporary (new)
        volume record receives the source volume's original values, so the
        temporary record can be safely removed afterwards.

        :param ctxt: security context
        :param volume: the source volume being migrated
        :param new_volume: the temporary volume created on the destination
        :param volume_status: original status of the source volume
        """
        model_update = None
        model_update_default = {'_name_id': new_volume.name_id,
                                'provider_location':
                                new_volume.provider_location}
        try:
            model_update = self.driver.update_migrated_volume(ctxt,
                                                              volume,
                                                              new_volume,
                                                              volume_status)
        except NotImplementedError:
            # If update_migrated_volume is not implemented for the driver,
            # _name_id and provider_location will be set with the values
            # from new_volume.
            model_update = model_update_default
        if model_update:
            model_update_default.update(model_update)
            # Swap keys that were changed in the source so we keep their values
            # in the temporary volume's DB record.
            # Need to convert 'metadata' and 'admin_metadata' since
            # they are not keys of volume, their corresponding keys are
            # 'volume_metadata' and 'volume_admin_metadata'.
            model_update_new = dict()
            for key in model_update:
                if key == 'metadata':
                    if volume.get('volume_metadata'):
                        model_update_new[key] = {
                            metadata['key']: metadata['value']
                            for metadata in volume.volume_metadata}
                elif key == 'admin_metadata':
                    model_update_new[key] = {
                        metadata['key']: metadata['value']
                        for metadata in volume.volume_admin_metadata}
                else:
                    model_update_new[key] = volume[key]
            # Admin context is required to update the temporary volume,
            # which may belong to a different project in the DB.
            with new_volume.obj_as_admin():
                new_volume.update(model_update_new)
                new_volume.save()
        with volume.obj_as_admin():
            volume.update(model_update_default)
            volume.save()
    # Replication V2.1 and a/a method
    def failover(self, context, secondary_backend_id=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication capable/configured backend to failover
        to one of it's secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on it's
        own. All of the hosts volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.

        :param context: security context
        :param secondary_backend_id: Specifies backend_id to fail over to
        """
        updates = {}
        repl_status = fields.ReplicationStatus
        svc_host = vol_utils.extract_host(self.host, 'backend')
        service = objects.Service.get_by_args(context, svc_host,
                                              constants.VOLUME_BINARY)
        volumes = self._get_my_volumes(context)
        # Assume failure until the driver call returns successfully.
        exception_encountered = True
        try:
            # For non clustered we can call v2.1 failover_host, but for
            # clustered we call a/a failover method. We know a/a method
            # exists because BaseVD class wouldn't have started if it didn't.
            failover = getattr(self.driver,
                               'failover' if service.is_clustered
                               else 'failover_host')
            # expected form of volume_update_list:
            # [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
            #  {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
            active_backend_id, volume_update_list = failover(
                context,
                volumes,
                secondary_id=secondary_backend_id)
            exception_encountered = False
        except exception.UnableToFailOver:
            LOG.exception(_LE("Failed to perform replication failover"))
            updates['replication_status'] = repl_status.FAILOVER_ERROR
        except exception.InvalidReplicationTarget:
            LOG.exception(_LE("Invalid replication target specified "
                              "for failover"))
            # Preserve the replication_status: Status should be failed over if
            # we were failing back or if we were failing over from one
            # secondary to another secondary. In both cases active_backend_id
            # will be set.
            if service.active_backend_id:
                updates['replication_status'] = repl_status.FAILED_OVER
            else:
                updates['replication_status'] = repl_status.ENABLED
        except exception.VolumeDriverException:
            # NOTE(jdg): Drivers need to be aware if they fail during
            # a failover sequence, we're expecting them to cleanup
            # and make sure the driver state is such that the original
            # backend is still set as primary as per driver memory
            LOG.error(_LE("Driver reported error during "
                          "replication failover."))
            updates.update(disabled=True,
                           replication_status=repl_status.FAILOVER_ERROR)
        if exception_encountered:
            LOG.error(
                _LE("Error encountered during failover on host: "
                    "%(host)s invalid target ID %(backend_id)s"),
                {'host': self.host, 'backend_id':
                 secondary_backend_id})
            # Persist the error state and bail out without touching volumes.
            self.finish_failover(context, service, updates)
            return
        if secondary_backend_id == "default":
            # Failing back to the primary backend: re-enable the service
            # (unless it is frozen) and clear the active backend id.
            updates['replication_status'] = repl_status.ENABLED
            updates['active_backend_id'] = ''
            updates['disabled'] = service.frozen
            updates['disabled_reason'] = 'frozen' if service.frozen else ''
        else:
            # Failed over to a secondary: disable the service until failback.
            updates['replication_status'] = repl_status.FAILED_OVER
            updates['active_backend_id'] = active_backend_id
            updates['disabled'] = True
            updates['disabled_reason'] = 'failed-over'
        self.finish_failover(context, service, updates)
        for update in volume_update_list:
            # Response must include an id key: {volume_id: <cinder-uuid>}
            if not update.get('volume_id'):
                raise exception.UnableToFailOver(
                    reason=_("Update list, doesn't include volume_id"))
            # Key things to consider (attaching failed-over volumes):
            #  provider_location
            #  provider_auth
            #  provider_id
            #  replication_status
            vobj = objects.Volume.get_by_id(context, update['volume_id'])
            vobj.update(update.get('updates', {}))
            vobj.save()
        LOG.info(_LI("Failed over to replication target successfully."))

    # TODO(geguileo): In P - remove this
    failover_host = failover
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
# races with services that may be starting..
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
This method takes care of the last part and is called from the service
doing the failover of the volumes after finished processing the
volumes.
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to uninitialized driver."))
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to driver error."))
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup, want_objects=False):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
return (objects.BackupDeviceInfo.from_primitive(backup_device_dict,
ctxt)
if want_objects else backup_device_dict)
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
    def _connection_create(self, ctxt, volume, attachment, connector):
        """Create an export and initialize the connection for an attachment.

        Validates ``connector``, exports the volume, asks the driver for the
        connection info, flattens it, and records the connector details on
        the attachment record.

        :param ctxt: security context
        :param volume: Volume object being attached
        :param attachment: VolumeAttachment object being finalized
        :param connector: dict describing the host that will connect
        :returns: flattened connection_info dict (includes 'attachment_id')
        :raises InvalidInput: if the connector fails driver validation
        :raises VolumeBackendAPIException: on export/initialize failure
        :raises ExportFailure: if persisting the export model update fails
        """
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.error(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            model_update = self.driver.create_export(ctxt.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            err_msg = (_("Create export for volume failed (%s).") % ex.msg)
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            if model_update:
                volume.update(model_update)
                volume.save()
        except exception.CinderException as ex:
            LOG.exception(_LE("Model update failed."), resource=volume)
            raise exception.ExportFailure(reason=six.text_type(ex))
        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            # Undo the export created above before propagating the failure.
            self.driver.remove_export(ctxt.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        conn_info = self._parse_connection_options(ctxt, volume, conn_info)
        # NOTE(jdg): Get rid of the nested dict (data key)
        conn_data = conn_info.pop('data', {})
        connection_info = conn_data.copy()
        connection_info.update(conn_info)
        values = {'volume_id': volume.id,
                  'attach_status': 'attaching', }
        self.db.volume_attachment_update(ctxt, attachment.id, values)
        # Persist the connector so it is available again at detach time.
        self.db.attachment_specs_update_or_create(
            ctxt,
            attachment.id,
            connector)
        connection_info['attachment_id'] = attachment.id
        return connection_info
def attachment_update(self,
context,
vref,
connector,
attachment_id):
"""Update/Finalize an attachment.
This call updates a valid attachment record to associate with a volume
and provide the caller with the proper connection info. Note that
this call requires an `attachment_ref`. It's expected that prior to
this call that the volume and an attachment UUID has been reserved.
param: vref: Volume object to create attachment for
param: connector: Connector object to use for attachment creation
param: attachment_ref: ID of the attachment record to update
"""
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
# FIXME(jdg): get rid of this admin_meta option here, the only thing
# it does is enforce that a volume is R/O, that should be done via a
# type and not *more* metadata
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(),
attachment_ref.volume_id,
{'attached_mode': mode}, False)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, vref.id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=vref.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=vref.id)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'))
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'),
mode)
vref.refresh()
self._notify_about_volume_usage(context, vref, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=vref)
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
"""Remove a volume connection, but leave attachment."""
utils.require_driver_initialized(self.driver)
# TODO(jdg): Add an object method to cover this
connector = self.db.attachment_specs_get(
context,
attachment.id)
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
# NOTE(jdg): Return True/False if there are other outstanding
# attachments that share this connection. If True should signify
# caller to preserve the actual host connection (work should be
# done in the brick connector as it has the knowledge of what's
# going on here.
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
param: vref: Volume object associated with the attachment
param: attachment: Attachment reference object to remove
NOTE if the attachment reference is None, we remove all existing
attachments for the specified volume object.
"""
has_shared_connection = False
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
if self._do_attachment_delete(context, vref, attachment):
has_shared_connection = True
else:
has_shared_connection = (
self._do_attachment_delete(context, vref, attachment_ref))
return has_shared_connection
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
self.driver.detach_volume(context, vref, attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
self.driver.remove_export(context.elevated(), vref)
except Exception:
# FIXME(jdg): Obviously our volume object is going to need some
# changes to deal with multi-attach and figuring out how to
# represent a single failed attach out of multiple attachments
# TODO(jdg): object method here
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
return has_shared_connection
    def is_volume_trusted(self, ctxt, volume_id):
        """Check whether a volume lives on a trusted host.

        Scans the volume's metadata: a 'trust' key triggers verification of
        the volume's backend host against the asset tag filter; an
        'asset_tags' key supplies the tags used in that verification.

        :param ctxt: security context
        :param volume_id: ID of the volume to check
        :returns: the asset-tag filter's trust verdict, or None when the
                  volume has no 'trust' metadata key
        """
        volume = self.db.api.volume_get(ctxt, volume_id)
        verify_trust = False
        asset_tags = 'None'
        host = ''
        for metadata in volume.volume_metadata:
            if metadata.key == 'trust':
                # Only the backend portion of 'host@backend#pool' is checked.
                host = volume.host.split("@")[0]
                verify_trust = True
            elif metadata.key == 'asset_tags':
                asset_tags = metadata.value
        if verify_trust:
            return self.asset_tag_filter.is_trusted(host, asset_tags)
return None | ge0rgi/cinder | cinder/volume/manager.py | Python | apache-2.0 | 211,218 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_26) on Mon May 07 13:00:01 PDT 2012 -->
<TITLE>
Uses of Class org.apache.hadoop.mapred.join.JoinRecordReader (Hadoop 0.20.2-cdh3u4 API)
</TITLE>
<META NAME="date" CONTENT="2012-05-07">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.mapred.join.JoinRecordReader (Hadoop 0.20.2-cdh3u4 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../org/apache/hadoop/mapred/join/JoinRecordReader.html" title="class in org.apache.hadoop.mapred.join"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?org/apache/hadoop/mapred/join//class-useJoinRecordReader.html" target="_top"><B>FRAMES</B></A>
<A HREF="JoinRecordReader.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.mapred.join.JoinRecordReader</B></H2>
</CENTER>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Packages that use <A HREF="../../../../../../org/apache/hadoop/mapred/join/JoinRecordReader.html" title="class in org.apache.hadoop.mapred.join">JoinRecordReader</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#org.apache.hadoop.mapred.join"><B>org.apache.hadoop.mapred.join</B></A></TD>
<TD>Given a set of sorted datasets keyed with the same class and yielding equal
partitions, it is possible to effect a join of those datasets prior to the map. </TD>
</TR>
</TABLE>
<P>
<A NAME="org.apache.hadoop.mapred.join"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../../org/apache/hadoop/mapred/join/JoinRecordReader.html" title="class in org.apache.hadoop.mapred.join">JoinRecordReader</A> in <A HREF="../../../../../../org/apache/hadoop/mapred/join/package-summary.html">org.apache.hadoop.mapred.join</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Subclasses of <A HREF="../../../../../../org/apache/hadoop/mapred/join/JoinRecordReader.html" title="class in org.apache.hadoop.mapred.join">JoinRecordReader</A> in <A HREF="../../../../../../org/apache/hadoop/mapred/join/package-summary.html">org.apache.hadoop.mapred.join</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> class</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../org/apache/hadoop/mapred/join/InnerJoinRecordReader.html" title="class in org.apache.hadoop.mapred.join">InnerJoinRecordReader<K extends WritableComparable></A></B></CODE>
<BR>
Full inner join.</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> class</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../../org/apache/hadoop/mapred/join/OuterJoinRecordReader.html" title="class in org.apache.hadoop.mapred.join">OuterJoinRecordReader<K extends WritableComparable></A></B></CODE>
<BR>
Full outer join.</TD>
</TR>
</TABLE>
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../org/apache/hadoop/mapred/join/JoinRecordReader.html" title="class in org.apache.hadoop.mapred.join"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?org/apache/hadoop/mapred/join//class-useJoinRecordReader.html" target="_top"><B>FRAMES</B></A>
<A HREF="JoinRecordReader.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2009 The Apache Software Foundation
</BODY>
</HTML>
| Shmuma/hadoop | docs/api/org/apache/hadoop/mapred/join/class-use/JoinRecordReader.html | HTML | apache-2.0 | 8,672 |
/*
* Copyright 2012 International Business Machines Corp.
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javax.batch.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marker annotation (no elements) that may be applied to methods only and is
 * retained at runtime.
 *
 * <p>Part of the {@code javax.batch.annotation} package (JSR-352 batch
 * processing); per its name it presumably identifies the batch-artifact
 * callback invoked when an error occurs while processing an item — confirm
 * against the JSR-352 specification.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface OnProcessError {
}
| mminella/jsr-352-ri-tck | JSR352.Annotations/src/javax/batch/annotation/OnProcessError.java | Java | apache-2.0 | 1,055 |
# Release notes
### 2.17.0 (2022-02-24)
* Core library:
* Sleep and retry when creating a `MediaCodec` instance fails. This works
around an issue that occurs on some devices when switching a surface
from a secure codec to another codec
([#8696](https://github.com/google/ExoPlayer/issues/8696)).
*   Add `MediaCodecAdapter.getMetrics()` to allow users to obtain metrics
    data from `MediaCodec`.
([#9766](https://github.com/google/ExoPlayer/issues/9766)).
* Fix Maven dependency resolution
([#8353](https://github.com/google/ExoPlayer/issues/8353)).
* Disable automatic speed adjustment for live streams that neither have
low-latency features nor a user request setting the speed
([#9329](https://github.com/google/ExoPlayer/issues/9329)).
* Rename `DecoderCounters#inputBufferCount` to `queuedInputBufferCount`.
* Make `SimpleExoPlayer.renderers` private. Renderers can be accessed via
`ExoPlayer.getRenderer`.
* Updated some `AnalyticsListener.EventFlags` constant values to match
values in `Player.EventFlags`.
* Split `AnalyticsCollector` into an interface and default implementation
to allow it to be stripped by R8 if an app doesn't need it.
* Track selection:
* Support preferred video role flags in track selection
([#9402](https://github.com/google/ExoPlayer/issues/9402)).
* Update video track selection logic to take preferred MIME types and role
flags into account when selecting multiple video tracks for adaptation
([#9519](https://github.com/google/ExoPlayer/issues/9519)).
* Update video and audio track selection logic to only choose formats for
adaptive selections that have the same level of decoder and hardware
support ([#9565](https://github.com/google/ExoPlayer/issues/9565)).
* Update video track selection logic to prefer more efficient codecs if
multiple codecs are supported by primary, hardware-accelerated decoders
([#4835](https://github.com/google/ExoPlayer/issues/4835)).
* Prefer audio content preferences (for example, the "default" audio track
or a track matching the system locale language) over technical track
selection constraints (for example, preferred MIME type, or maximum
channel count).
* Prohibit duplicate `TrackGroup`s in a `TrackGroupArray`. `TrackGroup`s
can always be made distinguishable by setting an `id` in the
`TrackGroup` constructor. This fixes a crash when resuming playback
after backgrounding the app with an active track override
([#9718](https://github.com/google/ExoPlayer/issues/9718)).
* Amend logic in `AdaptiveTrackSelection` to allow a quality increase
under sufficient network bandwidth even if playback is very close to the
live edge ([#9784](https://github.com/google/ExoPlayer/issues/9784)).
* Video:
* Fix decoder fallback logic for Dolby Vision to use a compatible
H264/H265 decoder if needed.
* Audio:
* Fix decoder fallback logic for Dolby Atmos (E-AC3-JOC) to use a
compatible E-AC3 decoder if needed.
* Change `AudioCapabilities` APIs to require passing explicitly
`AudioCapabilities.DEFAULT_AUDIO_CAPABILITIES` instead of `null`.
* Allow customization of the `AudioTrack` buffer size calculation by
injecting an `AudioTrackBufferSizeProvider` to `DefaultAudioSink`.
([#8891](https://github.com/google/ExoPlayer/issues/8891)).
* Retry `AudioTrack` creation if the requested buffer size was > 1MB.
([#9712](https://github.com/google/ExoPlayer/issues/9712)).
* Extractors:
* Fix incorrect parsing of H.265 SPS NAL units
([#9719](https://github.com/google/ExoPlayer/issues/9719)).
* Parse Vorbis Comments (including `METADATA_BLOCK_PICTURE`) in Ogg Opus
and Ogg Vorbis files.
* Text:
* Add a `MediaItem.SubtitleConfiguration.id` field which is propagated to
the `Format.id` field of the subtitle track created from the
configuration
([#9673](https://github.com/google/ExoPlayer/issues/9673)).
* Add basic support for WebVTT subtitles in Matroska containers
([#9886](https://github.com/google/ExoPlayer/issues/9886)).
* Prevent `Cea708Decoder` from reading more than the declared size of a
service block.
* DRM:
* Remove `playbackLooper` from `DrmSessionManager.(pre)acquireSession`.
When a `DrmSessionManager` is used by an app in a custom `MediaSource`,
the `playbackLooper` needs to be passed to `DrmSessionManager.setPlayer`
instead.
* Ad playback / IMA:
* Add support for
[IMA Dynamic Ad Insertion (DAI)](https://support.google.com/admanager/answer/6147120)
([#8213](https://github.com/google/ExoPlayer/issues/8213)).
* Add a method to `AdPlaybackState` to allow resetting an ad group so that
it can be played again
([#9615](https://github.com/google/ExoPlayer/issues/9615)).
* Enforce playback speed of 1.0 during ad playback
([#9018](https://github.com/google/ExoPlayer/issues/9018)).
* Fix issue where an ad group that failed to load caused an immediate
playback reset
([#9929](https://github.com/google/ExoPlayer/issues/9929)).
* UI:
* Fix the color of the numbers in `StyledPlayerView` rewind and
fastforward buttons when using certain themes
([#9765](https://github.com/google/ExoPlayer/issues/9765)).
* Correctly translate playback speed strings
([#9811](https://github.com/google/ExoPlayer/issues/9811)).
* DASH:
* Support the `forced-subtitle` track role
([#9727](https://github.com/google/ExoPlayer/issues/9727)).
* Stop interpreting the `main` track role as `C.SELECTION_FLAG_DEFAULT`.
* Fix base URL exclusion logic for manifests that do not declare the DVB
namespace ([#9856](https://github.com/google/ExoPlayer/issues/9856)).
* Support relative `MPD.Location` URLs
([#9939](https://github.com/google/ExoPlayer/issues/9939)).
* HLS:
* Use chunkless preparation by default to improve start up time. If your
renditions contain muxed closed-caption tracks that are **not** declared
in the master playlist, you should add them to the master playlist to be
available for playback, or turn off chunkless preparation with
`HlsMediaSource.Factory.setAllowChunklessPreparation(false)`.
* Support key-frame accurate seeking in HLS
([#2882](https://github.com/google/ExoPlayer/issues/2882)).
* Correctly populate `Format.label` for audio only HLS streams
([#9608](https://github.com/google/ExoPlayer/issues/9608)).
* RTSP:
* Provide a client API to override the `SocketFactory` used for any server
connection ([#9606](https://github.com/google/ExoPlayer/pull/9606)).
* Prefer DIGEST authentication method over BASIC if both are present
([#9800](https://github.com/google/ExoPlayer/issues/9800)).
* Handle when RTSP track timing is not available
([#9775](https://github.com/google/ExoPlayer/issues/9775)).
* Ignore invalid RTP-Info header values
([#9619](https://github.com/google/ExoPlayer/issues/9619)).
* Transformer:
* Increase required min API version to 21.
* `TransformationException` is now used to describe errors that occur
during a transformation.
* Add `TransformationRequest` for specifying the transformation options.
* Allow multiple listeners to be registered.
* Fix Transformer being stuck when the codec output is partially read.
* Fix potential NPE in `Transformer.getProgress` when releasing the muxer
throws.
* Add a demo app for applying transformations.
* The transformer module is no longer included by depending on
`com.google.android.exoplayer:exoplayer`. To continue using transformer,
add an additional dependency on
`com.google.android.exoplayer:exoplayer-transformer`.
* MediaSession extension:
* By default, `MediaSessionConnector` now clears the playlist on stop.
Apps that want the playlist to be retained can call
`setClearMediaItemsOnStop(false)` on the connector.
* Cast extension:
* Fix bug that prevented `CastPlayer` from calling `onIsPlayingChanged`
correctly ([#9792](https://github.com/google/ExoPlayer/issues/9792)).
* Support audio metadata including artwork with
`DefaultMediaItemConverter`
([#9663](https://github.com/google/ExoPlayer/issues/9663)).
* FFmpeg extension:
* Make `build_ffmpeg.sh` depend on LLVM's bin utils instead of GNU's
([#9933](https://github.com/google/ExoPlayer/issues/9933)).
* Android 12 compatibility:
* Upgrade the Cast extension to depend on
`com.google.android.gms:play-services-cast-framework:20.1.0`. Earlier
versions of `play-services-cast-framework` are not compatible with apps
targeting Android 12, and will crash with an `IllegalArgumentException`
when creating `PendingIntent`s
([#9528](https://github.com/google/ExoPlayer/issues/9528)).
* Remove deprecated symbols:
  * Remove `Player.EventListener`. Use `Player.Listener` instead.
* Remove `MediaSourceFactory#setDrmSessionManager`,
`MediaSourceFactory#setDrmHttpDataSourceFactory`, and
`MediaSourceFactory#setDrmUserAgent`. Use
`MediaSourceFactory#setDrmSessionManagerProvider` instead.
* Remove `MediaSourceFactory#setStreamKeys`. Use
`MediaItem.Builder#setStreamKeys` instead.
* Remove `MediaSourceFactory#createMediaSource(Uri)`. Use
`MediaSourceFactory#createMediaSource(MediaItem)` instead.
* Remove `setTag` from `DashMediaSource`, `HlsMediaSource` and
`SsMediaSource`. Use `MediaItem.Builder#setTag` instead.
* Remove `DashMediaSource#setLivePresentationDelayMs(long, boolean)`. Use
`MediaItem.Builder#setLiveConfiguration` and
`MediaItem.LiveConfiguration.Builder#setTargetOffsetMs` to override the
manifest, or `DashMediaSource#setFallbackTargetLiveOffsetMs` to provide
a fallback value.
* Remove `(Simple)ExoPlayer.setThrowsWhenUsingWrongThread`. Opting out of
the thread enforcement is no longer possible.
* Remove `ActionFile` and `ActionFileUpgradeUtil`. Use ExoPlayer 2.16.1 or
before to use `ActionFileUpgradeUtil` to merge legacy action files into
`DefaultDownloadIndex`.
* Remove `ProgressiveMediaSource#setExtractorsFactory`. Use
`ProgressiveMediaSource.Factory(DataSource.Factory, ExtractorsFactory)`
constructor instead.
  * Remove `ProgressiveMediaSource.Factory#setTag` and
`ProgressiveMediaSource.Factory#setCustomCacheKey`. Use
`MediaItem.Builder#setTag` and `MediaItem.Builder#setCustomCacheKey`
instead.
* Remove `DefaultRenderersFactory(Context, @ExtensionRendererMode int)`
and `DefaultRenderersFactory(Context, @ExtensionRendererMode int, long)`
constructors. Use the `DefaultRenderersFactory(Context)` constructor,
`DefaultRenderersFactory#setExtensionRendererMode`, and
`DefaultRenderersFactory#setAllowedVideoJoiningTimeMs` instead.
* Remove all public `CronetDataSource` constructors. Use
`CronetDataSource.Factory` instead.
* Change the following `IntDefs` to `@Target(TYPE_USE)` only. This may break
the compilation of usages in Kotlin, which can be fixed by moving the
annotation to annotate the type (`Int`).
* `@AacAudioObjectType`
* `@Ac3Util.SyncFrameInfo.StreamType`
* `@AdLoadException.Type`
* `@AdtsExtractor.Flags`
* `@AmrExtractor.Flags`
* `@AspectRatioFrameLayout.ResizeMode`
* `@AudioFocusManager.PlayerCommand`
* `@AudioSink.SinkFormatSupport`
* `@BinarySearchSeeker.TimestampSearchResult.Type`
* `@BufferReplacementMode`
* `@C.BufferFlags`
* `@C.ColorRange`
* `@C.ColorSpace`
* `@C.ColorTransfer`
* `@C.CryptoMode`
* `@C.Encoding`
* `@C.PcmEncoding`
* `@C.Projection`
* `@C.SelectionReason`
* `@C.StereoMode`
* `@C.VideoOutputMode`
* `@CacheDataSource.Flags`
* `@CaptionStyleCompat.EdgeType`
* `@DataSpec.Flags`
* `@DataSpec.HttpMethods`
* `@DecoderDiscardReasons`
* `@DecoderReuseResult`
* `@DefaultAudioSink.OutputMode`
* `@DefaultDrmSessionManager.Mode`
* `@DefaultTrackSelector.SelectionEligibility`
* `@DefaultTsPayloadReaderFactory.Flags`
* `@EGLSurfaceTexture.SecureMode`
* `@EbmlProcessor.ElementType`
* `@ExoMediaDrm.KeyRequest.RequestType`
* `@ExtensionRendererMode`
* `@Extractor.ReadResult`
* `@FileTypes.Type`
* `@FlacExtractor.Flags` (in `com.google.android.exoplayer2.ext.flac`
package)
* `@FlacExtractor.Flags` (in
`com.google.android.exoplayer2.extractor.flac` package)
* `@FragmentedMp4Extractor.Flags`
* `@HlsMediaPlaylist.PlaylistType`
* `@HttpDataSourceException.Type`
* `@IllegalClippingException.Reason`
* `@IllegalMergeException.Reason`
* `@LoadErrorHandlingPolicy.FallbackType`
* `@MatroskaExtractor.Flags`
* `@Mp3Extractor.Flags`
* `@Mp4Extractor.Flags`
* `@NotificationUtil.Importance`
* `@PlaybackException.FieldNumber`
* `@PlayerNotificationManager.Priority`
* `@PlayerNotificationManager.Visibility`
* `@PlayerView.ShowBuffering`
* `@Renderer.State`
* `@RendererCapabilities.AdaptiveSupport`
* `@RendererCapabilities.Capabilities`
* `@RendererCapabilities.DecoderSupport`
* `@RendererCapabilities.FormatSupport`
* `@RendererCapabilities.HardwareAccelerationSupport`
* `@RendererCapabilities.TunnelingSupport`
* `@SampleStream.ReadDataResult`
* `@SampleStream.ReadFlags`
* `@StyledPlayerView.ShowBuffering`
* `@SubtitleView.ViewType`
* `@TextAnnotation.Position`
* `@TextEmphasisSpan.MarkFill`
* `@TextEmphasisSpan.MarkShape`
* `@Track.Transformation`
* `@TrackOutput.SampleDataPart`
* `@Transformer.ProgressState`
* `@TsExtractor.Mode`
* `@TsPayloadReader.Flags`
* `@WebvttCssStyle.FontSizeUnit`
### 2.16.1 (2021-11-18)
* Core Library:
* Fix track selection issue where overriding one track group did not
disable other track groups of the same type
([#9675](https://github.com/google/ExoPlayer/issues/9675)).
* Fix track selection issue where a mixture of non-empty and empty track
overrides is not applied correctly
([#9649](https://github.com/google/ExoPlayer/issues/9649)).
* Add protected method `DefaultRenderersFactory.getCodecAdapterFactory()`
so that subclasses of `DefaultRenderersFactory` that override
`buildVideoRenderers()` or `buildAudioRenderers()` can access the codec
adapter factory and pass it to `MediaCodecRenderer` instances they
create.
* Propagate ICY header fields `name` and `genre` to
`MediaMetadata.station` and `MediaMetadata.genre` respectively so that
they reach the app via `Player.Listener.onMediaMetadataChanged()`
([#9677](https://github.com/google/ExoPlayer/issues/9677)).
* Remove null keys from `DefaultHttpDataSource#getResponseHeaders`.
* Extractors:
* WAV: Add support for RF64 streams
([#9543](https://github.com/google/ExoPlayer/issues/9543)).
* DASH:
* Add parsed essential and supplemental properties to the `Representation`
([#9579](https://github.com/google/ExoPlayer/issues/9579)).
* HLS:
* Correctly populate `Format.label` for audio only HLS streams
([#9608](https://github.com/google/ExoPlayer/issues/9608)).
### 2.16.0 (2021-11-04)
* Core Library:
* Deprecate `SimpleExoPlayer`. All functionality has been moved to
`ExoPlayer` instead. `ExoPlayer.Builder` can be used instead of
`SimpleExoPlayer.Builder`.
* Add track selection methods to the `Player` interface, for example,
`Player.getCurrentTracksInfo` and `Player.setTrackSelectionParameters`.
These methods can be used instead of directly accessing the track
selector.
* Enable MediaCodec asynchronous queueing by default on devices with API
level >= 31. Add methods in `DefaultMediaCodecRendererFactory` and
`DefaultRenderersFactory` to force enable or force disable asynchronous
queueing ([6348](https://github.com/google/ExoPlayer/issues/6348)).
* Remove final dependency on `jcenter()`.
* Fix `mediaMetadata` being reset when media is repeated
([#9458](https://github.com/google/ExoPlayer/issues/9458)).
* Adjust `ExoPlayer` `MediaMetadata` update priority, such that values
input through the `MediaItem.MediaMetadata` are used above media derived
values.
* Move `com.google.android.exoplayer2.device.DeviceInfo` to
`com.google.android.exoplayer2.DeviceInfo`.
* Move `com.google.android.exoplayer2.drm.DecryptionException` to
`com.google.android.exoplayer2.decoder.CryptoException`.
* Move `com.google.android.exoplayer2.upstream.cache.CachedRegionTracker`
to `com.google.android.exoplayer2.upstream.CachedRegionTracker`.
* Move `Player.addListener(EventListener)` and
`Player.removeListener(EventListener)` out of `Player` into subclasses.
* Android 12 compatibility:
* Keep `DownloadService` started and in the foreground whilst waiting for
requirements to be met on Android 12. This is necessary due to new
[foreground service launch restrictions](https://developer.android.com/about/versions/12/foreground-services).
`DownloadService.getScheduler` will not be called on Android 12 devices.
* Disable platform transcoding when playing content URIs on Android 12.
* Add `ExoPlayer.setVideoChangeFrameRateStrategy` to allow disabling of
calls from the player to `Surface.setFrameRate`. This is useful for
applications wanting to call `Surface.setFrameRate` directly from
application code with Android 12's `Surface.CHANGE_FRAME_RATE_ALWAYS`.
* Upgrade the WorkManager extension to depend on
`androidx.work:work-runtime:2.7.0`. Earlier versions of `work-runtime`
are not compatible with apps targeting Android 12, and will crash with
an `IllegalArgumentException` when creating `PendingIntent`s
([#9181](https://github.com/google/ExoPlayer/issues/9181)).
* Video:
* Fix bug in `MediaCodecVideoRenderer` that resulted in re-using a
released `Surface` when playing without an app-provided `Surface`
([#9476](https://github.com/google/ExoPlayer/issues/9476)).
* DRM:
* Log an error (instead of throwing `IllegalStateException`) when calling
`DefaultDrmSession#release()` on a fully released session
([#9392](https://github.com/google/ExoPlayer/issues/9392)).
* UI:
* `SubtitleView` no longer implements `TextOutput`. `SubtitleView`
implements `Player.Listener`, so can be registered to a player with
`Player.addListener`.
* Fix initial timestamp display in `PlayerControlView`
    ([#9524](https://github.com/google/ExoPlayer/issues/9524)).
* Fix capitalization of languages in the track selector
([#9452](https://github.com/google/ExoPlayer/issues/9452)).
* Extractors:
* MP4: Correctly handle HEVC tracks with pixel aspect ratios other than 1.
* MP4: Add support for Dolby TrueHD (only for unfragmented streams)
([#9496](https://github.com/google/ExoPlayer/issues/9496)).
* MP4: Avoid throwing `ArrayIndexOutOfBoundsException` when parsing
invalid `colr` boxes produced by some device cameras
([#9332](https://github.com/google/ExoPlayer/issues/9332)).
* MP4: Parse HDR static metadata from the `clli` and `mdcv` boxes.
* TS: Correctly handle HEVC tracks with pixel aspect ratios other than 1.
* TS: Map stream type 0x80 to H262
([#9472](https://github.com/google/ExoPlayer/issues/9472)).
* Downloads and caching:
* Modify `DownloadService` behavior when `DownloadService.getScheduler`
returns `null`, or returns a `Scheduler` that does not support the
requirements for downloads to continue. In both cases, `DownloadService`
will now remain started and in the foreground whilst waiting for
requirements to be met.
* Modify `DownloadService` behavior when running on Android 12 and above.
See the "Android 12 compatibility" section above.
* RTSP:
* Support RFC4566 SDP attribute field grammar
([#9430](https://github.com/google/ExoPlayer/issues/9430)).
* DASH:
* Populate `Format.sampleMimeType`, `width` and `height` for image
`AdaptationSet` elements
([#9500](https://github.com/google/ExoPlayer/issues/9500)).
* HLS:
* Fix rounding error in HLS playlists
([#9575](https://github.com/google/ExoPlayer/issues/9575)).
* Fix `NoSuchElementException` thrown when an HLS manifest declares
`#EXT-X-RENDITION-REPORT` at the beginning of the playlist
([#9592](https://github.com/google/ExoPlayer/issues/9592)).
* RTMP extension:
* Upgrade to `io.antmedia:rtmp_client`, which does not rely on `jcenter()`
([#9591](https://github.com/google/ExoPlayer/issues/9591)).
* MediaSession extension:
* Rename
`MediaSessionConnector.QueueNavigator#onCurrentWindowIndexChanged` to
`onCurrentMediaItemIndexChanged`.
* Transformer:
* Avoid sending a duplicate timestamp to the encoder with the end of
stream buffer.
* Remove deprecated symbols:
* Remove `Renderer.VIDEO_SCALING_MODE_*` constants. Use identically named
constants in `C` instead.
* Remove `C.MSG_*` constants. Use identically named constants in
`Renderer` instead, except for `C.MSG_SET_SURFACE`, which is replaced
with `Renderer.MSG_SET_VIDEO_OUTPUT`.
* Remove `DeviceListener`. Use `Player.Listener` instead.
* Remove `CacheDataSourceFactory`. Use `CacheDataSource.Factory` instead.
* Remove `CacheDataSinkFactory`. Use `CacheDataSink.Factory` instead.
* Remove `FileDataSourceFactory`. Use `FileDataSource.Factory` instead.
* Remove `SimpleExoPlayer.addMetadataOutput` and `removeMetadataOutput`.
Use `Player.addListener` and `Player.Listener` instead.
* Remove `SimpleExoPlayer.addAudioListener`, `removeAudioListener` and
`AudioListener`. Use `Player.addListener` and `Player.Listener` instead.
* Remove `SimpleExoPlayer.addVideoListener`, `removeVideoListener` and
`VideoListener`. Use `Player.addListener` and `Player.Listener` instead.
* Remove `DefaultHttpDataSourceFactory`. Use
`DefaultHttpDataSource.Factory` instead.
* Remove `SingleSampleMediaSource.createMediaSource(Uri, Format, long)`.
Use `SingleSampleMediaSource.createMediaSource(MediaItem.Subtitle,
long)` instead.
* Remove `HttpDataSource.Factory.getDefaultRequestProperties`. Use
`HttpDataSource.Factory.setDefaultRequestProperties` instead.
* Remove `GvrAudioProcessor` and the GVR extension, which has been
deprecated since 2.11.0.
* Remove `DownloadService.onDownloadChanged` and
`DownloadService.onDownloadRemoved`. Instead, use
`DownloadManager.addListener` to register a listener directly to the
`DownloadManager` returned through `DownloadService.getDownloadManager`.
* Remove `Player.getCurrentStaticMetadata`,
`Player.Listener.onStaticMetadataChanged` and
`Player.EVENT_STATIC_METADATA_CHANGED`. Use `Player.getMediaMetadata`,
`Player.Listener.onMediaMetadataChanged` and
`Player.EVENT_MEDIA_METADATA_CHANGED` for convenient access to
structured metadata, or access the raw static metadata directly from the
`TrackSelection#getFormat()`.
* Remove `ControlDispatcher` and `DefaultControlDispatcher`. Operations
can be customized by using a `ForwardingPlayer`, or when configuring the
player (for example by using
`ExoPlayer.Builder.setSeekBackIncrementMs`).
### 2.15.1 (2021-09-20)
* Core Library:
* Fix track selection in `StyledPlayerControlView` when using
`ForwardingPlayer`.
* Fix `FlagSet#equals` on API levels below 24.
* Fix `NullPointerException` being thrown from `CacheDataSource` when
reading a fully cached resource with `DataSpec.position` equal to the
resource length.
* Fix a bug when [depending on ExoPlayer locally](README.md#locally) with
a relative path
([#9403](https://github.com/google/ExoPlayer/issues/9403)).
* Better handle invalid seek requests. Seeks to positions that are before
the start or after the end of the media are now handled as seeks to the
start and end respectively
([8906](https://github.com/google/ExoPlayer/issues/8906)).
* Rename `MimeTypes.AUDIO_DTS_UHD` to `MimeTypes.AUDIO_DTS_X` and add
required profile to its value
([#9429](https://github.com/google/ExoPlayer/issues/9429)).
* Extractors:
* Support TS packets without PTS flag
([#9294](https://github.com/google/ExoPlayer/issues/9294)).
* Fix issue decoding ID3 tags containing UTF-16 encoded strings
([#9087](https://github.com/google/ExoPlayer/issues/9087)).
* Video:
* Request smaller decoder input buffers for Dolby Vision. This fixes an
issue that could cause UHD Dolby Vision playbacks to fail on some
devices, including Amazon Fire TV 4K.
* DRM:
* Fix `DefaultDrmSessionManager` to correctly eagerly release preacquired
DRM sessions when there's a shortage of DRM resources on the device.
* Downloads and caching:
* Workaround platform issue that can cause a `SecurityException` to be
thrown from `Requirements.isInternetConnectivityValidated` on devices
running Android 11
([#9002](https://github.com/google/ExoPlayer/issues/9002)).
* DASH:
* Use identical cache keys for downloading and playing DASH segments
([#9370](https://github.com/google/ExoPlayer/issues/9370)).
* Fix base URL selection and load error handling when base URLs are shared
across adaptation sets.
* HLS:
* Fix bug where the player would get stuck if all download attempts fail
and would not raise an error to the application
([#9390](https://github.com/google/ExoPlayer/issues/9390)).
* RTSP:
  * Handle when additional spaces are in SDP's RTPMAP attribute
([#9379](https://github.com/google/ExoPlayer/issues/9379)).
* Handle partial URIs in RTP-Info headers
([#9346](https://github.com/google/ExoPlayer/issues/9346)).
* Fix RTSP Session header handling
([#9416](https://github.com/google/ExoPlayer/issues/9416)).
* Fix RTSP WWW-Authenticate header parsing
([#9428](https://github.com/google/ExoPlayer/issues/9428)).
* UI:
* Use `defStyleAttr` when obtaining styled attributes in
`StyledPlayerView`, `PlayerView` and `PlayerControlView`
([#9024](https://github.com/google/ExoPlayer/issues/9024)).
* Fix accessibility focus in `PlayerControlView`
([#9111](https://github.com/google/ExoPlayer/issues/9111)).
* Fix issue that `StyledPlayerView` and `PlayerView` don't update UI when
available player commands change.
* Cast extension:
* Implement `CastPlayer.setPlaybackParameters(PlaybackParameters)` to
support setting the playback speed
([#6784](https://github.com/google/ExoPlayer/issues/6784)).
### 2.15.0 (2021-08-10)
* Core Library:
* Add `MediaCodecAdapter.needsReconfiguration` method.
* Add `getSeekBackIncrement`, `seekBack`, `getSeekForwardIncrement`,
`seekForward`, `getMaxSeekToPreviousPosition`, `seekToPrevious` and
`seekToNext` methods to `Player`.
* Rename `Player` methods:
* `hasPrevious` to `hasPreviousWindow`.
* `previous` to `seekToPreviousWindow`.
* `hasNext` to `hasNextWindow`.
* `next` to `seekToNextWindow`.
* Rename `Player` commands:
* `COMMAND_SEEK_IN_CURRENT_MEDIA_ITEM` to
`COMMAND_SEEK_IN_CURRENT_WINDOW`.
* `COMMAND_SEEK_TO_NEXT_MEDIA_ITEM` to `COMMAND_SEEK_TO_NEXT_WINDOW`.
* `COMMAND_SEEK_TO_PREVIOUS_MEDIA_ITEM` to
`COMMAND_SEEK_TO_PREVIOUS_WINDOW`.
* `COMMAND_SEEK_TO_MEDIA_ITEM` to `COMMAND_SEEK_TO_WINDOW`.
* `COMMAND_GET_MEDIA_ITEMS` to `COMMAND_GET_TIMELINE`.
* Rename `Player.EventFlags` IntDef to `Player.Event`.
* Make `Player` depend on the new `PlaybackException` class instead of
`ExoPlaybackException`:
* `Player.getPlayerError` now returns a `PlaybackException`.
* `Player.Listener.onPlayerError` now receives a `PlaybackException`.
* Add a new listener method `Player.Listener.onPlayerErrorChanged`,
which is equivalent to `onPlayerError` except that it is also called
when the player error becomes `null`.
* `Player` implementations like `ExoPlayer` may use
`PlaybackException` subclasses (like `ExoPlaybackException`), so
users can downcast the `PlaybackException` instance to obtain
implementation-specific fields (like
`ExoPlaybackException.rendererIndex`).
* `PlaybackException` introduces an `errorCode` which identifies the cause
of the failure in order to simplify error handling
([#1611](https://github.com/google/ExoPlayer/issues/1611)).
* Add a `DefaultMediaDescriptionAdapter` for the
`PlayerNotificationManager`, that makes use of the `Player`
`MediaMetadata` to populate the notification fields.
* Add `@FallbackType` to `LoadErrorHandlingPolicy` to support
customization of the exclusion duration for locations and tracks.
* Change interface of `LoadErrorHandlingPolicy` to support configuring the
behavior of track and location fallback. Location fallback is currently
only supported for DASH manifests with multiple base URLs.
* Restrict use of `AudioTrack.isDirectPlaybackSupported` to TVs, to avoid
listing audio offload encodings as supported for passthrough mode on
mobile devices
([#9239](https://github.com/google/ExoPlayer/issues/9239)).
* Extractors:
* Add support for DTS-UHD in MP4
([#9163](https://github.com/google/ExoPlayer/issues/9163)).
* Text:
* TTML: Inherit the `rubyPosition` value from a containing `<span
ruby="container">` element.
* WebVTT: Add support for CSS `font-size` property
([#8964](https://github.com/google/ExoPlayer/issues/8964)).
* Ad playback:
* Support changing ad break positions in the player logic
([#5067](https://github.com/google/ExoPlayer/issues/5067)).
* Support resuming content with an offset after an ad group.
* UI:
* Add `setUseRewindAction` and `setUseFastForwardAction` to
`PlayerNotificationManager`, and `setUseFastForwardActionInCompactView`
and `setUseRewindActionInCompactView` to show the actions in compact
view mode.
* Remove `rewind_increment` and `fastforward_increment` attributes from
`PlayerControlView` and `StyledPlayerControlView`. These increments can
be customized by configuring the `Player` (see `setSeekBackIncrementMs`
and `setSeekForwardIncrementMs` in `SimpleExoPlayer.Builder`), or by
using a `ForwardingPlayer` that overrides `getSeekBackIncrement`,
`seekBack`, `getSeekForwardIncrement` and `seekForward`. The rewind and
fast forward buttons can be disabled by using a `ForwardingPlayer` that
removes `COMMAND_SEEK_BACK` and `COMMAND_SEEK_FORWARD` from the
available commands.
* Update `DefaultControlDispatcher` `getRewindIncrementMs` and
`getFastForwardIncrementMs` to take the player as parameter.
* DASH:
* Add support for multiple base URLs and DVB attributes in the manifest.
Apps that are using `DefaultLoadErrorHandlingPolicy` with such manifests
have base URL fallback automatically enabled
([#771](https://github.com/google/ExoPlayer/issues/771),
[#7654](https://github.com/google/ExoPlayer/issues/7654)).
* HLS:
* Fix issue that could cause some playbacks to be stuck buffering
([#8850](https://github.com/google/ExoPlayer/issues/8850),
[#9153](https://github.com/google/ExoPlayer/issues/9153)).
* Report audio track type in
`AnalyticsListener.onDownstreamFormatChanged()` for audio-only
playlists, so that the `PlaybackStatsListener` can derive audio
format-related information
([#9175](https://github.com/google/ExoPlayer/issues/9175)).
* RTSP:
* Use standard RTSP header names
([#9182](https://github.com/google/ExoPlayer/issues/9182)).
* Handle an extra semicolon in SDP fmtp attribute
([#9247](https://github.com/google/ExoPlayer/pull/9247)).
* Fix handling of special characters in the RTSP session ID
([#9254](https://github.com/google/ExoPlayer/issues/9254)).
* SmoothStreaming:
* Propagate `StreamIndex` element `Name` attribute value as `Format` label
([#9252](https://github.com/google/ExoPlayer/issues/9252)).
* Cronet extension:
* Add `CronetDataSource.Factory.setRequestPriority` to allow setting the
priority of requests made by `CronetDataSource` instances.
* OkHttp extension:
* Switch to OkHttp 4.9.1. This increases the extension's minimum SDK
version requirement from 16 to 21.
* Remove deprecated symbols:
* Remove `CastPlayer` specific playlist manipulation methods. Use
`setMediaItems`, `addMediaItems`, `removeMediaItem` and `moveMediaItem`
instead.
* Remove `Format.create` methods. Use `Format.Builder` instead.
* Remove `MediaSource.getTag`. Use `MediaSource.getMediaItem` and
`MediaItem.PlaybackProperties.tag` instead.
* Remove `PlaybackPreparer`. UI components that previously had
`setPlaybackPreparer` methods will now call `Player.prepare` by default.
If this behavior is sufficient, use of `PlaybackPreparer` can be removed
from application code without replacement. For custom preparation logic,
use a `ForwardingPlayer` that implements custom preparation logic in
`prepare`.
* Remove `Player.Listener.onTimelineChanged(Timeline, Object, int)`. Use
`Player.Listener.onTimelineChanged(Timeline, int)` instead. The manifest
can be accessed using `Player.getCurrentManifest`.
* Remove `Player.getCurrentTag`. Use `Player.getCurrentMediaItem` and
`MediaItem.PlaybackProperties.tag` instead.
* Remove `Player.getPlaybackError`. Use `Player.getPlayerError` instead.
* Remove `PlayerNotificationManager` constructors and `createWith`
methods. Use `PlayerNotificationManager.Builder` instead.
* Remove `PlayerNotificationManager.setNotificationListener`. Use
`PlayerNotificationManager.Builder.setNotificationListener` instead.
* Remove `PlayerNotificationManager` `setUseNavigationActions` and
`setUseNavigationActionsInCompactView`. Use `setUseNextAction`,
`setUsePreviousAction`, `setUseNextActionInCompactView` and
`setUsePreviousActionInCompactView` instead.
* Remove `setRewindIncrementMs` and `setFastForwardIncrementMs` from UI
components. These increments can be customized by configuring the
`Player` (see `setSeekBackIncrementMs` and `setSeekForwardIncrementMs`
in `SimpleExoPlayer.Builder`), or by using a `ForwardingPlayer` that
overrides `getSeekBackIncrement`, `seekBack`, `getSeekForwardIncrement`
and `seekForward`. The rewind and fast forward buttons can be disabled
by using a `ForwardingPlayer` that removes `COMMAND_SEEK_BACK` and
`COMMAND_SEEK_FORWARD` from the available commands.
* Remove `Timeline.getWindow(int, Window, boolean)`. Use
`Timeline.getWindow(int, Window)` instead, which will always set tags.
### 2.14.2 (2021-07-20)
* Core Library:
* Explicitly mark several methods on `SimpleExoPlayer` as `@Deprecated`.
These methods are all overrides and are already deprecated on `Player`
and the respective `ExoPlayer` component classes (since 2.14.0).
* Video:
* Fix `IncorrectContextUseViolation` strict mode warning on Android 11
([#8246](https://github.com/google/ExoPlayer/pull/8246)).
* Audio:
* Fix track selection for E-AC-3 streams.
* Use `AudioTrack.isDirectPlaybackSupported` to check for encoded audio
passthrough capability from API 29 onwards, instead of using the HDMI
audio plug intent
([#6500](https://github.com/google/ExoPlayer/pull/6500)).
* Extractors:
* Fix issue where a `trun` atom could be associated with the wrong track
in an FMP4 stream
([#9056](https://github.com/google/ExoPlayer/pull/9056)). The fix
removes a previous workaround to handle content in which the `track_ID`
is set incorrectly
([#4083](https://github.com/google/ExoPlayer/issues/4083)). Such content
is malformed and should be re-encoded.
* Improve support for truncated Ogg streams
([#7608](https://github.com/google/ExoPlayer/issues/7608)).
* Add support for MP4 H263 atom type
([#9158](https://github.com/google/ExoPlayer/issues/9158)).
* Fix issue around TS synchronization when reading a file's duration
([#9100](https://github.com/google/ExoPlayer/pull/9100)).
* HLS:
* Fix issue where playback of a live event could become stuck rather than
transitioning to `STATE_ENDED` when the event ends
([#9067](https://github.com/google/ExoPlayer/issues/9067)).
* Fix issue where a new initialization segment, as specified by an
`EXT-X-MAP` tag in a media playlist, would not be loaded when
encountered during playback
([#9004](https://github.com/google/ExoPlayer/issues/9004)).
* Forward the `FRAME-RATE` value from the master playlist to renditions.
([#8960](https://github.com/google/ExoPlayer/issues/8960)).
* Fix issue where HLS events would start at positions greater than
specified by an `EXT-X-START` tag when placed in a playlist
([#9037](https://github.com/google/ExoPlayer/issues/9037)).
* Ad playback:
* Use the content URI when auto-generating an ad ID (in addition to the
media ID and ad tag URI)
([#9106](https://github.com/google/ExoPlayer/issues/9106)).
* DRM:
* Allow repeated provisioning in `DefaultDrmSession(Manager)`.
* Fix a crash due to `DefaultDrmSessionManager.release()` incorrectly
releasing too many keep-alive `DefaultDrmSession` references, resulting
in `DefaultDrmSession.release()` throwing an `IllegalStateException`
([#9193](https://github.com/google/ExoPlayer/issues/9193)).
* Metadata:
* Fix handling of emsg messages with an unset duration
([#9123](https://github.com/google/ExoPlayer/issues/9123)).
* UI:
* Add `PendingIntent.FLAG_IMMUTABLE` when creating broadcast intents in
`PlayerNotificationManager`. This is required by a
[behaviour change](https://developer.android.com/about/versions/12/behavior-changes-12#pending-intent-mutability)
in Android 12.
* Fix focusability of `StyledPlayerView` and `StyledPlayerControlView`
popup menus on API levels prior to 26
([#9061](https://github.com/google/ExoPlayer/issues/9061)).
* Fix progress bar flickering immediately after the user seeks
([#9049](https://github.com/google/ExoPlayer/pull/9049)).
* Fix `StyledPlayerView` and `StyledPlayerControlView` popup menu items
not expanding to occupy the full width of the popup
([#9086](https://github.com/google/ExoPlayer/issues/9086)).
* Don't propagate `AttributeSet` from `SubtitleView` constructor into
`CanvasSubtitleOutput`. Just passing the `Context` is enough, and
ensures programmatic changes to the `SubtitleView` will propagate down.
* RTSP:
* Fix session description (SDP) parsing to use a HashMap-like behaviour
for duplicated attributes
([#9014](https://github.com/google/ExoPlayer/issues/9014)).
* Allow using absolute URI in the control attribute in a media description
([#9183](https://github.com/google/ExoPlayer/issues/9183)).
* Allow the timeout to be customised via
`RtspMediaSource.Factory.setTimeoutMs`.
### 2.14.1 (2021-06-11)
* Core Library:
* Fix gradle config to allow specifying a relative path for
`exoplayerRoot` when [depending on ExoPlayer locally](README.md#locally)
([#8927](https://github.com/google/ExoPlayer/issues/8927)).
* Update `MediaItem.Builder` javadoc to discourage calling setters that
will be (currently) ignored if another setter is not also called.
* Extractors:
* Add support for MPEG-H 3D Audio in MP4 extractors
([#8860](https://github.com/google/ExoPlayer/pull/8860)).
* Video:
* Fix bug that could cause `CodecException: Error 0xffffffff` to be thrown
from `MediaCodec.native_setSurface` in use cases that involve both
swapping the output `Surface` and a mixture of secure and non-secure
content being played
([#8776](https://github.com/google/ExoPlayer/issues/8776)).
* HLS:
* Use the `PRECISE` attribute in `EXT-X-START` to select the default start
position.
* Fix a bug where skipping into spliced-in chunks triggered an assertion
error ([#8937](https://github.com/google/ExoPlayer/issues/8937)).
* DRM:
* Keep secure `MediaCodec` instances initialized when disabling (but not
resetting) `MediaCodecRenderer`. This helps re-use secure decoders in
more contexts, which avoids the 'black flash' caused by detaching a
`Surface` from a secure decoder on some devices
([#8842](https://github.com/google/ExoPlayer/issues/8842)). It will also
result in DRM license refresh network requests while the player is
stopped if `Player#setForegroundMode` is true.
* Fix issue where offline keys were unnecessarily (and incorrectly)
restored into a session before being released. This call sequence is
explicitly disallowed in OEMCrypto v16.
* UI:
* Keep subtitle language features embedded (e.g. rubies & tate-chu-yoko)
in `Cue.text` even when `SubtitleView#setApplyEmbeddedStyles()` is
`false`.
* Fix `NullPointerException` in `StyledPlayerView` that could occur after
calling `StyledPlayerView.setPlayer(null)`
([#8985](https://github.com/google/ExoPlayer/issues/8985)).
* RTSP:
* Add support for RTSP basic and digest authentication
([#8941](https://github.com/google/ExoPlayer/issues/8941)).
* Enable using repeat mode and playlist with RTSP
([#8994](https://github.com/google/ExoPlayer/issues/8994)).
* Add `RtspMediaSource.Factory` option to set the RTSP user agent.
* Add `RtspMediaSource.Factory` option to force using TCP for streaming.
* GL demo app:
* Fix texture transformation to avoid green bars shown on some videos
([#8992](https://github.com/google/ExoPlayer/issues/8992)).
### 2.14.0 (2021-05-13)
* Core Library:
* Move `Player` components to `ExoPlayer`. For example
`Player.VideoComponent` is now `ExoPlayer.VideoComponent`.
* The most used methods of `Player`'s audio, video, text and metadata
components have been added directly to `Player`.
* Add `Player.getAvailableCommands`, `Player.isCommandAvailable` and
`Listener.onAvailableCommandsChanged` to query which commands that can
be executed on the player.
* Add a `Player.Listener` interface to receive all player events.
Component listeners and `EventListener` have been deprecated.
* Add `Player.getMediaMetadata`, which returns a combined and structured
`MediaMetadata` object. Changes to metadata are reported to
`Listener.onMediaMetadataChanged`.
* `Player.setPlaybackParameters` no longer accepts null, use
`PlaybackParameters.DEFAULT` instead.
* Report information about the old and the new playback positions to
`Listener.onPositionDiscontinuity`. Add `DISCONTINUITY_REASON_SKIP` and
`DISCONTINUITY_REASON_REMOVE` as discontinuity reasons, and rename
`DISCONTINUITY_REASON_PERIOD_TRANSITION` to
`DISCONTINUITY_REASON_AUTO_TRANSITION`. Remove
`DISCONTINUITY_REASON_AD_INSERTION`, for which
`DISCONTINUITY_REASON_AUTO_TRANSITION` is used instead
([#6163](https://github.com/google/ExoPlayer/issues/6163),
[#4768](https://github.com/google/ExoPlayer/issues/4768)).
* Deprecate `ExoPlayer.Builder`. Use `SimpleExoPlayer.Builder` instead.
* Move `Player.getRendererCount` and `Player.getRendererType` to
`ExoPlayer`.
* Use an empty string instead of the URI if the media ID is not explicitly
set with `MediaItem.Builder.setMediaId(String)`.
* Remove `MediaCodecRenderer.configureCodec()` and add
`MediaCodecRenderer.getMediaCodecConfiguration()`. The new method is
called just before the `MediaCodec` is created and returns the
parameters needed to create and configure the `MediaCodec` instance.
Applications can override `MediaCodecRenderer.onCodecInitialized()` to
be notified after a `MediaCodec` is initialized, or they can inject a
custom `MediaCodecAdapter.Factory` if they want to control how the
`MediaCodec` is configured.
* Promote `AdaptiveTrackSelection.AdaptationCheckpoint` to `public`
visibility to allow Kotlin subclasses of
`AdaptiveTrackSelection.Factory`
([#8830](https://github.com/google/ExoPlayer/issues/8830)).
* Fix bug when transitions from content to ad periods called
`onMediaItemTransition` by mistake.
* `AdsLoader.AdViewProvider` and `AdsLoader.OverlayInfo` have been renamed
`com.google.android.exoplayer2.ui.AdViewProvider` and
`com.google.android.exoplayer2.ui.AdOverlayInfo` respectively.
* `CaptionStyleCompat` has been moved to the
`com.google.android.exoplayer2.ui` package.
* `DebugTextViewHelper` has been moved from the `ui` package to the `util`
package.
* RTSP:
* Initial support for RTSP playbacks
([#55](https://github.com/google/ExoPlayer/issues/55)).
* Downloads and caching:
* Fix `CacheWriter` to correctly handle cases where the request `DataSpec`
extends beyond the end of the underlying resource. Caching will now
succeed in this case, with data up to the end of the resource being
cached. This behaviour is enabled by default, and so the
`allowShortContent` parameter has been removed
([#7326](https://github.com/google/ExoPlayer/issues/7326)).
* Fix `CacheWriter` to correctly handle `DataSource.close` failures, for
which it cannot be assumed that data was successfully written to the
cache.
* DRM:
* Prepare DRM sessions (and fetch keys) ahead of the playback position
([#4133](https://github.com/google/ExoPlayer/issues/4133)).
* Only dispatch DRM session acquire and release events once per period
when playing content that uses the same encryption keys for both audio &
video tracks. Previously, separate acquire and release events were
dispatched for each track in each period.
* Include the session state in DRM session-acquired listener methods.
* UI:
* Add `PlayerNotificationManager.Builder`, with the ability to specify
which group the notification should belong to.
* Remove `setUseSensorRotation` from `PlayerView` and `StyledPlayerView`.
Instead, cast the view returned by `getVideoSurfaceView` to
`SphericalGLSurfaceView`, and then call `setUseSensorRotation` on the
`SphericalGLSurfaceView` directly.
* Analytics:
* Add `onAudioCodecError` and `onVideoCodecError` to `AnalyticsListener`.
* Video:
* Add `Player.getVideoSize()` to retrieve the current size of the video
stream. Add `Listener.onVideoSizeChanged(VideoSize)` and deprecate
`Listener.onVideoSizeChanged(int, int, int, float)`.
* Audio:
* Report unexpected audio discontinuities to
`AnalyticsListener.onAudioSinkError`
([#6384](https://github.com/google/ExoPlayer/issues/6384)).
* Allow forcing offload for gapless content even if gapless playback is
not supported.
* Allow fall back from DTS-HD to DTS when playing via passthrough.
* Text:
* Fix overlapping lines when using `SubtitleView.VIEW_TYPE_WEB`.
* Parse SSA/ASS underline & strikethrough info in `Style:` lines
([#8435](https://github.com/google/ExoPlayer/issues/8435)).
* Ensure TTML `tts:textAlign` is correctly propagated from `<p>` nodes to
child nodes.
* Support TTML `ebutts:multiRowAlign` attributes.
* Allow the use of Android platform extractors through
[MediaParser](https://developer.android.com/reference/android/media/MediaParser).
Only supported on API 30+.
* You can use platform extractors for progressive media by passing
`MediaParserExtractorAdapter.FACTORY` when creating a
`ProgressiveMediaSource.Factory`.
* You can use platform extractors for HLS by passing
`MediaParserHlsMediaChunkExtractor.FACTORY` when creating a
`HlsMediaSource.Factory`.
* You can use platform extractors for DASH by passing a
`DefaultDashChunkSource` that uses `MediaParserChunkExtractor.FACTORY`
when creating a `DashMediaSource.Factory`.
* Cast extension:
* Trigger `onMediaItemTransition` event for all reasons except
`MEDIA_ITEM_TRANSITION_REASON_REPEAT`.
* MediaSession extension:
* Remove dependency on `exoplayer-core`, relying only on `exoplayer-common`
instead. To achieve this, `TimelineQueueEditor` uses a new
`MediaDescriptionConverter` interface, and no longer relies on
`ConcatenatingMediaSource`.
* Remove deprecated symbols:
* Remove `ExoPlayerFactory`. Use `SimpleExoPlayer.Builder` instead.
* Remove `Player.DefaultEventListener`. Use `Player.Listener` instead.
* Remove `ExtractorMediaSource`. Use `ProgressiveMediaSource` instead.
* Remove `DefaultMediaSourceEventListener`. Use `MediaSourceEventListener`
instead.
* Remove `DashManifest` constructor. Use the remaining constructor with
`programInformation` and `serviceDescription` set to `null` instead.
* Remove `CryptoInfo.getFrameworkCryptoInfoV16`. Use
`CryptoInfo.getFrameworkCryptoInfo` instead.
* Remove `NotificationUtil.createNotificationChannel(Context, String, int,
int)`. Use `createNotificationChannel(Context, String, int, int, int)`
instead.
* Remove `PlayerNotificationManager.setNotificationListener`. Use
`PlayerNotificationManager.Builder.setNotificationListener` instead.
* Remove `PlayerNotificationManager.NotificationListener`
`onNotificationStarted(int, Notification)` and
`onNotificationCancelled(int)`. Use `onNotificationPosted(int,
Notification, boolean)` and `onNotificationCancelled(int, boolean)`
instead.
* Remove `DownloadNotificationUtil`. Use `DownloadNotificationHelper`
instead.
* Remove `extension-jobdispatcher` module. Use the `extension-workmanager`
module instead.
### 2.13.3 (2021-04-14)
* Published via the Google Maven repository (i.e., google()) rather than
JCenter.
* Core:
* Reset playback speed when live playback speed control becomes unused
([#8664](https://github.com/google/ExoPlayer/issues/8664)).
* Fix playback position issue when re-preparing playback after a
`BehindLiveWindowException`
([#8675](https://github.com/google/ExoPlayer/issues/8675)).
* Assume Dolby Vision content is encoded as H264 when calculating maximum
codec input size
([#8705](https://github.com/google/ExoPlayer/issues/8705)).
* UI:
* Fix `StyledPlayerView` scrubber not reappearing correctly in some cases
([#8646](https://github.com/google/ExoPlayer/issues/8646)).
* Fix measurement of `StyledPlayerView` and `StyledPlayerControlView` when
`wrap_content` is used
([#8726](https://github.com/google/ExoPlayer/issues/8726)).
* Fix `StyledPlayerControlView` to stay in full mode (rather than minimal
mode) when possible
([#8763](https://github.com/google/ExoPlayer/issues/8763)).
* DASH:
* Parse `forced_subtitle` role from DASH manifests
([#8781](https://github.com/google/ExoPlayer/issues/8781)).
* Fix rounding error that could cause `SegmentTemplate.getSegmentCount()`
to return incorrect values
([#8804](https://github.com/google/ExoPlayer/issues/8804)).
* HLS:
* Fix bug of ignoring `EXT-X-START` when setting the live target offset
([#8764](https://github.com/google/ExoPlayer/pull/8764)).
* Fix incorrect application of byte ranges to `EXT-X-MAP` tags
([#8783](https://github.com/google/ExoPlayer/issues/8783)).
* Fix issue that could cause playback to become stuck if corresponding
`EXT-X-DISCONTINUITY` tags in different media playlists occur at
different positions in time
([#8372](https://github.com/google/ExoPlayer/issues/8372)).
* Fix issue that could cause playback of on-demand content to not start in
cases where the media playlists referenced by the master playlist have
different starting `EXT-X-PROGRAM-DATE-TIME` tags.
* Fix container type detection for segments with incorrect file extension
or HTTP Content-Type
([#8733](https://github.com/google/ExoPlayer/issues/8733)).
* Extractors:
* Add support for `GContainer` and `GContainerItem` XMP namespace prefixes
in JPEG motion photo parsing.
* Allow JFIF APP0 marker segment preceding Exif APP1 segment in
`JpegExtractor`.
* Text:
* Parse SSA/ASS bold & italic info in `Style:` lines
([#8435](https://github.com/google/ExoPlayer/issues/8435)).
* Don't display subtitles after the end position of the current media
period (if known). This ensures sideloaded subtitles respect the end
point of `ClippingMediaPeriod` and prevents content subtitles from
continuing to be displayed over mid-roll ads
([#5317](https://github.com/google/ExoPlayer/issues/5317),
[#8456](https://github.com/google/ExoPlayer/issues/8456)).
* Fix CEA-708 priority handling to sort cues in the order defined by the
spec ([#8704](https://github.com/google/ExoPlayer/issues/8704)).
* Support TTML `textEmphasis` attributes, used for Japanese boutens.
* Support TTML `shear` attributes.
* Metadata:
* Ensure that timed metadata near the end of a period is not dropped
([#8710](https://github.com/google/ExoPlayer/issues/8710)).
* Cast extension:
* Fix `onPositionDiscontinuity` event so that it is not triggered with
reason `DISCONTINUITY_REASON_PERIOD_TRANSITION` after a seek to another
media item and so that it is not triggered after a timeline change.
* IMA extension:
* Fix error caused by `AdPlaybackState` ad group times being cleared,
which can occur if the `ImaAdsLoader` is released while an ad is pending
loading ([#8693](https://github.com/google/ExoPlayer/issues/8693)).
* Upgrade IMA SDK dependency to 3.23.0, fixing an issue with
`NullPointerExceptions` within `WebView` callbacks
([#8447](https://github.com/google/ExoPlayer/issues/8447)).
* FFmpeg extension: Fix playback failure when switching to TrueHD tracks
during playback ([#8616](https://github.com/google/ExoPlayer/issues/8616)).
### 2.13.2 (2021-02-25)
* Extractors:
* Add support for MP4 and QuickTime meta atoms that are not full atoms.
* UI:
* Make conditions to enable UI actions consistent in
`DefaultControlDispatcher`, `PlayerControlView`,
`StyledPlayerControlView`, `PlayerNotificationManager` and
`TimelineQueueNavigator`.
* Fix conditions to enable seeking to next/previous media item to handle
the case where a live stream has ended.
* Audio:
* Fix `SimpleExoPlayer` reporting audio session ID as 0 in some cases
([#8585](https://github.com/google/ExoPlayer/issues/8585)).
* IMA extension:
* Fix a bug where playback could get stuck when seeking into a playlist
item with ads, if the preroll ad had preloaded but the window position
of the seek should instead trigger playback of a midroll.
* Fix a bug with playback of ads in playlists, where the incorrect period
index was used when deciding whether to trigger playback of an ad after
a seek.
* Text:
* Parse SSA/ASS font size in `Style:` lines
([#8435](https://github.com/google/ExoPlayer/issues/8435)).
* VP9 extension: Update to use NDK r21
([#8581](https://github.com/google/ExoPlayer/issues/8581)).
* FLAC extension: Update to use NDK r21
([#8581](https://github.com/google/ExoPlayer/issues/8581)).
* Opus extension: Update to use NDK r21
([#8581](https://github.com/google/ExoPlayer/issues/8581)).
* FFmpeg extension: Update to use NDK r21
([#8581](https://github.com/google/ExoPlayer/issues/8581)).
### 2.13.1 (2021-02-12)
* Live streaming:
* Fix playback issue for HLS live streams without program date time
information ([#8560](https://github.com/google/ExoPlayer/issues/8560)).
* Fix playback issue for multi-period DASH live streams
([#8537](https://github.com/google/ExoPlayer/issues/8537)).
* Fix playback failures when playing live streams with video tunneling
enabled ([#8570](https://github.com/google/ExoPlayer/issues/8570)).
* IMA extension:
* Fix handling of repeated ad loads, to avoid ads being discarded if the
user seeks away and then back to a preloaded postroll (for example).
* Fix a bug where an assertion would fail if the player started to buffer
an ad media period before the ad URI was known then an ad state update
arrived that didn't set the ad URI.
* Add `ImaAdsLoader.focusSkipButton` to allow apps to request that the
skip button should receive UI focus, if shown
([#8565](https://github.com/google/ExoPlayer/issues/8565)).
* DRM:
* Re-use the previous `DrmSessionManager` instance when playing a playlist
(if possible)
([#8523](https://github.com/google/ExoPlayer/issues/8523)).
* Propagate DRM configuration when creating media sources for ad content
([#8568](https://github.com/google/ExoPlayer/issues/8568)).
* Only release 'keepalive' references to `DrmSession` in
`DefaultDrmSessionManager#release()` if keepalive is enabled
([#8576](https://github.com/google/ExoPlayer/issues/8576)).
### 2.13.0 (2021-02-04)
* Core library:
* Verify correct thread usage in `SimpleExoPlayer` by default. Opt-out is
still possible until the next major release using
`setThrowsWhenUsingWrongThread(false)`
([#4463](https://github.com/google/ExoPlayer/issues/4463)).
* Add `Player.getCurrentStaticMetadata` and
`EventListener.onStaticMetadataChanged` to expose static metadata
belonging to the currently playing stream
([#7266](https://github.com/google/ExoPlayer/issues/7266)).
* Add `PlayerMessage.setLooper` and deprecate `PlayerMessage.setHandler`.
* Add option to `MergingMediaSource` to clip the durations of all sources
to have the same length
([#8422](https://github.com/google/ExoPlayer/issues/8422)).
* Remove `Player.setVideoDecoderOutputBufferRenderer` from Player API. Use
`setVideoSurfaceView` and `clearVideoSurfaceView` instead.
* Default `SingleSampleMediaSource.treatLoadErrorsAsEndOfStream` to `true`
so that errors loading external subtitle files do not cause playback to
fail ([#8430](https://github.com/google/ExoPlayer/issues/8430)). A
warning will be logged by `SingleSampleMediaPeriod` whenever a load
error is treated as though the end of the stream has been reached.
* Time out on release to prevent ANRs if an underlying platform call is
stuck ([#4352](https://github.com/google/ExoPlayer/issues/4352)).
* Time out when detaching a surface to prevent ANRs if the underlying
platform call is stuck
([#5887](https://github.com/google/ExoPlayer/issues/5887)).
* Fix bug where `AnalyticsListener` callbacks could arrive in the wrong
order ([#8048](https://github.com/google/ExoPlayer/issues/8048)).
* Media transformation:
* Add a new `transformer` module for converting media streams. The
initially available transformations are changing the container format,
removing tracks, and slow motion flattening.
* Low latency live streaming:
* Support low-latency DASH (also known as ULL-CMAF) and Apple's
low-latency HLS extension.
* Add `LiveConfiguration` to `MediaItem` to define live offset and
playback speed adjustment parameters. The same parameters can be set on
`DefaultMediaSourceFactory` to apply for all `MediaItems`.
* Add `LivePlaybackSpeedControl` to control playback speed adjustments
during live playbacks. Such adjustments allow the player to stay close
to the live offset. `DefaultLivePlaybackSpeedControl` is provided as a
default implementation.
* Add `targetLiveOffsetUs` parameter to `LoadControl.shouldStartPlayback`.
* Extractors:
* Populate codecs string for H.264/AVC in MP4, Matroska and FLV streams to
allow decoder capability checks based on codec profile and level
([#8393](https://github.com/google/ExoPlayer/issues/8393)).
* Populate codecs string for H.265/HEVC in MP4, Matroska and MPEG-TS
streams to allow decoder capability checks based on codec profile and
level ([#8393](https://github.com/google/ExoPlayer/issues/8393)).
* Add support for playing JPEG motion photos
([#5405](https://github.com/google/ExoPlayer/issues/5405)).
* Handle sample size mismatches between raw audio `stsd` information and
`stsz` fixed sample size in MP4 extractors.
* Fix Vorbis private codec data parsing in the Matroska extractor
([#8496](https://github.com/google/ExoPlayer/issues/8496)).
* Track selection:
* Move `Player.getTrackSelector` to the `ExoPlayer` interface.
* Move the mutable parts of `TrackSelection` into an `ExoTrackSelection`
subclass.
* Allow parallel adaptation of video and audio
([#5111](https://github.com/google/ExoPlayer/issues/5111)).
* Simplify enabling tunneling with `DefaultTrackSelector`.
`ParametersBuilder.setTunnelingAudioSessionId` has been replaced with
`ParametersBuilder.setTunnelingEnabled`. The player's audio session ID
will be used, and so a tunneling specific ID is no longer needed.
* Add additional configuration parameters to `DefaultTrackSelector`.
`DefaultTrackSelector.ParametersBuilder` now includes:
* `setPreferredVideoMimeType`, `setPreferredVideoMimeTypes`,
`setPreferredAudioMimeType` and `setPreferredAudioMimeTypes` for
specifying preferred video and audio MIME type(s)
([#8320](https://github.com/google/ExoPlayer/issues/8320)).
* `setPreferredAudioLanguages` and `setPreferredTextLanguages` for
specifying multiple preferred audio and text languages.
* `setPreferredAudioRoleFlags` for specifying preferred audio role
flags.
* Forward `Timeline` and `MediaPeriodId` to `TrackSelection.Factory`.
* DASH:
* Support low-latency DASH playback (`availabilityTimeOffset` and
`ServiceDescription` tags)
([#4904](https://github.com/google/ExoPlayer/issues/4904)).
* Improve logic for determining whether to refresh the manifest when a
chunk load error occurs in a live streams that contains EMSG data
([#8408](https://github.com/google/ExoPlayer/issues/8408)).
* HLS:
* Support playlist delta updates, blocking playlist reloads and rendition
reports.
* Support low-latency HLS playback (`EXT-X-PART` and preload hints)
([#5011](https://github.com/google/ExoPlayer/issues/5011)).
* UI:
* Improve `StyledPlayerControlView` button animations.
* Miscellaneous fixes for `StyledPlayerControlView` in minimal mode.
* DRM:
* Fix playback failure when switching from PlayReady protected content to
Widevine or Clearkey protected content in a playlist.
* Add `ExoMediaDrm.KeyRequest.getRequestType`
([#7847](https://github.com/google/ExoPlayer/issues/7847)).
* Drop key and provision responses if `DefaultDrmSession` is released
while waiting for the response. This prevents harmless log messages of
the form: `IllegalStateException: sending message to a Handler on a dead
thread` ([#8328](https://github.com/google/ExoPlayer/issues/8328)).
* Allow apps to fully customize DRM behaviour for each `MediaItem` by
passing a `DrmSessionManagerProvider` to `MediaSourceFactory`
([#8466](https://github.com/google/ExoPlayer/issues/8466)).
* Analytics:
* Add an `onEvents` callback to `Player.EventListener` and
`AnalyticsListener`. When one or more player states change
simultaneously, `onEvents` is called once after all of the callbacks
associated with the individual state changes.
* Pass a `DecoderReuseEvaluation` to `AnalyticsListener`'s
`onVideoInputFormatChanged` and `onAudioInputFormatChanged` methods. The
`DecoderReuseEvaluation` indicates whether it was possible to re-use an
existing decoder instance for the new format, and if not then the
reasons why.
* Video:
* Fall back to AVC/HEVC decoders for Dolby Vision streams with level 10 to
13 ([#8530](https://github.com/google/ExoPlayer/issues/8530)).
* Fix VP9 format capability checks on API level 23 and earlier. The
platform does not correctly report the VP9 level supported by the
decoder in this case, so we estimate it based on the decoder's maximum
supported bitrate.
* Audio:
* Fix handling of audio session IDs
([#8190](https://github.com/google/ExoPlayer/issues/8190)).
`SimpleExoPlayer` now generates an audio session ID on construction,
which can be immediately queried by calling
`SimpleExoPlayer.getAudioSessionId`. The audio session ID will only
change if application code calls `SimpleExoPlayer.setAudioSessionId`.
* Replace `onAudioSessionId` with `onAudioSessionIdChanged` in
`AudioListener` and `AnalyticsListener`. Note that
`onAudioSessionIdChanged` is called in fewer cases than
`onAudioSessionId` was called, due to the improved handling of audio
session IDs as described above.
* Retry playback after some types of `AudioTrack` error.
* Create E-AC3 JOC passthrough `AudioTrack` instances using the maximum
supported channel count (instead of assuming 6 channels) from API 29.
* Text:
* Add support for the SSA `primaryColour` style attribute
([#8435](https://github.com/google/ExoPlayer/issues/8435)).
* Fix CEA-708 sequence number discontinuity handling
([#1807](https://github.com/google/ExoPlayer/issues/1807)).
* Fix CEA-708 handling of unexpectedly small packets
([#1807](https://github.com/google/ExoPlayer/issues/1807)).
* Data sources:
* For `HttpDataSource` implementations, default to using the user agent of
the underlying network stack.
* Deprecate `HttpDataSource.Factory.getDefaultRequestProperties`. Use
`HttpDataSource.Factory.setDefaultRequestProperties` instead.
* Add `DefaultHttpDataSource.Factory` and deprecate
`DefaultHttpDataSourceFactory`.
* Metadata retriever:
* Parse Google Photos HEIC and JPEG motion photo metadata.
* IMA extension:
* Add support for playback of ads in playlists
([#3750](https://github.com/google/ExoPlayer/issues/3750)).
* Add `ImaAdsLoader.Builder.setEnableContinuousPlayback` for setting
whether to request ads for continuous playback.
* Upgrade IMA SDK dependency to 3.22.0. This fixes leaking of the ad view
group ([#7344](https://github.com/google/ExoPlayer/issues/7344),
[#8339](https://github.com/google/ExoPlayer/issues/8339)).
* Fix a bug that could cause the next content position played after a seek
to snap back to the cue point of the preceding ad, rather than the
requested content position.
* Fix a regression that caused an ad group to be skipped after an initial
seek to a non-zero position. Unsupported VPAID ads will still be
skipped, but only after the preload timeout rather than instantly
([#8428](https://github.com/google/ExoPlayer/issues/8428),
[#7832](https://github.com/google/ExoPlayer/issues/7832)).
* Fix a regression that caused a short ad followed by another ad to be
skipped due to playback being stuck buffering waiting for the second ad
to load ([#8492](https://github.com/google/ExoPlayer/issues/8492)).
* FFmpeg extension:
* Link the FFmpeg library statically, saving 350KB in binary size on
average.
* OkHttp extension:
* Add `OkHttpDataSource.Factory` and deprecate `OkHttpDataSourceFactory`.
* Cronet extension:
* Add `CronetDataSource.Factory` and deprecate `CronetDataSourceFactory`.
* Support setting the user agent on `CronetDataSource.Factory` and
`CronetEngineWrapper`.
* MediaSession extension:
* Support `setPlaybackSpeed(float)` and disable it by default. Use
`MediaSessionConnector.setEnabledPlaybackActions(long)` to enable
([#8229](https://github.com/google/ExoPlayer/issues/8229)).
* Remove deprecated symbols:
* `AdaptiveMediaSourceEventListener`. Use `MediaSourceEventListener`
instead.
* `DashMediaSource.Factory.setMinLoadableRetryCount(int)`. Use
`DashMediaSource.Factory.setLoadErrorHandlingPolicy(LoadErrorHandlingPolicy)`
instead.
* `DefaultAnalyticsListener`. Use `AnalyticsListener` instead.
* `DefaultLoadControl` constructors. Use `DefaultLoadControl.Builder`
instead.
* `DrmInitData.get(UUID)`. Use `DrmInitData.get(int)` and
`DrmInitData.SchemeData.matches(UUID)` instead.
* `ExtractorsMediaSource.Factory.setMinLoadableRetryCount(int)`. Use
`ExtractorsMediaSource.Factory.setLoadErrorHandlingPolicy(LoadErrorHandlingPolicy)`
instead.
* `FixedTrackSelection.Factory`. If you need to disable adaptive selection
in `DefaultTrackSelector`, enable the
`DefaultTrackSelector.Parameters.forceHighestSupportedBitrate` flag.
* `HlsMediaSource.Factory.setMinLoadableRetryCount(int)`. Use
`HlsMediaSource.Factory.setLoadErrorHandlingPolicy(LoadErrorHandlingPolicy)`
instead.
* `MappedTrackInfo.getTrackFormatSupport(int, int, int)`. Use
`MappedTrackInfo.getTrackSupport(int, int, int)` instead.
* `MappedTrackInfo.getTrackTypeRendererSupport(int)`. Use
`MappedTrackInfo.getTypeSupport(int)` instead.
* `MappedTrackInfo.getUnassociatedTrackGroups()`. Use
`MappedTrackInfo.getUnmappedTrackGroups()` instead.
* `MappedTrackInfo.length`. Use `MappedTrackInfo.getRendererCount()`
instead.
* `Player.DefaultEventListener.onTimelineChanged(Timeline, Object)`. Use
`Player.EventListener.onTimelineChanged(Timeline, int)` instead.
* `Player.setAudioAttributes(AudioAttributes)`. Use
`Player.AudioComponent.setAudioAttributes(AudioAttributes, boolean)`
instead.
* `PlayerView.setDefaultArtwork(Bitmap)`. Use
`PlayerView.setDefaultArtwork(Drawable)` instead.
* `PlayerView.setShowBuffering(boolean)`. Use
`PlayerView.setShowBuffering(int)` instead.
* `SimpleExoPlayer.clearMetadataOutput(MetadataOutput)`. Use
`SimpleExoPlayer.removeMetadataOutput(MetadataOutput)` instead.
* `SimpleExoPlayer.clearTextOutput(TextOutput)`. Use
`SimpleExoPlayer.removeTextOutput(TextOutput)` instead.
* `SimpleExoPlayer.clearVideoListener()`. Use
`SimpleExoPlayer.removeVideoListener(VideoListener)` instead.
* `SimpleExoPlayer.getAudioStreamType()`. Use
`SimpleExoPlayer.getAudioAttributes()` instead.
* `SimpleExoPlayer.setAudioDebugListener(AudioRendererEventListener)`. Use
`SimpleExoPlayer.addAnalyticsListener(AnalyticsListener)` instead.
* `SimpleExoPlayer.setAudioStreamType(int)`. Use
`SimpleExoPlayer.setAudioAttributes(AudioAttributes)` instead.
* `SimpleExoPlayer.setMetadataOutput(MetadataOutput)`. Use
`SimpleExoPlayer.addMetadataOutput(MetadataOutput)` instead. If your
application is calling `SimpleExoPlayer.setMetadataOutput(null)`, make
sure to replace this call with a call to
`SimpleExoPlayer.removeMetadataOutput(MetadataOutput)`.
* `SimpleExoPlayer.setPlaybackParams(PlaybackParams)`. Use
`SimpleExoPlayer.setPlaybackParameters(PlaybackParameters)` instead.
* `SimpleExoPlayer.setTextOutput(TextOutput)`. Use
`SimpleExoPlayer.addTextOutput(TextOutput)` instead. If your application
is calling `SimpleExoPlayer.setTextOutput(null)`, make sure to replace
this call with a call to `SimpleExoPlayer.removeTextOutput(TextOutput)`.
* `SimpleExoPlayer.setVideoDebugListener(VideoRendererEventListener)`. Use
`SimpleExoPlayer.addAnalyticsListener(AnalyticsListener)` instead.
* `SimpleExoPlayer.setVideoListener(VideoListener)`. Use
`SimpleExoPlayer.addVideoListener(VideoListener)` instead. If your
application is calling `SimpleExoPlayer.setVideoListener(null)`, make
sure to replace this call with a call to
`SimpleExoPlayer.removeVideoListener(VideoListener)`.
* `SimpleExoPlayer.VideoListener`. Use
`com.google.android.exoplayer2.video.VideoListener` instead.
* `SingleSampleMediaSource.EventListener` and constructors. Use
`MediaSourceEventListener` and `SingleSampleMediaSource.Factory`
instead.
* `SimpleExoPlayer.addVideoDebugListener`,
`SimpleExoPlayer.removeVideoDebugListener`,
`SimpleExoPlayer.addAudioDebugListener` and
`SimpleExoPlayer.removeAudioDebugListener`. Use
`SimpleExoPlayer.addAnalyticsListener` and
`SimpleExoPlayer.removeAnalyticsListener` instead.
* `SingleSampleMediaSource.Factory.setMinLoadableRetryCount(int)`. Use
`SingleSampleMediaSource.Factory.setLoadErrorHandlingPolicy(LoadErrorHandlingPolicy)`
instead.
* `SsMediaSource.Factory.setMinLoadableRetryCount(int)`. Use
`SsMediaSource.Factory.setLoadErrorHandlingPolicy(LoadErrorHandlingPolicy)`
instead.
### 2.12.3 (2021-01-13)
* Core library:
* Fix `MediaCodecRenderer` issue where empty streams would fail to play in
bypass mode ([#8374](https://github.com/google/ExoPlayer/issues/8374)).
* Fix playback issues after seeking during an ad
([#8349](https://github.com/google/ExoPlayer/issues/8349)).
* Fix propagation of `LoadErrorHandlingPolicy` from
`DefaultMediaSourceFactory` into `SingleSampleMediaSource.Factory` when
creating subtitle media sources from
`MediaItem.playbackProperties.subtitles`
([#8430](https://github.com/google/ExoPlayer/issues/8430)).
* UI:
* Fix issue where pop-up menus belonging to `StyledPlayerControlView`
would not be dismissed when tapping outside of the menu area or pressing
the back button, on API level 22 and earlier
([#8272](https://github.com/google/ExoPlayer/issues/8272)).
* Downloads:
* Fix crash in `DownloadManager` that could occur when adding a stopped
download with the same ID as a download currently being removed
([#8419](https://github.com/google/ExoPlayer/issues/8419)).
* Text:
* Gracefully handle null-terminated subtitle content in Matroska
containers.
* Fix CEA-708 anchor positioning
([#1807](https://github.com/google/ExoPlayer/issues/1807)).
* IMA extension:
* Fix a condition where playback could get stuck before an empty ad
([#8205](https://github.com/google/ExoPlayer/issues/8205)).
* Log a warning rather than throwing when reaching the end of the stream
with an ad playing but without ad media info
([#8290](https://github.com/google/ExoPlayer/issues/8290)).
* Media2 extension:
* Make media2-extension depend on AndroidX media2:media2-session:1.1.0 to
fix a deadlock while creating PlaybackStateCompat internally.
([#8011](https://github.com/google/ExoPlayer/issues/8011)).
### 2.12.2 (2020-12-01)
* Core library:
* Suppress exceptions from registering and unregistering the stream volume
receiver ([#8087](https://github.com/google/ExoPlayer/issues/8087),
[#8106](https://github.com/google/ExoPlayer/issues/8106)).
* Suppress ProGuard warnings caused by Guava's compile-only dependencies
([#8103](https://github.com/google/ExoPlayer/issues/8103)).
* Fix issue that could cause playback to freeze when selecting tracks, if
extension audio renderers are being used
([#8203](https://github.com/google/ExoPlayer/issues/8203)).
* UI:
* Fix incorrect color and text alignment of the `StyledPlayerControlView`
fast forward and rewind buttons, when used together with the
`com.google.android.material` library
([#7898](https://github.com/google/ExoPlayer/issues/7898)).
* Add `dispatchPrepare(Player)` to `ControlDispatcher` and implement it in
`DefaultControlDispatcher`. Deprecate `PlaybackPreparer` and
`setPlaybackPreparer` in `StyledPlayerView`, `StyledPlayerControlView`,
`PlayerView`, `PlayerControlView`, `PlayerNotificationManager` and
`LeanbackPlayerAdapter` and use `ControlDispatcher` for dispatching
prepare instead
([#7882](https://github.com/google/ExoPlayer/issues/7882)).
* Increase seekbar's touch target height in `StyledPlayerControlView`.
* Update `StyledPlayerControlView` menu items to behave correctly for
right-to-left languages.
* Support enabling the previous and next actions individually in
`PlayerNotificationManager`.
* Audio:
* Work around `AudioManager` crashes when calling `getStreamVolume`
([#8191](https://github.com/google/ExoPlayer/issues/8191)).
* Extractors:
* Matroska: Add support for 32-bit floating point PCM, and 8-bit and
16-bit big endian integer PCM
([#8142](https://github.com/google/ExoPlayer/issues/8142)).
* MP4: Add support for mpeg1 video box
([#8257](https://github.com/google/ExoPlayer/issues/8257)).
* IMA extension:
* Upgrade IMA SDK dependency to 3.21.0, and release the `AdsLoader`
([#7344](https://github.com/google/ExoPlayer/issues/7344)).
* Improve handling of ad tags with unsupported VPAID ads
([#7832](https://github.com/google/ExoPlayer/issues/7832)).
* Fix a bug that caused multiple ads in an ad pod to be skipped when one
ad in the ad pod was skipped.
* Fix a bug that caused ad progress not to be updated if the player
resumed after buffering during an ad
([#8239](https://github.com/google/ExoPlayer/issues/8239)).
* Fix passing an ads response to the `ImaAdsLoader` builder.
* Set the overlay language based on the device locale by default.
* Cronet extension:
* Fix handling of HTTP status code 200 when making unbounded length range
requests ([#8090](https://github.com/google/ExoPlayer/issues/8090)).
* Text:
* Allow tx3g subtitles with `styl` boxes with start and/or end offsets
that lie outside the length of the cue text.
* Media2 extension:
* Notify onBufferingEnded when the state of origin player becomes
`STATE_IDLE` or `STATE_ENDED`.
  * Allow removing all playlist items, which resets the player
    ([#8047](https://github.com/google/ExoPlayer/issues/8047)).
### 2.12.1 (2020-10-23)
* Core library:
* Fix issue where `Player.setMediaItems` would ignore its `resetPosition`
argument ([#8024](https://github.com/google/ExoPlayer/issues/8024)).
* Fix bug where streams with highly uneven track durations may get stuck
in a buffering state
([#7943](https://github.com/google/ExoPlayer/issues/7943)).
* Switch Guava dependency from `implementation` to `api`
([#7905](https://github.com/google/ExoPlayer/issues/7905),
[#7993](https://github.com/google/ExoPlayer/issues/7993)).
* Add 403, 500 and 503 to the list of HTTP status codes that can trigger
failover to another quality variant during adaptive playbacks.
* Data sources:
* Add support for `android.resource` URI scheme in `RawResourceDataSource`
([#7866](https://github.com/google/ExoPlayer/issues/7866)).
* Text:
* Add support for `\h` SSA/ASS style override code (non-breaking space).
* Fix playback of WebVTT subtitles in MP4 containers in DASH streams
([#7985](https://github.com/google/ExoPlayer/issues/7985)).
* Fix `NullPointerException` in `TextRenderer` when playing content with a
single subtitle buffer
([#8017](https://github.com/google/ExoPlayer/issues/8017)).
* UI:
* Fix animation when `StyledPlayerView` first shows its playback controls.
* Improve touch targets in `StyledPlayerView` to make tapping easier.
* Allow `subtitleButton` to be omitted in custom `StyledPlayerView`
layouts ([#7962](https://github.com/google/ExoPlayer/issues/7962)).
* Add an option to sort tracks by `Format` in `TrackSelectionView` and
`TrackSelectionDialogBuilder`
([#7709](https://github.com/google/ExoPlayer/issues/7709)).
* Audio:
* Fix the default audio sink position not advancing correctly when using
`AudioTrack` based speed adjustment
([#7982](https://github.com/google/ExoPlayer/issues/7982)).
* Fix `NoClassDefFoundError` warning for `AudioTrack$StreamEventCallback`
([#8058](https://github.com/google/ExoPlayer/issues/8058)).
* Extractors:
* MP4:
* Add support for `_mp2` boxes
([#7967](https://github.com/google/ExoPlayer/issues/7967)).
* Fix playback of files containing `pcm_alaw` or `pcm_mulaw` audio
tracks, by enabling sample rechunking for such tracks.
* MPEG-TS:
* Add `TsExtractor` parameter to configure the number of bytes in
which to search for timestamps when seeking and determining stream
duration ([#7988](https://github.com/google/ExoPlayer/issues/7988)).
* Ignore negative payload size in PES packets
([#8005](https://github.com/google/ExoPlayer/issues/8005)).
* MP3: Use TLEN ID3 tag to compute the stream duration
([#7949](https://github.com/google/ExoPlayer/issues/7949)).
* Ogg: Fix regression playing files with packets that span multiple pages
([#7992](https://github.com/google/ExoPlayer/issues/7992)).
* FLV: Make files seekable by using the key frame index
([#7378](https://github.com/google/ExoPlayer/issues/7378)).
* Downloads: Fix issue retrying progressive downloads, which could also result
in a crash in `DownloadManager.InternalHandler.onContentLengthChanged`
([#8078](https://github.com/google/ExoPlayer/issues/8078)).
* HLS: Fix crash affecting chunkful preparation of master playlists that start
with an I-FRAME only variant
([#8025](https://github.com/google/ExoPlayer/issues/8025)).
* IMA extension:
* Fix position reporting after fetch errors
([#7956](https://github.com/google/ExoPlayer/issues/7956)).
* Allow apps to specify a `VideoAdPlayerCallback`
([#7944](https://github.com/google/ExoPlayer/issues/7944)).
* Accept ad tags via the `AdsMediaSource` constructor and deprecate
passing them via the `ImaAdsLoader` constructor/builders. Passing the ad
tag via media item playback properties continues to be supported. This
is in preparation for supporting ads in playlists
([#3750](https://github.com/google/ExoPlayer/issues/3750)).
* Add a way to override ad media MIME types
([#7961](https://github.com/google/ExoPlayer/issues/7961)).
* Fix incorrect truncation of large cue point positions
([#8067](https://github.com/google/ExoPlayer/issues/8067)).
* Upgrade IMA SDK dependency to 3.20.1. This brings in a fix for companion
ads rendering when targeting API 29
([#6432](https://github.com/google/ExoPlayer/issues/6432)).
### 2.12.0 (2020-09-11)
To learn more about what's new in 2.12, read the corresponding
[blog post](https://medium.com/google-exoplayer/exoplayer-2-12-whats-new-e43ef8ff72e7).
* Core library:
* `Player`:
* Add a top level playlist API based on a new `MediaItem` class
([#6161](https://github.com/google/ExoPlayer/issues/6161)). The new
methods for playlist manipulation are `setMediaItem(s)`,
`addMediaItem(s)`, `moveMediaItem(s)`, `removeMediaItem(s)` and
`clearMediaItems`. The playlist can be queried using
`getMediaItemCount` and `getMediaItemAt`. This API should be used
instead of `ConcatenatingMediaSource` in most cases. Learn more by
reading
[this blog post](https://medium.com/google-exoplayer/a-top-level-playlist-api-for-exoplayer-abe0a24edb55).
* Add `getCurrentMediaItem` for getting the currently playing item in
the playlist.
* Add `EventListener.onMediaItemTransition` to report when playback
transitions from one item to another in the playlist.
* Add `play` and `pause` convenience methods. They are equivalent to
`setPlayWhenReady(true)` and `setPlayWhenReady(false)` respectively.
* Add `getCurrentLiveOffset` for getting the offset of the current
playback position from the live edge of a live stream.
* Add `getTrackSelector` for getting the `TrackSelector` used by the
player.
* Add `AudioComponent.setAudioSessionId` to set the audio session ID.
This method is also available on `SimpleExoPlayer`.
* Remove `PlaybackParameters.skipSilence`, and replace it with
`AudioComponent.setSkipSilenceEnabled`. This method is also
available on `SimpleExoPlayer`. An
`AudioListener.onSkipSilenceEnabledChanged` callback is also added.
* Add `TextComponent.getCurrentCues` to get the current cues. This
method is also available on `SimpleExoPlayer`. The current cues are
no longer automatically forwarded to a `TextOutput` when it's added
to a `SimpleExoPlayer`.
* Add `Player.DeviceComponent` to query and control the device volume.
`SimpleExoPlayer` implements this interface.
* Deprecate and rename `getPlaybackError` to `getPlayerError` for
consistency.
* Deprecate and rename `onLoadingChanged` to `onIsLoadingChanged` for
consistency.
* Deprecate `EventListener.onPlayerStateChanged`, replacing it with
`EventListener.onPlayWhenReadyChanged` and
`EventListener.onPlaybackStateChanged`.
* Deprecate `EventListener.onSeekProcessed` because seek changes now
happen instantly and listening to `onPositionDiscontinuity` is
sufficient.
* `ExoPlayer`:
* Add `setMediaSource(s)` and `addMediaSource(s)` to `ExoPlayer`, for
adding `MediaSource` instances directly to the playlist.
* Add `ExoPlayer.setPauseAtEndOfMediaItems` to let the player pause at
the end of each media item
([#5660](https://github.com/google/ExoPlayer/issues/5660)).
* Allow passing `C.TIME_END_OF_SOURCE` to `PlayerMessage.setPosition`
to send a `PlayerMessage` at the end of a stream.
* `SimpleExoPlayer`:
* `SimpleExoPlayer` implements the new `MediaItem` based playlist API,
using a `MediaSourceFactory` to convert `MediaItem` instances to
playable `MediaSource` instances. A `DefaultMediaSourceFactory` is
used by default. `Builder.setMediaSourceFactory` allows setting a
custom factory.
* Update [APK shrinking guide](https://exoplayer.dev/shrinking.html)
to explain how shrinking works with the new `MediaItem` and
`DefaultMediaSourceFactory` implementations
([#7937](https://github.com/google/ExoPlayer/issues/7937)).
* Add additional options to `Builder` that were previously only
accessible via setters.
* Add opt-in to verify correct thread usage with
`setThrowsWhenUsingWrongThread(true)`
([#4463](https://github.com/google/ExoPlayer/issues/4463)).
* `Format`:
* Add a `Builder` and deprecate all `create` methods and most
`Format.copyWith` methods.
* Split `bitrate` into `averageBitrate` and `peakBitrate`
([#2863](https://github.com/google/ExoPlayer/issues/2863)).
* `LoadControl`:
* Add a `playbackPositionUs` parameter to `shouldContinueLoading`.
* Set the default minimum buffer duration in `DefaultLoadControl` to
50 seconds (equal to the default maximum buffer), and treat audio
and video the same.
* Add a `MetadataRetriever` API for retrieving track information and
static metadata for a media item
([#3609](https://github.com/google/ExoPlayer/issues/3609)).
* Attach an identifier and extra information to load error events passed
to `LoadErrorHandlingPolicy`
([#7309](https://github.com/google/ExoPlayer/issues/7309)).
`LoadErrorHandlingPolicy` implementations should migrate to implementing
the non-deprecated methods of the interface.
* Add an option to `MergingMediaSource` to adjust the time offsets between
the merged sources
([#6103](https://github.com/google/ExoPlayer/issues/6103)).
* Move `MediaSourceEventListener.LoadEventInfo` and
`MediaSourceEventListener.MediaLoadData` to be top-level classes in
`com.google.android.exoplayer2.source`.
* Move `SimpleDecoderVideoRenderer` and `SimpleDecoderAudioRenderer` to
`DecoderVideoRenderer` and `DecoderAudioRenderer` respectively, and
generalize them to work with `Decoder` rather than `SimpleDecoder`.
* Deprecate `C.MSG_*` constants, replacing them with constants in
`Renderer`.
* Split the `library-core` module into `library-core`, `library-common`
and `library-extractor`. The `library-core` module has an API dependency
on both of the new modules, so this change should be transparent to
developers including ExoPlayer using Gradle dependencies.
* Add a dependency on Guava.
* Video:
* Pass frame rate hint to `Surface.setFrameRate` on Android 11.
* Fix incorrect aspect ratio when transitioning from one video to another
with the same resolution, but a different pixel aspect ratio
([#6646](https://github.com/google/ExoPlayer/issues/6646)).
* Audio:
* Add experimental support for power efficient playback using audio
offload.
* Add support for using framework audio speed adjustment instead of
ExoPlayer's implementation
([#7502](https://github.com/google/ExoPlayer/issues/7502)). This option
can be set using
`DefaultRenderersFactory.setEnableAudioTrackPlaybackParams`.
* Add an event for the audio position starting to advance, to make it
easier for apps to determine when audio playout started
([#7577](https://github.com/google/ExoPlayer/issues/7577)).
* Generalize support for floating point audio.
* Add an option to `DefaultAudioSink` for enabling floating point
output. This option can also be set using
`DefaultRenderersFactory.setEnableAudioFloatOutput`.
* Add floating point output capability to `MediaCodecAudioRenderer`
and `LibopusAudioRenderer`, which is enabled automatically if the
audio sink supports floating point output and if it makes sense for
the content being played.
* Enable the floating point output capability of `FfmpegAudioRenderer`
automatically if the audio sink supports floating point output and
      if it makes sense for the content being played. The option to
      manually enable floating point output has been removed, since this is
      now done with the generalized option on `DefaultAudioSink`.
* In `MediaCodecAudioRenderer`, stop passing audio samples through
`MediaCodec` when playing PCM audio or encoded audio using passthrough
mode.
* Reuse audio decoders when transitioning through playlists of gapless
audio, rather than reinstantiating them.
* Check `DefaultAudioSink` supports passthrough, in addition to checking
the `AudioCapabilities`
([#7404](https://github.com/google/ExoPlayer/issues/7404)).
* Text:
* Many of the changes described below improve support for Japanese
subtitles. Read
[this blog post](https://medium.com/google-exoplayer/improved-japanese-subtitle-support-7598fee12cf4)
to learn more.
* Add a WebView-based output option to `SubtitleView`. This can display
some features not supported by the existing Canvas-based output such as
vertical text and rubies. It can be enabled by calling
`SubtitleView#setViewType(VIEW_TYPE_WEB)`.
* Recreate the decoder when handling and swallowing decode errors in
`TextRenderer`. This fixes a case where playback would never end when
playing content with malformed subtitles
([#7590](https://github.com/google/ExoPlayer/issues/7590)).
* Only apply `CaptionManager` font scaling in
`SubtitleView.setUserDefaultTextSize` if the `CaptionManager` is
enabled.
* Improve positioning of vertical cues when rendered horizontally.
* Redefine `Cue.lineType=LINE_TYPE_NUMBER` in terms of aligning the cue
text lines to grid of viewport lines. Only consider `Cue.lineAnchor`
when `Cue.lineType=LINE_TYPE_FRACTION`.
* WebVTT:
* Add support for default
[text](https://www.w3.org/TR/webvtt1/#default-text-color) and
[background](https://www.w3.org/TR/webvtt1/#default-text-background)
colors ([#6581](https://github.com/google/ExoPlayer/issues/6581)).
* Update position alignment parsing to recognise `line-left`, `center`
and `line-right`.
* Implement steps 4-10 of the
[WebVTT line computation algorithm](https://www.w3.org/TR/webvtt1/#cue-computed-line).
* Stop parsing unsupported CSS properties. The spec provides an
[exhaustive list](https://www.w3.org/TR/webvtt1/#the-cue-pseudo-element)
of which properties are supported.
* Parse the `ruby-position` CSS property.
* Parse the `text-combine-upright` CSS property (i.e., tate-chu-yoko).
* Parse the `<ruby>` and `<rt>` tags.
* TTML:
* Parse the `tts:combineText` property (i.e., tate-chu-yoko).
      * Parse `tts:ruby` and `tts:rubyPosition` properties.
* CEA-608:
* Implement timing-out of stuck captions, as permitted by
ANSI/CTA-608-E R-2014 Annex C.9. The default timeout is set to 16
seconds ([#7181](https://github.com/google/ExoPlayer/issues/7181)).
* Trim lines that exceed the maximum length of 32 characters
([#7341](https://github.com/google/ExoPlayer/issues/7341)).
* Fix positioning of roll-up captions in the top half of the screen
([#7475](https://github.com/google/ExoPlayer/issues/7475)).
* Stop automatically generating a CEA-608 track when playing
standalone MPEG-TS files. The previous behavior can still be
obtained by manually injecting a customized
`DefaultTsPayloadReaderFactory` into `TsExtractor`.
* Metadata: Add minimal DVB Application Information Table (AIT) support.
* DASH:
* Add support for canceling in-progress segment fetches
([#2848](https://github.com/google/ExoPlayer/issues/2848)).
* Add support for CEA-708 embedded in FMP4.
* SmoothStreaming:
* Add support for canceling in-progress segment fetches
([#2848](https://github.com/google/ExoPlayer/issues/2848)).
* HLS:
* Add support for discarding buffered media (e.g., to allow faster
adaptation to a higher quality variant)
([#6322](https://github.com/google/ExoPlayer/issues/6322)).
* Add support for canceling in-progress segment fetches
([#2848](https://github.com/google/ExoPlayer/issues/2848)).
* Respect 33-bit PTS wrapping when applying `X-TIMESTAMP-MAP` to WebVTT
timestamps ([#7464](https://github.com/google/ExoPlayer/issues/7464)).
* Extractors:
* Optimize the `Extractor` sniffing order to reduce start-up latency in
`DefaultExtractorsFactory` and `DefaultHlsExtractorsFactory`
([#6410](https://github.com/google/ExoPlayer/issues/6410)).
* Use filename extensions and response header MIME types to further
optimize `Extractor` sniffing order on a per-media basis.
* MP3: Add `IndexSeeker` for accurate seeks in VBR MP3 streams
([#6787](https://github.com/google/ExoPlayer/issues/6787)). This seeker
can be enabled by passing `FLAG_ENABLE_INDEX_SEEKING` to the
`Mp3Extractor`. A significant portion of the file may need to be scanned
when a seek is performed, which may be costly for large files.
* MP4: Fix playback of MP4 streams that contain Opus audio.
* FMP4: Add support for partially fragmented MP4s
([#7308](https://github.com/google/ExoPlayer/issues/7308)).
* Matroska:
* Support Dolby Vision
([#7267](https://github.com/google/ExoPlayer/issues/7267)).
* Populate `Format.label` with track titles.
* Remove support for the `Invisible` block header flag.
* MPEG-TS: Add support for MPEG-4 Part 2 and H.263
([#1603](https://github.com/google/ExoPlayer/issues/1603),
[#5107](https://github.com/google/ExoPlayer/issues/5107)).
* Ogg: Fix handling of non-contiguous pages
([#7230](https://github.com/google/ExoPlayer/issues/7230)).
* UI:
* Add `StyledPlayerView` and `StyledPlayerControlView`, which provide a
more polished user experience than `PlayerView` and `PlayerControlView`
at the cost of decreased customizability.
* Remove the previously deprecated `SimpleExoPlayerView` and
`PlaybackControlView` classes, along with the corresponding
`exo_simple_player_view.xml` and `exo_playback_control_view.xml` layout
resources. Use the equivalent `PlayerView`, `PlayerControlView`,
`exo_player_view.xml` and `exo_player_control_view.xml` instead.
* Add setter methods to `PlayerView` and `PlayerControlView` to set
whether the rewind, fast forward, previous and next buttons are shown
([#7410](https://github.com/google/ExoPlayer/issues/7410)).
* Update `TrackSelectionDialogBuilder` to use the AndroidX app compat
`AlertDialog` rather than the platform version, if available
([#7357](https://github.com/google/ExoPlayer/issues/7357)).
* Make UI components dispatch previous, next, fast forward and rewind
actions via their `ControlDispatcher`
([#6926](https://github.com/google/ExoPlayer/issues/6926)).
* Downloads and caching:
* Add `DownloadRequest.Builder`.
* Add `DownloadRequest.keySetId` to make it easier to store an offline
license keyset identifier alongside the other information that's
persisted in `DownloadIndex`.
* Support passing an `Executor` to `DefaultDownloaderFactory` on which
data downloads are performed.
* Parallelize and merge downloads in `SegmentDownloader` to improve
download speeds
([#5978](https://github.com/google/ExoPlayer/issues/5978)).
* Replace `CacheDataSinkFactory` and `CacheDataSourceFactory` with
`CacheDataSink.Factory` and `CacheDataSource.Factory` respectively.
* Remove `DownloadConstructorHelper` and instead use
`CacheDataSource.Factory` directly.
* Add `Requirements.DEVICE_STORAGE_NOT_LOW`, which can be specified as a
requirement to a `DownloadManager` for it to proceed with downloading.
* For failed downloads, propagate the `Exception` that caused the failure
to `DownloadManager.Listener.onDownloadChanged`.
* Support multiple non-overlapping write locks for the same key in
`SimpleCache`.
* Remove `CacheUtil`. Equivalent functionality is provided by a new
`CacheWriter` class, `Cache.getCachedBytes`, `Cache.removeResource` and
`CacheKeyFactory.DEFAULT`.
* DRM:
* Remove previously deprecated APIs to inject `DrmSessionManager` into
`Renderer` instances. `DrmSessionManager` must now be injected into
`MediaSource` instances via the `MediaSource` factories.
* Add the ability to inject a custom `DefaultDrmSessionManager` into
`OfflineLicenseHelper`
([#7078](https://github.com/google/ExoPlayer/issues/7078)).
* Keep DRM sessions alive for a short time before fully releasing them
([#7011](https://github.com/google/ExoPlayer/issues/7011),
[#6725](https://github.com/google/ExoPlayer/issues/6725),
[#7066](https://github.com/google/ExoPlayer/issues/7066)).
  * Remove support for `cbc1` and `cens` encryption schemes. Support for
these schemes was removed from the Android platform from API level 30,
and the range of API levels for which they are supported is too small to
be useful.
* Remove generic types from DRM components.
* Rename `DefaultDrmSessionEventListener` to `DrmSessionEventListener`.
* Track selection:
* Add `TrackSelection.shouldCancelMediaChunkLoad` to check whether an
ongoing load should be canceled
([#2848](https://github.com/google/ExoPlayer/issues/2848)).
* Add `DefaultTrackSelector` constraints for minimum video resolution,
bitrate and frame rate
([#4511](https://github.com/google/ExoPlayer/issues/4511)).
* Remove previously deprecated `DefaultTrackSelector` members.
* Data sources:
* Add `HttpDataSource.InvalidResponseCodeException#responseBody` field
([#6853](https://github.com/google/ExoPlayer/issues/6853)).
* Add `DataSpec.Builder` and deprecate most `DataSpec` constructors.
* Add `DataSpec.customData` to allow applications to pass custom data
through `DataSource` chains.
* Deprecate `CacheDataSinkFactory` and `CacheDataSourceFactory`, which are
replaced by `CacheDataSink.Factory` and `CacheDataSource.Factory`
respectively.
* Analytics:
* Extend `EventTime` with more details about the current player state
([#7332](https://github.com/google/ExoPlayer/issues/7332)).
* Add `AnalyticsListener.onVideoFrameProcessingOffset` to report how early
or late video frames are processed relative to them needing to be
presented. Video frame processing offset fields are also added to
`DecoderCounters`.
* Fix incorrect `MediaPeriodId` for some renderer errors reported by
`AnalyticsListener.onPlayerError`.
* Remove `onMediaPeriodCreated`, `onMediaPeriodReleased` and
`onReadingStarted` from `AnalyticsListener`.
* Test utils: Add `TestExoPlayer`, a utility class with APIs to create
`SimpleExoPlayer` instances with fake components for testing.
* Media2 extension: This is a new extension that makes it easy to use
ExoPlayer together with AndroidX Media2. Read
[this blog post](https://medium.com/google-exoplayer/the-media2-extension-for-exoplayer-d6b7d89b9063)
to learn more.
* Cast extension: Implement playlist API and deprecate the old queue
manipulation API.
* IMA extension:
* Migrate to new 'friendly obstruction' IMA SDK APIs, and allow apps to
register a purpose and detail reason for overlay views via
`AdsLoader.AdViewProvider`.
* Add support for audio-only ads display containers by returning `null`
from `AdsLoader.AdViewProvider.getAdViewGroup`, and allow skipping
audio-only ads via `ImaAdsLoader.skipAd`
([#7689](https://github.com/google/ExoPlayer/issues/7689)).
* Add `ImaAdsLoader.Builder.setCompanionAdSlots` so it's possible to set
companion ad slots without accessing the `AdDisplayContainer`.
* Add missing notification of `VideoAdPlayerCallback.onLoaded`.
* Fix handling of incompatible VPAID ads
([#7832](https://github.com/google/ExoPlayer/issues/7832)).
* Fix handling of empty ads at non-integer cue points
([#7889](https://github.com/google/ExoPlayer/issues/7889)).
* Demo app:
* Replace the `extensions` variant with `decoderExtensions` and update the
    demo app to use the Cronet and IMA extensions by default.
  * Expand the `exolist.json` schema, as well as the structure of intents that
can be used to launch `PlayerActivity`. See the
[Demo application page](https://exoplayer.dev/demo-application.html#playing-your-own-content)
for the latest versions. Changes include:
* Add `drm_session_for_clear_content` to allow attaching DRM sessions
to clear audio and video tracks.
* Add `clip_start_position_ms` and `clip_end_position_ms` to allow
clipped samples.
* Use `StyledPlayerControlView` rather than `PlayerView`.
* Remove support for media tunneling, random ABR and playback of spherical
video. Developers wishing to experiment with these features can enable
them by modifying the demo app source code.
* Add support for downloading DRM-protected content using offline Widevine
licenses.
### 2.11.8 (2020-08-25)
* Fix distorted playback of floating point audio when samples exceed the `[-1,
1]` nominal range.
* MP4:
* Add support for `piff` and `isml` brands
([#7584](https://github.com/google/ExoPlayer/issues/7584)).
* Fix playback of very short MP4 files.
* FMP4:
* Fix `saiz` and `senc` sample count checks, resolving a "length mismatch"
`ParserException` when playing certain protected FMP4 streams
([#7592](https://github.com/google/ExoPlayer/issues/7592)).
* Fix handling of `traf` boxes containing multiple `sbgp` or `sgpd` boxes.
* FLV: Ignore `SCRIPTDATA` segments with invalid name types, rather than
failing playback ([#7675](https://github.com/google/ExoPlayer/issues/7675)).
* Better infer the content type of `.ism` and `.isml` streaming URLs.
* Workaround an issue on Broadcom based devices where playbacks would not
transition to `STATE_ENDED` when using video tunneling mode
([#7647](https://github.com/google/ExoPlayer/issues/7647)).
* IMA extension: Upgrade to IMA SDK 3.19.4, bringing in a fix for setting the
media load timeout
([#7170](https://github.com/google/ExoPlayer/issues/7170)).
* Demo app: Fix playback of ClearKey protected content on API level 26 and
earlier ([#7735](https://github.com/google/ExoPlayer/issues/7735)).
### 2.11.7 (2020-06-29)
* IMA extension: Fix the way postroll "content complete" notifications are
handled to avoid repeatedly refreshing the timeline after playback ends.
### 2.11.6 (2020-06-19)
* UI: Prevent `PlayerView` from temporarily hiding the video surface when
seeking to an unprepared period within the current window. For example when
seeking over an ad group, or to the next period in a multi-period DASH
stream ([#5507](https://github.com/google/ExoPlayer/issues/5507)).
* IMA extension:
* Add option to skip ads before the start position.
* Catch unexpected errors in `stopAd` to avoid a crash
([#7492](https://github.com/google/ExoPlayer/issues/7492)).
* Fix a bug that caused playback to be stuck buffering on resuming from
the background after all ads had played to the end
([#7508](https://github.com/google/ExoPlayer/issues/7508)).
* Fix a bug where the number of ads in an ad group couldn't change
([#7477](https://github.com/google/ExoPlayer/issues/7477)).
* Work around unexpected `pauseAd`/`stopAd` for ads that have preloaded on
seeking to another position
([#7492](https://github.com/google/ExoPlayer/issues/7492)).
* Fix incorrect rounding of ad cue points.
* Fix handling of postrolls preloading
([#7518](https://github.com/google/ExoPlayer/issues/7518)).
### 2.11.5 (2020-06-05)
* Improve the smoothness of video playback immediately after starting, seeking
or resuming a playback
([#6901](https://github.com/google/ExoPlayer/issues/6901)).
* Add `SilenceMediaSource.Factory` to support tags.
* Enable the configuration of `SilenceSkippingAudioProcessor`
([#6705](https://github.com/google/ExoPlayer/issues/6705)).
* Fix bug where `PlayerMessages` throw an exception after `MediaSources` are
removed from the playlist
([#7278](https://github.com/google/ExoPlayer/issues/7278)).
* Fix "Not allowed to start service" `IllegalStateException` in
`DownloadService`
([#7306](https://github.com/google/ExoPlayer/issues/7306)).
* Fix issue in `AudioTrackPositionTracker` that could cause negative positions
to be reported at the start of playback and immediately after seeking
([#7456](https://github.com/google/ExoPlayer/issues/7456)).
* Fix further cases where downloads would sometimes not resume after their
network requirements are met
([#7453](https://github.com/google/ExoPlayer/issues/7453)).
* DASH:
* Merge trick play adaptation sets (i.e., adaptation sets marked with
`http://dashif.org/guidelines/trickmode`) into the same `TrackGroup` as
the main adaptation sets to which they refer. Trick play tracks are
marked with the `C.ROLE_FLAG_TRICK_PLAY` flag.
* Fix assertion failure in `SampleQueue` when playing DASH streams with
EMSG tracks ([#7273](https://github.com/google/ExoPlayer/issues/7273)).
* MP4: Store the Android capture frame rate only in `Format.metadata`.
`Format.frameRate` now stores the calculated frame rate.
* FMP4: Avoid throwing an exception while parsing default sample values whose
most significant bits are set
([#7207](https://github.com/google/ExoPlayer/issues/7207)).
* MP3: Fix issue parsing the XING headers belonging to files larger than 2GB
([#7337](https://github.com/google/ExoPlayer/issues/7337)).
* MPEG-TS: Fix issue where SEI NAL units were incorrectly dropped from H.265
samples ([#7113](https://github.com/google/ExoPlayer/issues/7113)).
* UI:
* Fix `DefaultTimeBar` to respect touch transformations
([#7303](https://github.com/google/ExoPlayer/issues/7303)).
* Add `showScrubber` and `hideScrubber` methods to `DefaultTimeBar`.
* Text:
* Use anti-aliasing and bitmap filtering when displaying bitmap subtitles.
* Fix `SubtitlePainter` to render `EDGE_TYPE_OUTLINE` using the correct
color.
* IMA extension:
* Upgrade to IMA SDK version 3.19.0, and migrate to new preloading APIs
([#6429](https://github.com/google/ExoPlayer/issues/6429)). This fixes
several issues involving preloading and handling of ad loading error
cases: ([#4140](https://github.com/google/ExoPlayer/issues/4140),
[#5006](https://github.com/google/ExoPlayer/issues/5006),
[#6030](https://github.com/google/ExoPlayer/issues/6030),
[#6097](https://github.com/google/ExoPlayer/issues/6097),
[#6425](https://github.com/google/ExoPlayer/issues/6425),
[#6967](https://github.com/google/ExoPlayer/issues/6967),
[#7041](https://github.com/google/ExoPlayer/issues/7041),
[#7161](https://github.com/google/ExoPlayer/issues/7161),
[#7212](https://github.com/google/ExoPlayer/issues/7212),
[#7340](https://github.com/google/ExoPlayer/issues/7340)).
* Add support for timing out ad preloading, to avoid playback getting
stuck if an ad group unexpectedly fails to load
([#5444](https://github.com/google/ExoPlayer/issues/5444),
[#5966](https://github.com/google/ExoPlayer/issues/5966),
[#7002](https://github.com/google/ExoPlayer/issues/7002)).
* Fix `AdsMediaSource` child `MediaSource`s not being released.
* Cronet extension: Default to using the Cronet implementation in Google Play
Services rather than Cronet Embedded. This allows Cronet to be used with a
negligible increase in application size, compared to approximately 8MB when
embedding the library.
* OkHttp extension: Upgrade OkHttp dependency to 3.12.11.
* MediaSession extension:
* Only set the playback state to `BUFFERING` if `playWhenReady` is true
([#7206](https://github.com/google/ExoPlayer/issues/7206)).
* Add missing `@Nullable` annotations to `MediaSessionConnector`
([#7234](https://github.com/google/ExoPlayer/issues/7234)).
* AV1 extension: Add a heuristic to determine the default number of threads
used for AV1 playback using the extension.
### 2.11.4 (2020-04-08)
* Add `SimpleExoPlayer.setWakeMode` to allow automatic `WifiLock` and
`WakeLock` handling
([#6914](https://github.com/google/ExoPlayer/issues/6914)). To use this
feature, you must add the
[WAKE_LOCK](https://developer.android.com/reference/android/Manifest.permission.html#WAKE_LOCK)
permission to your application's manifest file.
* Text:
* Catch and log exceptions in `TextRenderer` rather than re-throwing. This
allows playback to continue even if subtitle decoding fails
([#6885](https://github.com/google/ExoPlayer/issues/6885)).
* Allow missing hours and milliseconds in SubRip (.srt) timecodes
([#7122](https://github.com/google/ExoPlayer/issues/7122)).
* Audio:
* Enable playback speed adjustment and silence skipping for floating point
PCM audio, via resampling to 16-bit integer PCM. To output the original
floating point audio without adjustment, pass `enableFloatOutput=true`
to the `DefaultAudioSink` constructor
([#7134](https://github.com/google/ExoPlayer/issues/7134)).
* Workaround issue that could cause slower than realtime playback of AAC
on Android 10
([#6671](https://github.com/google/ExoPlayer/issues/6671)).
* Fix case where another app spuriously holding transient audio focus
could prevent ExoPlayer from acquiring audio focus for an indefinite
period of time
([#7182](https://github.com/google/ExoPlayer/issues/7182)).
* Fix case where the player volume could be permanently ducked if audio
focus was released whilst ducking.
* Fix playback of WAV files with trailing non-media bytes
([#7129](https://github.com/google/ExoPlayer/issues/7129)).
* Fix playback of ADTS files with mid-stream ID3 metadata.
* DRM:
* Fix stuck ad playbacks with DRM protected content
([#7188](https://github.com/google/ExoPlayer/issues/7188)).
* Fix playback of Widevine protected content that only provides V1 PSSH
atoms on API levels 21 and 22.
* Fix playback of PlayReady content on Fire TV Stick (Gen 2).
* DASH:
* Update the manifest URI to avoid repeated HTTP redirects
([#6907](https://github.com/google/ExoPlayer/issues/6907)).
* Parse period `AssetIdentifier` elements.
* HLS: Recognize IMSC subtitles
([#7185](https://github.com/google/ExoPlayer/issues/7185)).
* UI: Add an option to set whether to use the orientation sensor for rotation
in spherical playbacks
([#6761](https://github.com/google/ExoPlayer/issues/6761)).
* Analytics: Fix `PlaybackStatsListener` behavior when not keeping history
([#7160](https://github.com/google/ExoPlayer/issues/7160)).
* FFmpeg extension: Add support for `x86_64` architecture.
* Opus extension: Fix parsing of negative gain values
([#7046](https://github.com/google/ExoPlayer/issues/7046)).
* Cast extension: Upgrade `play-services-cast-framework` dependency to 18.1.0.
This fixes an issue where `RemoteServiceException` was thrown due to
`Context.startForegroundService()` not calling `Service.startForeground()`
([#7191](https://github.com/google/ExoPlayer/issues/7191)).
### 2.11.3 (2020-02-19)
* SmoothStreaming: Fix regression that broke playback in 2.11.2
([#6981](https://github.com/google/ExoPlayer/issues/6981)).
* DRM: Fix issue switching from protected content that uses a 16-byte
initialization vector to one that uses an 8-byte initialization vector
([#6982](https://github.com/google/ExoPlayer/issues/6982)).
### 2.11.2 (2020-02-13)
* Add Java FLAC extractor
([#6406](https://github.com/google/ExoPlayer/issues/6406)).
* Startup latency optimization:
* Reduce startup latency for DASH and SmoothStreaming playbacks by
allowing codec initialization to occur before the network connection for
the first media segment has been established.
* Reduce startup latency for on-demand DASH playbacks by allowing codec
initialization to occur before the sidx box has been loaded.
* Downloads:
* Fix download resumption when the requirements for them to continue are
met ([#6733](https://github.com/google/ExoPlayer/issues/6733),
[#6798](https://github.com/google/ExoPlayer/issues/6798)).
* Fix `DownloadHelper.createMediaSource` to use `customCacheKey` when
creating `ProgressiveMediaSource` instances.
* DRM: Fix `NullPointerException` when playing DRM protected content
([#6951](https://github.com/google/ExoPlayer/issues/6951)).
* Metadata:
* Update `IcyDecoder` to try ISO-8859-1 decoding if UTF-8 decoding fails.
Also change `IcyInfo.rawMetadata` from `String` to `byte[]` to allow
developers to handle data that's neither UTF-8 nor ISO-8859-1
([#6753](https://github.com/google/ExoPlayer/issues/6753)).
* Select multiple metadata tracks if multiple metadata renderers are
available ([#6676](https://github.com/google/ExoPlayer/issues/6676)).
  * Add support for ID3 genres added in Winamp 5.6 (2010).
* UI:
* Show ad group markers in `DefaultTimeBar` even if they are after the end
of the current window
([#6552](https://github.com/google/ExoPlayer/issues/6552)).
* Don't use notification chronometer if playback speed is != 1.0
([#6816](https://github.com/google/ExoPlayer/issues/6816)).
* HLS: Fix playback of DRM protected content that uses key rotation
([#6903](https://github.com/google/ExoPlayer/issues/6903)).
* WAV:
* Support IMA ADPCM encoded data.
* Improve support for G.711 A-law and mu-law encoded data.
* MP4: Support "twos" codec (big endian PCM)
([#5789](https://github.com/google/ExoPlayer/issues/5789)).
* FMP4: Add support for encrypted AC-4 tracks.
* HLS: Fix slow seeking into long MP3 segments
([#6155](https://github.com/google/ExoPlayer/issues/6155)).
* Fix handling of E-AC-3 streams that contain AC-3 syncframes
([#6602](https://github.com/google/ExoPlayer/issues/6602)).
* Fix playback of TrueHD streams in Matroska
([#6845](https://github.com/google/ExoPlayer/issues/6845)).
* Fix MKV subtitles to disappear when intended instead of lasting until the
next cue ([#6833](https://github.com/google/ExoPlayer/issues/6833)).
* OkHttp extension: Upgrade OkHttp dependency to 3.12.8, which fixes a class
of `SocketTimeoutException` issues when using HTTP/2
([#4078](https://github.com/google/ExoPlayer/issues/4078)).
* FLAC extension: Fix handling of bit depths other than 16 in `FLACDecoder`.
This issue caused FLAC streams with other bit depths to sound like white
noise on earlier releases, but only when embedded in a non-FLAC container
such as Matroska or MP4.
* Demo apps: Add
[GL demo app](https://github.com/google/ExoPlayer/tree/dev-v2/demos/gl) to
show how to render video to a `GLSurfaceView` while applying a GL shader.
([#6920](https://github.com/google/ExoPlayer/issues/6920)).
### 2.11.1 (2019-12-20)
* UI: Exclude `DefaultTimeBar` region from system gesture detection
([#6685](https://github.com/google/ExoPlayer/issues/6685)).
* ProGuard fixes:
* Ensure `Libgav1VideoRenderer` constructor is kept for use by
`DefaultRenderersFactory`
([#6773](https://github.com/google/ExoPlayer/issues/6773)).
* Ensure `VideoDecoderOutputBuffer` and its members are kept for use by
video decoder extensions.
* Ensure raw resources used with `RawResourceDataSource` are kept.
* Suppress spurious warnings about the `javax.annotation` package, and
restructure use of `IntDef` annotations to remove spurious warnings
about `SsaStyle$SsaAlignment`
([#6771](https://github.com/google/ExoPlayer/issues/6771)).
* Fix `CacheDataSource` to correctly propagate `DataSpec.httpRequestHeaders`.
* Fix issue with `DefaultDownloadIndex` that could result in an
`IllegalStateException` being thrown from
`DefaultDownloadIndex.getDownloadForCurrentRow`
([#6785](https://github.com/google/ExoPlayer/issues/6785)).
* Fix `IndexOutOfBoundsException` in `SinglePeriodTimeline.getWindow`
([#6776](https://github.com/google/ExoPlayer/issues/6776)).
* Add missing `@Nullable` to `MediaCodecAudioRenderer.getMediaClock` and
`SimpleDecoderAudioRenderer.getMediaClock`
([#6792](https://github.com/google/ExoPlayer/issues/6792)).
### 2.11.0 (2019-12-11)
* Core library:
* Replace `ExoPlayerFactory` by `SimpleExoPlayer.Builder` and
`ExoPlayer.Builder`.
* Add automatic `WakeLock` handling to `SimpleExoPlayer`, which can be
enabled by calling `SimpleExoPlayer.setHandleWakeLock`
([#5846](https://github.com/google/ExoPlayer/issues/5846)). To use this
feature, you must add the
[WAKE_LOCK](https://developer.android.com/reference/android/Manifest.permission.html#WAKE_LOCK)
permission to your application's manifest file.
* Add automatic "audio becoming noisy" handling to `SimpleExoPlayer`,
which can be enabled by calling
`SimpleExoPlayer.setHandleAudioBecomingNoisy`.
* Wrap decoder exceptions in a new `DecoderException` class and report
them as renderer errors.
* Add `Timeline.Window.isLive` to indicate that a window is a live stream
([#2668](https://github.com/google/ExoPlayer/issues/2668) and
[#5973](https://github.com/google/ExoPlayer/issues/5973)).
* Add `Timeline.Window.uid` to uniquely identify window instances.
* Deprecate `setTag` parameter of `Timeline.getWindow`. Tags will always
be set.
* Deprecate passing the manifest directly to
`Player.EventListener.onTimelineChanged`. It can be accessed through
    `Timeline.Window.manifest` or `Player.getCurrentManifest()`.
* Add `MediaSource.enable` and `MediaSource.disable` to improve resource
management in playlists.
* Add `MediaPeriod.isLoading` to improve `Player.isLoading` state.
* Fix issue where player errors are thrown too early at playlist
transitions ([#5407](https://github.com/google/ExoPlayer/issues/5407)).
* Add `Format` and renderer support flags to renderer
`ExoPlaybackException`s.
* Where there are multiple platform decoders for a given MIME type, prefer
to use one that advertises support for the profile and level of the
media being played over one that does not, even if it does not come
first in the `MediaCodecList`.
* DRM:
* Inject `DrmSessionManager` into the `MediaSources` instead of
`Renderers`. This allows each `MediaSource` in a
`ConcatenatingMediaSource` to use a different `DrmSessionManager`
([#5619](https://github.com/google/ExoPlayer/issues/5619)).
* Add `DefaultDrmSessionManager.Builder`, and remove
`DefaultDrmSessionManager` static factory methods that leaked
`ExoMediaDrm` instances
([#4721](https://github.com/google/ExoPlayer/issues/4721)).
* Add support for the use of secure decoders when playing clear content
([#4867](https://github.com/google/ExoPlayer/issues/4867)). This can be
enabled using `DefaultDrmSessionManager.Builder`'s
`setUseDrmSessionsForClearContent` method.
* Add support for custom `LoadErrorHandlingPolicies` in key and
provisioning requests
([#6334](https://github.com/google/ExoPlayer/issues/6334)). Custom
policies can be passed via `DefaultDrmSessionManager.Builder`'s
`setLoadErrorHandlingPolicy` method.
* Use `ExoMediaDrm.Provider` in `OfflineLicenseHelper` to avoid leaking
`ExoMediaDrm` instances
([#4721](https://github.com/google/ExoPlayer/issues/4721)).
* Track selection:
* Update `DefaultTrackSelector` to set a viewport constraint for the
default display by default.
* Update `DefaultTrackSelector` to set text language and role flag
constraints for the device's accessibility settings by default
([#5749](https://github.com/google/ExoPlayer/issues/5749)).
* Add option to set preferred text role flags using
`DefaultTrackSelector.ParametersBuilder.setPreferredTextRoleFlags`.
* LoadControl:
* Default `prioritizeTimeOverSizeThresholds` to false to prevent OOM
errors ([#6647](https://github.com/google/ExoPlayer/issues/6647)).
* Android 10:
* Set `compileSdkVersion` to 29 to enable use of Android 10 APIs.
* Expose new `isHardwareAccelerated`, `isSoftwareOnly` and `isVendor`
flags in `MediaCodecInfo`
([#5839](https://github.com/google/ExoPlayer/issues/5839)).
  * Add `allowedCapturePolicy` field to `AudioAttributes` to allow
configuration of the audio capture policy.
* Video:
* Pass the codec output `MediaFormat` to `VideoFrameMetadataListener`.
* Fix byte order of HDR10+ static metadata to match CTA-861.3.
* Support out-of-band HDR10+ dynamic metadata for VP9 in WebM/Matroska.
* Assume that protected content requires a secure decoder when evaluating
whether `MediaCodecVideoRenderer` supports a given video format
([#5568](https://github.com/google/ExoPlayer/issues/5568)).
* Fix Dolby Vision fallback to AVC and HEVC.
* Fix early end-of-stream detection when using video tunneling, on API
level 23 and above.
* Fix an issue where a keyframe was rendered rather than skipped when
performing an exact seek to a non-zero position close to the start of
the stream.
* Audio:
* Fix the start of audio getting truncated when transitioning to a new
item in a playlist of Opus streams.
* Workaround broken raw audio decoding on Oppo R9
([#5782](https://github.com/google/ExoPlayer/issues/5782)).
* Reconfigure audio sink when PCM encoding changes
([#6601](https://github.com/google/ExoPlayer/issues/6601)).
* Allow `AdtsExtractor` to encounter EOF when calculating average frame
size ([#6700](https://github.com/google/ExoPlayer/issues/6700)).
* Text:
* Add support for position and overlapping start/end times in SSA/ASS
subtitles ([#6320](https://github.com/google/ExoPlayer/issues/6320)).
* Require an end time or duration for SubRip (SRT) and SubStation Alpha
(SSA/ASS) subtitles. This applies to both sidecar files & subtitles
[embedded in Matroska streams](https://matroska.org/technical/specs/subtitles/index.html).
* UI:
* Make showing and hiding player controls accessible to TalkBack in
`PlayerView`.
* Rename `spherical_view` surface type to `spherical_gl_surface_view`.
* Make it easier to override the shuffle, repeat, fullscreen, VR and small
notification icon assets
([#6709](https://github.com/google/ExoPlayer/issues/6709)).
* Analytics:
* Remove `AnalyticsCollector.Factory`. Instances should be created
directly, and the `Player` should be set by calling
`AnalyticsCollector.setPlayer`.
* Add `PlaybackStatsListener` to collect `PlaybackStats` for analysis and
analytics reporting.
* DataSource
* Add `DataSpec.httpRequestHeaders` to support setting per-request headers
for HTTP and HTTPS.
* Remove the `DataSpec.FLAG_ALLOW_ICY_METADATA` flag. Use is replaced by
setting the `IcyHeaders.REQUEST_HEADER_ENABLE_METADATA_NAME` header in
`DataSpec.httpRequestHeaders`.
* Fail more explicitly when local file URIs contain invalid parts (e.g. a
fragment) ([#6470](https://github.com/google/ExoPlayer/issues/6470)).
* DASH: Support negative @r values in segment timelines
([#1787](https://github.com/google/ExoPlayer/issues/1787)).
* HLS:
* Use peak bitrate rather than average bitrate for adaptive track
selection.
* Fix issue where streams could get stuck in an infinite buffering state
after a postroll ad
([#6314](https://github.com/google/ExoPlayer/issues/6314)).
* Matroska: Support lacing in Blocks
([#3026](https://github.com/google/ExoPlayer/issues/3026)).
* AV1 extension:
* New in this release. The AV1 extension allows use of the
[libgav1 software decoder](https://chromium.googlesource.com/codecs/libgav1/)
in ExoPlayer. You can read more about playing AV1 videos with ExoPlayer
[here](https://medium.com/google-exoplayer/playing-av1-videos-with-exoplayer-a7cb19bedef9).
* VP9 extension:
* Update to use NDK r20.
* Rename `VpxVideoSurfaceView` to `VideoDecoderSurfaceView` and move it to
the core library.
* Move `LibvpxVideoRenderer.MSG_SET_OUTPUT_BUFFER_RENDERER` to
`C.MSG_SET_OUTPUT_BUFFER_RENDERER`.
* Use `VideoDecoderRenderer` as an implementation of
`VideoDecoderOutputBufferRenderer`, instead of
`VideoDecoderSurfaceView`.
* FLAC extension: Update to use NDK r20.
* Opus extension: Update to use NDK r20.
* FFmpeg extension:
* Update to use NDK r20.
* Update to use FFmpeg version 4.2. It is necessary to rebuild the native
part of the extension after this change, following the instructions in
the extension's readme.
* MediaSession extension: Add `MediaSessionConnector.setCaptionCallback` to
support `ACTION_SET_CAPTIONING_ENABLED` events.
* GVR extension: This extension is now deprecated.
* Demo apps:
* Add
[SurfaceControl demo app](https://github.com/google/ExoPlayer/tree/r2.11.0/demos/surface)
to show how to use the Android 10 `SurfaceControl` API with ExoPlayer
([#677](https://github.com/google/ExoPlayer/issues/677)).
* Add support for subtitle files to the
[Main demo app](https://github.com/google/ExoPlayer/tree/r2.11.0/demos/main)
([#5523](https://github.com/google/ExoPlayer/issues/5523)).
* Remove the IMA demo app. IMA functionality is demonstrated by the
[main demo app](https://github.com/google/ExoPlayer/tree/r2.11.0/demos/main).
* Add basic DRM support to the
[Cast demo app](https://github.com/google/ExoPlayer/tree/r2.11.0/demos/cast).
* TestUtils: Publish the `testutils` module to simplify unit testing with
ExoPlayer ([#6267](https://github.com/google/ExoPlayer/issues/6267)).
* IMA extension: Remove `AdsManager` listeners on release to avoid leaking an
`AdEventListener` provided by the app
([#6687](https://github.com/google/ExoPlayer/issues/6687)).
### 2.10.8 (2019-11-19)
* E-AC3 JOC
* Handle new signaling in DASH manifests
([#6636](https://github.com/google/ExoPlayer/issues/6636)).
* Fix E-AC3 JOC passthrough playback failing to initialize due to
incorrect channel count check.
* FLAC
* Fix sniffing for some FLAC streams.
* Fix FLAC `Format.bitrate` values.
* Parse ALAC channel count and sample rate information from a more robust
source when contained in MP4
([#6648](https://github.com/google/ExoPlayer/issues/6648)).
* Fix seeking into multi-period content in the edge case that the period
containing the seek position has just been removed
([#6641](https://github.com/google/ExoPlayer/issues/6641)).
### 2.10.7 (2019-11-06)
* HLS: Fix detection of Dolby Atmos to match the HLS authoring specification.
* MediaSession extension: Update shuffle and repeat modes when playback state
is invalidated ([#6582](https://github.com/google/ExoPlayer/issues/6582)).
* Fix the start of audio getting truncated when transitioning to a new item in
a playlist of Opus streams.
### 2.10.6 (2019-10-17)
* Add `Player.onPlaybackSuppressionReasonChanged` to allow listeners to detect
  playback suppressions (e.g. transient audio focus loss) directly
([#6203](https://github.com/google/ExoPlayer/issues/6203)).
* DASH:
* Support `Label` elements
([#6297](https://github.com/google/ExoPlayer/issues/6297)).
* Support legacy audio channel configuration
([#6523](https://github.com/google/ExoPlayer/issues/6523)).
* HLS: Add support for ID3 in EMSG when using FMP4 streams
([spec](https://aomediacodec.github.io/av1-id3/)).
* MP3: Add workaround to avoid prematurely ending playback of some SHOUTcast
live streams ([#6537](https://github.com/google/ExoPlayer/issues/6537),
[#6315](https://github.com/google/ExoPlayer/issues/6315) and
[#5658](https://github.com/google/ExoPlayer/issues/5658)).
* Metadata: Expose the raw ICY metadata through `IcyInfo`
([#6476](https://github.com/google/ExoPlayer/issues/6476)).
* UI:
* Setting `app:played_color` on `PlayerView` and `PlayerControlView` no
    longer adjusts the colors of the scrubber handle, buffered and unplayed
parts of the time bar. These can be set separately using
    `app:scrubber_color`, `app:buffered_color` and `app:unplayed_color`
respectively.
* Setting `app:ad_marker_color` on `PlayerView` and `PlayerControlView` no
longer adjusts the color of played ad markers. The color of played ad
markers can be set separately using `app:played_ad_marker_color`.
### 2.10.5 (2019-09-20)
* Add `Player.isPlaying` and `EventListener.onIsPlayingChanged` to check
whether the playback position is advancing. This helps to determine if
playback is suppressed due to audio focus loss. Also add
  `Player.getPlaybackSuppressionReason` to determine the reason of the
suppression ([#6203](https://github.com/google/ExoPlayer/issues/6203)).
* Track selection
* Add `allowAudioMixedChannelCountAdaptiveness` parameter to
`DefaultTrackSelector` to allow adaptive selections of audio tracks with
different channel counts.
* Improve text selection logic to always prefer the better language
matches over other selection parameters.
* Fix audio selection issue where languages are compared by bitrate
([#6335](https://github.com/google/ExoPlayer/issues/6335)).
* Performance
* Increase maximum video buffer size from 13MB to 32MB. The previous
default was too small for high quality streams.
* Reset `DefaultBandwidthMeter` to initial values on network change.
* Bypass sniffing in `ProgressiveMediaPeriod` in case a single extractor
is provided ([#6325](https://github.com/google/ExoPlayer/issues/6325)).
* Metadata
* Support EMSG V1 boxes in FMP4.
* Support unwrapping of nested metadata (e.g. ID3 and SCTE-35 in EMSG).
* Add `HttpDataSource.getResponseCode` to provide the status code associated
with the most recent HTTP response.
* Fix transitions between packed audio and non-packed audio segments in HLS
([#6444](https://github.com/google/ExoPlayer/issues/6444)).
* Fix issue where a request would be retried after encountering an error, even
though the `LoadErrorHandlingPolicy` classified the error as fatal.
* Fix initialization data handling for FLAC in MP4
([#6396](https://github.com/google/ExoPlayer/issues/6396),
[#6397](https://github.com/google/ExoPlayer/issues/6397)).
* Fix decoder selection for E-AC3 JOC streams
([#6398](https://github.com/google/ExoPlayer/issues/6398)).
* Fix `PlayerNotificationManager` to show play icon rather than pause icon
when playback is ended
([#6324](https://github.com/google/ExoPlayer/issues/6324)).
* RTMP extension: Upgrade LibRtmp-Client-for-Android to fix RTMP playback
issues ([#4200](https://github.com/google/ExoPlayer/issues/4200),
[#4249](https://github.com/google/ExoPlayer/issues/4249),
[#4319](https://github.com/google/ExoPlayer/issues/4319),
[#4337](https://github.com/google/ExoPlayer/issues/4337)).
* IMA extension: Fix crash in `ImaAdsLoader.onTimelineChanged`
([#5831](https://github.com/google/ExoPlayer/issues/5831)).
### 2.10.4 (2019-07-26)
* Offline: Add `Scheduler` implementation that uses `WorkManager`.
* Add ability to specify a description when creating notification channels via
ExoPlayer library classes.
* Switch normalized BCP-47 language codes to use 2-letter ISO 639-1 language
tags instead of 3-letter ISO 639-2 language tags.
* Ensure the `SilenceMediaSource` position is in range
([#6229](https://github.com/google/ExoPlayer/issues/6229)).
* WAV: Calculate correct duration for clipped streams
([#6241](https://github.com/google/ExoPlayer/issues/6241)).
* MP3: Use CBR header bitrate, not calculated bitrate. This reverts a change
from 2.9.3 ([#6238](https://github.com/google/ExoPlayer/issues/6238)).
* FLAC extension: Parse `VORBIS_COMMENT` and `PICTURE` metadata
([#5527](https://github.com/google/ExoPlayer/issues/5527)).
* Fix issue where initial seek positions get ignored when playing a preroll ad
([#6201](https://github.com/google/ExoPlayer/issues/6201)).
* Fix issue where invalid language tags were normalized to "und" instead of
keeping the original
([#6153](https://github.com/google/ExoPlayer/issues/6153)).
* Fix `DataSchemeDataSource` re-opening and range requests
([#6192](https://github.com/google/ExoPlayer/issues/6192)).
* Fix FLAC and ALAC playback on some LG devices
([#5938](https://github.com/google/ExoPlayer/issues/5938)).
* Fix issue when calling `performClick` on `PlayerView` without
`PlayerControlView`
([#6260](https://github.com/google/ExoPlayer/issues/6260)).
* Fix issue where playback speeds are not used in adaptive track selections
after manual selection changes for other renderers
([#6256](https://github.com/google/ExoPlayer/issues/6256)).
### 2.10.3 (2019-07-09)
* Display last frame when seeking to end of stream
([#2568](https://github.com/google/ExoPlayer/issues/2568)).
* Audio:
* Fix an issue where not all audio was played out when the configuration
for the underlying track was changing (e.g., at some period
transitions).
* Fix an issue where playback speed was applied inaccurately in playlists
([#6117](https://github.com/google/ExoPlayer/issues/6117)).
* UI: Fix `PlayerView` incorrectly consuming touch events if no controller is
attached ([#6109](https://github.com/google/ExoPlayer/issues/6109)).
* CEA608: Fix repetition of special North American characters
([#6133](https://github.com/google/ExoPlayer/issues/6133)).
* FLV: Fix bug that caused playback of some live streams to not start
([#6111](https://github.com/google/ExoPlayer/issues/6111)).
* SmoothStreaming: Parse text stream `Subtype` into `Format.roleFlags`.
* MediaSession extension: Fix `MediaSessionConnector.play()` not resuming
playback ([#6093](https://github.com/google/ExoPlayer/issues/6093)).
### 2.10.2 (2019-06-03)
* Add `ResolvingDataSource` for just-in-time resolution of `DataSpec`s
([#5779](https://github.com/google/ExoPlayer/issues/5779)).
* Add `SilenceMediaSource` that can be used to play silence of a given
duration ([#5735](https://github.com/google/ExoPlayer/issues/5735)).
* Offline:
* Prevent unexpected `DownloadHelper.Callback.onPrepared` callbacks after
preparation of a `DownloadHelper` fails
([#5915](https://github.com/google/ExoPlayer/issues/5915)).
* Fix `CacheUtil.cache()` downloading too much data
([#5927](https://github.com/google/ExoPlayer/issues/5927)).
* Fix misreporting cached bytes when caching is paused
([#5573](https://github.com/google/ExoPlayer/issues/5573)).
* UI:
* Allow setting `DefaultTimeBar` attributes on `PlayerView` and
`PlayerControlView`.
* Change playback controls toggle from touch down to touch up events
([#5784](https://github.com/google/ExoPlayer/issues/5784)).
* Fix issue where playback controls were not kept visible on key presses
([#5963](https://github.com/google/ExoPlayer/issues/5963)).
* Subtitles:
* CEA-608: Handle XDS and TEXT modes
([#5807](https://github.com/google/ExoPlayer/pull/5807)).
* TTML: Fix bitmap rendering
([#5633](https://github.com/google/ExoPlayer/pull/5633)).
* IMA: Fix ad pod index offset calculation without preroll
([#5928](https://github.com/google/ExoPlayer/issues/5928)).
* Add a `playWhenReady` flag to MediaSessionConnector.PlaybackPreparer methods
to indicate whether a controller sent a play or only a prepare command. This
  allows taking advantage of decoder reuse with the MediaSessionConnector
([#5891](https://github.com/google/ExoPlayer/issues/5891)).
* Add `ProgressUpdateListener` to `PlayerControlView`
([#5834](https://github.com/google/ExoPlayer/issues/5834)).
* Add support for auto-detecting UDP streams in `DefaultDataSource`
([#6036](https://github.com/google/ExoPlayer/pull/6036)).
* Allow enabling decoder fallback with `DefaultRenderersFactory`
([#5942](https://github.com/google/ExoPlayer/issues/5942)).
* Gracefully handle revoked `ACCESS_NETWORK_STATE` permission
([#6019](https://github.com/google/ExoPlayer/issues/6019)).
* Fix decoding problems when seeking back after seeking beyond a mid-roll ad
([#6009](https://github.com/google/ExoPlayer/issues/6009)).
* Fix application of `maxAudioBitrate` for adaptive audio track groups
([#6006](https://github.com/google/ExoPlayer/issues/6006)).
* Fix bug caused by parallel adaptive track selection using `Format`s without
bitrate information
([#5971](https://github.com/google/ExoPlayer/issues/5971)).
* Fix bug in `CastPlayer.getCurrentWindowIndex()`
([#5955](https://github.com/google/ExoPlayer/issues/5955)).
### 2.10.1 (2019-05-16)
* Offline: Add option to remove all downloads.
* HLS: Fix `NullPointerException` when using HLS chunkless preparation
([#5868](https://github.com/google/ExoPlayer/issues/5868)).
* Fix handling of empty values and line terminators in SHOUTcast ICY metadata
([#5876](https://github.com/google/ExoPlayer/issues/5876)).
* Fix DVB subtitles for SDK 28
([#5862](https://github.com/google/ExoPlayer/issues/5862)).
* Add a workaround for a decoder failure on ZTE Axon7 mini devices when
playing 48kHz audio
([#5821](https://github.com/google/ExoPlayer/issues/5821)).
### 2.10.0 (2019-04-15)
* Core library:
* Improve decoder re-use between playbacks
([#2826](https://github.com/google/ExoPlayer/issues/2826)). Read
[this blog post](https://medium.com/google-exoplayer/improved-decoder-reuse-in-exoplayer-ef4c6d99591d)
for more details.
* Rename `ExtractorMediaSource` to `ProgressiveMediaSource`.
* Fix issue where using `ProgressiveMediaSource.Factory` would mean that
`DefaultExtractorsFactory` would be kept by proguard. Custom
`ExtractorsFactory` instances must now be passed via the
`ProgressiveMediaSource.Factory` constructor, and `setExtractorsFactory`
is deprecated.
* Make the default minimum buffer size equal the maximum buffer size for
video playbacks
([#2083](https://github.com/google/ExoPlayer/issues/2083)).
* Move `PriorityTaskManager` from `DefaultLoadControl` to
`SimpleExoPlayer`.
* Add new `ExoPlaybackException` types for remote exceptions and
out-of-memory errors.
* Use full BCP 47 language tags in `Format`.
* Do not retry failed loads whose error is `FileNotFoundException`.
* Fix issue where not resetting the position for a new `MediaSource` in
calls to `ExoPlayer.prepare` causes an `IndexOutOfBoundsException`
([#5520](https://github.com/google/ExoPlayer/issues/5520)).
* Offline:
* Improve offline support. `DownloadManager` now tracks all offline
content, not just tasks in progress. Read
[this page](https://exoplayer.dev/downloading-media.html) for more
details.
* Caching:
* Improve performance of `SimpleCache`
([#4253](https://github.com/google/ExoPlayer/issues/4253)).
* Cache data with unknown length by default. The previous flag to opt in
to this behavior (`DataSpec.FLAG_ALLOW_CACHING_UNKNOWN_LENGTH`) has been
replaced with an opt out flag
(`DataSpec.FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN`).
* Extractors:
* MP4/FMP4: Add support for Dolby Vision.
* MP4: Fix issue handling meta atoms in some streams
([#5698](https://github.com/google/ExoPlayer/issues/5698),
[#5694](https://github.com/google/ExoPlayer/issues/5694)).
* MP3: Add support for SHOUTcast ICY metadata
([#3735](https://github.com/google/ExoPlayer/issues/3735)).
* MP3: Fix ID3 frame unsynchronization
([#5673](https://github.com/google/ExoPlayer/issues/5673)).
* MP3: Fix playback of badly clipped files
([#5772](https://github.com/google/ExoPlayer/issues/5772)).
* MPEG-TS: Enable HDMV DTS stream detection only if a flag is set. By
default (i.e. if the flag is not set), the 0x82 elementary stream type
is now treated as an SCTE subtitle track
([#5330](https://github.com/google/ExoPlayer/issues/5330)).
* Track selection:
* Add options for controlling audio track selections to
`DefaultTrackSelector`
([#3314](https://github.com/google/ExoPlayer/issues/3314)).
* Update `TrackSelection.Factory` interface to support creating all track
selections together.
  * Allow specifying a selection reason for a `SelectionOverride`.
* Select audio track based on system language if no preference is
provided.
* When no text language preference matches, only select forced text tracks
whose language matches the selected audio language.
* UI:
* Update `DefaultTimeBar` based on duration of media and add parameter to
set the minimum update interval to control the smoothness of the updates
([#5040](https://github.com/google/ExoPlayer/issues/5040)).
* Move creation of dialogs for `TrackSelectionView`s to
`TrackSelectionDialogBuilder` and add option to select multiple
overrides.
* Change signature of `PlayerNotificationManager.NotificationListener` to
better fit service requirements.
* Add option to include navigation actions in the compact mode of
notifications created using `PlayerNotificationManager`.
* Fix issues with flickering notifications on KitKat when using
`PlayerNotificationManager` and `DownloadNotificationUtil`. For the
latter, applications should switch to using
`DownloadNotificationHelper`.
* Fix accuracy of D-pad seeking in `DefaultTimeBar`
([#5767](https://github.com/google/ExoPlayer/issues/5767)).
* Audio:
* Allow `AudioProcessor`s to be drained of pending output after they are
reconfigured.
* Fix an issue that caused audio to be truncated at the end of a period
when switching to a new period where gapless playback information was
newly present or newly absent.
* Add support for reading AC-4 streams
([#5303](https://github.com/google/ExoPlayer/pull/5303)).
* Video:
* Remove `MediaCodecSelector.DEFAULT_WITH_FALLBACK`. Apps should instead
signal that fallback should be used by passing `true` as the
`enableDecoderFallback` parameter when instantiating the video renderer.
* Support video tunneling when the decoder is not listed first for the
MIME type ([#3100](https://github.com/google/ExoPlayer/issues/3100)).
* Query `MediaCodecList.ALL_CODECS` when selecting a tunneling decoder
([#5547](https://github.com/google/ExoPlayer/issues/5547)).
* DRM:
* Fix black flicker when keys rotate in DRM protected content
([#3561](https://github.com/google/ExoPlayer/issues/3561)).
* Work around lack of LA_URL attribute in PlayReady key request init data.
* CEA-608: Improved conformance to the specification
([#3860](https://github.com/google/ExoPlayer/issues/3860)).
* DASH:
* Parse role and accessibility descriptors into `Format.roleFlags`.
* Support multiple CEA-608 channels muxed into FMP4 representations
([#5656](https://github.com/google/ExoPlayer/issues/5656)).
* HLS:
* Prevent unnecessary reloads of initialization segments.
* Form an adaptive track group out of audio renditions with matching name.
* Support encrypted initialization segments
([#5441](https://github.com/google/ExoPlayer/issues/5441)).
* Parse `EXT-X-MEDIA` `CHARACTERISTICS` attribute into `Format.roleFlags`.
* Add metadata entry for HLS tracks to expose master playlist information.
* Prevent `IndexOutOfBoundsException` in some live HLS scenarios
([#5816](https://github.com/google/ExoPlayer/issues/5816)).
* Support for playing spherical videos on Daydream.
* Cast extension: Work around Cast framework returning a limited-size queue
items list ([#4964](https://github.com/google/ExoPlayer/issues/4964)).
* VP9 extension: Remove RGB output mode and libyuv dependency, and switch to
surface YUV output as the default. Remove constructor parameters
`scaleToFit` and `useSurfaceYuvOutput`.
* MediaSession extension:
* Let apps intercept media button events
([#5179](https://github.com/google/ExoPlayer/issues/5179)).
* Fix issue with `TimelineQueueNavigator` not publishing the queue in
shuffled order when in shuffle mode.
* Allow handling of custom commands via `registerCustomCommandReceiver`.
* Add ability to include an extras `Bundle` when reporting a custom error.
* Log warnings when extension native libraries can't be used, to help with
diagnosing playback failures
([#5788](https://github.com/google/ExoPlayer/issues/5788)).
### 2.9.6 (2019-02-19)
* Remove `player` and `isTopLevelSource` parameters from
`MediaSource.prepare`.
* IMA extension:
* Require setting the `Player` on `AdsLoader` instances before playback.
* Remove deprecated `ImaAdsMediaSource`. Create `AdsMediaSource` with an
`ImaAdsLoader` instead.
* Remove deprecated `AdsMediaSource` constructors. Listen for media source
events using `AdsMediaSource.addEventListener`, and ad interaction
events by adding a listener when building `ImaAdsLoader`.
* Allow apps to register playback-related obstructing views that are on
top of their ad display containers via `AdsLoader.AdViewProvider`.
`PlayerView` implements this interface and will register its control
view. This makes it possible for ad loading SDKs to calculate ad
viewability accurately.
* DASH: Fix issue handling large `EventStream` presentation timestamps
([#5490](https://github.com/google/ExoPlayer/issues/5490)).
* HLS: Fix transition to STATE_ENDED when playing fragmented mp4 in chunkless
preparation ([#5524](https://github.com/google/ExoPlayer/issues/5524)).
* Revert workaround for video quality problems with Amlogic decoders, as this
may cause problems for some devices and/or non-interlaced content
([#5003](https://github.com/google/ExoPlayer/issues/5003)).
### 2.9.5 (2019-01-31)
* HLS: Parse `CHANNELS` attribute from `EXT-X-MEDIA` tag.
* ConcatenatingMediaSource:
* Add `Handler` parameter to methods that take a callback `Runnable`.
* Fix issue with dropped messages when releasing the source
([#5464](https://github.com/google/ExoPlayer/issues/5464)).
* ExtractorMediaSource: Fix issue that could cause the player to get stuck
buffering at the end of the media.
* PlayerView: Fix issue preventing `OnClickListener` from receiving events
([#5433](https://github.com/google/ExoPlayer/issues/5433)).
* IMA extension: Upgrade IMA dependency to 3.10.6.
* Cronet extension: Upgrade Cronet dependency to 71.3578.98.
* OkHttp extension: Upgrade OkHttp dependency to 3.12.1.
* MP3: Wider fix for issue where streams would play twice on some Samsung
devices ([#4519](https://github.com/google/ExoPlayer/issues/4519)).
### 2.9.4 (2019-01-15)
* IMA extension: Clear ads loader listeners on release
([#4114](https://github.com/google/ExoPlayer/issues/4114)).
* SmoothStreaming: Fix support for subtitles in DRM protected streams
([#5378](https://github.com/google/ExoPlayer/issues/5378)).
* FFmpeg extension: Treat invalid data errors as non-fatal to match the
behavior of MediaCodec
([#5293](https://github.com/google/ExoPlayer/issues/5293)).
* GVR extension: upgrade GVR SDK dependency to 1.190.0.
* Associate fatal player errors of type SOURCE with the loading source in
`AnalyticsListener.EventTime`
([#5407](https://github.com/google/ExoPlayer/issues/5407)).
* Add `startPositionUs` to `MediaSource.createPeriod`. This fixes an issue
where using lazy preparation in `ConcatenatingMediaSource` with an
`ExtractorMediaSource` overrides initial seek positions
([#5350](https://github.com/google/ExoPlayer/issues/5350)).
* Add subtext to the `MediaDescriptionAdapter` of the
`PlayerNotificationManager`.
* Add workaround for video quality problems with Amlogic decoders
([#5003](https://github.com/google/ExoPlayer/issues/5003)).
* Fix issue where sending callbacks for playlist changes may cause problems
because of parallel player access
([#5240](https://github.com/google/ExoPlayer/issues/5240)).
* Fix issue with reusing a `ClippingMediaSource` with an inner
`ExtractorMediaSource` and a non-zero start position
([#5351](https://github.com/google/ExoPlayer/issues/5351)).
* Fix issue where uneven track durations in MP4 streams can cause OOM problems
([#3670](https://github.com/google/ExoPlayer/issues/3670)).
### 2.9.3 (2018-12-20)
* Captions: Support PNG subtitles in SMPTE-TT
([#1583](https://github.com/google/ExoPlayer/issues/1583)).
* MPEG-TS: Use random access indicators to minimize the need for
`FLAG_ALLOW_NON_IDR_KEYFRAMES`.
* Downloading: Reduce time taken to remove downloads
([#5136](https://github.com/google/ExoPlayer/issues/5136)).
* MP3:
* Use the true bitrate for constant-bitrate MP3 seeking.
* Fix issue where streams would play twice on some Samsung devices
([#4519](https://github.com/google/ExoPlayer/issues/4519)).
* Fix regression where some audio formats were incorrectly marked as being
unplayable due to under-reporting of platform decoder capabilities
([#5145](https://github.com/google/ExoPlayer/issues/5145)).
* Fix decode-only frame skipping on Nvidia Shield TV devices.
* Workaround for MiTV (dangal) issue when swapping output surface
([#5169](https://github.com/google/ExoPlayer/issues/5169)).
### 2.9.2 (2018-11-28)
* HLS:
* Fix issue causing unnecessary media playlist requests when playing live
streams ([#5059](https://github.com/google/ExoPlayer/issues/5059)).
* Fix decoder re-instantiation issue for packed audio streams
([#5063](https://github.com/google/ExoPlayer/issues/5063)).
* MP4: Support Opus and FLAC in the MP4 container, and in DASH
([#4883](https://github.com/google/ExoPlayer/issues/4883)).
* DASH: Fix detecting the end of live events
([#4780](https://github.com/google/ExoPlayer/issues/4780)).
* Spherical video: Fall back to `TYPE_ROTATION_VECTOR` if
`TYPE_GAME_ROTATION_VECTOR` is unavailable
([#5119](https://github.com/google/ExoPlayer/issues/5119)).
* Support seeking for a wider range of MPEG-TS streams
([#5097](https://github.com/google/ExoPlayer/issues/5097)).
* Include channel count in audio capabilities check
([#4690](https://github.com/google/ExoPlayer/issues/4690)).
* Fix issue with applying the `show_buffering` attribute in `PlayerView`
([#5139](https://github.com/google/ExoPlayer/issues/5139)).
* Fix issue where null `Metadata` was output when it failed to decode
([#5149](https://github.com/google/ExoPlayer/issues/5149)).
* Fix playback of some invalid but playable MP4 streams by replacing
assertions with logged warnings in sample table parsing code
([#5162](https://github.com/google/ExoPlayer/issues/5162)).
* Fix UUID passed to `MediaCrypto` when using `C.CLEARKEY_UUID` before API 27.
### 2.9.1 (2018-11-01)
* Add convenience methods `Player.next`, `Player.previous`, `Player.hasNext`
and `Player.hasPrevious`
([#4863](https://github.com/google/ExoPlayer/issues/4863)).
* Improve initial bandwidth meter estimates using the current country and
network type.
* IMA extension:
* For preroll to live stream transitions, project forward the loading
position to avoid being behind the live window.
* Let apps specify whether to focus the skip button on ATV
([#5019](https://github.com/google/ExoPlayer/issues/5019)).
* MP3:
* Support seeking based on MLLT metadata
([#3241](https://github.com/google/ExoPlayer/issues/3241)).
* Fix handling of streams with appended data
([#4954](https://github.com/google/ExoPlayer/issues/4954)).
* DASH: Parse ProgramInformation element if present in the manifest.
* HLS:
* Add constructor to `DefaultHlsExtractorFactory` for adding TS payload
reader factory flags
([#4861](https://github.com/google/ExoPlayer/issues/4861)).
* Fix bug in segment sniffing
([#5039](https://github.com/google/ExoPlayer/issues/5039)).
* SubRip: Add support for alignment tags, and remove tags from the displayed
captions ([#4306](https://github.com/google/ExoPlayer/issues/4306)).
* Fix issue with blind seeking to windows with non-zero offset in a
`ConcatenatingMediaSource`
([#4873](https://github.com/google/ExoPlayer/issues/4873)).
* Fix logic for enabling next and previous actions in `TimelineQueueNavigator`
([#5065](https://github.com/google/ExoPlayer/issues/5065)).
* Fix issue where audio focus handling could not be disabled after enabling it
([#5055](https://github.com/google/ExoPlayer/issues/5055)).
* Fix issue where subtitles were positioned incorrectly if `SubtitleView` had
a non-zero position offset to its parent
([#4788](https://github.com/google/ExoPlayer/issues/4788)).
* Fix issue where the buffered position was not updated correctly when
transitioning between periods
([#4899](https://github.com/google/ExoPlayer/issues/4899)).
* Fix issue where a `NullPointerException` is thrown when removing an
unprepared media source from a `ConcatenatingMediaSource` with the
`useLazyPreparation` option enabled
([#4986](https://github.com/google/ExoPlayer/issues/4986)).
* Work around an issue where a non-empty end-of-stream audio buffer would be
output with timestamp zero, causing the player position to jump backwards
([#5045](https://github.com/google/ExoPlayer/issues/5045)).
* Suppress a spurious assertion failure on some Samsung devices
([#4532](https://github.com/google/ExoPlayer/issues/4532)).
* Suppress spurious "references unknown class member" shrinking warning
([#4890](https://github.com/google/ExoPlayer/issues/4890)).
* Swap recommended order for google() and jcenter() in gradle config
([#4997](https://github.com/google/ExoPlayer/issues/4997)).
### 2.9.0 (2018-09-06)
* Turn on Java 8 compiler support for the ExoPlayer library. Apps may need to
add `compileOptions { targetCompatibility JavaVersion.VERSION_1_8 }` to
their gradle settings to ensure bytecode compatibility.
* Set `compileSdkVersion` and `targetSdkVersion` to 28.
* Support for automatic audio focus handling via
`SimpleExoPlayer.setAudioAttributes`.
* Add `ExoPlayer.retry` convenience method.
* Add `AudioListener` for listening to changes in audio configuration during
playback ([#3994](https://github.com/google/ExoPlayer/issues/3994)).
* Add `LoadErrorHandlingPolicy` to allow configuration of load error handling
across `MediaSource` implementations
([#3370](https://github.com/google/ExoPlayer/issues/3370)).
* Allow passing a `Looper`, which specifies the thread that must be used to
access the player, when instantiating player instances using
`ExoPlayerFactory`
([#4278](https://github.com/google/ExoPlayer/issues/4278)).
* Allow setting log level for ExoPlayer logcat output
([#4665](https://github.com/google/ExoPlayer/issues/4665)).
* Simplify `BandwidthMeter` injection: The `BandwidthMeter` should now be
passed directly to `ExoPlayerFactory`, instead of to
`TrackSelection.Factory` and `DataSource.Factory`. The `BandwidthMeter` is
passed to the components that need it internally. The `BandwidthMeter` may
also be omitted, in which case a default instance will be used.
* Spherical video:
* Support for spherical video by setting `surface_type="spherical_view"`
on `PlayerView`.
* Support for
[VR180](https://github.com/google/spatial-media/blob/master/docs/vr180.md).
* HLS:
* Support PlayReady.
* Add container format sniffing
([#2025](https://github.com/google/ExoPlayer/issues/2025)).
* Support alternative `EXT-X-KEY` tags.
* Support `EXT-X-INDEPENDENT-SEGMENTS` in the master playlist.
* Support variable substitution
([#4422](https://github.com/google/ExoPlayer/issues/4422)).
* Fix the bitrate being unset on primary track sample formats
([#3297](https://github.com/google/ExoPlayer/issues/3297)).
* Make `HlsMediaSource.Factory` take a factory of trackers instead of a
tracker instance
([#4814](https://github.com/google/ExoPlayer/issues/4814)).
* DASH:
* Support `messageData` attribute for in-manifest event streams.
* Clip periods to their specified durations
([#4185](https://github.com/google/ExoPlayer/issues/4185)).
* Improve seeking support for progressive streams:
* Support seeking in MPEG-TS
([#966](https://github.com/google/ExoPlayer/issues/966)).
* Support seeking in MPEG-PS
([#4476](https://github.com/google/ExoPlayer/issues/4476)).
* Support approximate seeking in ADTS using a constant bitrate assumption
([#4548](https://github.com/google/ExoPlayer/issues/4548)). The
`FLAG_ENABLE_CONSTANT_BITRATE_SEEKING` flag must be set on the extractor
to enable this functionality.
* Support approximate seeking in AMR using a constant bitrate assumption.
The `FLAG_ENABLE_CONSTANT_BITRATE_SEEKING` flag must be set on the
extractor to enable this functionality.
* Add `DefaultExtractorsFactory.setConstantBitrateSeekingEnabled` to
enable approximate seeking using a constant bitrate assumption on all
extractors that support it.
* Video:
* Add callback to `VideoListener` to notify of surface size changes.
* Improve performance when playing high frame-rate content, and when
playing at greater than 1x speed
([#2777](https://github.com/google/ExoPlayer/issues/2777)).
* Scale up the initial video decoder maximum input size so playlist
transitions with small increases in maximum sample size do not require
reinitialization
([#4510](https://github.com/google/ExoPlayer/issues/4510)).
* Fix a bug where the player would not transition to the ended state when
playing video in tunneled mode.
* Audio:
* Support attaching auxiliary audio effects to the `AudioTrack` via
`Player.setAuxEffectInfo` and `Player.clearAuxEffectInfo`.
  * Support seamless adaptation while playing xHE-AAC streams
([#4360](https://github.com/google/ExoPlayer/issues/4360)).
* Increase `AudioTrack` buffer sizes to the theoretical maximum required
for each encoding for passthrough playbacks
([#3803](https://github.com/google/ExoPlayer/issues/3803)).
* WAV: Fix issue where white noise would be output at the end of playback
([#4724](https://github.com/google/ExoPlayer/issues/4724)).
* MP3: Fix issue where streams would play twice on the SM-T530
([#4519](https://github.com/google/ExoPlayer/issues/4519)).
* Analytics:
* Add callbacks to `DefaultDrmSessionEventListener` and
`AnalyticsListener` to be notified of acquired and released DRM
sessions.
* Add uri field to `LoadEventInfo` in `MediaSourceEventListener` and
`AnalyticsListener` callbacks. This uri is the redirected uri if
redirection occurred
([#2054](https://github.com/google/ExoPlayer/issues/2054)).
* Add response headers field to `LoadEventInfo` in
`MediaSourceEventListener` and `AnalyticsListener` callbacks
([#4361](https://github.com/google/ExoPlayer/issues/4361) and
[#4615](https://github.com/google/ExoPlayer/issues/4615)).
* UI:
* Add option to `PlayerView` to show buffering view when playWhenReady is
false ([#4304](https://github.com/google/ExoPlayer/issues/4304)).
* Allow any `Drawable` to be used as `PlayerView` default artwork.
* ConcatenatingMediaSource:
* Support lazy preparation of playlist media sources
([#3972](https://github.com/google/ExoPlayer/issues/3972)).
* Support range removal with `removeMediaSourceRange` methods
([#4542](https://github.com/google/ExoPlayer/issues/4542)).
* Support setting a new shuffle order with `setShuffleOrder`
([#4791](https://github.com/google/ExoPlayer/issues/4791)).
* MPEG-TS: Support CEA-608/708 in H262
([#2565](https://github.com/google/ExoPlayer/issues/2565)).
* Allow configuration of the back buffer in `DefaultLoadControl.Builder`
([#4857](https://github.com/google/ExoPlayer/issues/4857)).
* Allow apps to pass a `CacheKeyFactory` for setting custom cache keys when
creating a `CacheDataSource`.
* Provide additional information for adaptive track selection.
`TrackSelection.updateSelectedTrack` has two new parameters for the current
queue of media chunks and iterators for information about upcoming chunks.
* Allow `MediaCodecSelector`s to return multiple compatible decoders for
`MediaCodecRenderer`, and provide an (optional) `MediaCodecSelector` that
falls back to less preferred decoders like `MediaCodec.createDecoderByType`
([#273](https://github.com/google/ExoPlayer/issues/273)).
* Enable gzip for requests made by `SingleSampleMediaSource`
([#4771](https://github.com/google/ExoPlayer/issues/4771)).
* Fix bug reporting buffered position for multi-period windows, and add
convenience methods `Player.getTotalBufferedDuration` and
`Player.getContentBufferedDuration`
([#4023](https://github.com/google/ExoPlayer/issues/4023)).
* Fix bug where transitions to clipped media sources would happen too early
([#4583](https://github.com/google/ExoPlayer/issues/4583)).
* Fix bugs reporting events for multi-period media sources
([#4492](https://github.com/google/ExoPlayer/issues/4492) and
[#4634](https://github.com/google/ExoPlayer/issues/4634)).
* Fix issue where removing looping media from a playlist throws an exception
([#4871](https://github.com/google/ExoPlayer/issues/4871)).
* Fix issue where the preferred audio or text track would not be selected if
mapped onto a secondary renderer of the corresponding type
([#4711](http://github.com/google/ExoPlayer/issues/4711)).
* Fix issue where errors of upcoming playlist items are thrown too early
([#4661](https://github.com/google/ExoPlayer/issues/4661)).
* Allow edit lists which do not start with a sync sample
([#4774](https://github.com/google/ExoPlayer/issues/4774)).
* Fix issue with audio discontinuities at period transitions, e.g. when
looping ([#3829](https://github.com/google/ExoPlayer/issues/3829)).
* Fix issue where `player.getCurrentTag()` throws an
`IndexOutOfBoundsException`
([#4822](https://github.com/google/ExoPlayer/issues/4822)).
* Fix bug preventing use of multiple key session support (`multiSession=true`)
for non-Widevine `DefaultDrmSessionManager` instances
([#4834](https://github.com/google/ExoPlayer/issues/4834)).
* Fix issue where audio and video would desynchronize when playing
concatenations of gapless content
([#4559](https://github.com/google/ExoPlayer/issues/4559)).
* IMA extension:
* Refine the previous fix for empty ad groups to avoid discarding ad
breaks unnecessarily
([#4030](https://github.com/google/ExoPlayer/issues/4030) and
[#4280](https://github.com/google/ExoPlayer/issues/4280)).
* Fix handling of empty postrolls
([#4681](https://github.com/google/ExoPlayer/issues/4681)).
* Fix handling of postrolls with multiple ads
([#4710](https://github.com/google/ExoPlayer/issues/4710)).
* MediaSession extension:
* Add `MediaSessionConnector.setCustomErrorMessage` to support setting
custom error messages.
* Add `MediaMetadataProvider` to support setting custom metadata
([#3497](https://github.com/google/ExoPlayer/issues/3497)).
* Cronet extension: Now distributed via jCenter.
* FFmpeg extension: Support mu-law and A-law PCM.
### 2.8.4 (2018-08-17)
* IMA extension: Improve handling of consecutive empty ad groups
([#4030](https://github.com/google/ExoPlayer/issues/4030)),
([#4280](https://github.com/google/ExoPlayer/issues/4280)).
### 2.8.3 (2018-07-23)
* IMA extension:
* Fix behavior when creating/releasing the player then releasing
`ImaAdsLoader`
([#3879](https://github.com/google/ExoPlayer/issues/3879)).
* Add support for setting slots for companion ads.
* Captions:
* TTML: Fix an issue with TTML using font size as % of cell resolution
that makes `SubtitleView.setApplyEmbeddedFontSizes()` not work
correctly. ([#4491](https://github.com/google/ExoPlayer/issues/4491)).
* CEA-608: Improve handling of embedded styles
([#4321](https://github.com/google/ExoPlayer/issues/4321)).
* DASH:
* Exclude text streams from duration calculations
([#4029](https://github.com/google/ExoPlayer/issues/4029)).
* Fix freezing when playing multi-period manifests with `EventStream`s
([#4492](https://github.com/google/ExoPlayer/issues/4492)).
* DRM: Allow DrmInitData to carry a license server URL
([#3393](https://github.com/google/ExoPlayer/issues/3393)).
* MPEG-TS: Fix bug preventing SCTE-35 cues from being output
([#4573](https://github.com/google/ExoPlayer/issues/4573)).
* Expose all internal ID3 data stored in MP4 udta boxes, and switch from using
CommentFrame to InternalFrame for frames with gapless metadata in MP4.
* Add `PlayerView.isControllerVisible`
([#4385](https://github.com/google/ExoPlayer/issues/4385)).
* Fix issue playing DRM protected streams on Asus Zenfone 2
  ([#4413](https://github.com/google/ExoPlayer/issues/4413)).
* Add support for multiple audio and video tracks in MPEG-PS streams
([#4406](https://github.com/google/ExoPlayer/issues/4406)).
* Add workaround for track index mismatches between trex and tkhd boxes in
fragmented MP4 files
([#4477](https://github.com/google/ExoPlayer/issues/4477)).
* Add workaround for track index mismatches between tfhd and tkhd boxes in
fragmented MP4 files
([#4083](https://github.com/google/ExoPlayer/issues/4083)).
* Ignore all MP4 edit lists if one edit list couldn't be handled
([#4348](https://github.com/google/ExoPlayer/issues/4348)).
* Fix issue when switching track selection from an embedded track to a primary
track in DASH ([#4477](https://github.com/google/ExoPlayer/issues/4477)).
* Fix accessibility class name for `DefaultTimeBar`
([#4611](https://github.com/google/ExoPlayer/issues/4611)).
* Improved compatibility with FireOS devices.
### 2.8.2 (2018-06-06)
* IMA extension: Don't advertise support for video/mpeg ad media, as we don't
have an extractor for this
([#4297](https://github.com/google/ExoPlayer/issues/4297)).
* DASH: Fix playback getting stuck when playing representations that have both
sidx atoms and non-zero presentationTimeOffset values.
* HLS:
* Allow injection of custom playlist trackers.
* Fix adaptation in live playlists with EXT-X-PROGRAM-DATE-TIME tags.
* Mitigate memory leaks when `MediaSource` loads are slow to cancel
([#4249](https://github.com/google/ExoPlayer/issues/4249)).
* Fix inconsistent `Player.EventListener` invocations for recursive player
state changes ([#4276](https://github.com/google/ExoPlayer/issues/4276)).
* Fix `MediaCodec.native_setSurface` crash on Moto C
([#4315](https://github.com/google/ExoPlayer/issues/4315)).
* Fix missing whitespace in CEA-608
([#3906](https://github.com/google/ExoPlayer/issues/3906)).
* Fix crash downloading HLS media playlists
([#4396](https://github.com/google/ExoPlayer/issues/4396)).
* Fix a bug where download cancellation was ignored
([#4403](https://github.com/google/ExoPlayer/issues/4403)).
* Set `METADATA_KEY_TITLE` on media descriptions
([#4292](https://github.com/google/ExoPlayer/issues/4292)).
* Allow apps to register custom MIME types
([#4264](https://github.com/google/ExoPlayer/issues/4264)).
### 2.8.1 (2018-05-22)
* HLS:
* Fix playback of livestreams with EXT-X-PROGRAM-DATE-TIME tags
([#4239](https://github.com/google/ExoPlayer/issues/4239)).
* Fix playback of clipped streams starting from non-keyframe positions
([#4241](https://github.com/google/ExoPlayer/issues/4241)).
* OkHttp extension: Fix to correctly include response headers in thrown
`InvalidResponseCodeException`s.
* Add possibility to cancel `PlayerMessage`s.
* UI:
* Add `PlayerView.setKeepContentOnPlayerReset` to keep the currently
displayed video frame or media artwork visible when the player is reset
([#2843](https://github.com/google/ExoPlayer/issues/2843)).
* Fix crash when switching surface on Moto E(4)
([#4134](https://github.com/google/ExoPlayer/issues/4134)).
* Fix a bug that could cause event listeners to be called with inconsistent
information if an event listener interacted with the player
([#4262](https://github.com/google/ExoPlayer/issues/4262)).
* Audio:
* Fix extraction of PCM in MP4/MOV
([#4228](https://github.com/google/ExoPlayer/issues/4228)).
* FLAC: Supports seeking for FLAC files without SEEKTABLE
([#1808](https://github.com/google/ExoPlayer/issues/1808)).
* Captions:
* TTML:
* Fix a styling issue when there are multiple regions displayed at the
same time that can make text size of each region much smaller than
defined.
* Fix an issue when the caption line has no text (empty line or only line
break), and the line's background is still displayed.
* Support TTML font size using % correctly (as percentage of document cell
resolution).
### 2.8.0 (2018-05-03)
* Downloading:
* Add `DownloadService`, `DownloadManager` and related classes
([#2643](https://github.com/google/ExoPlayer/issues/2643)). Information
on using these components to download progressive formats can be found
[here](https://medium.com/google-exoplayer/downloading-streams-6d259eec7f95).
To see how to download DASH, HLS and SmoothStreaming media, take a look
at the app.
* Updated main demo app to support downloading DASH, HLS, SmoothStreaming
and progressive media.
* MediaSources:
* Allow reusing media sources after they have been released and also in
parallel to allow adding them multiple times to a concatenation.
([#3498](https://github.com/google/ExoPlayer/issues/3498)).
* Merged `DynamicConcatenatingMediaSource` into `ConcatenatingMediaSource`
and deprecated `DynamicConcatenatingMediaSource`.
* Allow clipping of child media sources where the period and window have a
non-zero offset with `ClippingMediaSource`.
* Allow adding and removing `MediaSourceEventListener`s to MediaSources
after they have been created. Listening to events is now supported for
all media sources including composite sources.
* Added callbacks to `MediaSourceEventListener` to get notified when media
periods are created, released and being read from.
* Support live stream clipping with `ClippingMediaSource`.
* Allow setting tags for all media sources in their factories. The tag of
the current window can be retrieved with `Player.getCurrentTag`.
* UI:
* Add support for displaying error messages and a buffering spinner in
`PlayerView`.
* Add support for listening to `AspectRatioFrameLayout`'s aspect ratio
update ([#3736](https://github.com/google/ExoPlayer/issues/3736)).
* Add `PlayerNotificationManager` for displaying notifications reflecting
the player state.
* Add `TrackSelectionView` for selecting tracks with
`DefaultTrackSelector`.
* Add `TrackNameProvider` for converting track `Format`s to textual
descriptions, and `DefaultTrackNameProvider` as a default
implementation.
* Track selection:
* Reworked `MappingTrackSelector` and `DefaultTrackSelector`.
* `DefaultTrackSelector.Parameters` now implements `Parcelable`.
* Added UI components for track selection (see above).
* Audio:
* Support extracting data from AMR container formats, including both
narrow and wide band
([#2527](https://github.com/google/ExoPlayer/issues/2527)).
* FLAC:
* Sniff FLAC files correctly if they have ID3 headers
([#4055](https://github.com/google/ExoPlayer/issues/4055)).
* Supports FLAC files with high sample rate (176400 and 192000)
([#3769](https://github.com/google/ExoPlayer/issues/3769)).
* Factor out `AudioTrack` position tracking from `DefaultAudioSink`.
* Fix an issue where the playback position would pause just after playback
begins, and poll the audio timestamp less frequently once it starts
advancing ([#3841](https://github.com/google/ExoPlayer/issues/3841)).
* Add an option to skip silent audio in `PlaybackParameters`
([#2635](https://github.com/google/ExoPlayer/issues/2635)).
* Fix an issue where playback of TrueHD streams would get stuck after
seeking due to not finding a syncframe
([#3845](https://github.com/google/ExoPlayer/issues/3845)).
* Fix an issue with eac3-joc playback where a codec would fail to
configure ([#4165](https://github.com/google/ExoPlayer/issues/4165)).
* Handle non-empty end-of-stream buffers, to fix gapless playback of
streams with encoder padding when the decoder returns a non-empty final
buffer.
* Allow trimming more than one sample when applying an elst audio edit via
gapless playback info.
* Allow overriding skipping/scaling with custom `AudioProcessor`s
([#3142](https://github.com/google/ExoPlayer/issues/3142)).
* Caching:
* Add release method to the `Cache` interface, and prevent multiple
instances of `SimpleCache` using the same folder at the same time.
* Cache redirect URLs
([#2360](https://github.com/google/ExoPlayer/issues/2360)).
* DRM:
* Allow multiple listeners for `DefaultDrmSessionManager`.
* Pass `DrmSessionManager` to `ExoPlayerFactory` instead of
`RendererFactory`.
* Change minimum API requirement for CBC and pattern encryption from 24 to
25 ([#4022](https://github.com/google/ExoPlayer/issues/4022)).
* Fix handling of 307/308 redirects when making license requests
([#4108](https://github.com/google/ExoPlayer/issues/4108)).
* HLS:
* Fix playlist loading error propagation when the current selection does
not include all of the playlist's variants.
* Fix SAMPLE-AES-CENC and SAMPLE-AES-CTR EXT-X-KEY methods
([#4145](https://github.com/google/ExoPlayer/issues/4145)).
  * Preemptively declare an ID3 track in chunkless preparation
    ([#4016](https://github.com/google/ExoPlayer/issues/4016)).
  * Add support for multiple #EXT-X-MAP tags in a media playlist
    ([#4164](https://github.com/google/ExoPlayer/issues/4164)).
* Fix seeking in live streams
([#4187](https://github.com/google/ExoPlayer/issues/4187)).
* IMA extension:
* Allow setting the ad media load timeout
([#3691](https://github.com/google/ExoPlayer/issues/3691)).
* Expose ad load errors via `MediaSourceEventListener` on
`AdsMediaSource`, and allow setting an ad event listener on
`ImaAdsLoader`. Deprecate the `AdsMediaSource.EventListener`.
* Add `AnalyticsListener` interface which can be registered in
`SimpleExoPlayer` to receive detailed metadata for each ExoPlayer event.
* Optimize seeking in FMP4 by enabling seeking to the nearest sync sample
within a fragment. This benefits standalone FMP4 playbacks, DASH and
SmoothStreaming.
* Updated default max buffer length in `DefaultLoadControl`.
* Fix ClearKey decryption error if the key contains a forward slash
([#4075](https://github.com/google/ExoPlayer/issues/4075)).
* Fix crash when switching surface on Huawei P9 Lite
([#4084](https://github.com/google/ExoPlayer/issues/4084)), and Philips
QM163E ([#4104](https://github.com/google/ExoPlayer/issues/4104)).
* Support ZLIB compressed PGS subtitles.
* Added `getPlaybackError` to `Player` interface.
* Moved initial bitrate estimate from `AdaptiveTrackSelection` to
`DefaultBandwidthMeter`.
* Removed default renderer time offset of 60000000 from internal player. The
actual renderer timestamp offset can be obtained by listening to
`BaseRenderer.onStreamChanged`.
* Added dependencies on checkerframework annotations for static code analysis.
### 2.7.3 (2018-04-04)
* Fix ProGuard configuration for Cast, IMA and OkHttp extensions.
* Update OkHttp extension to depend on OkHttp 3.10.0.
### 2.7.2 (2018-03-29)
* Gradle: Upgrade Gradle version from 4.1 to 4.4 so it can work with Android
Studio 3.1 ([#3708](https://github.com/google/ExoPlayer/issues/3708)).
* Match codecs starting with "mp4a" to different Audio MimeTypes
([#3779](https://github.com/google/ExoPlayer/issues/3779)).
* Fix ANR issue on Redmi 4X and Redmi Note 4
([#4006](https://github.com/google/ExoPlayer/issues/4006)).
* Fix handling of zero padded strings when parsing Matroska streams
([#4010](https://github.com/google/ExoPlayer/issues/4010)).
* Fix "Decoder input buffer too small" error when playing some FLAC streams.
* MediaSession extension: Omit fast forward and rewind actions when media is
not seekable ([#4001](https://github.com/google/ExoPlayer/issues/4001)).
### 2.7.1 (2018-03-09)
* Gradle: Replaced 'compile' (deprecated) with 'implementation' and 'api'.
This may lead to build breakage for applications upgrading from previous
version that rely on indirect dependencies of certain modules. In such
cases, application developers need to add the missing dependency to their
gradle file. You can read more about the new dependency configurations
[here](https://developer.android.com/studio/build/gradle-plugin-3-0-0-migration.html#new_configurations).
* HlsMediaSource: Make HLS periods start at zero instead of the epoch.
Applications that rely on HLS timelines having a period starting at the
epoch will need to update their handling of HLS timelines. The program date
time is still available via the informational
`Timeline.Window.windowStartTimeMs` field
([#3865](https://github.com/google/ExoPlayer/issues/3865),
[#3888](https://github.com/google/ExoPlayer/issues/3888)).
* Enable seeking in MP4 streams where duration is set incorrectly in the track
header ([#3926](https://github.com/google/ExoPlayer/issues/3926)).
* Video: Force rendering a frame periodically in `MediaCodecVideoRenderer` and
`LibvpxVideoRenderer`, even if it is late.
### 2.7.0 (2018-02-19)
* Player interface:
* Add optional parameter to `stop` to reset the player when stopping.
* Add a reason to `EventListener.onTimelineChanged` to distinguish between
initial preparation, reset and dynamic updates.
  * Add `Player.DISCONTINUITY_REASON_AD_INSERTION` to the possible reasons
    reported in `EventListener.onPositionDiscontinuity` to distinguish
    transitions to and from ads within one period from transitions between
    periods.
* Replaced `ExoPlayer.sendMessages` with `ExoPlayer.createMessage` to
allow more customization of the message. Now supports setting a message
delivery playback position and/or a delivery handler
([#2189](https://github.com/google/ExoPlayer/issues/2189)).
* Add `Player.VideoComponent`, `Player.TextComponent` and
`Player.MetadataComponent` interfaces that define optional video, text
and metadata output functionality. New `getVideoComponent`,
`getTextComponent` and `getMetadataComponent` methods provide access to
this functionality.
* Add `ExoPlayer.setSeekParameters` for controlling how seek operations are
performed. The `SeekParameters` class contains defaults for exact seeking
and seeking to the closest sync points before, either side or after
specified seek positions. `SeekParameters` are not currently supported when
playing HLS streams.
* DefaultTrackSelector:
* Replace `DefaultTrackSelector.Parameters` copy methods with a builder.
* Support disabling of individual text track selection flags.
* Buffering:
* Allow a back-buffer of media to be retained behind the current playback
position, for fast backward seeking. The back-buffer can be configured
by custom `LoadControl` implementations.
* Add ability for `SequenceableLoader` to re-evaluate its buffer and
discard buffered media so that it can be re-buffered in a different
quality.
* Allow more flexible loading strategy when playing media containing
multiple sub-streams, by allowing injection of custom
`CompositeSequenceableLoader` factories through
`DashMediaSource.Factory`, `HlsMediaSource.Factory`,
`SsMediaSource.Factory`, and `MergingMediaSource`.
* Play out existing buffer before retrying for progressive live streams
([#1606](https://github.com/google/ExoPlayer/issues/1606)).
* UI:
* Generalized player and control views to allow them to bind with any
`Player`, and renamed them to `PlayerView` and `PlayerControlView`
respectively.
* Made `PlayerView` automatically apply video rotation when configured to
use `TextureView`
([#91](https://github.com/google/ExoPlayer/issues/91)).
* Made `PlayerView` play button behave correctly when the player is ended
([#3689](https://github.com/google/ExoPlayer/issues/3689)), and call a
`PlaybackPreparer` when the player is idle.
* DRM: Optimistically attempt playback of DRM protected content that does not
declare scheme specific init data in the manifest. If playback of clear
samples without keys is allowed, delay DRM session error propagation until
keys are actually needed
([#3630](https://github.com/google/ExoPlayer/issues/3630)).
* DASH:
* Support in-band Emsg events targeting the player with scheme id
`urn:mpeg:dash:event:2012` and scheme values "1", "2" and "3".
* Support EventStream elements in DASH manifests.
* HLS:
* Add opt-in support for chunkless preparation in HLS. This allows an HLS
source to finish preparation without downloading any chunks, which can
significantly reduce initial buffering time
([#3149](https://github.com/google/ExoPlayer/issues/3149)). More details
can be found
[here](https://medium.com/google-exoplayer/faster-hls-preparation-f6611aa15ea6).
  * Fail if unable to sync with the Transport Stream, rather than getting
    stuck in an indefinite buffering state.
* Fix mime type propagation
([#3653](https://github.com/google/ExoPlayer/issues/3653)).
* Fix ID3 context reuse across segment format changes
([#3622](https://github.com/google/ExoPlayer/issues/3622)).
* Use long for media sequence numbers
([#3747](https://github.com/google/ExoPlayer/issues/3747))
* Add initial support for the EXT-X-GAP tag.
* Audio:
* Support TrueHD passthrough for rechunked samples in Matroska files
([#2147](https://github.com/google/ExoPlayer/issues/2147)).
* Support resampling 24-bit and 32-bit integer to 32-bit float for high
resolution output in `DefaultAudioSink`
([#3635](https://github.com/google/ExoPlayer/pull/3635)).
* Captions:
* Basic support for PGS subtitles
([#3008](https://github.com/google/ExoPlayer/issues/3008)).
* Fix handling of CEA-608 captions where multiple buffers have the same
presentation timestamp
([#3782](https://github.com/google/ExoPlayer/issues/3782)).
* Caching:
* Fix cache corruption issue
([#3762](https://github.com/google/ExoPlayer/issues/3762)).
* Implement periodic check in `CacheDataSource` to see whether it's
possible to switch to reading/writing the cache having initially
bypassed it.
* IMA extension:
* Fix the player getting stuck when an ad group fails to load
([#3584](https://github.com/google/ExoPlayer/issues/3584)).
  * Work around loadAd not being called before the LOADED AdEvent arrives
    ([#3552](https://github.com/google/ExoPlayer/issues/3552)).
* Handle asset mismatch errors
([#3801](https://github.com/google/ExoPlayer/issues/3801)).
* Add support for playing non-Extractor content MediaSources in the IMA
demo app ([#3676](https://github.com/google/ExoPlayer/issues/3676)).
* Fix handling of ad tags where ad groups are out of order
([#3716](https://github.com/google/ExoPlayer/issues/3716)).
* Fix handling of ad tags with only preroll/postroll ad groups
([#3715](https://github.com/google/ExoPlayer/issues/3715)).
* Propagate ad media preparation errors to IMA so that the ads can be
skipped.
  * Handle exceptions in IMA callbacks so that they can be logged less
    verbosely.
* New Cast extension. Simplifies toggling between local and Cast playbacks.
* `EventLogger` moved from the demo app into the core library.
* Fix ANR issue on the Huawei P8 Lite, Huawei Y6II, Moto C+, Meizu M5C, Lenovo
K4 Note and Sony Xperia E5.
([#3724](https://github.com/google/ExoPlayer/issues/3724),
[#3835](https://github.com/google/ExoPlayer/issues/3835)).
* Fix potential NPE when removing media sources from a
DynamicConcatenatingMediaSource
([#3796](https://github.com/google/ExoPlayer/issues/3796)).
* Check `sys.display-size` on Philips ATVs
([#3807](https://github.com/google/ExoPlayer/issues/3807)).
* Release `Extractor`s on the loading thread to avoid potentially leaking
resources when the playback thread has quit by the time the loading task has
completed.
* ID3: Better handle malformed ID3 data
([#3792](https://github.com/google/ExoPlayer/issues/3792)).
* Support 14-bit mode and little endianness in DTS PES packets
([#3340](https://github.com/google/ExoPlayer/issues/3340)).
* Demo app: Add ability to download content that is not DRM protected.
### 2.6.1 (2017-12-15)
* Add factories to `ExtractorMediaSource`, `HlsMediaSource`, `SsMediaSource`,
`DashMediaSource` and `SingleSampleMediaSource`.
* Use the same listener `MediaSourceEventListener` for all MediaSource
implementations.
* IMA extension:
* Support non-ExtractorMediaSource ads
([#3302](https://github.com/google/ExoPlayer/issues/3302)).
* Skip ads before the ad preceding the player's initial seek position
([#3527](https://github.com/google/ExoPlayer/issues/3527)).
* Fix ad loading when there is no preroll.
* Add an option to turn off hiding controls during ad playback
([#3532](https://github.com/google/ExoPlayer/issues/3532)).
* Support specifying an ads response instead of an ad tag
([#3548](https://github.com/google/ExoPlayer/issues/3548)).
* Support overriding the ad load timeout
([#3556](https://github.com/google/ExoPlayer/issues/3556)).
* DASH: Support time zone designators in ISO8601 UTCTiming elements
([#3524](https://github.com/google/ExoPlayer/issues/3524)).
* Audio:
* Support 32-bit PCM float output from `DefaultAudioSink`, and add an
option to use this with `FfmpegAudioRenderer`.
* Add support for extracting 32-bit WAVE files
([#3379](https://github.com/google/ExoPlayer/issues/3379)).
* Support extraction and decoding of Dolby Atmos
([#2465](https://github.com/google/ExoPlayer/issues/2465)).
* Fix handling of playback parameter changes while paused when followed by
a seek.
* SimpleExoPlayer: Allow multiple audio and video debug listeners.
* DefaultTrackSelector: Support undefined language text track selection when
the preferred language is not available
([#2980](https://github.com/google/ExoPlayer/issues/2980)).
* Add options to `DefaultLoadControl` to set maximum buffer size in bytes and
to choose whether size or time constraints are prioritized.
* Use surfaceless context for secure `DummySurface`, if available
([#3558](https://github.com/google/ExoPlayer/issues/3558)).
* FLV: Fix playback of live streams that do not contain an audio track
([#3188](https://github.com/google/ExoPlayer/issues/3188)).
* CEA-608: Fix handling of row count changes in roll-up mode
([#3513](https://github.com/google/ExoPlayer/issues/3513)).
* Prevent period transitions when seeking to the end of a period when paused
([#2439](https://github.com/google/ExoPlayer/issues/2439)).
### 2.6.0 (2017-11-03)
* Removed "r" prefix from versions. This release is "2.6.0", not "r2.6.0".
* New `Player.DefaultEventListener` abstract class can be extended to avoid
having to implement all methods defined by `Player.EventListener`.
* Added a reason to `EventListener.onPositionDiscontinuity`
([#3252](https://github.com/google/ExoPlayer/issues/3252)).
* New `setShuffleModeEnabled` method for enabling shuffled playback.
* SimpleExoPlayer: Support for multiple video, text and metadata outputs.
* Support for `Renderer`s that don't consume any media
([#3212](https://github.com/google/ExoPlayer/issues/3212)).
* Fix reporting of internal position discontinuities via
`Player.onPositionDiscontinuity`. `DISCONTINUITY_REASON_SEEK_ADJUSTMENT` is
added to disambiguate position adjustments during seeks from other types of
internal position discontinuity.
* Fix potential `IndexOutOfBoundsException` when calling
`ExoPlayer.getDuration`
([#3362](https://github.com/google/ExoPlayer/issues/3362)).
* Fix playbacks involving looping, concatenation and ads getting stuck when
media contains tracks with uneven durations
([#1874](https://github.com/google/ExoPlayer/issues/1874)).
* Fix issue with `ContentDataSource` when reading from certain
`ContentProvider` implementations
([#3426](https://github.com/google/ExoPlayer/issues/3426)).
* Better playback experience when the video decoder cannot keep up, by
skipping to key-frames. This is particularly relevant for variable speed
playbacks.
* Allow `SingleSampleMediaSource` to suppress load errors
([#3140](https://github.com/google/ExoPlayer/issues/3140)).
* `DynamicConcatenatingMediaSource`: Allow specifying a callback to be invoked
after a dynamic playlist modification has been applied
([#3407](https://github.com/google/ExoPlayer/issues/3407)).
* Audio: New `AudioSink` interface allows customization of audio output path.
* Offline: Added `Downloader` implementations for DASH, HLS, SmoothStreaming
and progressive streams.
* Track selection:
* Fixed adaptive track selection logic for live playbacks
([#3017](https://github.com/google/ExoPlayer/issues/3017)).
* Added ability to select the lowest bitrate tracks.
* DASH:
* Don't crash when a malformed or unexpected manifest update occurs
([#2795](https://github.com/google/ExoPlayer/issues/2795)).
* HLS:
* Support for Widevine protected FMP4 variants.
* Support CEA-608 in FMP4 variants.
* Support extractor injection
([#2748](https://github.com/google/ExoPlayer/issues/2748)).
* DRM:
* Improved compatibility with ClearKey content
([#3138](https://github.com/google/ExoPlayer/issues/3138)).
* Support multiple PSSH boxes of the same type.
  * Retry initial provisioning and key requests if they fail.
* Fix incorrect parsing of non-CENC sinf boxes.
* IMA extension:
* Expose `AdsLoader` via getter
([#3322](https://github.com/google/ExoPlayer/issues/3322)).
* Handle `setPlayWhenReady` calls during ad playbacks
([#3303](https://github.com/google/ExoPlayer/issues/3303)).
* Ignore seeks if an ad is playing
([#3309](https://github.com/google/ExoPlayer/issues/3309)).
* Improve robustness of `ImaAdsLoader` in case content is not paused
between content to ad transitions
([#3430](https://github.com/google/ExoPlayer/issues/3430)).
* UI:
* Allow specifying a `Drawable` for the `TimeBar` scrubber
([#3337](https://github.com/google/ExoPlayer/issues/3337)).
* Allow multiple listeners on `TimeBar`
([#3406](https://github.com/google/ExoPlayer/issues/3406)).
* New Leanback extension: Simplifies binding ExoPlayer to Leanback UI
  components.
* Unit tests moved to Robolectric.
* Misc bugfixes.
### r2.5.4 (2017-10-19)
* Remove unnecessary media playlist fetches during playback of live HLS
streams.
* Add the ability to inject a HLS playlist parser through `HlsMediaSource`.
* Fix potential `IndexOutOfBoundsException` when using `ImaMediaSource`
([#3334](https://github.com/google/ExoPlayer/issues/3334)).
* Fix an issue parsing MP4 content containing non-CENC sinf boxes.
* Fix memory leak when seeking with repeated periods.
* Fix playback position when `ExoPlayer.prepare` is called with
`resetPosition` set to false.
* Ignore MP4 edit lists that seem invalid
([#3351](https://github.com/google/ExoPlayer/issues/3351)).
* Add extractor flag for ignoring all MP4 edit lists
([#3358](https://github.com/google/ExoPlayer/issues/3358)).
* Improve extensibility by exposing public constructors for
`FrameworkMediaCrypto` and by making `DefaultDashChunkSource.getNextChunk`
non-final.
### r2.5.3 (2017-09-20)
* IMA extension: Support skipping of skippable ads on AndroidTV and other
non-touch devices
([#3258](https://github.com/google/ExoPlayer/issues/3258)).
* HLS: Fix broken WebVTT captions when PTS wraps around
([#2928](https://github.com/google/ExoPlayer/issues/2928)).
* Captions: Fix issues rendering CEA-608 captions
([#3250](https://github.com/google/ExoPlayer/issues/3250)).
* Workaround broken AAC decoders on Galaxy S6
([#3249](https://github.com/google/ExoPlayer/issues/3249)).
* Caching: Fix infinite loop when cache eviction fails
([#3260](https://github.com/google/ExoPlayer/issues/3260)).
* Caching: Force use of BouncyCastle on JellyBean to fix decryption issue
([#2755](https://github.com/google/ExoPlayer/issues/2755)).
### r2.5.2 (2017-09-11)
* IMA extension: Fix issue where ad playback could end prematurely for some
content types ([#3180](https://github.com/google/ExoPlayer/issues/3180)).
* RTMP extension: Fix SIGABRT on fast RTMP stream restart
([#3156](https://github.com/google/ExoPlayer/issues/3156)).
* UI: Allow app to manually specify ad markers
([#3184](https://github.com/google/ExoPlayer/issues/3184)).
* DASH: Expose segment indices to subclasses of DefaultDashChunkSource
([#3037](https://github.com/google/ExoPlayer/issues/3037)).
* Captions: Added robustness against malformed WebVTT captions
([#3228](https://github.com/google/ExoPlayer/issues/3228)).
* DRM: Support forcing a specific license URL.
* Fix playback error when seeking in media loaded through content:// URIs
([#3216](https://github.com/google/ExoPlayer/issues/3216)).
* Fix issue playing MP4s in which the last atom specifies a size of zero
([#3191](https://github.com/google/ExoPlayer/issues/3191)).
* Workaround playback failures on some Xiaomi devices
([#3171](https://github.com/google/ExoPlayer/issues/3171)).
* Workaround SIGSEGV issue on some devices when setting and swapping surface
for secure playbacks
([#3215](https://github.com/google/ExoPlayer/issues/3215)).
* Workaround for Nexus 7 issue when swapping output surface
([#3236](https://github.com/google/ExoPlayer/issues/3236)).
* Workaround for SimpleExoPlayerView's surface not being hidden properly
([#3160](https://github.com/google/ExoPlayer/issues/3160)).
### r2.5.1 (2017-08-08)
* Fix an issue that could cause the reported playback position to stop
advancing in some cases.
* Fix an issue where a Surface could be released whilst still in use by the
player.
### r2.5.0 (2017-08-07)
* IMA extension: Wraps the Google Interactive Media Ads (IMA) SDK to provide
an easy and seamless way of incorporating display ads into ExoPlayer
playbacks. You can read more about the IMA extension
[here](https://medium.com/google-exoplayer/playing-ads-with-exoplayer-and-ima-868dfd767ea).
* MediaSession extension: Provides an easy way to connect ExoPlayer with
MediaSessionCompat in the Android Support Library.
* RTMP extension: An extension for playing streams over RTMP.
* Build: Made it easier for application developers to depend on a local
checkout of ExoPlayer. You can learn how to do this
[here](https://medium.com/google-exoplayer/howto-2-depend-on-a-local-checkout-of-exoplayer-bcd7f8531720).
* Core playback improvements:
* Eliminated re-buffering when changing audio and text track selections
during playback of progressive streams
([#2926](https://github.com/google/ExoPlayer/issues/2926)).
* New DynamicConcatenatingMediaSource class to support playback of dynamic
playlists.
* New ExoPlayer.setRepeatMode method for dynamic toggling of repeat mode
during playback. Use of setRepeatMode should be preferred to
LoopingMediaSource for most looping use cases. You can read more about
setRepeatMode
[here](https://medium.com/google-exoplayer/repeat-modes-in-exoplayer-19dd85f036d3).
* Eliminated jank when switching video playback from one Surface to
another on API level 23+ for unencrypted content, and on devices that
support the EGL_EXT_protected_content OpenGL extension for protected
content ([#677](https://github.com/google/ExoPlayer/issues/677)).
* Enabled ExoPlayer instantiation on background threads without Loopers.
Events from such players are delivered on the application's main thread.
* HLS improvements:
* Optimized adaptive switches for playlists that specify the
EXT-X-INDEPENDENT-SEGMENTS tag.
* Optimized in-buffer seeking
([#551](https://github.com/google/ExoPlayer/issues/551)).
* Eliminated re-buffering when changing audio and text track selections
during playback, provided the new selection does not require switching
to different renditions
([#2718](https://github.com/google/ExoPlayer/issues/2718)).
* Exposed all media playlist tags in ExoPlayer's MediaPlaylist object.
* DASH: Support for seamless switching across streams in different
AdaptationSet elements
([#2431](https://github.com/google/ExoPlayer/issues/2431)).
* DRM: Support for additional crypto schemes (cbc1, cbcs and cens) on API
level 24+ ([#1989](https://github.com/google/ExoPlayer/issues/1989)).
* Captions: Initial support for SSA/ASS subtitles
([#889](https://github.com/google/ExoPlayer/issues/889)).
* AndroidTV: Fixed issue where tunneled video playback would not start on some
devices ([#2985](https://github.com/google/ExoPlayer/issues/2985)).
* MPEG-TS: Fixed segmentation issue when parsing H262
([#2891](https://github.com/google/ExoPlayer/issues/2891)).
* Cronet extension: Support for a user-defined fallback if Cronet library is
not present.
* Fix buffer too small IllegalStateException issue affecting some composite
media playbacks ([#2900](https://github.com/google/ExoPlayer/issues/2900)).
* Misc bugfixes.
### r2.4.4 (2017-07-19)
* HLS/MPEG-TS: Some initial optimizations of MPEG-TS extractor performance
([#3040](https://github.com/google/ExoPlayer/issues/3040)).
* HLS: Fix propagation of format identifier for CEA-608
([#3033](https://github.com/google/ExoPlayer/issues/3033)).
* HLS: Detect playlist stuck and reset conditions
([#2872](https://github.com/google/ExoPlayer/issues/2872)).
* Video: Fix video dimension reporting on some devices
([#3007](https://github.com/google/ExoPlayer/issues/3007)).
### r2.4.3 (2017-06-30)
* Audio: Workaround custom audio decoders misreporting their maximum supported
channel counts ([#2940](https://github.com/google/ExoPlayer/issues/2940)).
* Audio: Workaround for broken MediaTek raw decoder on some devices
([#2873](https://github.com/google/ExoPlayer/issues/2873)).
* Captions: Fix TTML captions appearing at the top of the screen
([#2953](https://github.com/google/ExoPlayer/issues/2953)).
* Captions: Fix handling of some DVB subtitles
([#2957](https://github.com/google/ExoPlayer/issues/2957)).
* Track selection: Fix setSelectionOverride(index, tracks, null)
([#2988](https://github.com/google/ExoPlayer/issues/2988)).
* GVR extension: Add support for mono input
([#2710](https://github.com/google/ExoPlayer/issues/2710)).
* FLAC extension: Fix failing build
([#2977](https://github.com/google/ExoPlayer/pull/2977)).
* Misc bugfixes.
### r2.4.2 (2017-06-06)
* Stability: Work around Nexus 10 reboot when playing certain content
([#2806](https://github.com/google/ExoPlayer/issues/2806)).
* MP3: Correctly treat MP3s with INFO headers as constant bitrate
([#2895](https://github.com/google/ExoPlayer/issues/2895)).
* HLS: Use average rather than peak bandwidth when available
([#2863](https://github.com/google/ExoPlayer/issues/2863)).
* SmoothStreaming: Fix timeline for live streams
([#2760](https://github.com/google/ExoPlayer/issues/2760)).
* UI: Fix DefaultTimeBar invalidation
([#2871](https://github.com/google/ExoPlayer/issues/2871)).
* Misc bugfixes.
### r2.4.1 (2017-05-23)
* Stability: Avoid OutOfMemoryError in extractors when parsing malformed media
([#2780](https://github.com/google/ExoPlayer/issues/2780)).
* Stability: Avoid native crash on Galaxy Nexus. Avoid unnecessarily large
codec input buffer allocations on all devices
([#2607](https://github.com/google/ExoPlayer/issues/2607)).
* Variable speed playback: Fix interpolation for rate/pitch adjustment
([#2774](https://github.com/google/ExoPlayer/issues/2774)).
* HLS: Include EXT-X-DATERANGE tags in HlsMediaPlaylist.
* HLS: Don't expose CEA-608 track if CLOSED-CAPTIONS=NONE
([#2743](https://github.com/google/ExoPlayer/issues/2743)).
* HLS: Correctly propagate errors loading the media playlist
([#2623](https://github.com/google/ExoPlayer/issues/2623)).
* UI: DefaultTimeBar enhancements and bug fixes
([#2740](https://github.com/google/ExoPlayer/issues/2740)).
* Ogg: Fix failure to play some Ogg files
([#2782](https://github.com/google/ExoPlayer/issues/2782)).
* Captions: Don't select text track with no language by default.
* Captions: TTML positioning fixes
([#2824](https://github.com/google/ExoPlayer/issues/2824)).
* Misc bugfixes.
### r2.4.0 (2017-04-25)
* New modular library structure. You can read more about depending on
individual library modules
[here](https://medium.com/google-exoplayer/exoplayers-new-modular-structure-a916c0874907).
* Variable speed playback support on API level 16+. You can read more about
changing the playback speed
[here](https://medium.com/google-exoplayer/variable-speed-playback-with-exoplayer-e6e6a71e0343)
([#26](https://github.com/google/ExoPlayer/issues/26)).
* New time bar view, including support for displaying ad break markers.
* Support DVB subtitles in MPEG-TS and MKV.
* Support adaptive playback for audio only DASH, HLS and SmoothStreaming
([#1975](https://github.com/google/ExoPlayer/issues/1975)).
* Support for setting extractor flags on DefaultExtractorsFactory
([#2657](https://github.com/google/ExoPlayer/issues/2657)).
* Support injecting custom renderers into SimpleExoPlayer using a new
RenderersFactory interface.
* Correctly set ExoPlayer's internal thread priority to
`THREAD_PRIORITY_AUDIO`.
* TX3G: Support styling and positioning.
* FLV:
* Support MP3 in FLV.
* Skip unhandled metadata rather than failing
([#2634](https://github.com/google/ExoPlayer/issues/2634)).
* Fix potential OutOfMemory errors.
* ID3: Better handle malformed ID3 data
([#2604](https://github.com/google/ExoPlayer/issues/2604),
[#2663](https://github.com/google/ExoPlayer/issues/2663)).
* FFmpeg extension: Fixed build instructions
([#2561](https://github.com/google/ExoPlayer/issues/2561)).
* VP9 extension: Reduced binary size.
* FLAC extension: Enabled 64 bit targets.
* Misc bugfixes.
### r2.3.1 (2017-03-23)
* Fix NPE enabling WebVTT subtitles in DASH streams
([#2596](https://github.com/google/ExoPlayer/issues/2596)).
* Fix skipping to keyframes when MediaCodecVideoRenderer is enabled but
without a Surface
([#2575](https://github.com/google/ExoPlayer/issues/2575)).
* Minor fix for CEA-708 decoder
([#2595](https://github.com/google/ExoPlayer/issues/2595)).
### r2.3.0 (2017-03-16)
* GVR extension: Wraps the Google VR Audio SDK to provide spatial audio
rendering. You can read more about the GVR extension
[here](https://medium.com/google-exoplayer/spatial-audio-with-exoplayer-and-gvr-cecb00e9da5f#.xdjebjd7g).
* DASH improvements:
* Support embedded CEA-608 closed captions
([#2362](https://github.com/google/ExoPlayer/issues/2362)).
* Support embedded EMSG events
([#2176](https://github.com/google/ExoPlayer/issues/2176)).
* Support mspr:pro manifest element
([#2386](https://github.com/google/ExoPlayer/issues/2386)).
* Correct handling of empty segment indices at the start of live events
([#1865](https://github.com/google/ExoPlayer/issues/1865)).
* HLS improvements:
* Respect initial track selection
([#2353](https://github.com/google/ExoPlayer/issues/2353)).
* Reduced frequency of media playlist requests when playback position is
close to the live edge
([#2548](https://github.com/google/ExoPlayer/issues/2548)).
* Exposed the master playlist through ExoPlayer.getCurrentManifest()
([#2537](https://github.com/google/ExoPlayer/issues/2537)).
* Support CLOSED-CAPTIONS #EXT-X-MEDIA type
([#341](https://github.com/google/ExoPlayer/issues/341)).
* Fixed handling of negative values in #EXT-X-SUPPORT
([#2495](https://github.com/google/ExoPlayer/issues/2495)).
* Fixed potential endless buffering state for streams with WebVTT
subtitles ([#2424](https://github.com/google/ExoPlayer/issues/2424)).
* MPEG-TS improvements:
* Support for multiple programs.
* Support for multiple closed captions and caption service descriptors
([#2161](https://github.com/google/ExoPlayer/issues/2161)).
* MP3: Add `FLAG_ENABLE_CONSTANT_BITRATE_SEEKING` extractor option to enable
constant bitrate seeking in MP3 files that would otherwise be unseekable
([#2445](https://github.com/google/ExoPlayer/issues/2445)).
* ID3: Better handle malformed ID3 data
([#2486](https://github.com/google/ExoPlayer/issues/2486)).
* Track selection: Added maxVideoBitrate parameter to DefaultTrackSelector.
* DRM: Add support for CENC ClearKey on API level 21+
([#2361](https://github.com/google/ExoPlayer/issues/2361)).
* DRM: Support dynamic setting of key request headers
([#1924](https://github.com/google/ExoPlayer/issues/1924)).
* SmoothStreaming: Fixed handling of start_time placeholder
([#2447](https://github.com/google/ExoPlayer/issues/2447)).
* FLAC extension: Fix proguard configuration
([#2427](https://github.com/google/ExoPlayer/issues/2427)).
* Misc bugfixes.
### r2.2.0 (2017-01-30)
* Demo app: Automatic recovery from BehindLiveWindowException, plus improved
handling of pausing and resuming live streams
([#2344](https://github.com/google/ExoPlayer/issues/2344)).
* AndroidTV: Added Support for tunneled video playback
([#1688](https://github.com/google/ExoPlayer/issues/1688)).
* DRM: Renamed StreamingDrmSessionManager to DefaultDrmSessionManager and
added support for using offline licenses
([#876](https://github.com/google/ExoPlayer/issues/876)).
* DRM: Introduce OfflineLicenseHelper to help with offline license
acquisition, renewal and release.
* UI: Updated player control assets. Added vector drawables for use on API
level 21 and above.
* UI: Made player control seek bar work correctly with key events if focusable
([#2278](https://github.com/google/ExoPlayer/issues/2278)).
* HLS: Improved support for streams that use EXT-X-DISCONTINUITY without
EXT-X-DISCONTINUITY-SEQUENCE
([#1789](https://github.com/google/ExoPlayer/issues/1789)).
* HLS: Support for EXT-X-START tag
([#1544](https://github.com/google/ExoPlayer/issues/1544)).
* HLS: Check #EXTM3U header is present when parsing the playlist. Fail
gracefully if not
([#2301](https://github.com/google/ExoPlayer/issues/2301)).
* HLS: Fix memory leak
([#2319](https://github.com/google/ExoPlayer/issues/2319)).
* HLS: Fix non-seamless first adaptation where master playlist omits
resolution tags ([#2096](https://github.com/google/ExoPlayer/issues/2096)).
* HLS: Fix handling of WebVTT subtitle renditions with non-standard segment
file extensions ([#2025](https://github.com/google/ExoPlayer/issues/2025)
and [#2355](https://github.com/google/ExoPlayer/issues/2355)).
* HLS: Better handle inconsistent HLS playlist update
([#2249](https://github.com/google/ExoPlayer/issues/2249)).
* DASH: Don't overflow when dealing with large segment numbers
([#2311](https://github.com/google/ExoPlayer/issues/2311)).
* DASH: Fix propagation of language from the manifest
([#2335](https://github.com/google/ExoPlayer/issues/2335)).
* SmoothStreaming: Work around "Offset to sample data was negative" failures
([#2292](https://github.com/google/ExoPlayer/issues/2292),
[#2101](https://github.com/google/ExoPlayer/issues/2101) and
[#1152](https://github.com/google/ExoPlayer/issues/1152)).
* MP3/ID3: Added support for parsing Chapter and URL link frames
([#2316](https://github.com/google/ExoPlayer/issues/2316)).
* MP3/ID3: Handle ID3 frames that end with empty text field
([#2309](https://github.com/google/ExoPlayer/issues/2309)).
* Added ClippingMediaSource for playing clipped portions of media
([#1988](https://github.com/google/ExoPlayer/issues/1988)).
* Added convenience methods to query whether the current window is dynamic and
seekable ([#2320](https://github.com/google/ExoPlayer/issues/2320)).
* Support setting of default headers on HttpDataSource.Factory implementations
([#2166](https://github.com/google/ExoPlayer/issues/2166)).
* Fixed cache failures when using an encrypted cache content index.
* Fix visual artifacts when switching output surface
([#2093](https://github.com/google/ExoPlayer/issues/2093)).
* Fix gradle + proguard configurations.
* Fix player position when replacing the MediaSource
([#2369](https://github.com/google/ExoPlayer/issues/2369)).
* Misc bug fixes, including
[#2330](https://github.com/google/ExoPlayer/issues/2330),
[#2269](https://github.com/google/ExoPlayer/issues/2269),
[#2252](https://github.com/google/ExoPlayer/issues/2252),
[#2264](https://github.com/google/ExoPlayer/issues/2264) and
[#2290](https://github.com/google/ExoPlayer/issues/2290).
### r2.1.1 (2016-12-20)
* Fix some subtitle types (e.g. WebVTT) being displayed out of sync
([#2208](https://github.com/google/ExoPlayer/issues/2208)).
* Fix incorrect position reporting for on-demand HLS media that includes
EXT-X-PROGRAM-DATE-TIME tags
([#2224](https://github.com/google/ExoPlayer/issues/2224)).
* Fix issue where playbacks could get stuck in the initial buffering state if
over 1MB of data needs to be read to initialize the playback.
### r2.1.0 (2016-12-14)
* HLS: Support for seeking in live streams
([#87](https://github.com/google/ExoPlayer/issues/87)).
* HLS: Improved support:
* Support for EXT-X-PROGRAM-DATE-TIME
([#747](https://github.com/google/ExoPlayer/issues/747)).
* Improved handling of sample timestamps and their alignment across
variants and renditions.
* Fix issue that could cause playbacks to get stuck in an endless initial
buffering state.
* Correctly propagate BehindLiveWindowException instead of
IndexOutOfBoundsException exception
([#1695](https://github.com/google/ExoPlayer/issues/1695)).
* MP3/MP4: Support for ID3 metadata, including embedded album art
([#979](https://github.com/google/ExoPlayer/issues/979)).
* Improved customization of UI components. You can read about customization of
ExoPlayer's UI components
[here](https://medium.com/google-exoplayer/customizing-exoplayers-ui-components-728cf55ee07a#.9ewjg7avi).
* Robustness improvements when handling MediaSource timeline changes and
MediaPeriod transitions.
* CEA-608: Support for caption styling and positioning.
* MPEG-TS: Improved support:
* Support injection of custom TS payload readers.
* Support injection of custom section payload readers.
* Support SCTE-35 splice information messages.
* Support multiple table sections in a single PSI section.
* Fix NullPointerException when an unsupported stream type is encountered
([#2149](https://github.com/google/ExoPlayer/issues/2149)).
* Avoid failure when expected ID3 header not found
([#1966](https://github.com/google/ExoPlayer/issues/1966)).
* Improvements to the upstream cache package.
* Support caching of media segments for DASH, HLS and SmoothStreaming.
Note that caching of manifest and playlist files is still not supported
in the (normal) case where the corresponding responses are compressed.
* Support caching for ExtractorMediaSource based playbacks.
* Improved flexibility of SimpleExoPlayer
([#2102](https://github.com/google/ExoPlayer/issues/2102)).
* Fix issue where only the audio of a video would play due to capability
detection issues ([#2007](https://github.com/google/ExoPlayer/issues/2007),
[#2034](https://github.com/google/ExoPlayer/issues/2034) and
[#2157](https://github.com/google/ExoPlayer/issues/2157)).
* Fix issues that could cause ExtractorMediaSource based playbacks to get
stuck buffering ([#1962](https://github.com/google/ExoPlayer/issues/1962)).
* Correctly set SimpleExoPlayerView surface aspect ratio when an active player
is attached ([#2077](https://github.com/google/ExoPlayer/issues/2077)).
* OGG: Fix playback of short OGG files
([#1976](https://github.com/google/ExoPlayer/issues/1976)).
* MP4: Support `.mp3` tracks
([#2066](https://github.com/google/ExoPlayer/issues/2066)).
* SubRip: Don't fail playbacks if SubRip file contains negative timestamps
([#2145](https://github.com/google/ExoPlayer/issues/2145)).
* Misc bugfixes.
### r2.0.4 (2016-10-20)
* Fix crash on Jellybean devices when using playback controls
([#1965](https://github.com/google/ExoPlayer/issues/1965)).
### r2.0.3 (2016-10-17)
* Fixed NullPointerException in ExtractorMediaSource
([#1914](https://github.com/google/ExoPlayer/issues/1914)).
* Fixed NullPointerException in HlsMediaPeriod
([#1907](https://github.com/google/ExoPlayer/issues/1907)).
* Fixed memory leak in PlaybackControlView
([#1908](https://github.com/google/ExoPlayer/issues/1908)).
* Fixed strict mode violation when using
SimpleExoPlayer.setVideoPlayerTextureView().
* Fixed L3 Widevine provisioning
([#1925](https://github.com/google/ExoPlayer/issues/1925)).
* Fixed hiding of controls with use_controller="false"
([#1919](https://github.com/google/ExoPlayer/issues/1919)).
* Improvements to Cronet network stack extension.
* Misc bug fixes.
### r2.0.2 (2016-10-06)
* Fixes for MergingMediaSource and sideloaded subtitles.
([#1882](https://github.com/google/ExoPlayer/issues/1882),
[#1854](https://github.com/google/ExoPlayer/issues/1854),
[#1900](https://github.com/google/ExoPlayer/issues/1900)).
* Reduced effect of application code leaking player references
([#1855](https://github.com/google/ExoPlayer/issues/1855)).
* Initial support for fragmented MP4 in HLS.
* Misc bug fixes and minor features.
### r2.0.1 (2016-09-30)
* Fix playback of short duration content
([#1837](https://github.com/google/ExoPlayer/issues/1837)).
* Fix MergingMediaSource preparation issue
([#1853](https://github.com/google/ExoPlayer/issues/1853)).
* Fix live stream buffering (out of memory) issue
([#1825](https://github.com/google/ExoPlayer/issues/1825)).
### r2.0.0 (2016-09-14)
ExoPlayer 2.x is a major iteration of the library. It includes significant API
and architectural changes, new features and many bug fixes. You can read about
some of the motivations behind ExoPlayer 2.x
[here](https://medium.com/google-exoplayer/exoplayer-2-x-why-what-and-when-74fd9cb139#.am7h8nytm).
* Root package name changed to `com.google.android.exoplayer2`. The library
structure and class names have also been sanitized. Read more
[here](https://medium.com/google-exoplayer/exoplayer-2-x-new-package-and-class-names-ef8e1d9ba96f#.lv8sd4nez).
* Key architectural changes:
* Late binding between rendering and media source components. Allows the
same rendering components to be re-used from one playback to another.
Enables features such as gapless playback through playlists and DASH
multi-period support.
* Improved track selection design. More details can be found
[here](https://medium.com/google-exoplayer/exoplayer-2-x-track-selection-2b62ff712cc9#.n00zo76b6).
* LoadControl now used to control buffering and loading across all
playback types.
* Media source components given additional structure. A new MediaSource
class has been introduced. MediaSources expose Timelines that describe
the media they expose, and can consist of multiple MediaPeriods. This
enables features such as seeking in live playbacks and DASH multi-period
support.
* Responsibility for loading the initial DASH/SmoothStreaming/HLS manifest
is promoted to the corresponding MediaSource components and is no longer
the application's responsibility.
* Higher level abstractions such as SimpleExoPlayer have been added to the
library. These make the library easier to use for common use cases. The
demo app is halved in size as a result, whilst at the same time gaining
more functionality. Read more
[here](https://medium.com/google-exoplayer/exoplayer-2-x-improved-demo-app-d97171aaaaa1).
* Enhanced library support for implementing audio extensions. Read more
[here](https://medium.com/google-exoplayer/exoplayer-2-x-new-audio-features-cfb26c2883a#.ua75vu4s3).
* Format and MediaFormat are replaced by a single Format class.
* Key new features:
* Playlist support. Includes support for gapless playback between playlist
items and consistent application of LoadControl and TrackSelector
policies when transitioning between items
([#1270](https://github.com/google/ExoPlayer/issues/1270)).
* Seeking in live playbacks for DASH and SmoothStreaming
([#291](https://github.com/google/ExoPlayer/issues/291)).
* DASH multi-period support
([#557](https://github.com/google/ExoPlayer/issues/557)).
* MediaSource composition allows MediaSources to be concatenated into a
playlist, merged and looped. Read more
[here](https://medium.com/google-exoplayer/exoplayer-2-x-mediasource-composition-6c285fcbca1f#.zfha8qupz).
* Looping support (see above)
([#490](https://github.com/google/ExoPlayer/issues/490)).
* Ability to query information about all tracks in a piece of media
(including those not supported by the device)
([#1121](https://github.com/google/ExoPlayer/issues/1121)).
* Improved player controls.
* Support for PSSH in fMP4 moof atoms
([#1143](https://github.com/google/ExoPlayer/issues/1143)).
* Support for Opus in Ogg
([#1447](https://github.com/google/ExoPlayer/issues/1447)).
* CacheDataSource support for standalone media file playbacks (mp3, mp4
etc).
* FFMPEG extension (for audio only).
* Key bug fixes:
* Removed unnecessary secondary requests when playing standalone media
files ([#1041](https://github.com/google/ExoPlayer/issues/1041)).
* Fixed playback of video only (i.e. no audio) live streams
([#758](https://github.com/google/ExoPlayer/issues/758)).
* Fixed silent failure when media buffer is too small
([#583](https://github.com/google/ExoPlayer/issues/583)).
* Suppressed "Sending message to a Handler on a dead thread" warnings
([#426](https://github.com/google/ExoPlayer/issues/426)).
# Legacy release notes
Note: Since ExoPlayer V1 is still being maintained alongside V2, there is some
overlap between these notes and the notes above. r2.0.0 followed from r1.5.11,
and hence it can be assumed that all changes in r1.5.11 and earlier are included
in all V2 releases. This cannot be assumed for changes in r1.5.12 and later,
however it can be assumed that all such changes are included in the most recent
V2 release.
### r1.5.16
* VP9 extension: Reduced binary size.
* FLAC extension: Enabled 64 bit targets and fixed proguard config.
* Misc bugfixes.
### r1.5.15
* SmoothStreaming: Fixed handling of start_time placeholder
([#2447](https://github.com/google/ExoPlayer/issues/2447)).
* Misc bugfixes.
### r1.5.14
* Fixed cache failures when using an encrypted cache content index.
* SmoothStreaming: Work around "Offset to sample data was negative" failures
([#2292](https://github.com/google/ExoPlayer/issues/2292),
[#2101](https://github.com/google/ExoPlayer/issues/2101) and
[#1152](https://github.com/google/ExoPlayer/issues/1152)).
### r1.5.13
* Improvements to the upstream cache package.
* MP4: Support `.mp3` tracks
([#2066](https://github.com/google/ExoPlayer/issues/2066)).
* SubRip: Don't fail playbacks if SubRip file contains negative timestamps
([#2145](https://github.com/google/ExoPlayer/issues/2145)).
* MPEG-TS: Avoid failure when expected ID3 header not found
([#1966](https://github.com/google/ExoPlayer/issues/1966)).
* Misc bugfixes.
### r1.5.12
* Improvements to Cronet network stack extension.
* Fix bug in demo app introduced in r1.5.11 that caused L3 Widevine
provisioning requests to fail.
* Misc bugfixes.
### r1.5.11
* Cronet network stack extension.
* HLS: Fix propagation of language for alternative audio renditions
([#1784](https://github.com/google/ExoPlayer/issues/1784)).
* WebM: Support for subsample encryption.
* ID3: Fix EOS detection for 2-byte encodings
([#1774](https://github.com/google/ExoPlayer/issues/1774)).
* MPEG-TS: Support multiple tracks of the same type.
* MPEG-TS: Work toward robust handling of stream corruption.
* Fix ContentDataSource failures triggered by garbage collector
([#1759](https://github.com/google/ExoPlayer/issues/1759)).
### r1.5.10
* HLS: Stability fixes.
* MP4: Support for stz2 Atoms.
* Enable 4K format selection on Sony AndroidTV + nVidia SHIELD.
* TX3G caption fixes.
### r1.5.9
* MP4: Fixed incorrect sniffing in some cases (#1523).
* MP4: Improved file compatibility (#1567).
* ID3: Support for TIT2 and APIC frames.
* Fixed querying of platform decoders on some devices.
* Misc bug fixes.
### r1.5.8
* HLS: Fix handling of HTTP redirects.
* Audio: Minor adjustment to improve A/V sync.
* OGG: Support FLAC in OGG.
* TTML: Support regions.
* WAV/PCM: Support 8, 24 and 32-bit WAV and PCM audio.
* Misc bug fixes and performance optimizations.
### r1.5.7
* OGG: Support added for OGG.
* FLAC: Support for FLAC extraction and playback (via an extension).
* HLS: Multiple audio track support (via Renditions).
* FMP4: Support multiple tracks in fragmented MP4 (not applicable to
DASH/SmoothStreaming).
* WAV: Support for 16-bit WAV files.
* MKV: Support non-square pixel formats.
* Misc bug fixes.
### r1.5.6
* MP3: Fix mono streams playing at 2x speed on some MediaTek based devices
(#801).
* MP3: Fix playback of some streams when stream length is unknown.
* ID3: Support multiple frames of the same type in a single tag.
* CEA-608: Correctly handle repeated control characters, fixing an issue in
which captions would immediately disappear.
* AVC3: Fix decoder failures on some MediaTek devices in the case where the
first buffer fed to the decoder does not start with SPS/PPS NAL units.
* Misc bug fixes.
### r1.5.5
* DASH: Enable MP4 embedded WebVTT playback (#1185)
* HLS: Fix handling of extended ID3 tags in MPEG-TS (#1181)
* MP3: Fix incorrect position calculation in VBRI header (#1197)
* Fix issue seeking backward using SingleSampleSource (#1193)
### r1.5.4
* HLS: Support for variant selection and WebVtt subtitles.
* MP4: Support for embedded WebVtt.
* Improved device compatibility.
* Fix for resource leak (Issue #1066).
* Misc bug fixes + minor features.
### r1.5.3
* Support for FLV (without seeking).
* MP4: Fix for playback of media containing basic edit lists.
* QuickTime: Fix parsing of QuickTime style audio sample entry.
* HLS: Add H262 support for devices that have an H262 decoder.
* Allow AudioTrack PlaybackParams (e.g. speed/pitch) on API level 23+.
* Correctly detect 4K displays on API level 23+.
* Misc bug fixes.
### r1.5.2
* MPEG-TS/HLS: Fix frame drops playing H265 video.
* SmoothStreaming: Fix parsing of ProtectionHeader.
### r1.5.1
* Enable smooth frame release by default.
* Added OkHttpDataSource extension.
* AndroidTV: Correctly detect 4K display size on Bravia devices.
* FMP4: Handle non-sample data in mdat boxes.
* TTML: Fix parsing of some colors on Jellybean.
* SmoothStreaming: Ignore tfdt boxes.
* Misc bug fixes.
### r1.5.0
* Multi-track support.
* DASH: Limited support for multi-period manifests.
* HLS: Smoother format adaptation.
* HLS: Support for MP3 media segments.
* TTML: Support for most embedded TTML styling.
* WebVTT: Enhanced positioning support.
* Initial playback tests.
* Misc bug fixes.
### r1.4.2
* Implemented automatic format detection for regular container formats.
* Added UdpDataSource for connecting to multicast streams.
* Improved robustness for MP4 playbacks.
* Misc bug fixes.
### r1.4.1
* HLS: Fix premature playback failures that could occur in some cases.
### r1.4.0
* Support for extracting Matroska streams (implemented by WebmExtractor).
* Support for tx3g captions in MP4 streams.
* Support for H.265 in MPEG-TS streams on supported devices.
* HLS: Added support for MPEG audio (e.g. MP3) in TS media segments.
* HLS: Improved robustness against missing chunks and variants.
* MP4: Added support for embedded MPEG audio (e.g. MP3).
* TTML: Improved handling of whitespace.
* DASH: Support Mpd.Location element.
* Add option to TsExtractor to allow non-IDR keyframes.
* Added MulticastDataSource for connecting to multicast streams.
* (WorkInProgress) - First steps to supporting seeking in DASH DVR window.
* (WorkInProgress) - First steps to supporting styled + positioned subtitles.
* Misc bug fixes.
### r1.3.3
* HLS: Fix failure when playing HLS AAC streams.
* Misc bug fixes.
### r1.3.2
* DataSource improvements: `DefaultUriDataSource` now handles http://,
https://, file://, asset:// and content:// URIs automatically. It also
handles file:///android_asset/* URIs, and file paths like /path/to/media.mp4
where the scheme is omitted.
* HLS: Fix for some ID3 events being dropped.
* HLS: Correctly handle 0x0 and floating point RESOLUTION tags.
* Mp3Extractor: robustness improvements.
### r1.3.1
* No notes provided.
| google/ExoPlayer | RELEASENOTES.md | Markdown | apache-2.0 | 238,218 |
/*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.worker.block;
import alluxio.Configuration;
import alluxio.Constants;
import alluxio.PropertyKey;
import alluxio.RuntimeConstants;
import alluxio.Server;
import alluxio.Sessions;
import alluxio.exception.BlockAlreadyExistsException;
import alluxio.exception.BlockDoesNotExistException;
import alluxio.exception.ExceptionMessage;
import alluxio.exception.InvalidWorkerStateException;
import alluxio.exception.WorkerOutOfSpaceException;
import alluxio.heartbeat.HeartbeatContext;
import alluxio.heartbeat.HeartbeatThread;
import alluxio.master.MasterClientConfig;
import alluxio.metrics.MetricsSystem;
import alluxio.proto.dataserver.Protocol;
import alluxio.retry.RetryUtils;
import alluxio.retry.ExponentialTimeBoundedRetry;
import alluxio.thrift.BlockWorkerClientService;
import alluxio.underfs.UfsManager;
import alluxio.util.CommonUtils;
import alluxio.util.ThreadFactoryUtils;
import alluxio.wire.FileInfo;
import alluxio.wire.WorkerNetAddress;
import alluxio.worker.AbstractWorker;
import alluxio.worker.SessionCleaner;
import alluxio.worker.block.io.BlockReader;
import alluxio.worker.block.io.BlockWriter;
import alluxio.worker.block.meta.BlockMeta;
import alluxio.worker.block.meta.TempBlockMeta;
import alluxio.worker.file.FileSystemMasterClient;
import com.codahale.metrics.Gauge;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import org.apache.thrift.TProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.concurrent.NotThreadSafe;
import javax.annotation.concurrent.ThreadSafe;
/**
* The class is responsible for managing all top level components of the Block Worker.
*
* This includes:
*
* Servers: {@link BlockWorkerClientServiceHandler} (RPC Server)
*
* Periodic Threads: {@link BlockMasterSync} (Worker to Master continuous communication)
*
* Logic: {@link DefaultBlockWorker} (Logic for all block related storage operations)
*/
@NotThreadSafe // TODO(jiri): make thread-safe (c.f. ALLUXIO-1624)
public final class DefaultBlockWorker extends AbstractWorker implements BlockWorker {
  private static final Logger LOG = LoggerFactory.getLogger(DefaultBlockWorker.class);

  /** Runnable responsible for heartbeating and registration with master. */
  private BlockMasterSync mBlockMasterSync;

  /** Runnable responsible for fetching pinlist from master. */
  private PinListSync mPinListSync;

  /** Runnable responsible for cleaning up potential zombie sessions. */
  private SessionCleaner mSessionCleaner;

  /** Client for all block master communication; acquired from {@link #mBlockMasterClientPool}. */
  private final BlockMasterClient mBlockMasterClient;

  /**
   * Block master clients. commitBlock is the only reason to keep a pool of block master clients
   * on each worker. We should either improve our RPC model in the master or get rid of the
   * necessity to call commitBlock in the workers.
   */
  private final BlockMasterClientPool mBlockMasterClientPool;

  /** Client for all file system master communication. */
  private final FileSystemMasterClient mFileSystemMasterClient;

  /** Block store delta reporter for master heartbeat. */
  private BlockHeartbeatReporter mHeartbeatReporter;
  /** Metrics reporter that listens on block events and increases metrics counters. */
  private BlockMetricsReporter mMetricsReporter;
  /** Session metadata, used to keep track of session heartbeats. */
  private Sessions mSessions;
  /** Block Store manager. */
  private BlockStore mBlockStore;
  /** Network address of this worker; set in {@link #start(WorkerNetAddress)}. */
  private WorkerNetAddress mAddress;

  /** The under file system block store. */
  private final UnderFileSystemBlockStore mUnderFileSystemBlockStore;

  /**
   * The worker ID for this worker. This is initialized in {@link #start(WorkerNetAddress)} and may
   * be updated by the block sync thread if the master requests re-registration.
   */
  private AtomicReference<Long> mWorkerId;
  /**
   * Constructs a default block worker with default collaborators. Delegates to the full
   * constructor, supplying a new block master client pool, a file system master client built from
   * the default master client configuration, a fresh session tracker and a tiered block store.
   *
   * @param ufsManager ufs manager
   */
  DefaultBlockWorker(UfsManager ufsManager) {
    this(new BlockMasterClientPool(), new FileSystemMasterClient(MasterClientConfig.defaults()),
        new Sessions(), new TieredBlockStore(), ufsManager);
  }
  /**
   * Constructs a default block worker. Acquires one client from the given block master client
   * pool, wires the heartbeat and metrics reporters into the block store as event listeners, and
   * registers the worker's metric gauges.
   *
   * @param blockMasterClientPool a client pool for talking to the block master
   * @param fileSystemMasterClient a client for talking to the file system master
   * @param sessions an object for tracking and cleaning up client sessions
   * @param blockStore an Alluxio block store
   * @param ufsManager ufs manager
   */
  DefaultBlockWorker(BlockMasterClientPool blockMasterClientPool,
      FileSystemMasterClient fileSystemMasterClient, Sessions sessions, BlockStore blockStore,
      UfsManager ufsManager) {
    // The fixed pool of 4 daemon threads runs the periodic heartbeat tasks submitted in start().
    super(Executors
        .newFixedThreadPool(4, ThreadFactoryUtils.build("block-worker-heartbeat-%d", true)));
    mBlockMasterClientPool = blockMasterClientPool;
    mBlockMasterClient = mBlockMasterClientPool.acquire();
    mFileSystemMasterClient = fileSystemMasterClient;
    mHeartbeatReporter = new BlockHeartbeatReporter();
    mMetricsReporter = new BlockMetricsReporter();
    mSessions = sessions;
    mBlockStore = blockStore;
    // -1 is a sentinel meaning "not yet registered"; the real id is fetched in start().
    mWorkerId = new AtomicReference<>(-1L);

    // Both reporters observe block store events: one accumulates deltas for the master
    // heartbeat, the other updates metrics counters.
    mBlockStore.registerBlockStoreEventListener(mHeartbeatReporter);
    mBlockStore.registerBlockStoreEventListener(mMetricsReporter);
    mUnderFileSystemBlockStore = new UnderFileSystemBlockStore(mBlockStore, ufsManager);

    Metrics.registerGauges(this);
  }
  @Override
  public Set<Class<? extends Server>> getDependencies() {
    // No dependencies on other servers; returns a fresh empty set.
    return new HashSet<>();
  }
  @Override
  public String getName() {
    return Constants.BLOCK_WORKER_NAME;
  }
  @Override
  public BlockStore getBlockStore() {
    return mBlockStore;
  }
  @Override
  public BlockWorkerClientServiceHandler getWorkerServiceHandler() {
    // A new handler instance is created per call.
    return new BlockWorkerClientServiceHandler(this);
  }
  @Override
  public Map<String, TProcessor> getServices() {
    // Exposes the Thrift processor for the block worker client service.
    Map<String, TProcessor> services = new HashMap<>();
    services.put(Constants.BLOCK_WORKER_CLIENT_SERVICE_NAME,
        new BlockWorkerClientService.Processor<>(getWorkerServiceHandler()));
    return services;
  }
  @Override
  public AtomicReference<Long> getWorkerId() {
    // The live reference is returned (not a snapshot); callers observe
    // re-registration updates made by the block sync thread.
    return mWorkerId;
  }
/**
* Runs the block worker. The thread must be called after all services (e.g., web, dataserver)
* started.
*/
@Override
public void start(WorkerNetAddress address) throws IOException {
mAddress = address;
try {
RetryUtils.retry("get worker id", () -> mWorkerId.set(mBlockMasterClient.getId(address)),
ExponentialTimeBoundedRetry.builder()
.withMaxDuration(Duration
.ofMillis(Configuration.getMs(PropertyKey.WORKER_MASTER_CONNECT_RETRY_TIMEOUT)))
.withInitialSleep(Duration.ofMillis(100))
.withMaxSleep(Duration.ofSeconds(5))
.build());
} catch (Exception e) {
throw new RuntimeException("Failed to get a worker id from block master: " + e.getMessage());
}
Preconditions.checkNotNull(mWorkerId, "mWorkerId");
Preconditions.checkNotNull(mAddress, "mAddress");
// Setup BlockMasterSync
mBlockMasterSync = new BlockMasterSync(this, mWorkerId, mAddress, mBlockMasterClient);
// Setup PinListSyncer
mPinListSync = new PinListSync(this, mFileSystemMasterClient);
// Setup session cleaner
mSessionCleaner = new SessionCleaner(mSessions, mBlockStore, mUnderFileSystemBlockStore);
// Setup space reserver
if (Configuration.getBoolean(PropertyKey.WORKER_TIERED_STORE_RESERVER_ENABLED)) {
getExecutorService().submit(
new HeartbeatThread(HeartbeatContext.WORKER_SPACE_RESERVER, new SpaceReserver(this),
(int) Configuration.getMs(PropertyKey.WORKER_TIERED_STORE_RESERVER_INTERVAL_MS)));
}
getExecutorService()
.submit(new HeartbeatThread(HeartbeatContext.WORKER_BLOCK_SYNC, mBlockMasterSync,
(int) Configuration.getMs(PropertyKey.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS)));
// Start the pinlist syncer to perform the periodical fetching
getExecutorService()
.submit(new HeartbeatThread(HeartbeatContext.WORKER_PIN_LIST_SYNC, mPinListSync,
(int) Configuration.getMs(PropertyKey.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS)));
// Start the session cleanup checker to perform the periodical checking
getExecutorService().submit(mSessionCleaner);
}
/**
* Stops the block worker. This method should only be called to terminate the worker.
*/
@Override
public void stop() {
// Steps to shutdown:
// 1. Gracefully shut down the runnables running in the executors.
// 2. Shutdown the executors.
// 3. Shutdown the clients. This needs to happen after the executors is shutdown because
// runnables running in the executors might be using the clients.
mSessionCleaner.stop();
// The executor shutdown needs to be done in a loop with retry because the interrupt
// signal can sometimes be ignored.
CommonUtils.waitFor("block worker executor shutdown", new Function<Void, Boolean>() {
@Override
public Boolean apply(Void input) {
getExecutorService().shutdownNow();
try {
return getExecutorService().awaitTermination(100, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
});
mBlockMasterClientPool.release(mBlockMasterClient);
try {
mBlockMasterClientPool.close();
} catch (IOException e) {
LOG.warn("Failed to close the block master client pool with error {}.", e.getMessage());
}
mFileSystemMasterClient.close();
}
  @Override
  public void abortBlock(long sessionId, long blockId) throws BlockAlreadyExistsException,
      BlockDoesNotExistException, InvalidWorkerStateException, IOException {
    // Delegates directly to the local block store.
    mBlockStore.abortBlock(sessionId, blockId);
  }
  @Override
  public void accessBlock(long sessionId, long blockId) throws BlockDoesNotExistException {
    // Delegates directly to the local block store.
    mBlockStore.accessBlock(sessionId, blockId);
  }
  @Override
  public void commitBlock(long sessionId, long blockId)
      throws BlockAlreadyExistsException, BlockDoesNotExistException, InvalidWorkerStateException,
      IOException, WorkerOutOfSpaceException {
    // NOTE: this may be invoked multiple times due to retry on client side.
    // TODO(binfan): find a better way to handle retry logic
    try {
      mBlockStore.commitBlock(sessionId, blockId);
    } catch (BlockAlreadyExistsException e) {
      LOG.debug("Block {} has been in block store, this could be a retry due to master-side RPC "
          + "failure, therefore ignore the exception", blockId, e);
    }
    // TODO(calvin): Reconsider how to do this without heavy locking.
    // Block successfully committed, update master with new block metadata
    // The block stays locked while its metadata is read and reported, so it
    // cannot be evicted/moved mid-report; the lock is dropped in the finally.
    Long lockId = mBlockStore.lockBlock(sessionId, blockId);
    // A client is acquired from the pool here (rather than reusing the
    // long-lived mBlockMasterClient) — presumably so concurrent commits do not
    // serialize on one connection; verify against pool semantics.
    BlockMasterClient blockMasterClient = mBlockMasterClientPool.acquire();
    try {
      BlockMeta meta = mBlockStore.getBlockMeta(sessionId, blockId, lockId);
      BlockStoreLocation loc = meta.getBlockLocation();
      Long length = meta.getBlockSize();
      BlockStoreMeta storeMeta = mBlockStore.getBlockStoreMeta();
      Long bytesUsedOnTier = storeMeta.getUsedBytesOnTiers().get(loc.tierAlias());
      blockMasterClient.commitBlock(mWorkerId.get(), bytesUsedOnTier, loc.tierAlias(), blockId,
          length);
    } catch (Exception e) {
      // Any failure reporting to the master surfaces as an IOException with the
      // original exception preserved as the cause.
      throw new IOException(ExceptionMessage.FAILED_COMMIT_BLOCK_TO_MASTER.getMessage(blockId), e);
    } finally {
      // Always return the client and release the block lock, even on failure.
      mBlockMasterClientPool.release(blockMasterClient);
      mBlockStore.unlockBlock(lockId);
    }
  }
  @Override
  public String createBlock(long sessionId, long blockId, String tierAlias, long initialBytes)
      throws BlockAlreadyExistsException, WorkerOutOfSpaceException, IOException {
    // Any directory in the requested tier may host the new temp block.
    BlockStoreLocation loc = BlockStoreLocation.anyDirInTier(tierAlias);
    TempBlockMeta createdBlock;
    try {
      createdBlock = mBlockStore.createBlock(sessionId, blockId, loc, initialBytes);
    } catch (WorkerOutOfSpaceException e) {
      // Rewrap with this worker's address so the client error message points at
      // the worker that ran out of space (and at the debugging docs).
      InetSocketAddress address =
          InetSocketAddress.createUnresolved(mAddress.getHost(), mAddress.getRpcPort());
      throw new WorkerOutOfSpaceException(ExceptionMessage.CANNOT_REQUEST_SPACE
          .getMessageWithUrl(RuntimeConstants.ALLUXIO_DEBUG_DOCS_URL, address, blockId), e);
    }
    // Returns the local filesystem path of the temp block for the writer.
    return createdBlock.getPath();
  }
  @Override
  public void createBlockRemote(long sessionId, long blockId, String tierAlias, long initialBytes)
      throws BlockAlreadyExistsException, WorkerOutOfSpaceException, IOException {
    // Same as createBlock but the caller does not need the local path.
    BlockStoreLocation loc = BlockStoreLocation.anyDirInTier(tierAlias);
    mBlockStore.createBlock(sessionId, blockId, loc, initialBytes);
  }
  @Override
  public void freeSpace(long sessionId, long availableBytes, String tierAlias)
      throws WorkerOutOfSpaceException, BlockDoesNotExistException, IOException,
      BlockAlreadyExistsException, InvalidWorkerStateException {
    // Frees space anywhere within the named tier.
    BlockStoreLocation location = BlockStoreLocation.anyDirInTier(tierAlias);
    mBlockStore.freeSpace(sessionId, availableBytes, location);
  }
  @Override
  public BlockWriter getTempBlockWriterRemote(long sessionId, long blockId)
      throws BlockDoesNotExistException, BlockAlreadyExistsException, InvalidWorkerStateException,
      IOException {
    return mBlockStore.getBlockWriter(sessionId, blockId);
  }
  @Override
  public BlockHeartbeatReport getReport() {
    // Drains the accumulated block-change events into a heartbeat report.
    return mHeartbeatReporter.generateReport();
  }
  @Override
  public BlockStoreMeta getStoreMeta() {
    return mBlockStore.getBlockStoreMeta();
  }
  @Override
  public BlockStoreMeta getStoreMetaFull() {
    // Unlike getStoreMeta, this includes the full block list.
    return mBlockStore.getBlockStoreMetaFull();
  }
  @Override
  public BlockMeta getVolatileBlockMeta(long blockId) throws BlockDoesNotExistException {
    // "Volatile" meta is read without holding a block lock.
    return mBlockStore.getVolatileBlockMeta(blockId);
  }
  @Override
  public BlockMeta getBlockMeta(long sessionId, long blockId, long lockId)
      throws BlockDoesNotExistException, InvalidWorkerStateException {
    return mBlockStore.getBlockMeta(sessionId, blockId, lockId);
  }
  @Override
  public boolean hasBlockMeta(long blockId) {
    return mBlockStore.hasBlockMeta(blockId);
  }
  @Override
  public long lockBlock(long sessionId, long blockId) throws BlockDoesNotExistException {
    return mBlockStore.lockBlock(sessionId, blockId);
  }
  @Override
  public long lockBlockNoException(long sessionId, long blockId) {
    // Variant of lockBlock that signals failure via its return value instead of
    // throwing.
    return mBlockStore.lockBlockNoException(sessionId, blockId);
  }
  @Override
  public void moveBlock(long sessionId, long blockId, String tierAlias)
      throws BlockDoesNotExistException, BlockAlreadyExistsException, InvalidWorkerStateException,
      WorkerOutOfSpaceException, IOException {
    // TODO(calvin): Move this logic into BlockStore#moveBlockInternal if possible
    // Because the move operation is expensive, we first check if the operation is necessary
    BlockStoreLocation dst = BlockStoreLocation.anyDirInTier(tierAlias);
    long lockId = mBlockStore.lockBlock(sessionId, blockId);
    try {
      BlockMeta meta = mBlockStore.getBlockMeta(sessionId, blockId, lockId);
      if (meta.getBlockLocation().belongsTo(dst)) {
        // Already in the destination tier; nothing to do.
        return;
      }
    } finally {
      mBlockStore.unlockBlock(lockId);
    }
    // Execute the block move if necessary
    // NOTE(review): the lock is released before the move, so the block's
    // location could change in between; presumably moveBlock re-validates
    // internally — confirm in BlockStore.
    mBlockStore.moveBlock(sessionId, blockId, dst);
  }
  @Override
  public String readBlock(long sessionId, long blockId, long lockId)
      throws BlockDoesNotExistException, InvalidWorkerStateException {
    // Returns the local filesystem path of a locked block.
    BlockMeta meta = mBlockStore.getBlockMeta(sessionId, blockId, lockId);
    return meta.getPath();
  }
  @Override
  public BlockReader readBlockRemote(long sessionId, long blockId, long lockId)
      throws BlockDoesNotExistException, InvalidWorkerStateException, IOException {
    return mBlockStore.getBlockReader(sessionId, blockId, lockId);
  }
  @Override
  public BlockReader readUfsBlock(long sessionId, long blockId, long offset)
      throws BlockDoesNotExistException, IOException {
    // Reads go through the UFS block store rather than the local block store.
    return mUnderFileSystemBlockStore.getBlockReader(sessionId, blockId, offset);
  }
  @Override
  public void removeBlock(long sessionId, long blockId)
      throws InvalidWorkerStateException, BlockDoesNotExistException, IOException {
    mBlockStore.removeBlock(sessionId, blockId);
  }
  @Override
  public void requestSpace(long sessionId, long blockId, long additionalBytes)
      throws BlockDoesNotExistException, WorkerOutOfSpaceException, IOException {
    mBlockStore.requestSpace(sessionId, blockId, additionalBytes);
  }
  @Override
  public void unlockBlock(long lockId) throws BlockDoesNotExistException {
    mBlockStore.unlockBlock(lockId);
  }
  @Override
  // TODO(calvin): Remove when lock and reads are separate operations.
  public boolean unlockBlock(long sessionId, long blockId) {
    return mBlockStore.unlockBlock(sessionId, blockId);
  }
  @Override
  public void sessionHeartbeat(long sessionId) {
    // Keeps the session alive so the session cleaner does not reclaim it.
    mSessions.sessionHeartbeat(sessionId);
  }
  @Override
  public void updatePinList(Set<Long> pinnedInodes) {
    mBlockStore.updatePinnedInodes(pinnedInodes);
  }
  @Override
  public FileInfo getFileInfo(long fileId) throws IOException {
    // Fetched from the file system master, not from local state.
    return mFileSystemMasterClient.getFileInfo(fileId);
  }
  @Override
  public boolean openUfsBlock(long sessionId, long blockId, Protocol.OpenUfsBlockOptions options)
      throws BlockAlreadyExistsException {
    // Returns whether access was granted (concurrency limits may deny it).
    return mUnderFileSystemBlockStore.acquireAccess(sessionId, blockId, options);
  }
  @Override
  public void closeUfsBlock(long sessionId, long blockId)
      throws BlockAlreadyExistsException, IOException, WorkerOutOfSpaceException {
    // Closes the UFS reader/writer and, if this session cached the block into a
    // local temp block, commits it so it becomes available in the block store.
    try {
      mUnderFileSystemBlockStore.closeReaderOrWriter(sessionId, blockId);
      if (mBlockStore.getTempBlockMeta(sessionId, blockId) != null) {
        try {
          commitBlock(sessionId, blockId);
        } catch (BlockDoesNotExistException e) {
          // This can only happen if the session is expired. Ignore this exception if that happens.
          LOG.warn("Block {} does not exist while being committed.", blockId);
        } catch (InvalidWorkerStateException e) {
          // This can happen if there are multiple sessions writing to the same block.
          // BlockStore#getTempBlockMeta does not check whether the temp block belongs to
          // the sessionId.
          LOG.debug("Invalid worker state while committing block.", e);
        }
      }
    } finally {
      // Access must be released even if closing or committing failed.
      mUnderFileSystemBlockStore.releaseAccess(sessionId, blockId);
    }
  }
  @Override
  public void cleanupSession(long sessionId) {
    // Reclaims session resources in both the local and UFS block stores.
    mBlockStore.cleanupSession(sessionId);
    mUnderFileSystemBlockStore.cleanupSession(sessionId);
  }
/**
* This class contains some metrics related to the block worker.
* This class is public because the metric names are referenced in
* {@link alluxio.web.WebInterfaceWorkerMetricsServlet}.
*/
@ThreadSafe
public static final class Metrics {
public static final String CAPACITY_TOTAL = "CapacityTotal";
public static final String CAPACITY_USED = "CapacityUsed";
public static final String CAPACITY_FREE = "CapacityFree";
public static final String BLOCKS_CACHED = "BlocksCached";
/**
* Registers metric gauges.
*
* @param blockWorker the block worker handle
*/
public static void registerGauges(final BlockWorker blockWorker) {
MetricsSystem.registerGaugeIfAbsent(MetricsSystem.getWorkerMetricName(CAPACITY_TOTAL),
new Gauge<Long>() {
@Override
public Long getValue() {
return blockWorker.getStoreMeta().getCapacityBytes();
}
});
MetricsSystem.registerGaugeIfAbsent(MetricsSystem.getWorkerMetricName(CAPACITY_USED),
new Gauge<Long>() {
@Override
public Long getValue() {
return blockWorker.getStoreMeta().getUsedBytes();
}
});
MetricsSystem.registerGaugeIfAbsent(MetricsSystem.getWorkerMetricName(CAPACITY_FREE),
new Gauge<Long>() {
@Override
public Long getValue() {
return blockWorker.getStoreMeta().getCapacityBytes() - blockWorker.getStoreMeta()
.getUsedBytes();
}
});
MetricsSystem.registerGaugeIfAbsent(MetricsSystem.getWorkerMetricName(BLOCKS_CACHED),
new Gauge<Integer>() {
@Override
public Integer getValue() {
return blockWorker.getStoreMetaFull().getNumberOfBlocks();
}
});
}
private Metrics() {} // prevent instantiation
}
}
| maboelhassan/alluxio | core/server/worker/src/main/java/alluxio/worker/block/DefaultBlockWorker.java | Java | apache-2.0 | 21,585 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ambari.server.state.alert;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.Type;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.UUID;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.reflect.TypeToken;
import com.google.inject.Singleton;
/**
 * The {@link AlertDefinitionFactory} class is used to construct
 * {@link AlertDefinition} instances from a variety of sources.
 */
@Singleton
public class AlertDefinitionFactory {
  /**
   * Logger.
   */
  private final static Logger LOG = LoggerFactory.getLogger(AlertDefinitionFactory.class);

  /**
   * Builder used for type adapter registration.
   */
  private final GsonBuilder m_builder = new GsonBuilder();

  /**
   * Thread safe deserializer.
   */
  private final Gson m_gson;

  /**
   * Constructor. Registers the {@link Source} adapter so the correct concrete
   * source subclass is instantiated based on the JSON {@code type} field.
   */
  public AlertDefinitionFactory() {
    m_builder.registerTypeAdapter(Source.class,
        new AlertDefinitionSourceAdapter());

    m_gson = m_builder.create();
  }

  /**
   * Gets a list of all of the alert definitions defined in the specified JSON
   * {@link File} for the given service. Each of the JSON files should have a
   * mapping between the service and the alerts defined for that service. This
   * is necessary since some services are combined in a single
   * {@code metainfo.xml} and only have a single directory on the stack.
   *
   * @param alertDefinitionFile
   *          the JSON file from the stack to read (not {@code null}).
   * @param serviceName
   *          the name of the service to extract definitions for (not
   *          {@code null}).
   * @return the definitions for the specified service, or an empty set.
   * @throws AmbariException
   *           if there was a problem reading the file or parsing the JSON.
   */
  public Set<AlertDefinition> getAlertDefinitions(File alertDefinitionFile,
      String serviceName) throws AmbariException {
    try {
      // the reader is closed by the delegate method below
      FileReader fileReader = new FileReader(alertDefinitionFile);
      return getAlertDefinitions(fileReader, serviceName);
    } catch (IOException ioe) {
      String message = "Could not read the alert definition file";
      LOG.error(message, ioe);
      throw new AmbariException(message, ioe);
    }
  }

  /**
   * Gets a list of all of the alert definitions defined in the resource pointed
   * to by the specified reader for the given service. There should have a
   * mapping between the service and the alerts defined for that service. This
   * is necessary since some services are combined in a single
   * {@code metainfo.xml} and only have a single directory on the stack.
   * <p/>
   * The supplied reader is closed when this method completes.
   *
   * @param reader
   *          the reader to read from (not {@code null}). This will be closed
   *          after reading is done.
   * @param serviceName
   *          the name of the service to extract definitions for (not
   *          {@code null}).
   * @return the definitions for the specified service, or an empty set.
   * @throws AmbariException
   *           if there was a problem reading or parsing the JSON.
   */
  public Set<AlertDefinition> getAlertDefinitions(Reader reader,
      String serviceName) throws AmbariException {
    // { MAPR : {definitions}, YARN : {definitions} }
    Map<String, Map<String, List<AlertDefinition>>> serviceDefinitionMap = null;

    try {
      Type type = new TypeToken<Map<String, Map<String, List<AlertDefinition>>>>() {}.getType();

      serviceDefinitionMap = m_gson.fromJson(reader, type);
    } catch (Exception e) {
      LOG.error("Could not read the alert definitions", e);
      throw new AmbariException("Could not read alert definitions", e);
    } finally {
      IOUtils.closeQuietly(reader);
    }

    Set<AlertDefinition> definitions = new HashSet<>();

    // Gson#fromJson returns null for an empty document; treat that the same as
    // a document that contains no definitions for this service
    if (null == serviceDefinitionMap) {
      return definitions;
    }

    // it's OK if the service doesn't have any definitions; this can happen if
    // 2 services are defined in a single metainfo.xml and only 1 service has
    // alerts defined
    Map<String, List<AlertDefinition>> definitionMap = serviceDefinitionMap.get(serviceName);

    if (null == definitionMap) {
      return definitions;
    }

    for (Entry<String, List<AlertDefinition>> entry : definitionMap.entrySet()) {
      for (AlertDefinition ad : entry.getValue()) {
        ad.setServiceName(serviceName);

        // the "service" key marks service-level alerts; any other key is the
        // name of the component that the alerts are scoped to
        if (!entry.getKey().equals("service")) {
          ad.setComponentName(entry.getKey());
        }
      }

      definitions.addAll(entry.getValue());
    }

    return definitions;
  }

  /**
   * Gets an {@link AlertDefinition} constructed from the specified
   * {@link AlertDefinitionEntity}.
   *
   * @param entity
   *          the entity to use to construct the {@link AlertDefinition} (not
   *          {@code null}).
   * @return the definition or {@code null} if it could not be coerced.
   */
  public AlertDefinition coerce(AlertDefinitionEntity entity) {
    if (null == entity) {
      return null;
    }

    AlertDefinition definition = new AlertDefinition();
    definition.setClusterId(entity.getClusterId());
    definition.setDefinitionId(entity.getDefinitionId());
    definition.setComponentName(entity.getComponentName());
    definition.setEnabled(entity.getEnabled());
    definition.setHostIgnored(entity.isHostIgnored());
    definition.setInterval(entity.getScheduleInterval());
    definition.setName(entity.getDefinitionName());
    definition.setScope(entity.getScope());
    definition.setServiceName(entity.getServiceName());
    definition.setLabel(entity.getLabel());
    definition.setHelpURL(entity.getHelpURL());
    definition.setDescription(entity.getDescription());
    definition.setUuid(entity.getHash());

    try {
      String sourceJson = entity.getSource();
      Source source = m_gson.fromJson(sourceJson, Source.class);
      definition.setSource(source);
    } catch (Exception exception) {
      LOG.error(
          "Unable to deserialize the alert definition source during coercion",
          exception);

      return null;
    }

    return definition;
  }

  /**
   * Gets an {@link AlertDefinitionEntity} constructed from the specified
   * {@link AlertDefinition}.
   * <p/>
   * The new entity will have a UUID already set.
   *
   * @param clusterId
   *          the ID of the cluster.
   * @param definition
   *          the definition to use to construct the
   *          {@link AlertDefinitionEntity} (not {@code null}).
   * @return the definition or {@code null} if it could not be coerced.
   */
  public AlertDefinitionEntity coerce(long clusterId, AlertDefinition definition) {
    if (null == definition) {
      return null;
    }

    AlertDefinitionEntity entity = new AlertDefinitionEntity();
    entity.setClusterId(clusterId);

    return merge(definition, entity);
  }

  /**
   * Merges the specified {@link AlertDefinition} into the
   * {@link AlertDefinitionEntity}, leaving any fields not merged intact.
   * <p/>
   * The merged entity will have a new UUID.
   *
   * @param definition
   *          the definition to merge into the entity (not {@code null}).
   * @param entity
   *          the entity to merge into (not {@code null}).
   * @return a merged, but not yet persisted entity, or {@code null} if the
   *         merge could not be done.
   */
  public AlertDefinitionEntity merge(AlertDefinition definition,
      AlertDefinitionEntity entity) {
    entity.setComponentName(definition.getComponentName());
    entity.setDefinitionName(definition.getName());
    entity.setEnabled(definition.isEnabled());
    entity.setHostIgnored(definition.isHostIgnored());
    entity.setLabel(definition.getLabel());
    entity.setDescription(definition.getDescription());
    entity.setScheduleInterval(definition.getInterval());
    entity.setHelpURL(definition.getHelpURL());
    entity.setServiceName(definition.getServiceName());

    // a missing scope defaults to ANY
    Scope scope = definition.getScope();
    if (null == scope) {
      scope = Scope.ANY;
    }

    entity.setScope(scope);

    return mergeSource(definition.getSource(), entity);
  }

  /**
   * Updates source and source type of <code>entity</code> from <code>source</code>.
   * Also updates UUID, which must be done for any change in to the entity for it
   * to take effect on the agents.
   *
   * @return the updated entity to be persisted, or null if alert source cannot be serialized to JSON
   */
  public AlertDefinitionEntity mergeSource(Source source, AlertDefinitionEntity entity) {
    entity.setSourceType(source.getType());

    try {
      String sourceJson = m_gson.toJson(source);
      entity.setSource(sourceJson);
    } catch (Exception e) {
      LOG.error("Unable to serialize the alert definition source during merge", e);
      return null;
    }

    assignNewUUID(entity);

    return entity;
  }

  /**
   * Updates <code>entity</code> with a new UUID.
   */
  private static void assignNewUUID(AlertDefinitionEntity entity) {
    if (entity != null) {
      entity.setHash(UUID.randomUUID().toString());
    }
  }

  /**
   * Gets an instance of {@link Gson} that can correctly serialize and
   * deserialize an {@link AlertDefinition}.
   *
   * @return a {@link Gson} instance (not {@code null}).
   */
  public Gson getGson() {
    return m_gson;
  }

  /**
   * Deserializes {@link Source} implementations by dispatching on the JSON
   * {@code type} field to the matching concrete {@link Source} subclass.
   */
  private static final class AlertDefinitionSourceAdapter implements JsonDeserializer<Source>{
    /**
     * {@inheritDoc}
     */
    @Override
    public Source deserialize(JsonElement json, Type typeOfT,
        JsonDeserializationContext context) throws JsonParseException {
      JsonObject jsonObj = (JsonObject) json;

      SourceType type = SourceType.valueOf(jsonObj.get("type").getAsString());
      Class<? extends Source> clazz = null;

      switch (type) {
        case METRIC:{
          clazz = MetricSource.class;
          break;
        }
        case AMS:{
          clazz = AmsSource.class;
          break;
        }
        case PORT:{
          clazz = PortSource.class;
          break;
        }
        case SCRIPT: {
          clazz = ScriptSource.class;
          break;
        }
        case AGGREGATE: {
          clazz = AggregateSource.class;
          break;
        }
        case PERCENT: {
          clazz = PercentSource.class;
          break;
        }
        case WEB: {
          clazz = WebSource.class;
          break;
        }
        case RECOVERY: {
          clazz = RecoverySource.class;
          break;
        }
        case SERVER:{
          clazz = ServerSource.class;
          break;
        }
        default:
          break;
      }

      if (null == clazz) {
        // unknown source types are logged and skipped rather than failing the
        // whole document
        LOG.warn(
            "Unable to deserialize an alert definition with source type {}",
            type);

        return null;
      }

      return context.deserialize(json, clazz);
    }
  }
}
| alexryndin/ambari | ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionFactory.java | Java | apache-2.0 | 12,241 |
/*
* Copyright 2018 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.spinnaker.rosco.services;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.jakewharton.retrofit.Ok3Client;
import com.netflix.spinnaker.config.OkHttp3ClientConfiguration;
import com.netflix.spinnaker.kork.core.RetrySupport;
import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import retrofit.RestAdapter;
import retrofit.converter.JacksonConverter;
@Configuration
public class ServiceConfig {
  /** Base URL of the clouddriver service; defaults to a local instance. */
  @Value("${services.clouddriver.base-url:http://localhost:7002}")
  String clouddriverBaseUrl;

  /** Retrofit HTTP logging verbosity (NONE, BASIC, HEADERS, FULL). */
  @Value("${retrofit.log-level:BASIC}")
  String retrofitLogLevel;

  /** OkHttp-backed Retrofit client built from the shared OkHttp configuration. */
  @Bean
  Ok3Client okClient(OkHttp3ClientConfiguration okHttpClientConfig) {
    return new Ok3Client(okHttpClientConfig.create().build());
  }

  /** Shared retry helper bean. */
  @Bean
  RetrySupport retrySupport() {
    return new RetrySupport();
  }

  // This should be service-agnostic if more integrations than clouddriver are used
  @Bean
  ClouddriverService clouddriverService(Ok3Client ok3Client) {
    return new RestAdapter.Builder()
        .setEndpoint(clouddriverBaseUrl)
        .setClient(ok3Client)
        .setConverter(new JacksonConverter(lenientMapper()))
        .setLogLevel(RestAdapter.LogLevel.valueOf(retrofitLogLevel))
        .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance())
        .build()
        .create(ClouddriverService.class);
  }

  /**
   * Builds a mapper that tolerates schema drift in clouddriver responses:
   * unknown enum values become null and unknown properties are ignored.
   */
  private static ObjectMapper lenientMapper() {
    return new ObjectMapper()
        .enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL)
        .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
  }
}
| spinnaker/rosco | rosco-core/src/main/groovy/com/netflix/spinnaker/rosco/services/ServiceConfig.java | Java | apache-2.0 | 2,415 |
const Discord = require('discord.js');
exports.run = async (client, message, args, level) => { // eslint-disable-line no-unused-vars
const scoreLevel = client.points.get(`${message.guild.id}-${message.author.id}`).level || 0;
let embed = new Discord.RichEmbed()
.setColor('GREEN')
.setAuthor('You are currently')
.setDescription(scoreLevel)
.setFooter(client.user.username, client.user.avatarURL)
!scoreLevel ? message.channel.send('You have no levels yet.') : message.channel.send(embed);
};
// Dispatch/permission configuration consumed by the command handler.
exports.conf = {
  enabled: true,
  guildOnly: true, // levels are tracked per guild, so the command is disabled in DMs
  aliases: [],
  permLevel: 0 // available to everyone
};
// Metadata displayed by the help command.
exports.help = {
  name: 'level',
  category: 'Fun',
  description: 'Well, let\'s see how many levels you have!',
  usage: 'level'
};
| NdT3Development/DiscordBot | commands/level.js | JavaScript | apache-2.0 | 717 |
package com.androsz.electricsleepbeta.app;
import android.app.ProgressDialog;
import android.content.Context;
import android.os.AsyncTask;
import android.widget.Toast;
import com.androsz.electricsleepbeta.R;
import com.androsz.electricsleepbeta.db.SleepSession;
public class DeleteSleepTask extends AsyncTask<Long, Void, Void> {
/**
*
*/
private final Context context;
private ProgressDialog progress;
/**
* @param context
* @param progress
* pass a non-null if you want a ProgressDialog to be managed by
* this Task's lifecycle
*/
DeleteSleepTask(Context context) {
this.context = context;
this.progress = new ProgressDialog(context);
}
@Override
protected Void doInBackground(final Long... params) {
for (Long rowId : params) {
if (rowId != null) {
context.getContentResolver().delete(SleepSession.CONTENT_URI,
SleepSession._ID + " =? ",
new String[] { Long.toString(rowId) });
}
}
return null;
}
@Override
protected void onPostExecute(final Void results) {
// mListView.removeAllViewsInLayout();
// getSupportLoaderManager().restartLoader(0,
// getLoaderArgs(getIntent(), false), HistoryActivity.this);
Toast.makeText(this.context,
this.context.getString(R.string.deleted_sleep_record),
Toast.LENGTH_SHORT).show();
if (this.progress != null && this.progress.isShowing()) {
this.progress.dismiss();
}
}
@Override
protected void onPreExecute() {
if (this.progress != null) {
this.progress.setMessage(this.context
.getString(R.string.deleting_sleep));
this.progress.show();
}
}
@Override
protected void onCancelled() {
super.onCancelled();
if (this.progress != null && this.progress.isShowing()) {
this.progress.dismiss();
}
}
} | jondwillis/ElectricSleep | src/com/androsz/electricsleepbeta/app/DeleteSleepTask.java | Java | apache-2.0 | 2,078 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
import org.junit.Test;
import java.io.IOException;
import java.io.StringReader;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
    // Default settings: split on case changes, hyphens, digits and
    // apostrophes; possessive "'s" is stripped.
    @Test
    public void testDefault() throws IOException {
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
                .put("path.home", createTempDir().toString())
                .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
                .build());
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
        String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
        String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }

    // catenate_words joins split word parts ("wi-fi" -> "wifi") while
    // generate_word_parts=false suppresses the individual pieces.
    @Test
    public void testCatenateWords() throws IOException {
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
                .put("path.home", createTempDir().toString())
                .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
                .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
                .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
                .build());
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
        String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
        String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }

    // catenate_numbers joins split number runs ("500-42" -> "50042") while
    // generate_number_parts=false suppresses the individual digits groups.
    @Test
    public void testCatenateNumbers() throws IOException {
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
                .put("path.home", createTempDir().toString())
                .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
                .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
                .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
                .build());
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
        String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
        String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }

    // catenate_all merges every mixed run ("wi-fi-4000" -> "wifi4000",
    // "j2se" -> "j2se") with both part generators disabled.
    @Test
    public void testCatenateAll() throws IOException {
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
                .put("path.home", createTempDir().toString())
                .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
                .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
                .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
                .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
                .build());
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
        String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
        String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }
@Test
public void testSplitOnCaseChange() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
.put("path.home", createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"PowerShot"};
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
@Test
public void testPreserveOriginal() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
.put("path.home", createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
@Test
public void testStemEnglishPossessive() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
.put("path.home", createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
/** Correct offset order when doing both parts and concatenation: PowerShot is a synonym of Power */
@Test
public void testPartsAndCatenate() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
.put("path.home", createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"Power", "PowerShot", "Shot" };
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
/** Back compat:
* old offset order when doing both parts and concatenation: PowerShot is a synonym of Shot */
@Test
public void testDeprecatedPartsAndCatenate() throws IOException {
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
.put("path.home", createTempDir().toString())
.put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
.put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
.put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
.put("index.analysis.filter.my_word_delimiter.version", "4.7")
.build());
TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
String source = "PowerShot";
String[] expected = new String[]{"Power", "Shot", "PowerShot" };
Tokenizer tokenizer = new WhitespaceTokenizer();
tokenizer.setReader(new StringReader(source));
assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
}
| vrkansagara/elasticsearch | src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java | Java | apache-2.0 | 10,241 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<title>Reference</title>
<link rel="stylesheet" href="ldoc.css" type="text/css" />
</head>
<body>
<div id="container">
<div id="product">
<div id="product_logo"></div>
<div id="product_name"><big><b></b></big></div>
<div id="product_description"></div>
</div> <!-- id="product" -->
<div id="main">
<!-- Menu -->
<div id="navigation">
<br/>
<h1>ldoc</h1>
<h2>Classes</h2>
<ul class="nowrap">
<li><a href="classes/LinkedList.html">LinkedList</a></li>
<li><a href="classes/LNode.html">LNode</a></li>
</ul>
</div>
<div id="content">
<h2>Classes</h2>
<table class="module_list">
<tr>
<td class="name" nowrap><a href="classes/LinkedList.html">LinkedList</a></td>
<td class="summary">Doubly-linked List</td>
</tr>
<tr>
<td class="name" nowrap><a href="classes/LNode.html">LNode</a></td>
<td class="summary">A doubly linked list Node</td>
</tr>
</table>
</div> <!-- id="content" -->
</div> <!-- id="main" -->
<div id="about">
<i>generated by <a href="http://github.com/stevedonovan/LDoc">LDoc 1.4.5</a></i>
<i style="float:right;">Last updated 2016-09-14 20:44:57 </i>
</div> <!-- id="about" -->
</div> <!-- id="container" -->
</body>
</html>
| zunware/LuaLinkedList | docs/index.html | HTML | apache-2.0 | 1,377 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import types_of_service
class as_external_lsa(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Contents of the AS External LSA
    """

    # Restrict instances to the YANG children plus pyangbind bookkeeping slots.
    __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service")

    _yang_name = "as-external-lsa"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both children are read-only (is_config=False) containers in the
        # openconfig-network-instance namespace.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        self.__types_of_service = YANGDynClass(
            base=types_of_service.types_of_service,
            is_container="container",
            yang_name="types-of-service",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # A single positional argument is interpreted as a source object to
        # copy changed elements from; it must expose every pyangbind element.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this node in the YANG tree; delegates to the parent when
        # attached, otherwise returns the absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "as-external-lsa",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)

        YANG Description: State parameters for the AS external LSA
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters for the AS external LSA
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset `state` to a fresh, unconfigured container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    def _get_types_of_service(self):
        """
        Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)

        YANG Description: Breakdown of External LSA contents specifying multiple
        TOS values
        """
        return self.__types_of_service

    def _set_types_of_service(self, v, load=False):
        """
        Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_types_of_service is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_types_of_service() directly.

        YANG Description: Breakdown of External LSA contents specifying multiple
        TOS values
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=types_of_service.types_of_service,
                is_container="container",
                yang_name="types-of-service",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """types_of_service must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__types_of_service = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_types_of_service(self):
        # Reset `types-of-service` to a fresh, unconfigured container.
        self.__types_of_service = YANGDynClass(
            base=types_of_service.types_of_service,
            is_container="container",
            yang_name="types-of-service",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Both children are operational state (config: false), so only read-only
    # properties are exposed; mutation goes through the _set_* methods.
    state = __builtin__.property(_get_state)
    types_of_service = __builtin__.property(_get_types_of_service)

    _pyangbind_elements = OrderedDict(
        [("state", state), ("types_of_service", types_of_service)]
    )
from . import state
from . import types_of_service
# NOTE: second auto-generated definition (this one generated from the
# openconfig-network-instance-l2 module view). It rebinds the module-level
# name `as_external_lsa`, shadowing the earlier definition in this file;
# this duplication is produced by the pyangbind generator, not hand-written.
class as_external_lsa(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Contents of the AS External LSA
    """

    # Restrict instances to the YANG children plus pyangbind bookkeeping slots.
    __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service")

    _yang_name = "as-external-lsa"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both children are read-only (is_config=False) containers in the
        # openconfig-network-instance namespace.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        self.__types_of_service = YANGDynClass(
            base=types_of_service.types_of_service,
            is_container="container",
            yang_name="types-of-service",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # A single positional argument is interpreted as a source object to
        # copy changed elements from; it must expose every pyangbind element.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this node in the YANG tree; delegates to the parent when
        # attached, otherwise returns the absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "as-external-lsa",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)

        YANG Description: State parameters for the AS external LSA
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters for the AS external LSA
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset `state` to a fresh, unconfigured container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    def _get_types_of_service(self):
        """
        Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)

        YANG Description: Breakdown of External LSA contents specifying multiple
        TOS values
        """
        return self.__types_of_service

    def _set_types_of_service(self, v, load=False):
        """
        Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_types_of_service is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_types_of_service() directly.

        YANG Description: Breakdown of External LSA contents specifying multiple
        TOS values
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=types_of_service.types_of_service,
                is_container="container",
                yang_name="types-of-service",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """types_of_service must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__types_of_service = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_types_of_service(self):
        # Reset `types-of-service` to a fresh, unconfigured container.
        self.__types_of_service = YANGDynClass(
            base=types_of_service.types_of_service,
            is_container="container",
            yang_name="types-of-service",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Both children are operational state (config: false), so only read-only
    # properties are exposed; mutation goes through the _set_* methods.
    state = __builtin__.property(_get_state)
    types_of_service = __builtin__.property(_get_types_of_service)

    _pyangbind_elements = OrderedDict(
        [("state", state), ("types_of_service", types_of_service)]
    )
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/__init__.py | Python | apache-2.0 | 19,446 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<title>MyBatis Generator Core 1.3.3-SNAPSHOT Reference</title>
<link rel="stylesheet" type="text/css" href="stylesheet.css" title="style" />
</head>
<body>
<div class="overview">
<ul>
<li class="selected">Overview</li>
<li>Package</li>
</ul>
</div>
<div class="framenoframe">
<ul>
<li>
<a href="index.html" target="_top">FRAMES</a>
</li>
<li>
<a href="overview-summary.html" target="_top">NO FRAMES</a>
</li>
</ul>
</div>
<h2>MyBatis Generator Core 1.3.3-SNAPSHOT Reference</h2>
<table class="summary">
<thead>
<tr>
<th>Packages</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a href="org/mybatis/generator/package-summary.html">org.mybatis.generator</a>
</td>
</tr>
<tr>
<td>
<a href="org/mybatis/generator/api/dom/java/package-summary.html">org.mybatis.generator.api.dom.java</a>
</td>
</tr>
<tr>
<td>
<a href="org/mybatis/generator/internal/util/package-summary.html">org.mybatis.generator.internal.util</a>
</td>
</tr>
</tbody>
</table>
<div class="overview">
<ul>
<li class="selected">Overview</li>
<li>Package</li>
</ul>
</div>
<div class="framenoframe">
<ul>
<li>
<a href="index.html" target="_top">FRAMES</a>
</li>
<li>
<a href="overview-summary.html" target="_top">NO FRAMES</a>
</li>
</ul>
</div>
<hr />
<div id="footer">
Copyright © 2010–2014 <a href="http://www.mybatis.org/">MyBatis.org</a>. All rights reserved.
</div>
</body>
</html> | hemingwang0902/doc.javake.cn | mybatis-generator-doc-zh/xref-test/overview-summary.html | HTML | apache-2.0 | 2,145 |
package mm4s.api
import java.io.File
import akka.actor.ActorSystem
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.stream.scaladsl.{FileIO, Source}
import mm4s.api.Streams._
import spray.json._
/**
*
*/
/**
 * Thin wrappers over the Mattermost `/files` REST endpoints.
 *
 * Each method builds an authenticated [[HttpRequest]] via the surrounding
 * `request`/`auth` helpers from [[Streams]]; execution happens downstream.
 */
object Filez {
  import FileModels._
  import FileProtocols._

  /** Fetch the raw contents of a previously uploaded file. */
  def get(path: FilePath, token: String)(implicit system: ActorSystem) = {
    request(s"/files/get/${fileurl(path)}") { r =>
      Marshal(path).to[MessageEntity].map(r.withHeaders(auth(token)).withEntity)
    }
  }

  /** Upload a file as multipart/form-data into the channel named in `fu`. */
  def put(fu: FileUpload, token: String)(implicit system: ActorSystem) = {
    val file = fu.file
    val formData = Multipart.FormData(
      Source(List(
        Multipart.FormData.BodyPart(
          "files",
          // Stream the file from disk in 100 kB chunks rather than buffering it in memory.
          HttpEntity(ContentTypes.`text/plain(UTF-8)`, file.length(), FileIO.fromFile(file, chunkSize = 100000)),
          Map("filename" -> file.getName)
        ),
        Multipart.FormData.BodyPart("channel_id", fu.channelId)
      ))
    )
    request("/files/upload") { r =>
      Marshal(formData).to[RequestEntity].map(r.withHeaders(auth(token)).withMethod(HttpMethods.POST).withEntity)
    }
  }

  /** Retrieve server-side metadata (size, mime type, ...) for a file. */
  def info(path: FilePath, token: String)(implicit system: ActorSystem) = {
    request(s"/files/get_info/${fileurl(path)}") { r =>
      Marshal(path).to[MessageEntity].map(r.withHeaders(auth(token)).withEntity)
    }
  }

  /** URL fragment addressing a file: `channel_id/user_id/filename`. */
  def fileurl(path: FilePath): String = s"${path.channel_id}/${path.user_id}/${path.filename}"
}
object FileModels {
  /** Request to upload `file` into the channel identified by `channelId`. */
  case class FileUpload(channelId: String, file: File)
  /** Server response listing the names of the successfully uploaded files. */
  case class FilesUploaded(filenames: Seq[String])
  /** Addressing triple used by the files API: channel, uploading user, filename. */
  case class FilePath(channel_id: String, user_id: String, filename: String)
  /** File metadata as returned by the `/files/get_info` endpoint. */
  case class FileInfo(filename: String, size: Int, extension: String, mime_type: String, has_preview_image: Boolean)
}
object FileProtocols extends DefaultJsonProtocol with SprayJsonSupport {
  import FileModels._
  // spray-json (de)serializers; the jsonFormatN arity must match the field
  // count of the corresponding case class.
  implicit val FilesUploadedFormat: RootJsonFormat[FilesUploaded] = jsonFormat1(FilesUploaded)
  implicit val FileInfoFormat: RootJsonFormat[FileInfo] = jsonFormat5(FileInfo)
  implicit val FilePathFormat: RootJsonFormat[FilePath] = jsonFormat3(FilePath)
}
| jw3/mm4s | api/src/main/scala/mm4s/api/Filez.scala | Scala | apache-2.0 | 2,222 |
#!/usr/bin/perl
#
# mon_service.pl - probe TCP services and restart them when the probe fails
#
# Jonathan Tsai
# Ver 1.02
#
# Usage : mon_service.pl <config_file>
#  * <config_file> : default is mon_service.conf
#
# Each non-comment config line holds six TAB-separated fields:
#   service_name  check_ip  check_port  input_cmd  expected_keyword  restart_cmd
#
# 1.00 (2008/10/24) First Version Release
#

# Program name/path derived from how the script was invoked.
$prgname = substr($0, rindex($0,'/')+1);
$prgpath = substr($0, 0, rindex($0,'/'));
$ver = "1.02 (2009/6/12)";

# Config file: first argument if given, otherwise the packaged default.
# (A stray "$t_conffile =" used to dangle in front of this assignment,
# silently chaining into it; the unused variable has been removed.)
$p_config = !defined($ARGV[0])?"/opt/trysrvtool/mon_service.conf":$ARGV[0];

# Read the config, dropping comment (#) lines and empty lines.
@arr_config=();
if (-e $p_config) {
    @tmp_config = split(/\n/, `/bin/cat $p_config | /bin/grep -v "#"`);
    foreach $v_config (@tmp_config) {
        if (length($v_config)>0) {
            push @arr_config, $v_config;
        }
    }
}

# Nothing configured (or no config file) -> nothing to monitor.
if (@arr_config==0) {
    exit;
}

$g_msg = "# $prgname Ver $ver \n";
$v_msg = "";

# Probe each service by piping its input command to `nc`; when the expected
# keyword is missing from the reply, run the restart command and record both
# the action and its output.
foreach $v_conf_line (@arr_config) {
    ($v_service_name, $v_check_ip, $v_check_port, $v_input_cmd, $v_except_msg_keyword, $v_run_cmd)=split(/\t/, $v_conf_line);
    $t_msg = `echo $v_input_cmd | nc $v_check_ip $v_check_port`;
    if (index($t_msg, $v_except_msg_keyword)<0) {
        $t_nowdatetime = `date +"%Y-%m-%d %H:%M:%S"`;
        $t_result=`$v_run_cmd`;
        $v_msg .= $t_nowdatetime." Run:[".$v_run_cmd."]\n";
        $v_msg .= $t_result;
    }
}

# Only produce output when at least one restart happened (cron-friendly:
# silence means everything was healthy).
if (length($v_msg)>0) {
    print($g_msg);
    print($v_msg);
    print("-----\n");
}
| tryweb/trysrvtool | mon_service.pl | Perl | apache-2.0 | 1,229 |
package com.sarality.app.view.list;
import android.util.Log;
import android.util.SparseArray;
import android.view.View;
/**
* Cache of elements inside a ListView row.
* <p>
* Makes it more efficient to set data on the view rather than do a lookupById
* each time.
*
* @author abhideep@ (Abhideep Singh)
*/
public class ListRowViewCache {

  private static final String TAG = "ListRowViewCache";

  // Cache keyed by view id; avoids repeated findViewById traversals per row bind.
  private final SparseArray<View> viewCache = new SparseArray<View>();

  /**
   * Returns the View for the given ViewId.
   * <p>
   * Returns the cached version or performs a lookup if no view is cached.
   *
   * @param rowView The View for the row to lookup elements in.
   * @param viewId Id of the view to lookup.
   * @return The element with the given Id.
   */
  public View getViewById(View rowView, int viewId) {
    View view = viewCache.get(viewId);
    if (view == null) {
      // Cache miss: warn so the developer knows caching this id would help,
      // then fall back to the (slower) hierarchy lookup.
      Log.w(TAG, "Row Item View with Id " + viewId + " not found in Cache. "
          + "Consider caching elements that you access in the renderer since it can improve "
          + "efficiency by 15%");
      return rowView.findViewById(viewId);
    } else {
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(TAG, "Found Row Item View with Id " + viewId + " in Cache.");
      }
      return view;
    }
  }

  /**
   * Lookup view in the given row View and cache it.
   *
   * @param rowView The View to lookup the view id.
   * @param viewId The id of the view to lookup. Ids not present in the row are silently skipped.
   */
  public void cacheViewWithId(View rowView, int viewId) {
    View view = rowView.findViewById(viewId);
    if (view != null) {
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(TAG, "Caching Row Item View with Id " + viewId + " in row with Id " + rowView.getId());
      }
      viewCache.put(viewId, view);
    }
  }

  @Override
  public String toString() {
    return viewCache.toString();
  }

  // NOTE(review): hashCode() delegates to the mutable cache while equals()
  // retains identity semantics; avoid using instances as keys in hash-based
  // collections while the cache is still being populated.
  @Override
  public int hashCode() {
    return viewCache.hashCode();
  }
}
| sarality/appblocks | src/main/java/com/sarality/app/view/list/ListRowViewCache.java | Java | apache-2.0 | 2,003 |
__author__ = 'thatcher'
from django.contrib import admin
# from django.contrib.auth.models import User
# from django.contrib.auth.admin import UserAdmin
# from django.contrib.sessions.
from django.contrib.sessions.models import Session
from .models import *
from base.forms import *
def images_thubmnail(self):
    # Render an <img> thumbnail of this Image for the admin changelist.
    # NOTE(review): the name is misspelled ("thubmnail"), but it is referenced
    # by ImageAdmin.list_display below -- renaming requires updating both.
    # NOTE(review): self.uri() / self.alt are interpolated without HTML
    # escaping; safe only if those values are trusted -- TODO confirm.
    return '<img style="max-height: 80px; width: auto;" src="{}" alt="{}" >'.format(self.uri(), self.alt)
    # return self.uri()

# Column header shown in the admin changelist.
images_thubmnail.short_description = 'Thumbnail'
# allow_tags lets the admin render the returned HTML unescaped; it was
# deprecated in Django 1.9 and removed in 2.0 (format_html/mark_safe there).
images_thubmnail.allow_tags = True
class TeamMemberAdmin(admin.ModelAdmin):
    # Changelist columns for TeamMember entries.
    model = TeamMember
    list_display = ['full_name', 'sort_weight', 'show_as_team']

admin.site.register(TeamMember, TeamMemberAdmin)
class NewsItemAdmin(admin.ModelAdmin):
    # Changelist columns for NewsItem entries, including publication status.
    model = NewsItem
    list_display = ['id', 'title', 'publication_date', 'show', 'author']

admin.site.register(NewsItem, NewsItemAdmin)
class EventAdmin(admin.ModelAdmin):
    # Changelist columns for Event entries.
    model = Event
    list_display = ['title', 'location', 'date_and_time']

admin.site.register(Event, EventAdmin)
class PostAdmin(admin.ModelAdmin):
    # Changelist columns for GenericPost entries.
    model = GenericPost
    list_display = ['title', 'category', 'publication_date']

admin.site.register(GenericPost, PostAdmin)
class CategoryAdmin(admin.ModelAdmin):
    # Changelist columns for PostCategory entries.
    model = PostCategory
    list_display = ['name', 'added_date']

admin.site.register(PostCategory, CategoryAdmin)
class ImageAdmin(admin.ModelAdmin):
    # Changelist columns for Image entries; the first column is the rendered
    # thumbnail produced by the module-level images_thubmnail callable.
    model = Image
    list_display = [images_thubmnail, 'alt', 'image_caption', 'image', ]

admin.site.register(Image, ImageAdmin)
class TagAdmin(admin.ModelAdmin):
    # Changelist columns for Tag entries.
    model = Tag
    list_display = ['name', 'added_date']

admin.site.register(Tag, TagAdmin)
| ZmG/trywsk | base/admin.py | Python | apache-2.0 | 1,654 |
#!/bin/sh
# Remove the MySQL server, client and shared packages.
# --purge also deletes the packages' configuration files; -y answers all
# prompts non-interactively (suitable for scripted use). Must run as root.
apt-get --purge -y remove mysql-server
apt-get --purge -y remove mysql-client
apt-get --purge -y remove mysql-common
| bayvictor/distributed-polling-system | bin/remove_only_mysql.sh | Shell | apache-2.0 | 120 |
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.uiDesigner.binding;
import com.intellij.lang.properties.IProperty;
import com.intellij.lang.properties.psi.PropertiesFile;
import com.intellij.lang.properties.psi.Property;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ReadAction;
import com.intellij.openapi.fileTypes.FileTypeRegistry;
import com.intellij.openapi.fileTypes.StdFileTypes;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.ProjectRootManager;
import com.intellij.openapi.util.NullableComputable;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.*;
import com.intellij.psi.impl.cache.CacheManager;
import com.intellij.psi.search.*;
import com.intellij.psi.search.searches.ReferencesSearch;
import com.intellij.psi.util.PsiTreeUtil;
import com.intellij.util.CommonProcessors;
import com.intellij.util.Processor;
import com.intellij.util.QueryExecutor;
import com.intellij.util.text.CharArrayUtil;
import org.jetbrains.annotations.NotNull;
import java.util.Arrays;
import java.util.List;
/**
 * Extends reference search so that usages made from GUI Designer {@code .form} files
 * (bound classes, bound fields, enum constants, resource-bundle properties and
 * properties files) are reported as {@link PsiReference}s when the corresponding
 * Java/properties element is searched.
 */
public class FormReferencesSearcher implements QueryExecutor<PsiReference, ReferencesSearch.SearchParameters> {
  @Override
  public boolean execute(@NotNull final ReferencesSearch.SearchParameters p, @NotNull final Processor<? super PsiReference> consumer) {
    SearchScope userScope = p.getScopeDeterminedByUser();
    if (!scopeCanContainForms(userScope)) return true;
    final PsiElement refElement = p.getElementToSearch();
    // The element may be invalidated concurrently; resolve its file under a read action.
    final PsiFile psiFile = ReadAction.compute(() -> {
      if (!refElement.isValid()) return null;
      return refElement.getContainingFile();
    });
    if (psiFile == null) return true;
    final VirtualFile virtualFile = psiFile.getVirtualFile();
    if (virtualFile == null) return true;
    // Restrict the search to the module (plus its dependencies) owning the element's file.
    final GlobalSearchScope[] scope = new GlobalSearchScope[1];
    Project project = ReadAction.compute(() -> {
      Project project1 = psiFile.getProject();
      Module module = ProjectRootManager.getInstance(project1).getFileIndex().getModuleForFile(virtualFile);
      if (module != null) {
        scope[0] = GlobalSearchScope.moduleWithDependenciesScope(module);
      }
      return project1;
    });
    if (scope[0] == null) {
      // Not part of any module content root -> no form file can reference it.
      return true;
    }
    final LocalSearchScope filterScope = userScope instanceof LocalSearchScope ? (LocalSearchScope)userScope : null;
    PsiManager psiManager = PsiManager.getInstance(project);
    // Dispatch by element kind. PsiEnumConstant is tested before PsiField deliberately:
    // an enum constant IS a PsiField, but it is matched in forms by its short name.
    if (refElement instanceof PsiPackage) {
      //no need to do anything
      //if (!UIFormUtil.processReferencesInUIForms(consumer, (PsiPackage)refElement, scope)) return false;
    }
    else if (refElement instanceof PsiClass) {
      if (!processReferencesInUIForms(consumer, psiManager,(PsiClass)refElement, scope[0], filterScope)) return false;
    }
    else if (refElement instanceof PsiEnumConstant) {
      if (!processEnumReferencesInUIForms(consumer, psiManager, (PsiEnumConstant)refElement, scope[0], filterScope)) return false;
    }
    else if (refElement instanceof PsiField) {
      if (!processReferencesInUIForms(consumer, psiManager, (PsiField)refElement, scope[0], filterScope)) return false;
    }
    else if (refElement instanceof IProperty) {
      if (!processReferencesInUIForms(consumer, psiManager, (Property)refElement, scope[0], filterScope)) return false;
    }
    else if (refElement instanceof PropertiesFile) {
      if (!processReferencesInUIForms(consumer, psiManager, (PropertiesFile)refElement, scope[0], filterScope)) return false;
    }
    return true;
  }
  /** Returns true when the scope might contain .form files (any non-local scope,
   *  any directory, or a local scope containing at least one GUI designer form). */
  private static boolean scopeCanContainForms(SearchScope scope) {
    if (!(scope instanceof LocalSearchScope)) return true;
    LocalSearchScope localSearchScope = (LocalSearchScope) scope;
    final PsiElement[] elements = localSearchScope.getScope();
    for (final PsiElement element : elements) {
      if (element instanceof PsiDirectory) return true;
      boolean isForm = ReadAction.compute(() -> {
        PsiFile file;
        if (element instanceof PsiFile) {
          file = (PsiFile)element;
        }
        else {
          if (!element.isValid()) return false;
          file = element.getContainingFile();
        }
        return file.getFileType() == StdFileTypes.GUI_DESIGNER_FORM;
      });
      if (isForm) return true;
    }
    return false;
  }
  /** Finds references to {@code aClass} in all forms bound to its qualified name. */
  private static boolean processReferencesInUIForms(Processor<? super PsiReference> processor,
                                                    PsiManager psiManager, final PsiClass aClass,
                                                    GlobalSearchScope scope, final LocalSearchScope filterScope) {
    String className = getQualifiedName(aClass);
    return className == null || processReferencesInUIFormsInner(className, aClass, processor, scope, psiManager, filterScope);
  }
  /** Qualified name of {@code aClass}, or null if the class is invalid/anonymous; runs in a read action. */
  public static String getQualifiedName(final PsiClass aClass) {
    return ReadAction.compute(() -> {
      if (!aClass.isValid()) return null;
      return aClass.getQualifiedName();
    });
  }
  /** Finds references to an enum constant in forms; enum constants are matched by short name. */
  private static boolean processEnumReferencesInUIForms(Processor<? super PsiReference> processor,
                                                        PsiManager psiManager, final PsiEnumConstant enumConstant,
                                                        GlobalSearchScope scope, final LocalSearchScope filterScope) {
    String className = ReadAction.compute(() -> enumConstant.getName());
    return processReferencesInUIFormsInner(className, enumConstant, processor, scope, psiManager, filterScope);
  }
  /** Shared worker: collects forms bound to {@code name} and scans them for references to {@code element}. */
  private static boolean processReferencesInUIFormsInner(String name,
                                                         PsiElement element,
                                                         Processor<? super PsiReference> processor,
                                                         GlobalSearchScope scope1,
                                                         PsiManager manager,
                                                         final LocalSearchScope filterScope) {
    GlobalSearchScope scope = GlobalSearchScope.projectScope(manager.getProject()).intersectWith(scope1);
    List<PsiFile> files = FormClassIndex.findFormsBoundToClass(manager.getProject(), name, scope);
    return processReferencesInFiles(files, manager, name, element, filterScope, processor);
  }
  /** Finds references to a bound field by name, in forms bound to its containing class. */
  private static boolean processReferencesInUIForms(Processor<? super PsiReference> processor,
                                                    PsiManager psiManager,
                                                    PsiField field,
                                                    GlobalSearchScope scope1,
                                                    LocalSearchScope filterScope) {
    GlobalSearchScope scope = GlobalSearchScope.projectScope(psiManager.getProject()).intersectWith(scope1);
    PsiClass containingClass = ReadAction.compute(() -> field.getContainingClass());
    if (containingClass == null) return true;
    String fieldName = ReadAction.compute(() -> field.getName());
    final List<PsiFile> files = FormClassIndex.findFormsBoundToClass(psiManager.getProject(), containingClass, scope);
    return processReferencesInFiles(files, psiManager, fieldName, field, filterScope, processor);
  }
  /**
   * Scans the raw text of {@code file} for occurrences of {@code name} and feeds every
   * occurrence that resolves to {@code element} into {@code processor}.
   * Returns false as soon as the processor asks to stop.
   */
  private static boolean processReferences(final Processor<? super PsiReference> processor,
                                           final PsiFile file,
                                           String name,
                                           final PsiElement element,
                                           final LocalSearchScope filterScope) {
    // Read the file contents (null = file filtered out by the user's local scope).
    CharSequence chars = ApplicationManager.getApplication().runReadAction((NullableComputable<CharSequence>)() -> {
      if (filterScope != null) {
        boolean isInScope = false;
        for(PsiElement filterElement: filterScope.getScope()) {
          if (PsiTreeUtil.isAncestor(filterElement, file, false)) {
            isInScope = true;
            break;
          }
        }
        if (!isInScope) return null;
      }
      return file.getViewProvider().getContents();
    });
    if (chars == null) return true;
    int index = 0;
    // For a qualified name, position the reference lookup at the start of the last
    // segment (one past the last '.'); for a simple name lastIndexOf returns -1,
    // so index + offset + 1 is exactly the match start.
    final int offset = name.lastIndexOf('.');
    while(true){
      index = CharArrayUtil.indexOf(chars, name, index);
      if (index < 0) break;
      final int finalIndex = index;
      final Boolean searchDone = ApplicationManager.getApplication().runReadAction((NullableComputable<Boolean>)() -> {
        final PsiReference ref = file.findReferenceAt(finalIndex + offset + 1);
        if (ref != null && ref.isReferenceTo(element)) {
          return processor.process(ref);
        }
        return true;
      });
      if (!searchDone.booleanValue()) return false;
      index++;
    }
    return true;
  }
  /** Finds references to a single resource-bundle property, by property key, in all forms. */
  private static boolean processReferencesInUIForms(final Processor<? super PsiReference> processor,
                                                    PsiManager psiManager,
                                                    final Property property,
                                                    final GlobalSearchScope globalSearchScope,
                                                    final LocalSearchScope filterScope) {
    final Project project = psiManager.getProject();
    final GlobalSearchScope scope = GlobalSearchScope.projectScope(project).intersectWith(globalSearchScope);
    String name = ReadAction.compute(() -> property.getName());
    if (name == null) return true;
    psiManager.startBatchFilesProcessingMode();
    try {
      // Collect only .form files whose plain text mentions the property key.
      CommonProcessors.CollectProcessor<VirtualFile> collector = new CommonProcessors.CollectProcessor<VirtualFile>() {
        @Override
        protected boolean accept(VirtualFile virtualFile) {
          return FileTypeRegistry.getInstance().isFileOfType(virtualFile, StdFileTypes.GUI_DESIGNER_FORM);
        }
      };
      PsiSearchHelper.getInstance(project).processCandidateFilesForText(scope, UsageSearchContext.IN_PLAIN_TEXT, true, name, collector);
      for (final VirtualFile vfile:collector.getResults()) {
        ProgressManager.checkCanceled();
        // NOTE(review): findFile may return null (e.g. file deleted meanwhile); processReferences
        // would then NPE on file.getViewProvider() when filterScope is null — confirm/guard.
        PsiFile file = ReadAction.compute(() -> PsiManager.getInstance(project).findFile(vfile));
        if (!processReferences(processor, file, name, property, filterScope)) return false;
      }
    }
    finally {
      psiManager.finishBatchFilesProcessingMode();
    }
    return true;
  }
  /** Finds references to a whole properties file, by its resource-bundle base name. */
  private static boolean processReferencesInUIForms(final Processor<? super PsiReference> processor,
                                                    PsiManager psiManager,
                                                    final PropertiesFile propFile,
                                                    final GlobalSearchScope globalSearchScope,
                                                    final LocalSearchScope filterScope) {
    final Project project = psiManager.getProject();
    GlobalSearchScope scope = GlobalSearchScope.projectScope(project).intersectWith(globalSearchScope);
    final String baseName = ReadAction.compute(() -> propFile.getResourceBundle().getBaseName());
    PsiFile containingFile = ReadAction.compute(() -> propFile.getContainingFile());
    List<PsiFile> files = Arrays.asList(CacheManager.SERVICE.getInstance(project).getFilesWithWord(baseName, UsageSearchContext.IN_PLAIN_TEXT, scope, true));
    return processReferencesInFiles(files, psiManager, baseName, containingFile, filterScope, processor);
  }
  /** Runs processReferences over every GUI designer form in {@code files}, in batch mode. */
  private static boolean processReferencesInFiles(List<PsiFile> files,
                                                  PsiManager psiManager, String baseName,
                                                  PsiElement element,
                                                  LocalSearchScope filterScope,
                                                  Processor<? super PsiReference> processor) {
    psiManager.startBatchFilesProcessingMode();
    try {
      for (PsiFile file : files) {
        ProgressManager.checkCanceled();
        if (file.getFileType() != StdFileTypes.GUI_DESIGNER_FORM) continue;
        if (!processReferences(processor, file, baseName, element, filterScope)) return false;
      }
    }
    finally {
      psiManager.finishBatchFilesProcessingMode();
    }
    return true;
  }
}
| leafclick/intellij-community | plugins/ui-designer/src/com/intellij/uiDesigner/binding/FormReferencesSearcher.java | Java | apache-2.0 | 12,535 |
#!/usr/bin/python
import json
import time
import parcon
import operator
import pprint
import os
import sys
import getopt
import re
import optparse
import string
import hashlib
import parse_objc as parser
import sign
#### tool version:
VERSION = parser.VERSION
# Signature string embedded into generated files ("@tool ..." header).
VERSION_STR = sign.source_file_signature(__file__, VERSION)
#### shortcuts:
# Short aliases for helpers re-exported from the parse_objc module.
ID = parser.KEY_ID
pretty_json = parser.pretty_json
pretty_pprint = parser.pretty_pprint
log = parser.log
log_info = parser.log_info
################## Templates ############################
#########################################################
####
notifier_initializer_declaration_event_blocks_template = """
/**
Initialize an instance of the ${notifier_name} with the given event blocks (can be nil).
IMPORTANT:
1. to avoid retain cycles, the addedFirst/removedLast blocks should not reference '__strong self'. '__weak self' is fine.
2. rule of thumb: in case one of the addedFirst/removedLast blocks is provided, chances are the other block is needed as well.
@param addedFirst a block to be invoked after the first subscription has been added
@param removedLast a block to be invoked after the last subscription has been removed
*/
- (instancetype)initWithFirstSubscriptionAdded:(${notifier_name}FirstSubscriptionAdded)addedFirst
lastSubscriptionRemoved:(${notifier_name}LastSubscriptionRemoved)removedLast
""".strip()
####
notifier_initializer_declaration_template = """
/**
Initialize an instance of the ${notifier_name}
*/
- (instancetype)init
""".strip()
####
notifier_initializer_implementation_event_blocks_template = """
{
self = [super init];
if (self) {
_baseImplementation = [[WNNotifierBase alloc] initAtomic:${is_notifier_atomic} /* atomic */
firstSubscriptionBlock:addedFirst
lastSubscriptionBlock:removedLast];
}
return self;
}
- (instancetype)init
{
NSAssert(NO,
@"ERROR: please use: initWithFirstSubscriptionAdded:lastSubscriptionRemoved: to init this object");
return nil;
}
""".strip()
####
notifier_initializer_implementation_template = """
{
self = [super init];
if (self) {
_baseImplementation = [[WNNotifierBase alloc] initAtomic:${is_notifier_atomic}
firstSubscriptionBlock:nil
lastSubscriptionBlock:nil];
}
return self;
}
""".strip()
####
enumerator_typedef_template = """
typedef void (^${notifier_name}Visitor)(${notifier_name}Subcription* subscription)
""".strip()
####
first_subscription_added_typedef_template = """
typedef void (^${notifier_name}FirstSubscriptionAdded)(${notifier_name}Subcription* subscription)
""".strip()
####
last_subscription_removed_typedef_template = """
typedef void (^${notifier_name}LastSubscriptionRemoved)(${notifier_name}Subcription* subscription)
""".strip()
####
event_processor_typedef_template = """
typedef void (^${notifier_name}EventProcessor)(SEL selector, NSDictionary* arguments)
""".strip()
####
event_processor_property_template = """
/**
a block to process the notified events as a sequence of (SEL, NSDictionary* arguments) tuples.
a perfect use case for this feature is a file / network logger of events.
IMPORTANT: 1. even though this is a 'readwrite' property,
it's unadvised to write this property more than once.
2. to avoid a retain cycle, the block should avoid
referencing '__strong self', and prefer '__weak self' instead.
*/
@property (copy, readwrite) ${notifier_name}EventProcessor eventProcessor;
""".strip()
###
notifier_interface_template = """
@interface ${notifier_name} : NSObject <${listener_name}>
${notifier_initializer_declaration};
/**
Register the given subscription object ${listener_name} to be notified.
The notifications will be delivered to subscription->listener
for the lifecycle of the provided subscription object.
IMPORTANT: This API is NOT idempotent.
@param subscription - subscription object to be added.
*/
-(void)addSubscription:(${notifier_name}Subcription *)subscription;
/**
Unregister the given subscription object ${listener_name} from being notified.
@param subscription - subscription object to be removed
*/
-(void)removeSubscription:(${notifier_name}Subcription *)subscription;
${enumerator_declaration}
${event_processor_property}
@end
""".strip()
###
notifier_subscription_listener_context_property_template = """
@property (atomic, readonly, ${listener_context_ref}) id listenerContext
""".strip()
###
notifier_subscription_initializer_declaration_with_context_template = """
- (instancetype)initWithListener:(id <${listener_name}>)listener
listenerQueue:(dispatch_queue_t)listenerQueue
listenerContext:(id)listenerContext
""".strip()
###
notifier_subscription_initializer_declaration_no_context_template = """
- (instancetype)initWithListener:(id <${listener_name}>)listener
listenerQueue:(dispatch_queue_t)listenerQueue
""".strip()
###
notifier_subscription_interface_template = """
@interface ${notifier_name}Subcription : NSObject
${notifier_subscription_initializer_declaration}
@property (atomic, readonly, ${listener_ref}) id <${listener_name}> listener;
@property (atomic, readonly, strong) dispatch_queue_t listenerQueue;
${notifier_subscription_listener_context_property}
@end
"""
###
notifier_subscription_implementation_template = """
@implementation ${notifier_name}Subcription
${notifier_subscription_initializer_declaration}
{
self = [super init];
if (self) {
_listener = listener;
_listenerQueue = listenerQueue;
${notifier_subscription_implementation_extension}
}
return self;
}
@end
""".strip()
####
typedefs_template = """
${enumerator_typedef}${first_subscription_added_typedef}${last_subscription_removed_typedef}
${event_processor_typedef}
""".strip()
####
documentation_header_template = """
/**
Purpose:
========
Notifier for the ${listener_name} protocol defined in: ${listener_base_filename}
Annotations Used:
=================
${annotation_as_json_string}
*/
""".strip()
####
file_template_notifier_h = """
// @tool ${generated_by}
// @input_hash ${input_file_hash}
#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>
#import "${listener_name}.h"
${documentation_header}
@class ${notifier_name}Subcription;
${typedefs}${notifier_interface}${notifier_subscription_interface}
""".strip()
####
file_template_notifier_m = """
// @tool ${generated_by}
// @input_hash ${input_file_hash}
#if ! __has_feature(objc_arc)
#error This file must be compiled with ARC. Use -fobjc-arc flag (or convert project to ARC).
#endif
#import <dispatch/dispatch.h>
#import "${notifier_name}.h"
#import <WNNotifier/WNNotifierBase.h>
${notifier_subscription_implementation}
@implementation ${notifier_name} {
WNNotifierBase* _baseImplementation;
}
${notifier_initializer_declaration}
${notifier_initializer_implementation}
-(void)addSubscription:(${notifier_name}Subcription *)subscription
{
[_baseImplementation addSubscription:subscription];
}
-(void)removeSubscription:(${notifier_name}Subcription *)subscription
{
[_baseImplementation removeSubscription:subscription];
}
${enumerator_implementation}
${protocol_implementation}
@end
""".strip()
####
enumerator_implementation_template = """
-(void)enumerateSubscriptionsUsingBlock:(${notifier_name}Visitor)visitor
{
if (!visitor) {
return;
}
[_baseImplementation enumerateSubscriptionsUsingBlock:^bool(${notifier_name}Subcription * subscription) {
visitor(subscription);
return ((id<${listener_name}>)subscription.listener) != nil;
}];
}
""".strip()
####
enumerator_declaration_template = """
/**
Enumerate the current subscriptions collection with the given visitor block.
@param visitor - the block to be used to enumerate the current set of subscriptions
*/
-(void)enumerateSubscriptionsUsingBlock:(${notifier_name}Visitor)visitor;
""".strip()
####
method_event_processor_implementation_template = """
if (_eventProcessor) {
_eventProcessor(_cmd,
@{${event_dictionary_content}
});
}
""".strip()
####
method_required_implementation_template = """
${mdeclaration}
{
${method_event_processor_implementation}
[_baseImplementation enumerateSubscriptionsUsingBlock:^(${notifier_name}Subcription * subscription) {
return WNNotifierBaseNotify(
subscription.listener,
subscription.listenerQueue,
^(id<${listener_name}> listener) {
[listener ${minvocation}];
});
}];
}
""".strip()
####
method_optional_implementation_template = """
${mdeclaration}
{
${method_event_processor_implementation}
[_baseImplementation enumerateSubscriptionsUsingBlock:^(${notifier_name}Subcription * subscription) {
return WNNotifierBaseNotify(
subscription.listener,
subscription.listenerQueue,
^(id<${listener_name}> listener) {
if ([listener respondsToSelector:@selector(${mselector})]) {
[listener ${minvocation}];
}
});
}];
}
""".strip()
####
def verify_annotation(annotation):
    """Sanity-check a WNNotifierGenerate annotation dict; raises Exception on any bad value."""
    log_info("annotation, %s" % annotation)
    atomic = annotation["atomic"]
    if atomic not in (True, False):
        raise Exception("atomic : can only be 'true' or 'false', not, %s" % atomic)
    listener_ref = annotation["listener-ref"]
    if listener_ref not in ("weak", "strong"):
        raise Exception("listener-ref : can only be 'weak' or 'strong', not, %s" % listener_ref)
    event_blocks = annotation["event-blocks"]
    if event_blocks not in (True, False):
        raise Exception("event-blocks : can only be 'True' or 'False', not, %s" % event_blocks)
    event_processor = annotation["event-processor-block"]
    if event_processor not in (True, False):
        raise Exception("event-processor-block : can only be 'True' or 'False', not, %s" % event_processor)
    enumerate_flag = annotation["enumerate"]
    if enumerate_flag not in (True, False):
        raise Exception("enumerate : can only be 'True' or 'False', not, %s" % enumerate_flag)
    context_keyword = annotation["listener-context-keyword"]
    if len(context_keyword) > 0 and not context_keyword.isalpha():
        raise Exception("listener-context-keyword : should be a all alpha word, not, %s" % context_keyword)
    context_ref = annotation["listener-context-ref"]
    if context_ref not in ("weak", "strong", ""):
        raise Exception("listener-context-ref : can only be 'weak' or 'strong' or '', not, %s" % context_ref)
####
def gen_event_processor_implementation(annotation, method):
    """
    generate an event dictionary for the given 'method'
    @param annotation the (already verified) WNNotifierGenerate annotation dict
    @param method parsed method description (must contain 'mkeywords')
    @return the ObjC snippet invoking _eventProcessor with a (SEL, arguments) tuple
    """
    event_dictionary_content = []
    for keyword in method['mkeywords']:
        keyword_name = keyword["keyword"]
        keyword_type = keyword["type"]
        # dict.has_key() is Python-2-only (removed in Python 3); .get() truthiness
        # is equivalent to has_key("arg") and keyword['arg'].
        if keyword.get("arg"):
            at = ""
            # primitive values must be boxed with the '@(...)' ObjC literal syntax
            if keyword_type["name"] in parser.primitive_type_names():
                at = "@"
            keyword_arg = "WNNotifierBaseConvertToNSNull(" + at + "(" + keyword["arg"] + ")" + ")"
        else:
            # keywords that take no argument are represented as NSNull
            keyword_arg = "[NSNull null]"
        event_dictionary_content.append(
            string.Template("""@"${keyword_name}" : ${keyword_arg}, """)
            .substitute(
                { "keyword_name" : keyword_name,
                  "keyword_arg" : keyword_arg}))
    event_dictionary_content = "\n ".join(event_dictionary_content)
    method_event_processor_implementation = string.Template(
        method_event_processor_implementation_template).substitute(
        event_dictionary_content=event_dictionary_content)
    return method_event_processor_implementation
####
def gen_notifier_v2_for_protocol(options, filename, objects, prot_object):
    """
    Generate the <Protocol>Notifier .h/.m pair for one parsed @protocol.
    @param options parsed command line options (may override the notifier name)
    @param filename path of the input protocol header (output goes next to it)
    @param objects all parsed objects of the input file (unused except for context)
    @param prot_object the parsed protocol object to generate a notifier for
    """
    parser.protocol_methods_update_decorations(prot_object)
    output_dir, base_filename = os.path.split(filename)
    listener_name = prot_object["name"]
    notifier_name = listener_name + "Notifier"
    if options.notifier_name:
        notifier_name = options.notifier_name
    # get the annotation:
    # defaults below are overlaid with the JSON payload of WNNotifierGenerate(...).
    annotation_default = {
        "atomic" : False,
        "listener-ref" : "weak",
        "event-blocks" : False,
        "enumerate" : False,
        "listener-context-keyword" : "",
        "listener-context-ref" : "",
        "event-processor-block" : False
    }
    annotation = annotation_default.copy()
    if "json" in prot_object["WNNotifierGenerate"]:
        annotation.update(prot_object["WNNotifierGenerate"]["json"])
    verify_annotation(annotation)
    protocol_implementation = ""
    methods = prot_object["methods"]
    # build up the implementation, method by method
    # NOTE(review): template_params is first assigned inside this loop; a protocol
    # with zero methods would raise NameError at the update() call below — confirm
    # that parsed protocols always contain at least one method.
    for method in methods:
        # default template params:
        template_params = {}
        # override the argument for listener-context-keyword
        override_arg_for_keywords = {}
        keyword_name = annotation["listener-context-keyword"]
        if len(keyword_name):
            # fall back to the subscription's own context when the caller passes nil
            keyword_arg = "%s ? %s : subscription.listenerContext" % (
                keyword_name,
                keyword_name)
            override_arg_for_keywords = {keyword_name : keyword_arg}
        # build a declaration, invocation and a selector for this method:
        (mdeclaration, minvocation, mselector) = parser.protocol_method_recostruct(
            method,
            override_arg_for_keywords)
        # generate the event processor code:
        method_event_processor_implementation = ""
        if annotation["event-processor-block"]:
            method_event_processor_implementation = \
                gen_event_processor_implementation(annotation, method)
        # function implementation:
        template_string = method_required_implementation_template
        if method["decoration"] == ["@optional"]:
            template_string = method_optional_implementation_template
        template_string = template_string.strip()
        # template parameters:
        template_params.update({
            "listener_name" : listener_name,
            "notifier_name" : notifier_name,
            "mdeclaration" : mdeclaration,
            "minvocation" : minvocation,
            "mselector" : mselector,
            "method_event_processor_implementation" : method_event_processor_implementation,
        })
        # method implementation:
        method_implementation = parser.remove_empty_lines(
            string.Template(template_string).substitute(template_params))
        # keep going:
        protocol_implementation += "\n\n" + method_implementation
    # hash the input file:
    # the hash is embedded in the output so stale generated files can be detected.
    input_file_hash = base_filename + ":" + sign.sign_data(open(filename, "r").read())
    is_notifier_atomic = "NO"
    if annotation["atomic"]:
        is_notifier_atomic = "YES"
    # requested ref types:
    listener_ref = annotation["listener-ref"]
    listener_context_ref = annotation["listener-context-ref"]
    # embed the annotations into the generated file:
    annotation_as_json_string = "WNNotifierGenerate(%s)" % pretty_json(annotation)
    # basic params:
    template_params.update({
        "generated_by" : VERSION_STR,
        "notifier_name" : notifier_name,
        "listener_name" : listener_name,
        "is_notifier_atomic" : is_notifier_atomic,
        "listener_ref" : listener_ref,
        "listener_context_ref" : listener_context_ref,
        "annotation_as_json_string" : annotation_as_json_string,
        "listener_base_filename" : base_filename
    })
    # enumerators:
    # only emitted when the 'enumerate' annotation is on.
    enumerator_implementation = ""
    enumerator_declaration = ""
    enumerator_typedef = ""
    if annotation["enumerate"]:
        enumerator_implementation = string.Template(enumerator_implementation_template).substitute(template_params)
        enumerator_declaration = string.Template(enumerator_declaration_template).substitute(template_params)
        enumerator_typedef = string.Template(enumerator_typedef_template).substitute(template_params) + ";\n"
    # event blocks:
    # choose between the plain -init and the event-blocks designated initializer.
    notifier_initializer_declaration = ""
    notifier_initializer_implementation = ""
    declaration_template = notifier_initializer_declaration_template
    implementation_template = notifier_initializer_implementation_template
    first_subscription_added_typedef = ""
    last_subscription_removed_typedef = ""
    if annotation["event-blocks"]:
        declaration_template = notifier_initializer_declaration_event_blocks_template
        implementation_template = notifier_initializer_implementation_event_blocks_template
        first_subscription_added_typedef = string.Template(first_subscription_added_typedef_template).substitute(template_params) + ";\n"
        last_subscription_removed_typedef = string.Template(last_subscription_removed_typedef_template).substitute(template_params) + ";\n"
    notifier_initializer_declaration = string.Template(declaration_template).substitute(template_params)
    notifier_initializer_implementation = string.Template(implementation_template).substitute(template_params)
    # listener context: optional per-subscription context object.
    notifier_subscription_listener_context_property = ""
    notifier_subscription_implementation_extension = ""
    notifier_subscription_initializer_declaration = string.Template(
        notifier_subscription_initializer_declaration_no_context_template).substitute(template_params) + ";"
    if len(annotation["listener-context-ref"]):
        notifier_subscription_listener_context_property = string.Template(
            notifier_subscription_listener_context_property_template).substitute(template_params) + ";"
        notifier_subscription_initializer_declaration = string.Template(
            notifier_subscription_initializer_declaration_with_context_template).substitute(template_params) + ";"
        notifier_subscription_implementation_extension = "_listenerContext = listenerContext;"
    # event processors:
    event_processor_typedef = ""
    event_processor_property = ""
    if annotation["event-processor-block"]:
        event_processor_typedef = string.Template(event_processor_typedef_template).substitute(template_params) + ";"
        event_processor_property = string.Template(event_processor_property_template).substitute(template_params)
    # populate the templates, and write the files:
    template_params.update({
        "protocol_implementation" : protocol_implementation,
        "input_file_hash" : input_file_hash,
        # enumerator:
        "enumerator_implementation" : enumerator_implementation,
        "enumerator_declaration" : enumerator_declaration,
        "enumerator_typedef" : enumerator_typedef,
        # initializer:
        "notifier_initializer_declaration" : notifier_initializer_declaration,
        "notifier_initializer_implementation" : notifier_initializer_implementation,
        "first_subscription_added_typedef" : first_subscription_added_typedef,
        "last_subscription_removed_typedef" : last_subscription_removed_typedef,
        # listener context:
        "notifier_subscription_listener_context_property" : notifier_subscription_listener_context_property,
        "notifier_subscription_initializer_declaration" : notifier_subscription_initializer_declaration,
        "notifier_subscription_implementation_extension" : notifier_subscription_implementation_extension,
        # event processor:
        "event_processor_typedef" : event_processor_typedef,
        "event_processor_property" : event_processor_property,
    })
    # subscription object implementation:
    notifier_subscription_implementation = parser.remove_empty_lines(
        string.Template(
            notifier_subscription_implementation_template).substitute(
                template_params))
    template_params.update({
        "notifier_subscription_implementation" : notifier_subscription_implementation,
    })
    # subscription object interface:
    notifier_subscription_interface = string.Template(
        notifier_subscription_interface_template).substitute(
            template_params)
    # notifier interface:
    notifier_interface = string.Template(
        notifier_interface_template).substitute(
            template_params)
    # typedef section:
    typedefs = string.Template(
        typedefs_template).substitute(
            template_params)
    # doc header:
    documentation_header = string.Template(
        documentation_header_template).substitute(
            template_params)
    # clear out some spaces:
    notifier_subscription_interface = parser.remove_empty_lines(notifier_subscription_interface) + "\n\n"
    notifier_interface = parser.remove_empty_lines(notifier_interface) + "\n\n"
    typedefs = parser.remove_empty_lines(typedefs)
    if len(typedefs):
        typedefs += "\n\n"
    # extend the template params:
    template_params.update({
        "notifier_subscription_interface" : notifier_subscription_interface,
        "notifier_interface" : notifier_interface,
        "typedefs" : typedefs,
        "documentation_header" : documentation_header
    })
    # write the files:
    parser.write_class_files( output_dir,
                              notifier_name,
                              string.Template(file_template_notifier_h).substitute(template_params),
                              string.Template(file_template_notifier_m).substitute(template_params))
####
def get_objects_with_id(objects, id):
    """Return every parsed object whose ID field equals the given id."""
    return [candidate for candidate in objects if candidate[ID] == id]
####
def gen_notifier_v2(options, filename):
    """
    Parse one protocol header and generate its notifier .h/.m pair.
    @param options parsed command line options
    @param filename path of the input protocol header
    @raise Exception if the file does not declare exactly one protocol
    """
    in_data, in_lines = parser.read_file(filename)
    objects = parser.process_input_lines(options, in_lines)
    objects = objects["objects"]
    log_info("objects = \n%s" % pretty_pprint(objects))
    protocols = get_objects_with_id(objects, "protocol")
    # exactly one protocol per input file is supported; the old message claimed
    # "doesn't contain a protocol" (with a typo) even when several were found.
    if len(protocols) != 1:
        raise Exception("file, %s, must contain exactly one protocol (found %d)" %
                        (filename, len(protocols)))
    for prot_object in protocols:
        log_info("prot = %s" % pretty_json(prot_object))
        gen_notifier_v2_for_protocol(options, filename, objects, prot_object)
####
def main():
    """Parse command line options and run the notifier generator on each input file."""
    oparser = optparse.OptionParser(usage="\n  %prog <options> <protocol h file1>")
    oparser.add_option("", "--version",
                       action="store_true",
                       help="print version number",
                       dest="version", default=False)
    oparser.add_option("", "--verbose",
                       action="store_true",
                       help="print more information while testing",
                       dest="verbose",
                       default=False)
    oparser.add_option("",
                       "--notifier_name",
                       help="set (override) the name of the notifier to be <notifier_name> (default <ProtocolName>Notifier)",
                       dest="notifier_name",
                       default=None)
    (options, filenames) = oparser.parse_args()
    # --verbose toggles the shared parser module's info logging.
    parser.log_info_enabled = options.verbose
    parser.log_info("generating notifier...")
    log_info("filenames = %s" % str(filenames))
    log_info("options = %s" % str(options))
    if options.version:
        print(VERSION_STR)
        sys.exit(0)
    # a single override name cannot apply to several generated notifiers:
    if options.notifier_name and len(filenames) > 1:
        raise Exception("--notifier_name can not be set when more than one fiename is specified")
    for filename in filenames:
        log("generating "+ filename)
        gen_notifier_v2(options, filename)
####
# Standard script entry point.
if __name__ == "__main__":
    main()
| korovkin/WNNotifier | notifier/gen_notifier.py | Python | apache-2.0 | 23,166 |
package pl.touk.nussknacker.ui.process.repository
import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId}
import pl.touk.nussknacker.restmodel.processdetails.{BaseProcessDetails, ProcessShapeFetchStrategy}
import pl.touk.nussknacker.ui.api.ListenerApiUser
import pl.touk.nussknacker.ui.listener.User
import pl.touk.nussknacker.ui.listener.services.{PullProcessRepository => ListenerPullProcessRepository}
import pl.touk.nussknacker.ui.security.api.LoggedUser
import scala.concurrent.{ExecutionContext, Future}
import scala.language.implicitConversions
/**
 * Listener-API facade over the UI's [[FetchingProcessRepository]]: exposes process
 * details lookups (by id, by id+version, by name+version) to process-change listeners.
 */
class PullProcessRepository(fetchingProcessRepository: FetchingProcessRepository[Future]) extends ListenerPullProcessRepository {
  // NOTE(review): assumes every listener `User` is a ListenerApiUser; the cast throws
  // ClassCastException for any other implementation — confirm this invariant upstream.
  private implicit def toLoggedUser(implicit user: User): LoggedUser =
    user.asInstanceOf[ListenerApiUser].loggedUser
  /** Latest version of the process identified by `id`, or None if unknown. */
  override def fetchLatestProcessDetailsForProcessId[PS: ProcessShapeFetchStrategy](id: ProcessId)
                                                                                   (implicit listenerUser: User, ec: ExecutionContext): Future[Option[BaseProcessDetails[PS]]] = {
    fetchingProcessRepository.fetchLatestProcessDetailsForProcessId(id = id)
  }
  /** Specific version of the process identified by `processId`, or None if unknown. */
  override def fetchProcessDetailsForId[PS: ProcessShapeFetchStrategy](processId: ProcessId, versionId: VersionId)
                                                                      (implicit listenerUser: User, ec: ExecutionContext): Future[Option[BaseProcessDetails[PS]]] = {
    fetchingProcessRepository.fetchProcessDetailsForId(processId, versionId)
  }
  /** Resolves `processName` to an id, then fetches the latest details; fails the
    * future with IllegalArgumentException when the name is unknown.
    * NOTE(review): `versionId` is accepted but not used — the latest version is
    * returned regardless; confirm whether this is intended. */
  override def fetchProcessDetailsForName[PS: ProcessShapeFetchStrategy](processName: ProcessName, versionId: VersionId)
                                                                        (implicit listenerUser: User, ec: ExecutionContext): Future[Option[BaseProcessDetails[PS]]] = for {
    maybeProcessId <- fetchingProcessRepository.fetchProcessId(processName)
    processId <- maybeProcessId.fold(Future.failed[ProcessId](new IllegalArgumentException(s"ProcessId for $processName not found")))(Future.successful)
    processDetails <- fetchLatestProcessDetailsForProcessId[PS](processId)
  } yield processDetails
}
| TouK/nussknacker | ui/server/src/main/scala/pl/touk/nussknacker/ui/process/repository/PullProcessRepository.scala | Scala | apache-2.0 | 2,181 |
/* -*- Mode: Java; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set shiftwidth=2 tabstop=2 autoindent cindent expandtab: */
/* Copyright 2013 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* globals mozL10n, GrabToPan, Preferences, SecondaryToolbar */
//#include grab_to_pan.js
/**
 * Toggleable "hand tool" that lets the user pan the page by grabbing it.
 * Thin UI wrapper around GrabToPan that keeps the toolbar button label in
 * sync with the tool state and cooperates with presentation mode.
 */
var HandTool = {
  /**
   * Wires the hand tool into the viewer.
   * @param options - must contain a `container` element for GrabToPan; may
   *        contain a `toggleHandTool` toolbar button (falsy when absent).
   */
  initialize: function handToolInitialize(options) {
    var toggleHandTool = options.toggleHandTool;
    this.handTool = new GrabToPan({
      element: options.container,
      // Keep the toggle button's tooltip and label in sync with the state.
      onActiveChanged: function(isActive) {
        if (!toggleHandTool) {
          return;
        }
        if (isActive) {
          toggleHandTool.title =
            mozL10n.get('hand_tool_disable.title', null, 'Disable hand tool');
          toggleHandTool.firstElementChild.textContent =
            mozL10n.get('hand_tool_disable_label', null, 'Disable hand tool');
        } else {
          toggleHandTool.title =
            mozL10n.get('hand_tool_enable.title', null, 'Enable hand tool');
          toggleHandTool.firstElementChild.textContent =
            mozL10n.get('hand_tool_enable_label', null, 'Enable hand tool');
        }
      }
    });
    if (toggleHandTool) {
      toggleHandTool.addEventListener('click', this.toggle.bind(this), false);
      // Wait for the 'localized' event so labels set by activate() are
      // localized; a rejected preference read is deliberately ignored
      // (the tool simply stays off).
      window.addEventListener('localized', function (evt) {
        Preferences.get('enableHandToolOnLoad').then(function resolved(value) {
          if (value) {
            this.handTool.activate();
          }
        }.bind(this), function rejected(reason) {});
      }.bind(this));
    }
  },
  toggle: function handToolToggle() {
    this.handTool.toggle();
    // Close the secondary toolbar that hosts the toggle button.
    SecondaryToolbar.close();
  },
  // Presentation mode handles navigation itself, so park the tool and
  // remember whether it should be re-enabled afterwards.
  enterPresentationMode: function handToolEnterPresentationMode() {
    if (this.handTool.active) {
      this.wasActive = true;
      this.handTool.deactivate();
    }
  },
  exitPresentationMode: function handToolExitPresentationMode() {
    if (this.wasActive) {
      this.wasActive = null;
      this.handTool.activate();
    }
  }
};
| reggersusa/pdf.js | web/hand_tool.js | JavaScript | apache-2.0 | 2,539 |
/* ------------------------------------------------------------------------- */
/*
* Copyright 2007-2018 GRAHAM DUMPLETON
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* ------------------------------------------------------------------------- */
#include "wsgi_server.h"
#include "wsgi_daemon.h"
/* ------------------------------------------------------------------------- */
/* Base server object. */
server_rec *wsgi_server = NULL;          /* Main Apache server record. */
apr_pool_t *wsgi_daemon_pool = NULL;     /* Pool owning daemon-process state. */
const char *wsgi_daemon_group = "";      /* Active daemon group name; "" presumably means embedded mode — TODO confirm. */
/* Process information. */
pid_t wsgi_parent_pid = 0;               /* PID of the original Apache parent. */
pid_t wsgi_worker_pid = 0;               /* PID of this worker process. */
pid_t wsgi_daemon_pid = 0;               /* PID of this daemon process (0 when not a daemon). */
apr_time_t wsgi_restart_time = 0;        /* Timestamp of the last (re)start. */
/* New Relic monitoring agent. */
const char *wsgi_newrelic_config_file = NULL;
const char *wsgi_newrelic_environment = NULL;
/* Python interpreter state. */
PyThreadState *wsgi_main_tstate = NULL;  /* Thread state of the main interpreter. */
/* Configuration objects. */
WSGIServerConfig *wsgi_server_config = NULL;
/*
 * Allocate a WSGIScriptFile from pool 'p'.  The structure comes zeroed from
 * apr_pcalloc() and the three pointer members are additionally reset to NULL
 * explicitly, exactly as before.
 */
WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p)
{
    WSGIScriptFile *script = (WSGIScriptFile *)apr_pcalloc(
            p, sizeof(WSGIScriptFile));

    script->handler_script = NULL;
    script->application_group = NULL;
    script->process_group = NULL;

    return script;
}
/*
 * Allocate and initialise the per-server configuration object from pool 'p'.
 * NOTE(review): the many -1 initialisers appear to mean "not set", presumably
 * so a later config-merge step can distinguish unset from 0/1 — TODO confirm.
 */
WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p)
{
    WSGIServerConfig *object = NULL;
    object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig));
    object->pool = p;
    object->alias_list = NULL;
    object->socket_prefix = NULL;
#if defined(MOD_WSGI_WITH_DAEMONS)
    /* Default daemon socket location, resolved relative to ServerRoot. */
    object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi";
    object->socket_prefix = ap_server_root_relative(p, object->socket_prefix);
#endif
    object->socket_rotation = 1;
    object->verbose_debugging = 0;
    object->python_warnings = NULL;
    object->py3k_warning_flag = -1;
    object->python_optimize = -1;
    object->dont_write_bytecode = -1;
    object->lang = NULL;
    object->locale = NULL;
    object->python_home = NULL;
    object->python_path = NULL;
    object->python_eggs = NULL;
    object->python_hash_seed = NULL;
    object->restrict_embedded = -1;
    object->restrict_stdin = -1;
    object->restrict_stdout = -1;
    object->restrict_signal = -1;
#if defined(WIN32) || defined(DARWIN)
    /* Windows and macOS filesystems are case-insensitive by default. */
    object->case_sensitivity = 0;
#else
    object->case_sensitivity = 1;
#endif
    object->restrict_process = NULL;
    object->process_group = NULL;
    object->application_group = NULL;
    object->callable_object = NULL;
    object->dispatch_script = NULL;
    object->pass_apache_request = -1;
    object->pass_authorization = -1;
    object->script_reloading = -1;
    object->error_override = -1;
    object->chunked_request = -1;
    object->ignore_activity = -1;
    object->enable_sendfile = -1;
    object->server_metrics = -1;
    object->newrelic_config_file = NULL;
    object->newrelic_environment = NULL;
    return object;
}
/* ------------------------------------------------------------------------- */
/* vi: set sw=4 expandtab : */
| pexip/os-mod-wsgi | src/server/wsgi_server.c | C | apache-2.0 | 3,531 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global feature tensorflow inference model.
This model includes image pyramids for multi-scale processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import export_model_utils
# Command-line interface (absl flags) for the export script.
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights',
                    'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_list(
    'input_scales_list', None,
    'Optional input image scales to use. If None (default), an input end-point '
    '"input_scales" is added for the exported model. If not None, the '
    'specified list of floats will be hard-coded as the desired input scales.')
flags.DEFINE_enum(
    'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
    "If 'None' (default), the model is exported with an output end-point "
    "'global_descriptors', where the global descriptor for each scale is "
    "returned separately. If not 'None', the global descriptor of each scale is"
    ' pooled and a 1D global descriptor is returned, with output end-point '
    "'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
                     'If True, L2-normalizes global descriptor.')
class _ExtractModule(tf.Module):
  """Helper module to build and save global feature model."""

  def __init__(self,
               multi_scale_pool_type='None',
               normalize_global_descriptor=False,
               input_scales_tensor=None):
    """Initialization of global feature model.

    Args:
      multi_scale_pool_type: Type of multi-scale pooling to perform.
      normalize_global_descriptor: Whether to L2-normalize global descriptor.
      input_scales_tensor: If None, the exported function to be used should be
        ExtractFeatures, where an input end-point "input_scales" is added for
        the exported model. If not None, the specified 1D tensor of floats will
        be hard-coded as the desired input scales, in conjunction with
        ExtractFeaturesFixedScales.
    """
    self._multi_scale_pool_type = multi_scale_pool_type
    self._normalize_global_descriptor = normalize_global_descriptor
    if input_scales_tensor is None:
      self._input_scales_tensor = []
    else:
      self._input_scales_tensor = input_scales_tensor
    # Setup the DELF model for extraction.
    self._model = delf_model.Delf(block3_strides=False, name='DELF')

  def LoadWeights(self, checkpoint_path):
    """Restores model weights from the given checkpoint path."""
    self._model.load_weights(checkpoint_path)

  # Serving signature: uint8 HWC image plus explicit scales and the indices of
  # scales to use for the global descriptor.
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
      tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
      tf.TensorSpec(
          shape=[None], dtype=tf.int32, name='input_global_scales_ind')
  ])
  def ExtractFeatures(self, input_image, input_scales, input_global_scales_ind):
    """Extracts (optionally pooled) global descriptors for the given image.

    Returns:
      Dict with key 'global_descriptors' (one descriptor per scale) when
      multi_scale_pool_type is 'None', otherwise key 'global_descriptor'
      (single pooled descriptor).
    """
    extracted_features = export_model_utils.ExtractGlobalFeatures(
        input_image,
        input_scales,
        input_global_scales_ind,
        lambda x: self._model.backbone.build_call(x, training=False),
        multi_scale_pool_type=self._multi_scale_pool_type,
        normalize_global_descriptor=self._normalize_global_descriptor)

    named_output_tensors = {}
    if self._multi_scale_pool_type == 'None':
      named_output_tensors['global_descriptors'] = tf.identity(
          extracted_features, name='global_descriptors')
    else:
      named_output_tensors['global_descriptor'] = tf.identity(
          extracted_features, name='global_descriptor')
    return named_output_tensors

  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
  ])
  def ExtractFeaturesFixedScales(self, input_image):
    """Same as ExtractFeatures but with the scales hard-coded at export time."""
    return self.ExtractFeatures(input_image, self._input_scales_tensor,
                                tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
  """Builds the global-feature module and exports it as a SavedModel."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  output_dir = FLAGS.export_path
  if os.path.exists(output_dir):
    raise ValueError('export_path %s already exists.' % output_dir)

  # When scales are given on the command line, hard-code them into the graph.
  scales_tensor = None
  if FLAGS.input_scales_list is not None:
    scales_tensor = tf.constant(
        [float(s) for s in FLAGS.input_scales_list],
        dtype=tf.float32,
        shape=[len(FLAGS.input_scales_list)],
        name='input_scales')

  extract_module = _ExtractModule(FLAGS.multi_scale_pool_type,
                                  FLAGS.normalize_global_descriptor,
                                  scales_tensor)

  # Restore trained weights before export.
  extract_module.LoadWeights(FLAGS.ckpt_path)
  print('Checkpoint loaded from ', FLAGS.ckpt_path)

  # Pick the serving function matching the chosen scale handling.
  if FLAGS.input_scales_list is None:
    serving_fn = extract_module.ExtractFeatures
  else:
    serving_fn = extract_module.ExtractFeaturesFixedScales

  tf.saved_model.save(
      extract_module, output_dir,
      signatures={'serving_default': serving_fn})
if __name__ == '__main__':
app.run(main)
| tombstone/models | research/delf/delf/python/training/model/export_global_model.py | Python | apache-2.0 | 5,972 |
finefoods
=========
| hai2219/finefoods | README.md | Markdown | apache-2.0 | 20 |
/*
* Copyright [2017] [Andy Moncsek]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jacpfx.entity.decoder;
import java.nio.charset.StandardCharsets;
import java.util.Optional;

import org.jacpfx.vxms.common.decoder.Decoder;
/**
 * Example {@link Decoder.ByteDecoder} that interprets a byte payload as text.
 *
 * <p>Created by Andy Moncsek on 18.11.15.
 */
public class ExampleByteDecoder implements Decoder.ByteDecoder<String> {

  /**
   * Decodes the given bytes into a String.
   *
   * @param input the raw payload bytes
   * @return the decoded text, always present
   */
  @Override
  public Optional<String> decode(byte[] input) {
    // Decode explicitly as UTF-8: the original new String(byte[]) used the
    // platform default charset, making the result environment-dependent.
    return Optional.of(new String(input, StandardCharsets.UTF_8));
  }
}
| amoAHCP/vxms | vxms-testing/src/test/java/org/jacpfx/entity/decoder/ExampleByteDecoder.java | Java | apache-2.0 | 940 |
# Add-VSElasticBeanstalkApplicationApplicationVersionLifecycleConfig
## SYNOPSIS
Adds an AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig resource property to the template.
The application version lifecycle settings for an application.
Defines the rules that Elastic Beanstalk applies to an application's versions in order to avoid hitting the per-region limit for application versions.
## SYNTAX
```
Add-VSElasticBeanstalkApplicationApplicationVersionLifecycleConfig [[-MaxAgeRule] <Object>]
[[-MaxCountRule] <Object>] [<CommonParameters>]
```
## DESCRIPTION
Adds an AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig resource property to the template.
The application version lifecycle settings for an application.
Defines the rules that Elastic Beanstalk applies to an application's versions in order to avoid hitting the per-region limit for application versions.
When Elastic Beanstalk deletes an application version from its database, you can no longer deploy that version to an environment.
The source bundle remains in S3 unless you configure the rule to delete it.
ApplicationVersionLifecycleConfig is a property of the ApplicationResourceLifecycleConfig: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html property type.
## PARAMETERS
### -MaxAgeRule
Specify a max age rule to restrict the length of time that application versions are retained for an application.
Documentation: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule
Type: MaxAgeRule
UpdateType: Mutable
```yaml
Type: Object
Parameter Sets: (All)
Aliases:
Required: False
Position: 1
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -MaxCountRule
Specify a max count rule to restrict the number of application versions that are retained for an application.
Documentation: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule
Type: MaxCountRule
UpdateType: Mutable
```yaml
Type: Object
Parameter Sets: (All)
Aliases:
Required: False
Position: 2
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### CommonParameters
This cmdlet supports the common parameters: -Debug, -ErrorAction, -ErrorVariable, -InformationAction, -InformationVariable, -OutVariable, -OutBuffer, -PipelineVariable, -Verbose, -WarningAction, and -WarningVariable. For more information, see [about_CommonParameters](http://go.microsoft.com/fwlink/?LinkID=113216).
## INPUTS
## OUTPUTS
### Vaporshell.Resource.ElasticBeanstalk.Application.ApplicationVersionLifecycleConfig
## NOTES
## RELATED LINKS
[http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html)
| scrthq/Vaporshell | docs/docs/glossary/Add-VSElasticBeanstalkApplicationApplicationVersionLifecycleConfig.md | Markdown | apache-2.0 | 3,311 |
/*
Template Name: BusyBusiness
File: Featured Slider CSS
Author: OS Templates
Author URI: http://www.os-templates.com/
Licence: <a href="http://www.os-templates.com/template-terms">Website Template Licence</a>
*/
/* Outer band holding the featured slider; light grey backdrop. */
#featured_slide{
padding:30px 0;
/*font-family:Georgia, "Times New Roman", Times, serif;*/
background-color: #eee;
}
/* Reset spacing and decoration on everything inside the slider. */
#featured_slide h2, #featured_slide ul, #featured_slide p, #featured_slide a{
margin:0;
padding:0;
border:none;
outline:none;
list-style:none;
line-height:normal;
text-decoration:none;
}
/* Left pane: large preview image area (640x270). */
.preview_holder{
display:block;
position:relative;
float:left;
width:640px;
height:270px;
overflow:hidden;
color:#FFFFFF;
background-color:#000000;
margin-left:20px;
}
/* Image is 20px wider than the holder; the excess is clipped by the
   holder's overflow:hidden. */
.preview_holder img{
display:block;
width:660px;
height:270px;
}
/* Caption overlay, anchored to the bottom; hidden until shown by script. */
.preview_holder .text_holder{
display:none;
position:absolute;
bottom:0;
left:0;
width:100%;
}
.preview_holder .introtext{
display:block;
width:640px;
margin:0;
padding:10px;
color:#FFFFFF;
background:#333333;
}
/* Right pane: list of selectable slide entries. */
.selector{
display:block;
float:right;
width:300px;
}
.selector, .selector a{
color:#666666;
background:#F0F0F0;
}
/* Each entry: fixed 75px-high clickable row. */
.selector li, .selector li.middle{
display:block;
width:250px;
height:75px;
margin:0;
padding:15px 20px 0 30px;
cursor:pointer;
overflow:hidden;
}
/* Middle entry is 2px shorter to make room for its top/bottom borders. */
.selector li.middle{
display:block;
height:73px;
border-top:1px solid #BFBFBF;
border-bottom:1px solid #BFBFBF;
}
/* Active/hovered entry: arrow marker image plus darker background. */
.selector li.active, .selector li:hover{
color:#666666;
background:url("../images/active.gif") top left no-repeat #DDDDDD;
}
.selector li.active a, .selector li:hover a{
color:#333333;
background:#DDDDDD;
}
/* Entry thumbnail with white matting. */
.selector li img{
float:left;
width:50px;
height:50px;
margin:0 15px 0 0;
padding:4px;
color:#333333;
background:#FFFFFF;
border:1px solid #000000;
}
/* Body text in entries is hidden; only the title remains visible. */
.selector p{visibility:hidden;}
p.title{
font-size:20px;
font-weight:bold;
visibility:visible;
}
#featured_slide .selector .introtext p.title{
margin-top:6px;
}
.preview_holder p.title a{
font-size:16px;
color:#C6CFD5;
background:#333333;
} | annotationframework/AfShared | web-app/css/shared/business/featured_slide.css | CSS | apache-2.0 | 2,164 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.