code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package server
import (
"bytes"
"fmt"
"math"
"net"
"reflect"
"sort"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/gossip/resolver"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/server/status"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/netutil"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/pkg/errors"
)
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
//
// NOTE(review): the engines parameter is not referenced in this function;
// callers pass engines to node.start separately (see createAndStartTestNode).
// Presumably it is kept for signature symmetry — confirm before removing.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}
	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	// Use very long intervals so the periodic scanner and consistency
	// checker are effectively disabled for the duration of a test.
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	g := gossip.New(
		log.AmbientContext{},
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry())
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	// Gossip is only started when a bootstrap address was supplied; otherwise
	// the caller is expected to manage gossip startup itself.
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(ln.Addr())
	}
	cfg.Gossip = g
	// Tie retry loops to the stopper so shutdown is not blocked by
	// in-flight retries.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(&kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	ctx := tracing.WithTracer(context.Background(), cfg.AmbientCtx.Tracer)
	sender := kv.NewTxnCoordSender(ctx, distSender, cfg.Clock, false, stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval))
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	// Return the listener's address (not the input addr) since a ":0" input
	// resolves to a concrete port only after ListenAndServeGRPC.
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
// createAndStartTestNode creates a new test node and starts it. The server and node are returned.
func createAndStartTestNode(
	addr net.Addr,
	engines []engine.Engine,
	gossipBS net.Addr,
	locality roachpb.Locality,
	t *testing.T,
) (*grpc.Server, net.Addr, *Node, *stop.Stopper) {
	// Build the node first, then bring it up on the resolved listen address.
	srv, listenAddr, _, n, stopper := createTestNode(addr, engines, gossipBS, t)
	ctx := context.Background()
	if err := n.start(ctx, listenAddr, engines, roachpb.Attributes{}, locality); err != nil {
		t.Fatal(err)
	}
	// Don't hand the node back until the initial splits have completed;
	// tests depend on the initial ranges being in place.
	if err := WaitForInitialSplits(n.storeCfg.DB); err != nil {
		t.Fatal(err)
	}
	return srv, listenAddr, n, stopper
}
// formatKeys renders each key on its own "index: key" line for use in
// test-failure messages.
func formatKeys(keys []roachpb.Key) string {
	var out bytes.Buffer
	for idx := 0; idx < len(keys); idx++ {
		fmt.Fprintf(&out, "%d: %s\n", idx, keys[idx])
	}
	return out.String()
}
// keySlice implements sort.Interface, ordering keys bytewise.
type keySlice []roachpb.Key

func (ks keySlice) Len() int { return len(ks) }

func (ks keySlice) Swap(i, j int) {
	ks[i], ks[j] = ks[j], ks[i]
}

func (ks keySlice) Less(i, j int) bool {
	return bytes.Compare(ks[i], ks[j]) < 0
}
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in memory engine.
func TestBootstrapCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, stopper)
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval)); err != nil {
		t.Fatal(err)
	}
	// Scan the complete contents of the local database directly from the engine.
	rows, _, _, err := engine.MVCCScan(context.Background(), e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil)
	if err != nil {
		t.Fatal(err)
	}
	var foundKeys keySlice
	for _, kv := range rows {
		foundKeys = append(foundKeys, kv.Key)
	}
	// The bootstrapped engine is expected to contain exactly the meta
	// records, the node/store ID generators, and the initial SQL schema
	// keys appended below.
	var expectedKeys = keySlice{
		testutils.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax),
		testutils.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax),
		roachpb.Key("\x04node-idgen"),
		roachpb.Key("\x04store-idgen"),
	}
	// Add the initial keys for sql.
	for _, kv := range GetBootstrapSchema().GetInitialValues() {
		expectedKeys = append(expectedKeys, kv.Key)
	}
	// Resort the list. The sql values are not sorted.
	sort.Sort(expectedKeys)
	if !reflect.DeepEqual(foundKeys, expectedKeys) {
		t.Errorf("expected keys mismatch:\n%s\n -- vs. -- \n\n%s",
			formatKeys(foundKeys), formatKeys(expectedKeys))
	}
	// TODO(spencer): check values.
}
// TestBootstrapNewStore starts a cluster with two unbootstrapped
// stores and verifies both stores are added and started.
func TestBootstrapNewStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	// Bootstrap only the first engine; the other two are left blank so the
	// node must bootstrap them itself at startup.
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval)); err != nil {
		t.Fatal(err)
	}
	// Start a new node with two new stores which will require bootstrapping.
	engines := []engine.Engine{
		e,
		engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper),
		engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper),
	}
	_, _, node, stopper := createAndStartTestNode(
		util.TestAddr,
		engines,
		util.TestAddr,
		roachpb.Locality{},
		t,
	)
	defer stopper.Stop()
	// Non-initialized stores (in this case the new in-memory-based
	// store) will be bootstrapped by the node upon start. This happens
	// in a goroutine, so we'll have to wait a bit until we can find the
	// new node.
	util.SucceedsSoon(t, func() error {
		if n := node.stores.GetStoreCount(); n != 3 {
			return errors.Errorf("expected 3 stores but got %d", n)
		}
		return nil
	})
	// Check whether all stores are started properly.
	if err := node.stores.VisitStores(func(s *storage.Store) error {
		if !s.IsStarted() {
			return errors.Errorf("fail to start store: %s", s)
		}
		return nil
	}); err != nil {
		t.Error(err)
	}
}
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval)); err != nil {
		t.Fatal(err)
	}
	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	_, server1Addr, node1, stopper1 := createAndStartTestNode(
		util.TestAddr,
		engines1,
		util.TestAddr,
		roachpb.Locality{},
		t,
	)
	defer stopper1.Stop()
	// Create a new node. Its gossip bootstrap address points at node1, so it
	// joins the existing cluster rather than forming its own.
	engines2 := []engine.Engine{engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)}
	_, server2Addr, node2, stopper2 := createAndStartTestNode(
		util.TestAddr,
		engines2,
		server1Addr,
		roachpb.Locality{},
		t,
	)
	defer stopper2.Stop()
	// Verify new node is able to bootstrap its store.
	util.SucceedsSoon(t, func() error {
		if sc := node2.stores.GetStoreCount(); sc != 1 {
			return errors.Errorf("GetStoreCount() expected 1; got %d", sc)
		}
		return nil
	})
	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
	util.SucceedsSoon(t, func() error {
		// node1 should have gossiped node2's descriptor with the right address.
		var nodeDesc1 roachpb.NodeDescriptor
		if err := node1.storeCfg.Gossip.GetInfoProto(node2Key, &nodeDesc1); err != nil {
			return err
		}
		if addr2Str, server2AddrStr := nodeDesc1.Address.String(), server2Addr.String(); addr2Str != server2AddrStr {
			return errors.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2Str, server2AddrStr)
		}
		// And symmetrically for node2's view of node1.
		var nodeDesc2 roachpb.NodeDescriptor
		if err := node2.storeCfg.Gossip.GetInfoProto(node1Key, &nodeDesc2); err != nil {
			return err
		}
		if addr1Str, server1AddrStr := nodeDesc2.Address.String(), server1Addr.String(); addr1Str != server1AddrStr {
			return errors.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1Str, server1AddrStr)
		}
		return nil
	})
}
// TestNodeJoinSelf verifies that an uninitialized node trying to join
// itself will fail.
func TestNodeJoinSelf(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	// Point the gossip bootstrap address at the node's own address so the
	// node attempts to join itself.
	selfEngines := []engine.Engine{engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)}
	_, selfAddr, _, node, stopper := createTestNode(util.TestAddr, selfEngines, util.TestAddr, t)
	defer stopper.Stop()
	if err := node.start(
		context.Background(), selfAddr, selfEngines, roachpb.Attributes{}, roachpb.Locality{},
	); err != errCannotJoinSelf {
		t.Fatalf("expected err %s; got %s", errCannotJoinSelf, err)
	}
}
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
	defer engineStopper.Stop()
	if _, err := bootstrapCluster([]engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval)); err != nil {
		t.Fatal(err)
	}
	// Set the cluster ID to the empty UUID to simulate a corrupted store
	// identity.
	sIdent := roachpb.StoreIdent{
		ClusterID: *uuid.EmptyUUID,
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), hlc.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}
	engines := []engine.Engine{e}
	_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
	// BUGFIX: defer the stop instead of stopping immediately. Previously the
	// stopper was stopped *before* node.start ran, so the node started with
	// already-torn-down infrastructure and the test could fail for the wrong
	// reason rather than the corrupted store ident under test.
	defer stopper.Stop()
	if err := node.start(context.Background(), serverAddr, engines, roachpb.Attributes{}, roachpb.Locality{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
// compareNodeStatus reads the persisted node status for the passed-in test
// server and validates it against expectedNodeStatus. Concretely it checks
// that the node descriptor matches exactly, that the set of stores matches,
// and that per-metric values are either exactly equal ('equal' keys) or at
// least the expected value ('greater' keys).
// The latest actual status is returned so callers can carry it forward.
func compareNodeStatus(
	t *testing.T, ts *TestServer, expectedNodeStatus *status.NodeStatus, testNumber int,
) *status.NodeStatus {
	// ========================================
	// Read NodeStatus from server and validate top-level fields.
	// ========================================
	nodeStatusKey := keys.NodeStatusKey(ts.node.Descriptor.NodeID)
	nodeStatus := &status.NodeStatus{}
	if err := ts.db.GetProto(context.TODO(), nodeStatusKey, nodeStatus); err != nil {
		t.Fatalf("%d: failure getting node status: %s", testNumber, err)
	}
	// Descriptor values should be exactly equal to expected.
	if a, e := nodeStatus.Desc, expectedNodeStatus.Desc; !reflect.DeepEqual(a, e) {
		t.Errorf("%d: Descriptor does not match expected.\nexpected: %s\nactual: %s", testNumber, e, a)
	}
	// ========================================
	// Ensure all expected stores are represented in the node status.
	// ========================================
	// Index store statuses by StoreID for set-style comparison below.
	storesToMap := func(ns *status.NodeStatus) map[roachpb.StoreID]status.StoreStatus {
		strMap := make(map[roachpb.StoreID]status.StoreStatus, len(ns.StoreStatuses))
		for _, str := range ns.StoreStatuses {
			strMap[str.Desc.StoreID] = str
		}
		return strMap
	}
	actualStores := storesToMap(nodeStatus)
	expectedStores := storesToMap(expectedNodeStatus)
	if a, e := len(actualStores), len(expectedStores); a != e {
		t.Errorf("%d: actual status contained %d stores, expected %d", testNumber, a, e)
	}
	for key := range expectedStores {
		if _, ok := actualStores[key]; !ok {
			t.Errorf("%d: actual node status did not contain expected store %d", testNumber, key)
		}
	}
	// Stop early: the metric comparisons below are meaningless if the store
	// sets don't line up.
	if t.Failed() {
		t.FailNow()
	}
	// ========================================
	// Ensure all metric sets (node and store level) are consistent with
	// expected status.
	// ========================================
	// compareMetricMaps accepts an actual and an expected metric map, along
	// with two lists of string keys. For metrics with keys in the 'equal'
	// list, the actual value must be equal to the expected value. For keys in
	// the 'greater' list, the actual value must be greater than or equal to
	// the expected value.
	compareMetricMaps := func(actual, expected map[string]float64, equal, greater []string) {
		// Make sure the actual value map contains all values in expected map.
		for key := range expected {
			if _, ok := actual[key]; !ok {
				t.Errorf("%d: actual node status did not contain expected metric %s", testNumber, key)
			}
		}
		if t.Failed() {
			return
		}
		// For each equal key, ensure that the actual value is equal to expected
		// key.
		for _, key := range equal {
			if _, ok := actual[key]; !ok {
				t.Errorf("%d, actual node status did not contain expected 'equal' metric key %s", testNumber, key)
				continue
			}
			if a, e := actual[key], expected[key]; a != e {
				t.Errorf("%d: %s does not match expected value.\nExpected %f, Actual %f", testNumber, key, e, a)
			}
		}
		// For each greater key, the actual value only needs to meet or exceed
		// the expected value (these metrics grow with background activity).
		for _, key := range greater {
			if _, ok := actual[key]; !ok {
				t.Errorf("%d: actual node status did not contain expected 'greater' metric key %s", testNumber, key)
				continue
			}
			if a, e := actual[key], expected[key]; a < e {
				t.Errorf("%d: %s is not greater than or equal to expected value.\nExpected %f, Actual %f", testNumber, key, e, a)
			}
		}
	}
	compareMetricMaps(nodeStatus.Metrics, expectedNodeStatus.Metrics, nil, []string{
		"exec.success",
		"exec.error",
	})
	for key := range actualStores {
		// Directly verify a subset of metrics which have predictable output.
		compareMetricMaps(actualStores[key].Metrics, expectedStores[key].Metrics,
			[]string{
				"replicas",
				"replicas.leaseholders",
			},
			[]string{
				"livebytes",
				"keybytes",
				"valbytes",
				"livecount",
				"keycount",
				"valcount",
			})
	}
	if t.Failed() {
		t.FailNow()
	}
	return nodeStatus
}
// TestStatusSummaries verifies that status summaries are written correctly for
// both the Node and stores within the node.
func TestStatusSummaries(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// ========================================
	// Start test server and wait for full initialization.
	// ========================================
	srv, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
		DisableEventLog: true,
	})
	defer srv.Stopper().Stop()
	ts := srv.(*TestServer)
	ctx := context.TODO()
	// Retrieve the first store from the Node.
	s, err := ts.node.stores.GetStore(roachpb.StoreID(1))
	if err != nil {
		t.Fatal(err)
	}
	// Block until the store is fully initialized.
	s.WaitForInit()
	content := "junk"
	leftKey := "a"
	// Scan over all keys to "wake up" all replicas (force a lease holder election).
	if _, err := kvDB.Scan(context.TODO(), keys.MetaMax, keys.MaxKey, 0); err != nil {
		t.Fatal(err)
	}
	// Wait for full replication of initial ranges.
	initialRanges := ExpectedInitialRangeCount()
	util.SucceedsSoon(t, func() error {
		for i := 1; i <= int(initialRanges); i++ {
			if s.RaftStatus(roachpb.RangeID(i)) == nil {
				return errors.Errorf("Store %d replica %d is not present in raft", s.StoreID(), i)
			}
		}
		return nil
	})
	// ========================================
	// Construct an initial expectation for NodeStatus to compare to the first
	// status produced by the server.
	// ========================================
	expectedNodeStatus := &status.NodeStatus{
		Desc:      ts.node.Descriptor,
		StartedAt: 0,
		UpdatedAt: 0,
		Metrics: map[string]float64{
			"exec.success": 0,
			"exec.error":   0,
		},
	}
	expectedStoreStatuses := make(map[roachpb.StoreID]status.StoreStatus)
	if err := ts.node.stores.VisitStores(func(s *storage.Store) error {
		desc, err := s.Descriptor()
		if err != nil {
			t.Fatal(err)
		}
		// Only store 1 holds the initial ranges; other stores start empty.
		expectedReplicas := 0
		if s.StoreID() == roachpb.StoreID(1) {
			expectedReplicas = initialRanges
		}
		stat := status.StoreStatus{
			Desc: *desc,
			Metrics: map[string]float64{
				"replicas":              float64(expectedReplicas),
				"replicas.leaseholders": float64(expectedReplicas),
				"livebytes":             0,
				"keybytes":              0,
				"valbytes":              0,
				"livecount":             0,
				"keycount":              0,
				"valcount":              0,
			},
		}
		expectedNodeStatus.StoreStatuses = append(expectedNodeStatus.StoreStatuses, stat)
		expectedStoreStatuses[s.StoreID()] = stat
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// Function to force summaries to be written synchronously, including all
	// data currently in the event pipeline. Only one of the stores has
	// replicas, so there are no concerns related to quorum writes; if there
	// were multiple replicas, more care would need to be taken in the initial
	// syncFeed().
	forceWriteStatus := func() {
		if err := ts.node.computePeriodicMetrics(0); err != nil {
			t.Fatalf("error publishing store statuses: %s", err)
		}
		if err := ts.WriteSummaries(); err != nil {
			t.Fatalf("error writing summaries: %s", err)
		}
	}
	// Verify initial status.
	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 1)
	// Carry the latest actual store statuses forward as the baseline for the
	// next comparison round.
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}
	// ========================================
	// Put some data into the K/V store and confirm change to status.
	// ========================================
	splitKey := "b"
	rightKey := "c"
	// Write some values left and right of the proposed split key.
	if err := ts.db.Put(ctx, leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(ctx, rightKey, content); err != nil {
		t.Fatal(err)
	}
	// Increment metrics on the node
	expectedNodeStatus.Metrics["exec.success"] += 2
	// Increment metrics on the first store.
	store1 := expectedStoreStatuses[roachpb.StoreID(1)].Metrics
	store1["livecount"]++
	store1["keycount"]++
	store1["valcount"]++
	store1["livebytes"]++
	store1["keybytes"]++
	store1["valbytes"]++
	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 2)
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}
	// ========================================
	// Perform an admin split and verify that status is updated.
	// ========================================
	// Split the range.
	if err := ts.db.AdminSplit(context.TODO(), splitKey); err != nil {
		t.Fatal(err)
	}
	// Write on both sides of the split to ensure that the raft machinery
	// is running.
	if err := ts.db.Put(ctx, leftKey, content); err != nil {
		t.Fatal(err)
	}
	if err := ts.db.Put(ctx, rightKey, content); err != nil {
		t.Fatal(err)
	}
	// Increment metrics on the node
	expectedNodeStatus.Metrics["exec.success"] += 2
	// Increment metrics on the first store.
	store1 = expectedStoreStatuses[roachpb.StoreID(1)].Metrics
	store1["replicas"]++
	store1["replicas.leaders"]++
	store1["replicas.leaseholders"]++
	store1["ranges.available"]++
	forceWriteStatus()
	expectedNodeStatus = compareNodeStatus(t, ts, expectedNodeStatus, 3)
	for _, s := range expectedNodeStatus.StoreStatuses {
		expectedStoreStatuses[s.Desc.StoreID] = s
	}
}
// TestStartNodeWithLocality creates a new node and store and starts them with a
// collection of different localities.
func TestStartNodeWithLocality(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// FIX: renamed from testLocalityWitNewNode (typo, missing "h"). The
	// closure is local to this test, so the rename has no external impact.
	testLocalityWithNewNode := func(locality roachpb.Locality) {
		engineStopper := stop.NewStopper()
		defer engineStopper.Stop()
		e := engine.NewInMem(roachpb.Attributes{}, 1<<20, engineStopper)
		if _, err := bootstrapCluster(
			[]engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
		); err != nil {
			t.Fatal(err)
		}
		_, _, node, stopper := createAndStartTestNode(
			util.TestAddr,
			[]engine.Engine{e},
			util.TestAddr,
			locality,
			t,
		)
		defer stopper.Stop()
		// Check the node to make sure the locality was propagated to its
		// nodeDescriptor.
		if !reflect.DeepEqual(node.Descriptor.Locality, locality) {
			t.Fatalf("expected node locality to be %s, but it was %s", locality, node.Descriptor.Locality)
		}
		// Check the store to make sure the locality was propagated to its
		// nodeDescriptor.
		if err := node.stores.VisitStores(func(store *storage.Store) error {
			desc, err := store.Descriptor()
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(desc.Node.Locality, locality) {
				t.Fatalf("expected store's node locality to be %s, but it was %s", locality, desc.Node.Locality)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
	// Exercise an empty locality, a single tier, and multiple tiers.
	testCases := []roachpb.Locality{
		{},
		{
			Tiers: []roachpb.Tier{
				{Key: "a", Value: "b"},
			},
		},
		{
			Tiers: []roachpb.Tier{
				{Key: "a", Value: "b"},
				{Key: "c", Value: "d"},
				{Key: "e", Value: "f"},
			},
		},
	}
	for _, testCase := range testCases {
		testLocalityWithNewNode(testCase)
	}
}
| cockroachdb/cockroach | pkg/server/node_test.go | GO | apache-2.0 | 23,164 |
## ⚠️ Deprecation Warning ⚠️
**On February 1, 2022, this uploader will be fully sunset and no longer function**
We recommend all users migrate to the [uploader](https://github.com/codecov/uploader) to prevent any breakages in usage. You can learn more about our deprecation plan and the new uploader on our [blog](https://about.codecov.io/blog/introducing-codecovs-new-uploader/).
-----
Codecov Bash Uploader
=======================
### Upload reports to Codecov for almost every supported language.
[](https://codecov.io/gh/codecov/codecov-bash)
[Deployed Version](https://codecov.io/bash)
## Running the bash uploader
-----
```bash
# All CI
bash <(curl -s https://codecov.io/bash)
# Pipe to bash (Jenkins)
curl -s https://codecov.io/bash | bash -s - -t token
# ^ add your extra config here
# No bash method
curl -s https://codecov.io/bash > .codecov
chmod +x .codecov
./.codecov
```
-----
#### ⚠️ Verifying the bash uploader
As an additional layer of security, users may wish to check the script against the provided SHASUMs.
```bash
curl -fLso codecov https://codecov.io/bash;
VERSION=$(grep -o 'VERSION=\"[0-9\.]*\"' codecov | cut -d'"' -f2);
for i in 1 256 512
do
shasum -a $i -c --ignore-missing <(curl -s "https://raw.githubusercontent.com/codecov/codecov-bash/${VERSION}/SHA${i}SUM")
done
./codecov
```
or for older versions of `shasum`
```bash
curl -fLso codecov https://codecov.io/bash;
VERSION=$(grep -o 'VERSION=\"[0-9\.]*\"' codecov | cut -d'"' -f2);
for i in 1 256 512
do
shasum -a $i -c <(curl -s "https://raw.githubusercontent.com/codecov/codecov-bash/${VERSION}/SHA${i}SUM" | grep -w "codecov")
done
./codecov
```
### Languages
> Codecov supports many languages, you can find a full list here: https://docs.codecov.io/docs/supported-languages
### Other Usage
> Below are the most commonly used settings. See the [full list of commands](https://github.com/codecov/codecov-bash/blob/master/codecov#L56) for every supported option.
```yaml
# public repo on Travis CI
after_success:
- bash <(curl -s https://codecov.io/bash)
```
```yaml
# private repo
after_success:
- bash <(curl -s https://codecov.io/bash) -t your-repository-upload-token
```
```yaml
# Flag build types
after_success:
- bash <(curl -s https://codecov.io/bash) -F unittests
```
```yaml
# Include environment variables to store per build
after_success:
- bash <(curl -s https://codecov.io/bash) -e TOX_ENV,CUSTOM_VAR
```
> When running the codecov-bash uploader on Alpine Linux, you are likely to run into a parsing issue because of the default shell. To be able to upload reports, you need to issue the following commands.
```yaml
after_success:
- apk -U add git curl bash findutils
- bash -c '/bin/bash <(curl -s https://codecov.io/bash)'
```
### Prevent build failures
If Codecov fails to upload reports, you can ensure the CI build does not fail by adding a catch-all:
```
bash <(curl -s https://codecov.io/bash) || echo "Codecov did not collect coverage reports"
```
### CI Providers
| Company | Supported | Token Required |
|:---------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------:|:----------------:|
| [Travis CI](https://travis-ci.org/) | Yes [](http://travis-ci.org/codecov/codecov-bash) [](https://app.fossa.com/projects/git%2Bgithub.com%2Fcodecov%2Fcodecov-bash?ref=badge_shield) | Private only |
| [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/) | Yes | Private only |
| [CircleCI](https://circleci.com/) | Yes | Private only |
| [Codeship](https://codeship.com/) | Yes | Public & Private |
| [Jenkins](https://jenkins-ci.org/) | Yes | Public & Private |
| [Semaphore](https://semaphoreci.com/) | Yes | Public & Private |
| [TeamCity](https://www.jetbrains.com/teamcity/) | Yes | Public & Private |
| [drone.io](https://drone.io/) | Yes | Public & Private |
| [AppVeyor](http://www.appveyor.com/) | Yes | Private only |
| [Bamboo](https://www.atlassian.com/software/bamboo) | Yes | Public & Private |
| [Bitbucket](https://bitbucket.org/product/features/pipelines) | Yes | Public & Private |
| [Bitrise](https://bitrise.io/) | Yes | Public & Private |
| [buddybuild](https://buddybuild.com) | Yes | Public & Private |
| [Buildkite](https://buildkite.com) | Yes | Public & Private |
| [Heroku](https://heroku.com) | Yes | Public & Private |
| [Wercker](http://wercker.com/) | Yes | Public & Private |
| [Shippable](http://www.shippable.com/) | Yes | Public & Private |
| [Gitlab CI](https://about.gitlab.com/gitlab-ci/) | Yes | Public & Private |
| [Buildkite](https://buildkite.com) | Yes | Public & Private |
| [GitHub Actions](https://github.com/features/actions) | Yes | Private only |
| [Cirrus CI](https://cirrus-ci.org/) | Yes | Public & Private |
| [AWS CodeBuild](https://aws.amazon.com/codebuild/) | Yes | Public & Private |
| git | Yes (as a fallback) | Public & Private |
### Caveats
1. **Jenkins**: Unable to find reports? Try `PWD=WORKSPACE bash <(curl -s https://codecov.io/bash)`
### Development
To automatically update the hash files after a change to the Codecov uploader script, run
```
./install.sh
```
which will add the `pre-commit` hooks. You can also update the hash files manually via:
```bash
for i in 1 256 512; do shasum -a "${i}" codecov > "SHA${i}SUM"; done
```
and add the change to your pull request.
## License
[](https://app.fossa.com/projects/git%2Bgithub.com%2Fcodecov%2Fcodecov-bash?ref=badge_large)
| codecov/codecov-bash | readme.md | Markdown | apache-2.0 | 6,524 |
package org.alloytools.alloy.lsp.provider;
import java.io.File;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Random;
import edu.mit.csail.sdg.alloy4.Err;
import edu.mit.csail.sdg.alloy4.ErrorFatal;
import edu.mit.csail.sdg.alloy4.OurDialog;
import edu.mit.csail.sdg.alloy4.Util;
import edu.mit.csail.sdg.ast.ExprVar;
import edu.mit.csail.sdg.ast.Module;
import edu.mit.csail.sdg.ast.Sig;
import edu.mit.csail.sdg.ast.Sig.Field;
import edu.mit.csail.sdg.sim.SimInstance;
import edu.mit.csail.sdg.sim.SimTuple;
import edu.mit.csail.sdg.sim.SimTupleset;
import edu.mit.csail.sdg.translator.A4Solution;
import edu.mit.csail.sdg.translator.A4Tuple;
import edu.mit.csail.sdg.translator.A4TupleSet;
/**
 * Static helper utilities for the Alloy LSP provider: temporary-directory
 * management for Alloy files and conversion of Alloy solver results
 * ({@link A4Solution}/{@link A4TupleSet}) into simulator instances.
 */
public class AlloyAppUtil {

    /**
     * This variable caches the result of alloyHome() function call.
     */
    private static String alloyHome = null;

    /**
     * Find a temporary directory to store Alloy files; it's guaranteed to be a
     * canonical absolute path.
     *
     * <p>The result is computed once and cached in {@link #alloyHome}. On
     * non-Windows platforms the directory is chmod'ed to 700 on a best-effort
     * basis.
     *
     * <p>NOTE(review): if {@code java.io.tmpdir} is unset, OurDialog.fatal is
     * invoked and execution appears to continue into the code below —
     * presumably fatal() terminates the process; confirm, otherwise
     * {@code temp} may be null here.
     */
    public static synchronized String alloyHome() {
        if (alloyHome != null)
            return alloyHome;
        String temp = System.getProperty("java.io.tmpdir");
        if (temp == null || temp.length() == 0)
            OurDialog.fatal(null, "Error. JVM need to specify a temporary directory using java.io.tmpdir property.");
        String username = System.getProperty("user.name");
        File tempfile = new File(temp + File.separatorChar + "alloy4tmp40-" + (username == null ? "" : username));
        tempfile.mkdirs();
        String ans = Util.canon(tempfile.getPath());
        if (!tempfile.isDirectory()) {
            OurDialog.fatal(null, "Error. Cannot create the temporary directory " + ans);
        }
        if (!Util.onWindows()) {
            // Restrict the directory to the current user.
            String[] args = {
                            "chmod", "700", ans
            };
            try {
                Runtime.getRuntime().exec(args).waitFor();
            } catch (Throwable ex) {
            } // We only intend to make a best effort.
        }
        return alloyHome = ans;
    }

    /**
     * Create an empty temporary directory for use, designate it "deleteOnExit",
     * then return it. It is guaranteed to be a canonical absolute path.
     *
     * <p>Retries with random numeric suffixes until mkdirs() succeeds, i.e.
     * until a directory name not already in use is found.
     */
    public static String maketemp() {
        Random r = new Random(new Date().getTime());
        while (true) {
            int i = r.nextInt(1000000);
            String dest = AlloyAppUtil.alloyHome() + File.separatorChar + "tmp" + File.separatorChar + i;
            File f = new File(dest);
            // mkdirs() returns false if the directory already exists; keep
            // drawing random names until creation succeeds.
            if (f.mkdirs()) {
                f.deleteOnExit();
                return Util.canon(dest);
            }
        }
    }

    /**
     * Converts an A4TupleSet into a SimTupleset object.
     *
     * @param object expected to be an {@link A4TupleSet}
     * @throws Err (ErrorFatal) if {@code object} is not an A4TupleSet
     */
    public static SimTupleset convert(Object object) throws Err {
        if (!(object instanceof A4TupleSet))
            throw new ErrorFatal("Unexpected type error: expecting an A4TupleSet.");
        A4TupleSet s = (A4TupleSet) object;
        if (s.size() == 0)
            return SimTupleset.EMPTY;
        // Copy each tuple's atoms into a flat string array.
        List<SimTuple> list = new ArrayList<SimTuple>(s.size());
        int arity = s.arity();
        for (A4Tuple t : s) {
            String[] array = new String[arity];
            for (int i = 0; i < t.arity(); i++)
                array[i] = t.atom(i);
            list.add(SimTuple.make(array));
        }
        return SimTupleset.make(list);
    }

    /**
     * Converts an A4Solution into a SimInstance object.
     *
     * <p>Initializes the instance from all reachable non-builtin sigs, their
     * non-defined fields, and every atom and skolem in the solution.
     */
    public static SimInstance convert(Module root, A4Solution ans) throws Err {
        SimInstance ct = new SimInstance(root, ans.getBitwidth(), ans.getMaxSeq());
        for (Sig s : ans.getAllReachableSigs()) {
            if (!s.builtin)
                ct.init(s, convert(ans.eval(s)));
            for (Field f : s.getFields())
                if (!f.defined)
                    ct.init(f, convert(ans.eval(f)));
        }
        for (ExprVar a : ans.getAllAtoms())
            ct.init(a, convert(ans.eval(a)));
        for (ExprVar a : ans.getAllSkolems())
            ct.init(a, convert(ans.eval(a)));
        return ct;
    }

    /**
     * A zero-argument callable that may throw a checked exception.
     */
    public interface Func0<T> {

        public T call() throws Exception;
    }

    /**
     * Runs {@code func}, rethrowing any checked exception wrapped in a
     * RuntimeException (RuntimeExceptions propagate unchanged).
     */
    public static <T> T uncheckedRun(Func0<T> func) {
        try {
            return func.call();
        } catch (RuntimeException e) {
            throw e;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| AlloyTools/org.alloytools.alloy | org.alloytools.alloy.lsp/src/main/java/org/alloytools/alloy/lsp/provider/AlloyAppUtil.java | Java | apache-2.0 | 4,470 |
/*
* Copyright © 2014 - 2019 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradoop.flink.io.api;
import org.gradoop.flink.model.impl.epgm.GraphCollection;
import org.gradoop.flink.model.impl.epgm.LogicalGraph;
import java.io.IOException;
/**
 * Data sink in analytical programs: a target that logical graphs and graph
 * collections can be written to.
 */
public interface DataSink {
  /**
   * Writes a logical graph to the data sink.
   *
   * @param logicalGraph logical graph
   * @throws IOException if writing to the sink fails
   */
  void write(LogicalGraph logicalGraph) throws IOException;
  /**
   * Writes a graph collection to the data sink.
   *
   * @param graphCollection graph collection
   * @throws IOException if writing to the sink fails
   */
  void write(GraphCollection graphCollection) throws IOException;
  /**
   * Writes a logical graph to the data sink with overwrite option.
   *
   * @param logicalGraph logical graph
   * @param overwrite true, if existing files should be overwritten
   * @throws IOException if writing to the sink fails
   */
  void write(LogicalGraph logicalGraph, boolean overwrite) throws IOException;
  /**
   * Writes a graph collection to the data sink with overwrite option.
   *
   * @param graphCollection graph collection
   * @param overwrite true, if existing files should be overwritten
   * @throws IOException if writing to the sink fails
   */
  void write(GraphCollection graphCollection, boolean overwrite) throws IOException;
}
| rostam/gradoop | gradoop-flink/src/main/java/org/gradoop/flink/io/api/DataSink.java | Java | apache-2.0 | 1,799 |
# Copyright 2017 The Vispek Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
""" Example code about how to run raw_file_io
python3 -m vispek.examples.run_raw_file_io \
--in_path /Users/huaminli/Downloads/data \
--out_path /Users/huaminli/Desktop/vispek/data
"""
import argparse
from vispek.lib.io.raw_file_io import RawFileIO
def run_file_io(args):
    """Create a RawFileIO over ``args.in_path`` / ``args.out_path``.

    Args:
        args: parsed argparse namespace with ``in_path`` (directory of raw
            CSV files) and ``out_path`` (directory for preprocessed output).

    Returns:
        The constructed RawFileIO instance (previously created and discarded;
        returning it lets callers inspect or reuse it).
    """
    # RawFileIO appears to do its work on construction — TODO confirm; only
    # the constructor is invoked here.
    return RawFileIO(args.in_path, args.out_path)
if __name__ == '__main__':
    # Fixed help-text typos: "how tun run" -> "how to run",
    # "preproceed" -> "preprocessed".
    parser = argparse.ArgumentParser(
        description='Example code about how to run raw_file_io')
    parser.add_argument(
        '--in_path', type=str,
        help='absolute path to the directories that contains raw csv files')
    parser.add_argument(
        '--out_path', type=str,
        help='absolute path to the directories that contains ' +
        'preprocessed files')

    args = parser.parse_args()
    # Echo the chosen locations so the user can confirm them.
    print(args.in_path)
    print(args.out_path)
    run_file_io(args)
| hl475/vispek | examples/run_raw_file_io.py | Python | apache-2.0 | 1,604 |
/*
* Copyright 2002-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.servicebroker.model.instance;
import java.util.HashMap;
import java.util.Map;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.jupiter.api.Test;
import org.springframework.cloud.servicebroker.JsonUtils;
import org.springframework.cloud.servicebroker.model.Context;
import org.springframework.cloud.servicebroker.model.PlatformContext;
import org.springframework.cloud.servicebroker.model.catalog.MaintenanceInfo;
import org.springframework.cloud.servicebroker.model.instance.UpdateServiceInstanceRequest.PreviousValues;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Unit tests for {@link UpdateServiceInstanceRequest}: builder defaults,
 * a fully-populated build, JSON deserialization, and the equals/hashCode
 * contract. Test-method visibility is unified to {@code public} for
 * consistency (one method was package-private).
 */
public class UpdateServiceInstanceRequestTest {

	/** A builder with no values set yields null/empty/false defaults everywhere. */
	@Test
	public void requestWithDefaultsIsBuilt() {
		UpdateServiceInstanceRequest request = UpdateServiceInstanceRequest.builder()
				.build();

		assertThat(request.getServiceDefinitionId()).isNull();
		assertThat(request.getServiceInstanceId()).isNull();
		assertThat(request.getPlanId()).isNull();
		assertThat(request.getServiceDefinition()).isNull();
		assertThat(request.getContext()).isNull();
		assertThat(request.getParameters()).hasSize(0);
		assertThat(request.isAsyncAccepted()).isEqualTo(false);
		assertThat(request.getPreviousValues()).isNull();
		assertThat(request.getApiInfoLocation()).isNull();
		assertThat(request.getPlatformInstanceId()).isNull();
		assertThat(request.getOriginatingIdentity()).isNull();
		assertThat(request.getRequestIdentity()).isNull();
		assertThat(request.getMaintenanceInfo()).isNull();
	}

	/** Every builder setter is exercised and read back, including merged parameter maps. */
	@Test
	public void requestWithAllValuesIsBuilt() {
		Map<String, Object> parameters = new HashMap<>();
		parameters.put("field4", "value4");
		parameters.put("field5", "value5");
		Context context = PlatformContext.builder().build();
		Context originatingIdentity = PlatformContext.builder()
				.platform("test-platform")
				.build();

		UpdateServiceInstanceRequest request = UpdateServiceInstanceRequest.builder()
				.serviceInstanceId("service-instance-id")
				.serviceDefinitionId("service-definition-id")
				.planId("plan-id")
				.previousValues(new PreviousValues(
						"previous-plan-id",
						new MaintenanceInfo("1.1.0", "Patch for CVE-x")))
				.context(context)
				.parameters("field1", "value1")
				.parameters("field2", 2)
				.parameters("field3", true)
				.parameters(parameters)
				.asyncAccepted(true)
				.platformInstanceId("platform-instance-id")
				.apiInfoLocation("https://api.app.local")
				.originatingIdentity(originatingIdentity)
				.requestIdentity("request-id")
				.maintenanceInfo(new MaintenanceInfo("2.0.0", null))
				.build();

		assertThat(request.getServiceInstanceId()).isEqualTo("service-instance-id");
		assertThat(request.getServiceDefinitionId()).isEqualTo("service-definition-id");
		assertThat(request.getPlanId()).isEqualTo("plan-id");
		assertThat(request.getPreviousValues().getPlanId()).isEqualTo("previous-plan-id");
		assertThat(request.getPreviousValues().getMaintenanceInfo()).isEqualTo(
				new MaintenanceInfo("1.1.0", "Patch for CVE-x"));
		// Three individual parameters plus the two from the map.
		assertThat(request.getParameters()).hasSize(5);
		assertThat(request.getParameters().get("field1")).isEqualTo("value1");
		assertThat(request.getParameters().get("field2")).isEqualTo(2);
		assertThat(request.getParameters().get("field3")).isEqualTo(true);
		assertThat(request.getParameters().get("field4")).isEqualTo("value4");
		assertThat(request.getParameters().get("field5")).isEqualTo("value5");
		assertThat(request.getContext()).isEqualTo(context);
		assertThat(request.isAsyncAccepted()).isEqualTo(true);
		assertThat(request.getPlatformInstanceId()).isEqualTo("platform-instance-id");
		assertThat(request.getApiInfoLocation()).isEqualTo("https://api.app.local");
		assertThat(request.getOriginatingIdentity()).isEqualTo(originatingIdentity);
		assertThat(request.getRequestIdentity()).isEqualTo("request-id");
		assertThat(request.getMaintenanceInfo().getVersion()).isEqualTo("2.0.0");
		assertThat(request.getMaintenanceInfo().getDescription()).isNull();
	}

	/** Deserializing updateRequest.json populates plan, previous values and maintenance info. */
	@Test
	public void requestIsDeserializedFromJson() {
		UpdateServiceInstanceRequest request =
				JsonUtils.readTestDataFile("updateRequest.json",
						UpdateServiceInstanceRequest.class);

		assertThat(request.getServiceDefinitionId()).isEqualTo("test-service-id");
		assertThat(request.getPlanId()).isEqualTo("test-plan-id");
		assertThat(request.getPreviousValues().getPlanId()).isEqualTo("previous-plan-id");
		assertThat(request.getPreviousValues().getMaintenanceInfo()).isEqualTo(new MaintenanceInfo("1.1.0", "Patch for CVE-x"));
		assertThat(request.getMaintenanceInfo().getVersion()).isEqualTo("2.0.0");
		assertThat(request.getMaintenanceInfo().getDescription()).isNull();
	}

	/** Verifies the equals/hashCode contract via EqualsVerifier. */
	@Test
	public void equalsAndHashCode() {
		EqualsVerifier
				.forClass(UpdateServiceInstanceRequest.class)
				.withRedefinedSuperclass()
				.suppress(Warning.NONFINAL_FIELDS)
				.suppress(Warning.TRANSIENT_FIELDS)
				.verify();
	}

}
| spring-cloud/spring-cloud-cloudfoundry-service-broker | spring-cloud-open-service-broker-core/src/test/java/org/springframework/cloud/servicebroker/model/instance/UpdateServiceInstanceRequestTest.java | Java | apache-2.0 | 5,585 |
/**
* Copyright 2010 Bazaarvoice, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author J. Ryan Stinnett (ryan.stinnett@bazaarvoice.com)
*/
package com.bazaarvoice.jless.ast.node;
import com.bazaarvoice.jless.ast.visitor.NodeAdditionVisitor;
import com.bazaarvoice.jless.ast.visitor.NodeNavigationVisitor;
/**
 * AST node for a phrase of expressions. Each visitor hook simply dispatches
 * to the visitor method overloaded for this node type (double dispatch).
 */
public class ExpressionPhraseNode extends InternalNode {

    public ExpressionPhraseNode(Node child) {
        super(child);
    }

    /** Dispatches an addition visit for this node type. */
    @Override
    protected boolean add(NodeAdditionVisitor visitor) {
        return visitor.add(this);
    }

    /** Dispatches entry of a navigation visit for this node type. */
    @Override
    protected boolean enter(NodeNavigationVisitor visitor) {
        return visitor.enter(this);
    }

    /** Dispatches exit of a navigation visit for this node type. */
    @Override
    protected boolean exit(NodeNavigationVisitor visitor) {
        return visitor.exit(this);
    }

    /** Dispatches a visit of this node while it is not visible in output. */
    @Override
    protected boolean visitInvisible(NodeNavigationVisitor visitor) {
        return visitor.visitInvisible(this);
    }
}
| jryans/jless | src/main/java/com/bazaarvoice/jless/ast/node/ExpressionPhraseNode.java | Java | apache-2.0 | 1,449 |
package android.widget;
/*
* #%L
* Matos
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2010 - 2014 Orange SA
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
// Analysis stub of android.widget.ImageButton for the Matos tool: method
// bodies are placeholders and the annotations mark callback-registration
// points; this class is not a functional ImageButton implementation.
@com.francetelecom.rd.stubs.annotation.ClassDone(0)
public class ImageButton
  extends ImageView{

  // Constructors

  // Stub constructor; passes null to the superclass on purpose.
  @com.francetelecom.rd.stubs.annotation.CallBackRegister("onCreate")
  public ImageButton(android.content.Context arg1){
    super((android.content.Context) null);
  }

  // Stub constructor with attributes; passes null to the superclass on purpose.
  @com.francetelecom.rd.stubs.annotation.CallBackRegister("onCreate")
  public ImageButton(android.content.Context arg1, android.util.AttributeSet arg2){
    super((android.content.Context) null);
  }

  // Stub constructor with attributes and default style; passes null to the superclass.
  @com.francetelecom.rd.stubs.annotation.CallBackRegister("onCreate")
  public ImageButton(android.content.Context arg1, android.util.AttributeSet arg2, int arg3){
    super((android.content.Context) null);
  }

  // Methods

  // Stub override; always reports that alpha was not applied.
  @com.francetelecom.rd.stubs.annotation.CallBackRegister("onCreate")
  protected boolean onSetAlpha(int arg1){
    return false;
  }
}
| Orange-OpenSource/matos-profiles | matos-android/src/main/java/android/widget/ImageButton.java | Java | apache-2.0 | 1,537 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<HTML><HEAD><TITLE>Invalid Man Page</TITLE></HEAD>
<BODY>
<H1>Invalid Man Page</H1>
The requested file atanl.3 is not a valid (unformatted) man page.</BODY></HTML>
| cs-education/sysassets | man_pages/html/man3/atanl.3.html | HTML | apache-2.0 | 229 |
// Test incrementing the 8-bit memory byte pointed to by RSI. Increment RSI.
// Note that the sum of the average use of ports 2, 3 and 7 is equal to two,
// which tends to prove that there are two address generation instructions.
// Microbenchmark driver: times 1000 unrolled read-modify-write increments
// through a pointer, advancing the pointer after each store.
int main() {
  unsigned char memory[1000];
  // LOOP_ITERATIONS is not defined in this file — presumably supplied on the
  // compiler command line (-DLOOP_ITERATIONS=...); TODO confirm.
  for (int i = 0; i < LOOP_ITERATIONS; ++i) {
    // Inline asm: load the buffer address into RSI, then 1000 repetitions of
    // {addb $1,(%rsi); inc %rsi} — exactly covering memory[0..999].
    // NOTE(review): the asm writes memory but declares no "memory" clobber;
    // acceptable for a benchmark, but the compiler may not see the stores.
    asm volatile(
        R"(
      movq %[address], %%rsi
      .rept 1000
        addb $1, (%%rsi)
        inc %%rsi
      .endr
    )"
        :
        : [address] "r"(memory)
        : "%rsi");
  }
  return 0;
}
| google/EXEgesis | exegesis/mysteries/read_modify_write/ex1_addrm8_array.cc | C++ | apache-2.0 | 557 |
/************************************************************************/
/* */
/* Free software: Progressive edge-growth (PEG) algorithm */
/* Created by Xiaoyu Hu */
/* Evangelos Eletheriou */
/* Dieter Arnold */
/* IBM Research, Zurich Research Lab., Switzerland */
/* */
/* The C++ sources files have been compiled using xlC compiler */
/* at IBM RS/6000 running AIX. For other compilers and platforms,*/
/* minor changes might be needed. */
/* */
/* Bug reporting to: xhu@zurich.ibm.com */
/**********************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <iostream>
using namespace std;
#include <iomanip>
#include <fstream>
#include <math.h>
#include "BigGirth.h"
#include "Random.h"
#include "CyclesOfGraph.h"
#define EPS 1e-6
// Entry point for the PEG (progressive edge-growth) LDPC graph generator.
// Parses -numM/-numN/-codeName/-degFileName (plus optional -sglConcent and
// -tgtGirth), reads the degree distribution file, expands it into a
// per-column degree sequence, runs BigGirth, and optionally computes the
// local girth distribution for small codes (N < 10000).
int main(int argc, char * argv[]){
  int i, j, m, N, M;
  int sglConcent=1; // default to non-strictly concentrated parity-check distribution
  int targetGirth=100000; // default to greedy PEG version
  char codeName[100], degFileName[100];
  int *degSeq, *deg;
  double *degFrac;
  BigGirth *bigGirth;
  CyclesOfGraph *cog;

  // Each option is a (-flag, value) pair; at least 4 pairs are required.
  int numArgs=(argc-1)/2;
  if (argc<9) {
  USE:
    cout<<"*******************************************************************************************"<<endl;
    cout<<" Usage Reminder: MainPEG -numM M -numN N -codeName CodeName -degFileName DegFileName " <<endl;
    cout<<" option: -sglConcent SglConcent " <<endl;
    cout<<" sglConcent==0 ----- strictly concentrated parity-check " <<endl;
    cout<<" degree distribution (including regular graphs)" <<endl;
    cout<<" sglConcent==1 ----- Best-effort concentrated (DEFAULT) " <<endl;
    cout<<" option: -tgtGirth TgtGirth " <<endl;
    cout<<" TgtGirth==4, 6 ...; if very large, then greedy PEG (DEFAULT) " <<endl;
    cout<<" IF sglConcent==0, TgtGirth is recommended to be set relatively small" <<endl;
    cout<<" " <<endl;
    cout<<" Remarks: File CodeName stores the generated PEG Tanner graph. The first line contains"<<endl;
    cout<<" the block length, N. The second line defines the number of parity-checks, M."<<endl;
    cout<<" The third line defines the number of columns of the compressed parity-check "<<endl;
    cout<<" matrix. The following M lines are then the compressed parity-check matrix. "<<endl;
    cout<<" Each of the M rows contains the indices (1 ... N) of 1's in the compressed "<<endl;
    cout<<" row of parity-check matrix. If not all column entries are used, the column "<<endl;
    cout<<" is filled up with 0's. "<<endl;
    cout<<" "<<endl;
    cout<<" File DegFileName is the input file to specify the degree distribution (node "<<endl;
    cout<<" perspective). The first line contains the number of various degrees. The second"<<endl;
    cout<<" defines the row vector of degree sequence in the increasing order. The vector"<<endl;
    cout<<" of fractions of the corresponding degree is defined in the last line. "<<endl;
    cout<<" "<<endl;
    cout<<" A log file called 'leftHandGirth.dat' will also be generated and stored in the"<<endl;
    cout<<" current directory, which gives the girth of the left-hand subgraph of j, where"<<endl;
    cout<<" 1<=j<=N. The left-hand subgraph of j is defined as all the edges emanating from"<<endl;
    cout<<" bit nodes {1 ... j} and their associated nodes. "<<endl;
    cout<<" "<<endl;
    cout<<" The last point is, when strictly concentrated parity-check degree distribution"<<endl;
    cout<<" is invoked, i.e. sglConcent==0, the girth might be weaken to some extent as "<<endl;
    cout<<" compared to the generic PEG algorithm. "<<endl;
    cout<<"**********************************************************************************************"<<endl;
    exit(-1);
  }else {
    // Unknown flags jump back to the usage text via the USE label above.
    for(i=0;i<numArgs;i++){
      if (strcmp(argv[2*i+1], "-numM")==0) {
        M=atoi(argv[2*i+2]);
      } else if(strcmp(argv[2*i+1], "-numN")==0) {
        N=atoi(argv[2*i+2]);
      } else if(strcmp(argv[2*i+1], "-codeName")==0) {
        strcpy(codeName, argv[2*i+2]);
      } else if(strcmp(argv[2*i+1], "-degFileName")==0) {
        strcpy(degFileName, argv[2*i+2]);
      } else if(strcmp(argv[2*i+1], "-sglConcent")==0) {
        sglConcent=atoi(argv[2*i+2]);
      } else if(strcmp(argv[2*i+1], "-tgtGirth")==0) {
        targetGirth=atoi(argv[2*i+2]);
      } else{
        goto USE;
      }
    }
    if(M>N) {
      cout<<"Warning: M must be samller than N"<<endl;
      exit(-1);
    }
  }

  // Degree file layout: m, then m degrees (increasing), then m fractions
  // summing to 1.0 (node perspective).
  degSeq=new int[N];
  ifstream infn(degFileName);
  if (!infn) {cout << "\nCannot open file " << degFileName << endl; exit(-1); }
  infn >>m;
  deg=new int[m];
  degFrac=new double[m];
  for(i=0;i<m;i++) infn>>deg[i];
  for(i=0;i<m;i++) infn>>degFrac[i];
  infn.close();
  double dtmp=0.0;
  for(i=0;i<m;i++) dtmp+=degFrac[i];
  cout.setf(ios::fixed, ios::floatfield);
  if(fabs(dtmp-1.0)>EPS) {
    cout.setf(ios::fixed, ios::floatfield);
    cout <<"\n Invalid degree distribution (node perspective): sum != 1.0 but "<<setprecision(10)<<dtmp<<endl; exit(-1);
  }
  // Turn the fractions into a cumulative distribution, then assign each of
  // the N columns a degree by locating its quantile i/N in that CDF.
  for(i=1;i<m;i++) degFrac[i]+=degFrac[i-1];
  for(i=0;i<N;i++) {
    dtmp=(double)i/N;
    for(j=m-1;j>=0;j--) {
      if(dtmp>degFrac[j]) break;
    }
    if(dtmp<degFrac[0]) degSeq[i]=deg[0];
    else degSeq[i]=deg[j+1];
  }

  // Run the PEG construction and write the compressed H matrix.
  bigGirth=new BigGirth(M, N, degSeq, codeName, sglConcent, targetGirth);
  (*bigGirth).writeToFile_Hcompressed();
  //(*bigGirth).writeToFile_Hmatrix() // different output format
  //(*bigGirth).writeToFile(); // different output format: including generator matrix (compressed)

  //computing local girth distribution
  if(N<10000) {
    cout<<" Now computing the local girth on the global Tanner graph setting. "<<endl;
    cout<<" might take a bit long time. Please wait ... "<<endl;
    (*bigGirth).loadH();
    cog=new CyclesOfGraph(M, N, (*bigGirth).H);
    (*cog).getCyclesTable();
    (*cog).printCyclesTable();
    delete cog;
    cog=NULL;
  }

  delete [] degSeq; degSeq=NULL;
  delete [] deg; deg=NULL;
  delete [] degFrac; degFrac=NULL;
  delete bigGirth;
}
| pjiangtw/HOPE | ldpcireg/PEG2/MainPEG.C | C++ | apache-2.0 | 7,423 |
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
module Spark.Core.GroupsSpec where
import Test.Hspec
import Data.Text(Text)
import Spark.Core.Context
import Spark.Core.Functions
import Spark.Core.ColumnFunctions
import Spark.Core.Column
import Spark.Core.IntegrationUtilities
import Spark.Core.CollectSpec(run)
import Spark.Core.Internal.Groups
-- | Builds a Spark dataset from the given pairs, groups the values by key,
-- sums each group on the cluster, collects the result, and asserts it equals
-- the expected association list.
sumGroup :: [MyPair] -> [(Text, Int)] -> IO ()
sumGroup l lexp = do
  let ds = dataset l
  -- Project the key and value columns out of the dataset.
  let keys = ds // myKey'
  let values = ds // myVal'
  let g = groupByKey keys values
  -- Aggregate each group with a column-wise sum.
  let ds2 = g `aggKey` sumCol
  l2 <- exec1Def $ collect (asCol ds2)
  l2 `shouldBe` lexp
-- | Hspec integration suite: group-by-key aggregation over (Text, Int)
-- pairs, covering the empty, singleton, and multi-key cases.
spec :: Spec
spec = do
  describe "Integration test - groups on (text, int)" $ do
    run "empty" $
      sumGroup [] []
    run "one" $
      sumGroup [MyPair "x" 1] [("x", 1)]
    run "two" $
      sumGroup [MyPair "x" 1, MyPair "x" 2, MyPair "y" 1] [("x", 3), ("y", 1)]
| krapsh/kraps-haskell | test-integration/Spark/Core/GroupsSpec.hs | Haskell | apache-2.0 | 915 |
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.primitives;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkElementIndex;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.base.Converter;
import java.io.Serializable;
import java.util.AbstractList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.RandomAccess;
/**
* Static utility methods pertaining to {@code short} primitives, that are not
* already found in either {@link Short} or {@link Arrays}.
*
* <p>See the Guava User Guide article on <a href=
* "http://code.google.com/p/guava-libraries/wiki/PrimitivesExplained">
* primitive utilities</a>.
*
* @author Kevin Bourrillion
* @since 1.0
*/
@GwtCompatible(emulated = true)
public final class Shorts {
private Shorts() {}
/**
* The number of bytes required to represent a primitive {@code short}
* value.
*/
public static final int BYTES = Short.SIZE / Byte.SIZE;
/**
* The largest power of two that can be represented as a {@code short}.
*
* @since 10.0
*/
public static final short MAX_POWER_OF_TWO = 1 << (Short.SIZE - 2);
  /**
   * Returns a hash code for {@code value}; equal to the result of invoking
   * {@code ((Short) value).hashCode()}.
   *
   * @param value a primitive {@code short} value
   * @return a hash code for the value
   */
  public static int hashCode(short value) {
    return value; // implicit widening conversion; same result as Short.hashCode(value)
  }
/**
* Returns the {@code short} value that is equal to {@code value}, if
* possible.
*
* @param value any value in the range of the {@code short} type
* @return the {@code short} value that equals {@code value}
* @throws IllegalArgumentException if {@code value} is greater than {@link
* Short#MAX_VALUE} or less than {@link Short#MIN_VALUE}
*/
public static short checkedCast(long value) {
short result = (short) value;
if (result != value) {
// don't use checkArgument here, to avoid boxing
throw new IllegalArgumentException("Out of range: " + value);
}
return result;
}
/**
* Returns the {@code short} nearest in value to {@code value}.
*
* @param value any {@code long} value
* @return the same value cast to {@code short} if it is in the range of the
* {@code short} type, {@link Short#MAX_VALUE} if it is too large,
* or {@link Short#MIN_VALUE} if it is too small
*/
public static short saturatedCast(long value) {
if (value > Short.MAX_VALUE) {
return Short.MAX_VALUE;
}
if (value < Short.MIN_VALUE) {
return Short.MIN_VALUE;
}
return (short) value;
}
  /**
   * Compares the two specified {@code short} values. The sign of the value
   * returned is the same as that of {@code ((Short) a).compareTo(b)}.
   *
   * <p><b>Note for Java 7 and later:</b> this method should be treated as
   * deprecated; use the equivalent {@link Short#compare} method instead.
   *
   * @param a the first {@code short} to compare
   * @param b the second {@code short} to compare
   * @return a negative value if {@code a} is less than {@code b}; a positive
   *     value if {@code a} is greater than {@code b}; or zero if they are equal
   */
  public static int compare(short a, short b) {
    return a - b; // safe due to restricted range: a short difference cannot overflow an int
  }
/**
* Returns {@code true} if {@code target} is present as an element anywhere in
* {@code array}.
*
* @param array an array of {@code short} values, possibly empty
* @param target a primitive {@code short} value
* @return {@code true} if {@code array[i] == target} for some value of {@code
* i}
*/
public static boolean contains(short[] array, short target) {
for (short value : array) {
if (value == target) {
return true;
}
}
return false;
}
  /**
   * Returns the index of the first appearance of the value {@code target} in
   * {@code array}.
   *
   * @param array an array of {@code short} values, possibly empty
   * @param target a primitive {@code short} value
   * @return the least index {@code i} for which {@code array[i] == target}, or
   *     {@code -1} if no such index exists.
   */
  public static int indexOf(short[] array, short target) {
    // Delegates to the range-based overload over the whole array.
    return indexOf(array, target, 0, array.length);
  }
// TODO(kevinb): consider making this public
private static int indexOf(
short[] array, short target, int start, int end) {
for (int i = start; i < end; i++) {
if (array[i] == target) {
return i;
}
}
return -1;
}
/**
* Returns the start position of the first occurrence of the specified {@code
* target} within {@code array}, or {@code -1} if there is no such occurrence.
*
* <p>More formally, returns the lowest index {@code i} such that {@code
* java.util.Arrays.copyOfRange(array, i, i + target.length)} contains exactly
* the same elements as {@code target}.
*
* @param array the array to search for the sequence {@code target}
* @param target the array to search for as a sub-sequence of {@code array}
*/
public static int indexOf(short[] array, short[] target) {
checkNotNull(array, "array");
checkNotNull(target, "target");
if (target.length == 0) {
return 0;
}
outer:
for (int i = 0; i < array.length - target.length + 1; i++) {
for (int j = 0; j < target.length; j++) {
if (array[i + j] != target[j]) {
continue outer;
}
}
return i;
}
return -1;
}
  /**
   * Returns the index of the last appearance of the value {@code target} in
   * {@code array}.
   *
   * @param array an array of {@code short} values, possibly empty
   * @param target a primitive {@code short} value
   * @return the greatest index {@code i} for which {@code array[i] == target},
   *     or {@code -1} if no such index exists.
   */
  public static int lastIndexOf(short[] array, short target) {
    // Delegates to the range-based overload over the whole array.
    return lastIndexOf(array, target, 0, array.length);
  }
// TODO(kevinb): consider making this public
private static int lastIndexOf(
short[] array, short target, int start, int end) {
for (int i = end - 1; i >= start; i--) {
if (array[i] == target) {
return i;
}
}
return -1;
}
/**
* Returns the least value present in {@code array}.
*
* @param array a <i>nonempty</i> array of {@code short} values
* @return the value present in {@code array} that is less than or equal to
* every other value in the array
* @throws IllegalArgumentException if {@code array} is empty
*/
public static short min(short... array) {
checkArgument(array.length > 0);
short min = array[0];
for (int i = 1; i < array.length; i++) {
if (array[i] < min) {
min = array[i];
}
}
return min;
}
/**
* Returns the greatest value present in {@code array}.
*
* @param array a <i>nonempty</i> array of {@code short} values
* @return the value present in {@code array} that is greater than or equal to
* every other value in the array
* @throws IllegalArgumentException if {@code array} is empty
*/
public static short max(short... array) {
checkArgument(array.length > 0);
short max = array[0];
for (int i = 1; i < array.length; i++) {
if (array[i] > max) {
max = array[i];
}
}
return max;
}
/**
* Returns the values from each provided array combined into a single array.
* For example, {@code concat(new short[] {a, b}, new short[] {}, new
* short[] {c}} returns the array {@code {a, b, c}}.
*
* @param arrays zero or more {@code short} arrays
* @return a single array containing all the values from the source arrays, in
* order
*/
public static short[] concat(short[]... arrays) {
int length = 0;
for (short[] array : arrays) {
length += array.length;
}
short[] result = new short[length];
int pos = 0;
for (short[] array : arrays) {
System.arraycopy(array, 0, result, pos, array.length);
pos += array.length;
}
return result;
}
/**
* Returns a big-endian representation of {@code value} in a 2-element byte
* array; equivalent to {@code
* ByteBuffer.allocate(2).putShort(value).array()}. For example, the input
* value {@code (short) 0x1234} would yield the byte array {@code {0x12,
* 0x34}}.
*
* <p>If you need to convert and concatenate several values (possibly even of
* different types), use a shared {@link java.nio.ByteBuffer} instance, or use
* {@link com.google.common.io.ByteStreams#newDataOutput()} to get a growable
* buffer.
*/
@GwtIncompatible("doesn't work")
public static byte[] toByteArray(short value) {
return new byte[] {
(byte) (value >> 8),
(byte) value};
}
  /**
   * Returns the {@code short} value whose big-endian representation is
   * stored in the first 2 bytes of {@code bytes}; equivalent to {@code
   * ByteBuffer.wrap(bytes).getShort()}. For example, the input byte array
   * {@code {0x54, 0x32}} would yield the {@code short} value {@code 0x5432}.
   *
   * <p>Arguably, it's preferable to use {@link java.nio.ByteBuffer}; that
   * library exposes much more flexibility at little cost in readability.
   *
   * @throws IllegalArgumentException if {@code bytes} has fewer than 2
   *     elements
   */
  @GwtIncompatible("doesn't work")
  public static short fromByteArray(byte[] bytes) {
    checkArgument(bytes.length >= BYTES,
        "array too small: %s < %s", bytes.length, BYTES);
    // Only the first two bytes are consumed; any extra bytes are ignored.
    return fromBytes(bytes[0], bytes[1]);
  }
/**
* Returns the {@code short} value whose byte representation is the given 2
* bytes, in big-endian order; equivalent to {@code Shorts.fromByteArray(new
* byte[] {b1, b2})}.
*
* @since 7.0
*/
@GwtIncompatible("doesn't work")
public static short fromBytes(byte b1, byte b2) {
return (short) ((b1 << 8) | (b2 & 0xFF));
}
  // Serializable two-way converter between decimal/hex/octal strings and
  // Short values; a singleton so deserialization resolves to one instance.
  private static final class ShortConverter
      extends Converter<String, Short> implements Serializable {
    static final ShortConverter INSTANCE = new ShortConverter();

    /** Parses a string (decimal, 0x-hex, or 0-octal) into a Short. */
    @Override
    protected Short doForward(String value) {
      return Short.decode(value);
    }

    /** Formats a Short as its decimal string form. */
    @Override
    protected String doBackward(Short value) {
      return value.toString();
    }

    @Override
    public String toString() {
      return "Shorts.stringConverter()";
    }

    // Preserves the singleton property across serialization.
    private Object readResolve() {
      return INSTANCE;
    }
    private static final long serialVersionUID = 1;
  }
  /**
   * Returns a serializable converter object that converts between strings and
   * shorts using {@link Short#decode} and {@link Short#toString()}.
   *
   * @since 16.0
   */
  @Beta
  public static Converter<String, Short> stringConverter() {
    // Stateless, so the shared singleton is always returned.
    return ShortConverter.INSTANCE;
  }
/**
* Returns an array containing the same values as {@code array}, but
* guaranteed to be of a specified minimum length. If {@code array} already
* has a length of at least {@code minLength}, it is returned directly.
* Otherwise, a new array of size {@code minLength + padding} is returned,
* containing the values of {@code array}, and zeroes in the remaining places.
*
* @param array the source array
* @param minLength the minimum length the returned array must guarantee
* @param padding an extra amount to "grow" the array by if growth is
* necessary
* @throws IllegalArgumentException if {@code minLength} or {@code padding} is
* negative
* @return an array containing the values of {@code array}, with guaranteed
* minimum length {@code minLength}
*/
public static short[] ensureCapacity(
short[] array, int minLength, int padding) {
checkArgument(minLength >= 0, "Invalid minLength: %s", minLength);
checkArgument(padding >= 0, "Invalid padding: %s", padding);
return (array.length < minLength)
? copyOf(array, minLength + padding)
: array;
}
// Arrays.copyOf() requires Java 6
private static short[] copyOf(short[] original, int length) {
short[] copy = new short[length];
System.arraycopy(original, 0, copy, 0, Math.min(original.length, length));
return copy;
}
/**
* Returns a string containing the supplied {@code short} values separated
* by {@code separator}. For example, {@code join("-", (short) 1, (short) 2,
* (short) 3)} returns the string {@code "1-2-3"}.
*
* @param separator the text that should appear between consecutive values in
* the resulting string (but not at the start or end)
* @param array an array of {@code short} values, possibly empty
*/
public static String join(String separator, short... array) {
checkNotNull(separator);
if (array.length == 0) {
return "";
}
// For pre-sizing a builder, just get the right order of magnitude
StringBuilder builder = new StringBuilder(array.length * 6);
builder.append(array[0]);
for (int i = 1; i < array.length; i++) {
builder.append(separator).append(array[i]);
}
return builder.toString();
}
  /**
   * Returns a comparator that compares two {@code short} arrays
   * lexicographically. That is, it compares, using {@link
   * #compare(short, short)}), the first pair of values that follow any
   * common prefix, or when one array is a prefix of the other, treats the
   * shorter array as the lesser. For example, {@code [] < [(short) 1] <
   * [(short) 1, (short) 2] < [(short) 2]}.
   *
   * <p>The returned comparator is inconsistent with {@link
   * Object#equals(Object)} (since arrays support only identity equality), but
   * it is consistent with {@link Arrays#equals(short[], short[])}.
   *
   * @see <a href="http://en.wikipedia.org/wiki/Lexicographical_order">
   *     Lexicographical order article at Wikipedia</a>
   * @since 2.0
   */
  public static Comparator<short[]> lexicographicalComparator() {
    // Stateless enum singleton; safe to share across callers.
    return LexicographicalComparator.INSTANCE;
  }
/** Stateless lexicographic comparator, shared via the enum singleton idiom. */
private enum LexicographicalComparator implements Comparator<short[]> {
  INSTANCE;

  @Override
  public int compare(short[] left, short[] right) {
    // Walk the shared prefix and report the first differing pair.
    int limit = Math.min(left.length, right.length);
    for (int i = 0; i < limit; i++) {
      int cmp = Shorts.compare(left[i], right[i]);
      if (cmp != 0) {
        return cmp;
      }
    }
    // Identical prefix: the shorter array orders first.
    return left.length - right.length;
  }
}
/**
 * Returns an array holding each value of {@code collection}, converted to a
 * {@code short} in the manner of {@link Number#shortValue}.
 *
 * <p>Elements are copied from the argument collection as if by {@code
 * collection.toArray()}, so calling this method is as thread-safe as calling
 * that method.
 *
 * @param collection a collection of {@code Number} instances
 * @return an array with the same values as {@code collection}, in the same
 *     order, converted to primitives
 * @throws NullPointerException if {@code collection} or any of its elements
 *     is null
 * @since 1.0 (parameter was {@code Collection<Short>} before 12.0)
 */
public static short[] toArray(Collection<? extends Number> collection) {
  // Fast path: our own list view can hand back a copy of its range directly.
  if (collection instanceof ShortArrayAsList) {
    return ((ShortArrayAsList) collection).toShortArray();
  }
  Object[] boxed = collection.toArray();
  short[] result = new short[boxed.length];
  for (int i = 0; i < result.length; i++) {
    // checkNotNull for GWT (do not optimize)
    result[i] = ((Number) checkNotNull(boxed[i])).shortValue();
  }
  return result;
}
/**
 * Returns a fixed-size list backed by the given array, similar to {@link
 * Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)},
 * but attempting to set an element to {@code null} throws {@link
 * NullPointerException}.
 *
 * <p>The returned list keeps the values, but not the identities, of the
 * {@code Short} objects written to or read from it; whether {@code
 * list.get(0) == list.get(0)} holds is unspecified.
 *
 * @param backingArray the array to back the list
 * @return a list view of the array
 */
public static List<Short> asList(short... backingArray) {
  return (backingArray.length == 0)
      ? Collections.<Short>emptyList()
      : new ShortArrayAsList(backingArray);
}
/**
 * Fixed-size {@code List<Short>} view over the half-open range
 * {@code [start, end)} of a backing {@code short[]}, as handed out by
 * {@link Shorts#asList}. Writes via {@link #set} go through to the array;
 * null elements are rejected.
 */
@GwtCompatible
private static class ShortArrayAsList extends AbstractList<Short>
    implements RandomAccess, Serializable {
  final short[] array;
  // Half-open range [start, end) of `array` that this list exposes.
  final int start;
  final int end;
  ShortArrayAsList(short[] array) {
    this(array, 0, array.length);
  }
  ShortArrayAsList(short[] array, int start, int end) {
    this.array = array;
    this.start = start;
    this.end = end;
  }
  @Override public int size() {
    return end - start;
  }
  @Override public boolean isEmpty() {
    // Never empty: asList returns Collections.emptyList() for length-0
    // arrays, and subList returns emptyList() for empty ranges.
    return false;
  }
  @Override public Short get(int index) {
    checkElementIndex(index, size());
    return array[start + index];
  }
  @Override public boolean contains(Object target) {
    // Overridden to prevent a ton of boxing
    return (target instanceof Short)
        && Shorts.indexOf(array, (Short) target, start, end) != -1;
  }
  @Override public int indexOf(Object target) {
    // Overridden to prevent a ton of boxing
    if (target instanceof Short) {
      int i = Shorts.indexOf(array, (Short) target, start, end);
      if (i >= 0) {
        // Shorts.indexOf reports an array index; convert to a list index.
        return i - start;
      }
    }
    return -1;
  }
  @Override public int lastIndexOf(Object target) {
    // Overridden to prevent a ton of boxing
    if (target instanceof Short) {
      int i = Shorts.lastIndexOf(array, (Short) target, start, end);
      if (i >= 0) {
        return i - start;
      }
    }
    return -1;
  }
  @Override public Short set(int index, Short element) {
    checkElementIndex(index, size());
    short oldValue = array[start + index];
    // checkNotNull for GWT (do not optimize)
    array[start + index] = checkNotNull(element);
    return oldValue;
  }
  @Override public List<Short> subList(int fromIndex, int toIndex) {
    int size = size();
    checkPositionIndexes(fromIndex, toIndex, size);
    if (fromIndex == toIndex) {
      return Collections.emptyList();
    }
    // Share the backing array; only the exposed range narrows.
    return new ShortArrayAsList(array, start + fromIndex, start + toIndex);
  }
  @Override public boolean equals(Object object) {
    if (object == this) {
      return true;
    }
    if (object instanceof ShortArrayAsList) {
      // Fast path: compare primitives directly, no boxing or iterators.
      ShortArrayAsList that = (ShortArrayAsList) object;
      int size = size();
      if (that.size() != size) {
        return false;
      }
      for (int i = 0; i < size; i++) {
        if (array[start + i] != that.array[that.start + i]) {
          return false;
        }
      }
      return true;
    }
    // Fall back to AbstractList's element-wise comparison for other lists.
    return super.equals(object);
  }
  @Override public int hashCode() {
    // Mirrors List.hashCode() using the primitive values.
    int result = 1;
    for (int i = start; i < end; i++) {
      result = 31 * result + Shorts.hashCode(array[i]);
    }
    return result;
  }
  @Override public String toString() {
    // Pre-size for roughly the right order of magnitude.
    StringBuilder builder = new StringBuilder(size() * 6);
    builder.append('[').append(array[start]);
    for (int i = start + 1; i < end; i++) {
      builder.append(", ").append(array[i]);
    }
    return builder.append(']').toString();
  }
  short[] toShortArray() {
    // Arrays.copyOfRange() is not available under GWT
    int size = size();
    short[] result = new short[size];
    System.arraycopy(array, start, result, 0, size);
    return result;
  }
  private static final long serialVersionUID = 0;
}
}
| mariusj/org.openntf.domino | domino/externals/guava/src/main/java/com/google/common/primitives/Shorts.java | Java | apache-2.0 | 21,441 |
/**
* @license
* Copyright 2019 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @fileoverview The class representing a cursor.
* Used primarily for keyboard navigation.
* @author aschmiedt@google.com (Abby Schmiedt)
*/
'use strict';
goog.provide('Blockly.Cursor');
goog.require('Blockly.ASTNode');
goog.require('Blockly.Marker');
goog.require('Blockly.registry');
goog.require('Blockly.utils.object');
/**
 * Class for a cursor.
 * A cursor controls how a user navigates the Blockly AST.
 * @constructor
 * @extends {Blockly.Marker}
 */
Blockly.Cursor = function() {
  Blockly.Cursor.superClass_.constructor.call(this);
  /**
   * Marker type tag; distinguishes cursors from plain markers.
   * @override
   */
  this.type = 'cursor';
};
Blockly.utils.object.inherits(Blockly.Cursor, Blockly.Marker);
/**
 * Find the next connection, field, or block.
 * NEXT and BLOCK nodes are stepped over so the cursor lands on the following
 * navigable element.
 * @return {Blockly.ASTNode} The next element, or null if the current node is
 *     not set or there is no next value.
 * @public
 */
Blockly.Cursor.prototype.next = function() {
  var current = this.getCurNode();
  if (!current) {
    return null;
  }
  var candidate = current.next();
  // Skip NEXT/BLOCK nodes while a further sibling exists.
  while (candidate && candidate.next() &&
      (candidate.getType() == Blockly.ASTNode.types.NEXT ||
       candidate.getType() == Blockly.ASTNode.types.BLOCK)) {
    candidate = candidate.next();
  }
  if (candidate) {
    this.setCurNode(candidate);
  }
  return candidate;
};
/**
 * Find the in connection or field.
 * @return {Blockly.ASTNode} The in element, or null if the current node is
 *     not set or there is no in value.
 * @public
 */
Blockly.Cursor.prototype.in = function() {
  var curNode = this.getCurNode();
  if (!curNode) {
    return null;
  }
  // If we are on a previous or output connection, go to the block level before
  // performing next operation.
  if (curNode.getType() == Blockly.ASTNode.types.PREVIOUS ||
      curNode.getType() == Blockly.ASTNode.types.OUTPUT) {
    curNode = curNode.next();
  }
  var newNode = curNode.in();
  // Only move the cursor when descending actually found something.
  if (newNode) {
    this.setCurNode(newNode);
  }
  return newNode;
};
/**
 * Find the previous connection, field, or block.
 * NEXT and BLOCK nodes are stepped over so the cursor lands on the preceding
 * navigable element.
 * @return {Blockly.ASTNode} The previous element, or null if the current node
 *     is not set or there is no previous value.
 * @public
 */
Blockly.Cursor.prototype.prev = function() {
  var current = this.getCurNode();
  if (!current) {
    return null;
  }
  var candidate = current.prev();
  // Skip NEXT/BLOCK nodes while an earlier sibling exists.
  while (candidate && candidate.prev() &&
      (candidate.getType() == Blockly.ASTNode.types.NEXT ||
       candidate.getType() == Blockly.ASTNode.types.BLOCK)) {
    candidate = candidate.prev();
  }
  if (candidate) {
    this.setCurNode(candidate);
  }
  return candidate;
};
/**
 * Find the out connection, field, or block.
 * @return {Blockly.ASTNode} The out element, or null if the current node is
 *     not set or there is no out value.
 * @public
 */
Blockly.Cursor.prototype.out = function() {
  var curNode = this.getCurNode();
  if (!curNode) {
    return null;
  }
  var newNode = curNode.out();
  // When ascending lands on a BLOCK node, prefer its previous connection if
  // it has one; otherwise stay on the block itself.
  if (newNode && newNode.getType() == Blockly.ASTNode.types.BLOCK) {
    newNode = newNode.prev() || newNode;
  }
  if (newNode) {
    this.setCurNode(newNode);
  }
  return newNode;
};
// Register this class as the default cursor implementation.
Blockly.registry.register(
    Blockly.registry.Type.CURSOR, Blockly.registry.DEFAULT, Blockly.Cursor);
| mark-friedman/blockly | core/keyboard_nav/cursor.js | JavaScript | apache-2.0 | 3,270 |
/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
#ifndef __DEVICE_TASK_H__
#define __DEVICE_TASK_H__
#include "device_memory.h"
#include "util_function.h"
#include "util_list.h"
#include "util_task.h"
CCL_NAMESPACE_BEGIN
/* Device Task */
class Device;
class RenderBuffers;
class RenderTile;
class Tile;
/* A unit of work submitted to a compute device: path tracing, film
 * conversion (buffer -> displayable pixels), or shader evaluation. */
class DeviceTask : public Task {
public:
	typedef enum { PATH_TRACE, FILM_CONVERT, SHADER } Type;
	Type type;

	/* Buffer region the task operates on — presumably pixel coordinates
	 * plus width/height; confirm against device_task.cpp. */
	int x, y, w, h;
	/* Destination pixel buffers for FILM_CONVERT (byte and half-float). */
	device_ptr rgba_byte;
	device_ptr rgba_half;
	/* Render buffer read/written by the task. */
	device_ptr buffer;
	int sample;
	int num_samples;
	int offset, stride;

	/* Input/output arrays used by SHADER (shader evaluation) tasks. */
	device_ptr shader_input;
	device_ptr shader_output;
	int shader_eval_type;
	int shader_x, shader_w;

	DeviceTask(Type type = PATH_TRACE);

	/* Splitting into sub-tasks for parallel execution; max_size semantics
	 * defined in the implementation file. */
	int get_subtask_count(int num, int max_size = 0);
	void split(list<DeviceTask>& tasks, int num, int max_size = 0);

	void update_progress(RenderTile *rtile);

	/* Callbacks wired up by the caller for tile scheduling, progress
	 * reporting, and cancellation. */
	boost::function<bool(Device *device, RenderTile&)> acquire_tile;
	boost::function<void(void)> update_progress_sample;
	boost::function<void(RenderTile&)> update_tile_sample;
	boost::function<void(RenderTile&)> release_tile;
	boost::function<bool(void)> get_cancel;
	bool need_finish_queue;
	bool integrator_branched;

protected:
	/* Timestamp of the last progress update (see update_progress). */
	double last_update_time;
};
CCL_NAMESPACE_END
#endif /* __DEVICE_TASK_H__ */
| pyrochlore/cycles | src/device/device_task.h | C | apache-2.0 | 1,822 |
package com.flipkart.foxtrot.core.exception;
import com.google.common.collect.Maps;
import lombok.Getter;
import java.util.Map;
/**
* Created by rishabh.goyal on 19/12/15.
*/
/**
 * Raised when an attempt is made to create a table that already exists
 * (error code {@code TABLE_ALREADY_EXISTS}).
 */
@Getter
public class TableExistsException extends FoxtrotException {

    /** Name of the table that already exists. */
    private final String table;

    protected TableExistsException(String table) {
        super(ErrorCode.TABLE_ALREADY_EXISTS);
        this.table = table;
    }

    /**
     * Serializes the exception context for API responses; contains the
     * single key {@code "table"}.
     */
    @Override
    public Map<String, Object> toMap() {
        Map<String, Object> map = Maps.newHashMap();
        map.put("table", this.table);
        return map;
    }
}
| Flipkart/foxtrot | foxtrot-core/src/main/java/com/flipkart/foxtrot/core/exception/TableExistsException.java | Java | apache-2.0 | 590 |
# -*- coding: utf-8 -*-
import datetime
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB
from pns.app import app, db
class SerializationMixin():
    """Mixin adding a plain-dict serialization helper to SQLAlchemy models."""

    def to_dict(self, *exceptions, **extra_payload):
        """Build a dict representation of this row.

        :param exceptions: column names to leave out of the result
        :param extra_payload: extra key/value pairs merged into the result
        """
        result = {}
        for column in self.__table__.columns:
            if column.name in exceptions:
                continue
            result[column.name] = getattr(self, column.name)
        result.update(extra_payload)
        return result
# Many-to-many join table linking users to the channels they subscribe to.
subscriptions = db.Table('subscriptions',
                         db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
                         db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
                         UniqueConstraint('user_id', 'channel_id'))

# Many-to-many join table linking channels to the devices that receive
# their notifications.
channel_devices = db.Table('channel_devices',
                           db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
                           db.Column('device_id', db.Integer, db.ForeignKey('device.id'), nullable=False),
                           UniqueConstraint('channel_id', 'device_id'))
class User(db.Model, SerializationMixin):
    """user resource
    """
    id = db.Column(db.Integer, primary_key=True)
    # pns_id is a unique identifier for easy third-party integration (email, citizen id etc.)
    pns_id = db.Column(db.String(255), unique=True, nullable=False)
    # Channels this user subscribes to (many-to-many via `subscriptions`);
    # the backref exposes Channel.subscribers.
    subscriptions = db.relationship('Channel',
                                    secondary=subscriptions,
                                    lazy='dynamic',
                                    backref=db.backref('subscribers', lazy='dynamic'))
    # Devices registered by this user; deleted together with the user.
    devices = db.relationship('Device', backref='user', lazy='dynamic',
                              cascade='all, delete, delete-orphan')
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)

    def __repr__(self):
        return '<User %r>' % self.id
class Channel(db.Model, SerializationMixin):
    """channel resource
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True, nullable=False)
    description = db.Column(db.Text)
    # Devices that receive this channel's alerts (many-to-many via
    # `channel_devices`).
    devices = db.relationship('Device',
                              secondary=channel_devices,
                              lazy='dynamic',
                              backref=db.backref('channels', lazy='dynamic'))
    # Alerts published on this channel; deleted together with the channel.
    alerts = db.relationship('Alert', backref='channel', lazy='dynamic',
                             cascade='all, delete, delete-orphan')
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)

    def subscribe_user(self, user):
        """Subscribe `user` and all of their devices to this channel.

        Returns True on success; rolls back and returns False on any error.
        NOTE(review): appends devices without checking membership — the
        channel_devices UniqueConstraint presumably rejects duplicates on
        re-subscription; confirm callers guard against that.
        """
        try:
            self.subscribers.append(user)
            for device in user.devices.all():
                self.devices.append(device)
            db.session.add(self)
            db.session.commit()
        except Exception as ex:
            db.session.rollback()
            app.logger.exception(ex)
            return False
        return True

    def unsubscribe_user(self, user):
        """Remove `user` and all of their devices from this channel.

        Returns True on success; rolls back and returns False on any error
        (including when the user was not subscribed — remove() raises).
        """
        try:
            self.subscribers.remove(user)
            for device in user.devices.all():
                self.devices.remove(device)
            db.session.commit()
        except Exception as ex:
            db.session.rollback()
            app.logger.exception(ex)
            return False
        return True

    def __repr__(self):
        return '<Channel %r>' % self.id
class Alert(db.Model, SerializationMixin):
    """alert resource
    """
    id = db.Column(db.Integer, primary_key=True)
    channel_id = db.Column(db.Integer, db.ForeignKey('channel.id'), index=True)
    # Arbitrary notification content, stored as binary JSON.
    payload = db.Column(JSONB, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)

    def __repr__(self):
        return '<Alert %r>' % self.id
class Device(db.Model, SerializationMixin):
    """device resource
    """
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
    # Push platform identifier — presumably values like 'gcm'/'apns';
    # confirm against callers.
    platform = db.Column(db.String(10), index=True, nullable=False)
    # Platform-assigned push token; unique per device.
    platform_id = db.Column(db.Text, unique=True, nullable=False)
    mobile_app_id = db.Column(db.Text, index=True)
    mobile_app_ver = db.Column(db.Integer, index=True)
    # When True, the device is registered but receives no notifications.
    mute = db.Column(db.Boolean, default=False, nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)

    def subscribe_to_channels(self):
        """subscribe new device to existing channels

        Attaches this device to every channel its owner already subscribes
        to. Returns True on success; rolls back and returns False on error.
        """
        try:
            for channel in self.user.subscriptions.all():
                channel.devices.append(self)
            db.session.add(self.user)
            db.session.commit()
        except Exception as ex:
            db.session.rollback()
            app.logger.exception(ex)
            return False
        return True

    def __repr__(self):
        return '<Device %r>' % self.id
if __name__ == '__main__':
    # Create all tables when this module is run directly.
    db.create_all()
| Turksat/pns | pns/models.py | Python | apache-2.0 | 5,438 |
// Setup Canvas
var canvas = document.body.appendChild( document.createElement( 'canvas' ) );
// var canvas = document.body.appendChild( document.createElement( 'canvas' ) );
// canvas.style.zIndex = 1;
// Get WebGL Context — `render` is passed as the per-frame callback.
var gl = require('gl-context')( canvas, { preserveDrawingBuffer: true }, render );
// Import Webgl & Math Libraries
var glGeometry = require('gl-geometry');
var glShader = require('gl-shader');
var glslify = require('glslify');
var clear = require('gl-clear')( { color: [ 0.0, 0.0, 0.0, 1.0 ] } );
var mat4 = require('gl-matrix').mat4;
var mat3 = require('gl-matrix').mat3;
var vec3 = require('gl-matrix').vec3;
var quat = require('gl-matrix').quat;
// Import Web Helper Libraries
var fit = require('canvas-fit');
var isMobile = require('is-mobile');
// Import YCAM GRP Libraries
// Camera: placed behind the scene, tilted slightly downward.
var cam = require('nsc')( canvas, {
  position: [ 0.0, 2.5, -30.0 ],
  rotation: mat4.fromQuat( mat4.create(), quat.rotationTo( quat.create(), [ 0, 0, 1 ], [ 0, -0.25, 0 ] ) ),
} );
var cga = require('cga');
var lgp = require('lgp');
var mda = require('mda');
// Half-edge mesh type and operators from mda.
var Mesh = mda.Mesh;
var FaceVertices = mda.FaceVertices;
var InsertVertexOperator = mda.InsertVertexOperator;
var InsertEdgeOperator = mda.InsertEdgeOperator;
var DeleteEdgeOperator = mda.DeleteEdgeOperator;
var ExtrudeOperator = mda.ExtrudeOperator;
var PipeOperator = mda.PipeOperator;
var DuplicateOperator = mda.DuplicateOperator;
var CombineOperator = mda.CombineOperator;
var ScaleOperator = mda.ScaleOperator;
var MoveOperator = mda.MoveOperator;
var InvertOperator = mda.InvertOperator;
var MeshIntegrity = mda.MeshIntegrity;
var TriangulateOperator = mda.TriangulateOperator;
var WireframeOperator = mda.WireframeOperator;
var LoopSmoothOperator = mda.LoopOperator;
var CatmullClarkOperator = mda.CatmullClarkOperator;
var QuadSubdivideOperator = mda.QuadSubdivideOperator;
var MeshCentroid = mda.MeshCentroid;
var vertexNormals = require('guf').vertexNormals;
var calculateNormal = require('guf').calculateNormal;
// 2D outline of the YCAM logo (array of [x, y] positions).
var ycam = require('ycam');
//Interaction
var keyPressed = require('key-pressed');
// Working meshes and display geometries, (re)filled by buildGeometry().
var omesh, meshOut, meshOutTri;
var positions, cells;
var geoOut;
var geoWireOut, geoWire;
var geoPointsOut, geoPoints;
var renderSolid = true;
// RGBA draw colors for the solid, point, and wireframe passes.
var color = [ 1.0, 1.0, 1.0, 1.0 ];
var colorPoints = [ 1.0, 1.0, 1.0, 0.125 ];
var colorWire = [ 1.0, 1.0, 1.0, 0.25 ];
var model = mat4.create();
// Lift the 2D outline to 3D and build a two-faced cell list: the same
// vertex ring once forward (front face) and once reversed (back face).
var ypositions = [];
var ycells = [];
var tmp = [];
var tmp2 = [];
var ylen = ycam.positions.length;
for( var i = 0; i < ylen; i++ ) {
  ypositions.push( [ ycam.positions[ i ][ 0 ], ycam.positions[ i ][ 1 ], 0.0 ] );
  tmp.push( i );
  tmp2.push( ylen - 1 - i );
}
ycells.push( tmp );
ycells.push( tmp2 );
// Base outline mesh, scaled up and pushed back; buildGeometry() copies it.
omesh = new Mesh();
omesh.setPositions( ypositions );
omesh.setCells( ycells );
omesh.process();
ScaleOperator( omesh, 20.0 );
MoveOperator( omesh, [ 0.0, 0.0, 5.0 ] );
/**
 * Rebuilds all render geometry from the base outline mesh: duplicates
 * `omesh`, extrudes face 0 several times for depth, triangulates, carves a
 * wireframe shell, and refreshes the solid/wire/point geometries.
 */
function buildGeometry() {
  meshOut = DuplicateOperator( omesh );
  // Extrude the first face repeatedly to give the flat outline depth.
  for( var i = 0; i < 5; i++ ) {
    ExtrudeOperator( meshOut, 0, 2.0, 0.0 );
    // ExtrudeOperator( meshOut, 0, 0.0, 1.0 );
    // ExtrudeOperator( meshOut, 1, 10.0, 0.0 );
    // ExtrudeOperator( meshOut, 1, 0.0, 1.0 );
  }
  TriangulateOperator( meshOut );
  WireframeOperator( meshOut, 0.5 );
  // CatmullClarkOperator( meshOut );
  // CatmullClarkOperator( meshOut );
  // CatmullClarkOperator( meshOut );
  // CatmullClarkOperator( meshOut );
  // Wireframe op introduces new faces; triangulate again before upload.
  TriangulateOperator( meshOut );
  // LoopSmoothOperator( meshOut );
  // LoopSmoothOperator( meshOut );
  // LoopSmoothOperator( meshOut );
  // LoopSmoothOperator( meshOut );
  // MeshIntegrity( meshOut );
  geoWireOut = createGeoWire( meshOut.getPositions(), meshOut.getCells() );
  // NOTE(review): positionsOut/cellsOut are never declared — they become
  // implicit globals (file is not in strict mode). Confirm intended.
  positions = positionsOut = meshOut.getPositions();
  cells = cellsOut = meshOut.getCells();
  geoOut = createGeo( positionsOut, cellsOut );
  geoPointsOut = createGeoPoints( positionsOut );
}
/**
 * Triangulates and Loop-smooths the current mesh, then rebuilds geometry.
 * NOTE(review): buildGeometry() immediately re-duplicates `omesh` into
 * meshOut, which discards the smoothing applied here — confirm intended.
 */
function smooth() {
  // CatmullClarkOperator( meshOut );
  TriangulateOperator( meshOut );
  LoopSmoothOperator( meshOut );
  buildGeometry();
}
/**
 * Builds a flat-shaded triangle geometry: vertices are expanded per face and
 * all three vertices of a face share that face's normal.
 */
function createGeo( positions, cells ) {
  var expanded = [];
  var normals = [];
  cells.forEach( function( cell ) {
    var v0 = positions[ cell[ 0 ] ];
    var v1 = positions[ cell[ 1 ] ];
    var v2 = positions[ cell[ 2 ] ];
    var normal = calculateNormal( v0, v1, v2 );
    expanded.push( v0, v1, v2 );
    normals.push( normal, normal, normal );
  } );
  var geo = glGeometry( gl );
  geo.attr( 'aPosition', expanded );
  geo.attr( 'aNormal', normals );
  return geo;
}
/**
 * Builds a line-list geometry tracing every edge of every cell.
 */
function createGeoWire( positions, cells ) {
  var edges = [];
  for( var c = 0; c < cells.length; c++ ) {
    var cell = cells[ c ];
    var count = cell.length;
    for( var v = 0; v < count; v++ ) {
      // Connect each vertex to its successor, wrapping at the end.
      edges.push( cell[ v ], cell[ ( v + 1 ) % count ] );
    }
  }
  var wire = glGeometry( gl );
  wire.attr( 'aPosition', positions );
  wire.faces( edges, { size: 2 } );
  return wire;
}
/**
 * Builds a point-cloud geometry from raw vertex positions.
 */
function createGeoPoints( positions ) {
  var cloud = glGeometry( gl );
  cloud.attr( 'aPosition', positions );
  return cloud;
}
// Set the canvas size to fill the window and its pixel density
var mobile = isMobile( navigator.userAgent );
// Native pixel ratio on desktop; forced to 1 on mobile.
var dpr = mobile ? 1 : ( window.devicePixelRatio || 1 );
window.addEventListener( 'resize', fit( canvas, null, dpr ), false );
// Setup Matricies
var projection = mat4.create();
var normalm4 = mat4.create(); // 4x4 scratch for the inverse-transpose view
var normalm3 = mat3.create(); // 3x3 normal matrix handed to the shader
var view = mat4.create();
// Setup Shaders
var vertexShader = glslify( './shaders/shader.vert' );
var fragmentShader = glslify( './shaders/shader.frag' );
var shader = glShader( gl, vertexShader, fragmentShader );
var vertexWireframeShader = glslify( './shaders/shaderDebug.vert' );
var fragmentWireframeShader = glslify( './shaders/shaderDebug.frag' );
var shaderDebug = glShader( gl, vertexWireframeShader, fragmentWireframeShader );
// Setup Sketch Variables
var height;
var width;
var frame = Math.PI;
/**
 * Per-frame update: rebuilds geometry, refreshes the projection/view/normal
 * matrices from the canvas size and camera, and advances the frame counter.
 */
function update() {
  buildGeometry();
  // set projection
  width = gl.drawingBufferWidth;
  height = gl.drawingBufferHeight;
  var aspectRatio = gl.drawingBufferWidth / gl.drawingBufferHeight;
  var fieldOfView = Math.PI / 3.0;
  var near = 0.01;
  var far = 1000.0;
  mat4.perspective( projection, fieldOfView, aspectRatio, near, far );
  // model = mat4.fromRotation( model, frame, [ 0, 1, 0 ] );
  // get view from camera
  cam.view( view );
  cam.update();
  // Normal matrix = upper 3x3 of the inverse-transpose of the view.
  mat4.copy( normalm4, view );
  mat4.invert( normalm4, normalm4 );
  mat4.transpose( normalm4, normalm4 );
  mat3.fromMat4( normalm3, normalm4 );
  frame += 0.05;
}
/**
 * Frame callback: updates state, clears, then draws the solid pass
 * (when enabled), the wireframe pass, and the point pass.
 */
function render() {
  update();
  gl.viewport( 0, 0, width, height );
  clear( gl );
  if( renderSolid ) {
    gl.enable( gl.DEPTH_TEST );
    drawGeo( geoOut );
  }
  else {
    // Wireframe-only mode: disable depth so all edges remain visible.
    gl.disable( gl.DEPTH_TEST );
  }
  drawGeoWireframe( geoWireOut );
  drawGeoPoints( geoPointsOut );
}
/**
 * Draws the solid triangle geometry with the lit shader using standard
 * alpha blending. No-op when the geometry has not been built yet.
 * @param {Object} geo gl-geometry instance with aPosition/aNormal attributes.
 */
function drawGeo( geo ) {
  if( geo ) {
    gl.enable( gl.BLEND );
    gl.blendFunc( gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA );
    geo.bind( shader );
    // Fix: the original tested `isMobile`, which is the imported function
    // and therefore always truthy, so the desktop branch was dead code.
    // The device flag computed at startup is `mobile`.
    if( mobile ) { shader.uniforms.dpr = dpr * 2.0; } else { shader.uniforms.dpr = dpr; }
    shader.uniforms.uPointSize = 1.0;
    shader.uniforms.uProjection = projection;
    shader.uniforms.uView = view;
    shader.uniforms.uNormalMatrix = normalm3;
    shader.uniforms.uModel = model;
    shader.uniforms.uColor = color;
    geo.draw( gl.TRIANGLES );
    geo.unbind();
  }
}
/**
 * Draws the vertex point cloud with the debug shader using additive
 * blending. No-op when the geometry has not been built yet.
 * @param {Object} geoPoints gl-geometry instance with an aPosition attribute.
 */
function drawGeoPoints( geoPoints ) {
  if( geoPoints ) {
    gl.enable( gl.BLEND );
    gl.blendFunc( gl.SRC_ALPHA, gl.ONE );
    geoPoints.bind( shaderDebug );
    // Fix: the original tested `isMobile` (the imported function, always
    // truthy) instead of the computed `mobile` flag, so the desktop
    // branch never ran.
    if( mobile ) { shaderDebug.uniforms.dpr = dpr * 1.0; } else { shaderDebug.uniforms.dpr = 1.0; }
    shaderDebug.uniforms.uPointSize = 0.10;
    shaderDebug.uniforms.uProjection = projection;
    shaderDebug.uniforms.uView = view;
    shaderDebug.uniforms.uModel = model;
    shaderDebug.uniforms.uColor = colorPoints;
    geoPoints.draw( gl.POINTS );
    geoPoints.unbind();
  }
}
/**
 * Draws the edge wireframe with the debug shader using additive blending.
 * No-op when the geometry has not been built yet.
 * @param {Object} geoWire gl-geometry instance with line-pair faces.
 */
function drawGeoWireframe( geoWire ) {
  if( geoWire ) {
    gl.enable( gl.BLEND );
    gl.blendFunc( gl.SRC_ALPHA, gl.ONE );
    gl.lineWidth( 2.0 );
    geoWire.bind( shaderDebug );
    // Fix: the original tested `isMobile` (the imported function, always
    // truthy) instead of the computed `mobile` flag, so the desktop
    // branch never ran.
    if( mobile ) { shaderDebug.uniforms.dpr = dpr * 2.0; } else { shaderDebug.uniforms.dpr = dpr; }
    shaderDebug.uniforms.uPointSize = 1.0;
    shaderDebug.uniforms.uProjection = projection;
    shaderDebug.uniforms.uView = view;
    shaderDebug.uniforms.uModel = model;
    shaderDebug.uniforms.uColor = colorWire;
    geoWire.draw( gl.LINES );
    geoWire.unbind();
  }
}
/**
 * Keyboard controls:
 *   S — save the canvas as a PNG
 *   E — export the mesh as STL and OBJ
 *   W — toggle the solid render pass
 *   C — smooth the mesh
 *   R — rebuild the geometry from the base outline
 */
window.addEventListener( 'keydown', function( event ) {
  if( keyPressed( 'S' ) ) {
    lgp.imageWriter( 'ycam.png', canvas.toDataURL('image/png') );
    return;
  }
  if( keyPressed( 'E' ) ) {
    lgp.fileWriter( "ycam.stl", lgp.stlSerializer( { positions: positions, cells: cells } ) );
    lgp.fileWriter( "ycam.obj", lgp.objSerializer( { positions: positions, cells: cells } ) );
    return;
  }
  if( keyPressed( 'W' ) ) {
    renderSolid = !renderSolid;
    return;
  }
  if( keyPressed( 'C' ) ) {
    smooth();
    return;
  }
  if( keyPressed( 'R' ) ) {
    // Fix: the original called setupGeometry(), which is not defined
    // anywhere in this file and would throw a ReferenceError; the rebuild
    // entry point is buildGeometry().
    buildGeometry();
  }
}, false );
| rezaali/webgl-sketches | ycam/ycam.js | JavaScript | apache-2.0 | 9,099 |
package fi.bitrite.android.ws.persistence.converters;
import fi.bitrite.android.ws.model.Feedback;
/**
 * Maps {@link Feedback.Rating} enum values to stable integer codes and back.
 * NOTE(review): the codes are presumably persisted (converters package) —
 * confirm before renumbering.
 */
public final class RatingConverter {

    private static final int CODE_POSITIVE = 0;
    private static final int CODE_NEUTRAL = 1;
    private static final int CODE_NEGATIVE = 2;

    /**
     * Converts a rating to its integer code; null maps to null.
     *
     * @throws IllegalArgumentException on an unrecognized rating value
     */
    public static Integer ratingToInt(Feedback.Rating rating) {
        if (rating == null) {
            return null;
        }
        switch (rating) {
            case Positive: return CODE_POSITIVE;
            case Neutral: return CODE_NEUTRAL;
            case Negative: return CODE_NEGATIVE;
            default:
                throw new IllegalArgumentException("Invalid rating value: " + rating);
        }
    }

    /**
     * Converts an integer code back to its rating; null maps to null.
     *
     * @throws IllegalArgumentException on an unrecognized code
     */
    public static Feedback.Rating intToRating(Integer rating) {
        if (rating == null) {
            return null;
        }
        switch (rating) {
            case CODE_POSITIVE: return Feedback.Rating.Positive;
            case CODE_NEUTRAL: return Feedback.Rating.Neutral;
            case CODE_NEGATIVE: return Feedback.Rating.Negative;
            default:
                throw new IllegalArgumentException("Invalid rating value: " + rating);
        }
    }
}
| CyrilWendl/wsandroid | app/src/main/java/fi/bitrite/android/ws/persistence/converters/RatingConverter.java | Java | apache-2.0 | 1,185 |
# -*- coding: UTF-8; indent-tabs-mode:nil; tab-width:4 -*-
# This file is part of DITA DTD Generator.
#
# Copyright 2009 Jarno Elovirta <http://www.elovirta.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ditagen.dita
from ditagen.dtdgen import Particle as Particle
from ditagen.dtdgen import Choice as Choice
from ditagen.dtdgen import Name as Name
from ditagen.dtdgen import Seq as Seq
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import Param as Param
from ditagen.dtdgen import ParameterEntity as ParameterEntity
# Elements
#####################################################################

# Shorthand occurrence markers used in the content models below.
OPTIONAL = Particle.Occurrences.OPTIONAL
ZERO_OR_MORE = Particle.Occurrences.ZERO_OR_MORE
class TopicElement(ditagen.dita.DitaElement):
    """Topic element.

    Generic DITA topic: title, optional alternate titles, shortdesc or
    abstract, prolog, body, related links, then nested topics.
    """
    name = u"topic"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("body"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class ConceptElement(ditagen.dita.DitaElement):
    """Concept element.

    Topic specialization whose body content is carried by <conbody>.
    """
    name = u"concept"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic concept/concept "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("conbody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class TaskElement(ditagen.dita.DitaElement):
    """Task element.

    Topic specialization whose body content is carried by <taskbody>.
    """
    name = u"task"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic task/task "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("taskbody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class ReferenceElement(ditagen.dita.DitaElement):
    """Reference element.

    Topic specialization whose body content is carried by <refbody>.
    """
    name = u"reference"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic reference/reference "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("refbody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class GlossentryElement(ditagen.dita.DitaElement):
    """Glossary entry element.

    Concept specialization; uses <glossterm>/<glossdef> in place of the
    usual title/shortdesc pair.
    """
    name = u"glossentry"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic concept/concept glossentry/glossentry "
    model = Seq([
        Choice(ParameterEntity("glossterm")),
        Choice(ParameterEntity("glossdef"), OPTIONAL),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("glossBody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class GlossgroupElement(ditagen.dita.DitaElement):
    """Glossary group element.

    Concept specialization that only carries a title and prolog plus
    nested entries; it has no body of its own.
    """
    name = u"glossgroup"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic concept/concept glossgroup/glossgroup "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class LearningBaseElement(ditagen.dita.DitaElement):
    """Learning Base element.

    Base topic for the learning specializations; body content is carried
    by <learningBasebody>.
    """
    name = u"learningBase"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic learningBase/learningBase "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("learningBasebody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ]
class LearningAssessmentElement(ditagen.dita.DitaElement):
    """Learning Assessment element.

    learningBase specialization; body content is carried by
    <learningAssessmentbody>.
    """
    name = u"learningAssessment"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic learningBase/learningBase learningAssessment/learningAssessment "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("learningAssessmentbody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED")
        ]
class LearningOverviewElement(ditagen.dita.DitaElement):
    """Learning Overview element.

    learningBase specialization; body content is carried by
    <learningOverviewbody>.
    """
    name = u"learningOverview"
    # DITA @class value recording the specialization ancestry.
    cls = u"- topic/topic learningBase/learningBase learningOverview/learningOverview "
    model = Seq([
        Choice(ParameterEntity("title")),
        Choice(ParameterEntity("titlealts"), OPTIONAL),
        Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
        Choice(ParameterEntity("prolog"), OPTIONAL),
        Choice(ParameterEntity("learningOverviewbody"), OPTIONAL),
        Choice(ParameterEntity("related-links"), OPTIONAL),
        Param("nested")
        ])
    attrs = [
        Attribute("id", "ID" ,"#REQUIRED"),
        ParameterEntity("conref-atts"),
        ParameterEntity("select-atts"),
        ParameterEntity("localization-atts"),
        Attribute("outputclass", "CDATA", "#IMPLIED")
        ]
class LearningPlanElement(ditagen.dita.DitaElement):
"""Learning Plan element."""
name = u"learningPlan"
cls = u"- topic/topic learningBase/learningBase learningPlan/learningPlan "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningPlanbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningSummaryElement(ditagen.dita.DitaElement):
"""Learning Summary element."""
name = u"learningSummary"
cls = u"- topic/topic learningBase/learningBase learningSummary/learningSummary "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningSummarybody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningContentElement(ditagen.dita.DitaElement):
"""Learning Content element."""
name = u"learningContent"
cls = u"- topic/topic learningBase/learningBase learningContent/learningContent "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningContentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class SubjectSchemeElement(ditagen.dita.DitaElement):
    """Subject scheme element.

    Root element of the subjectScheme map specialization.
    """
    name = u"subjectScheme"
    cls = u"- map/map subjectScheme/subjectScheme "
    # Optional title and metadata followed by any mix of scheme-building
    # elements, repeated zero or more times.
    model = Seq([
        Choice(ParameterEntity("title"), OPTIONAL),
        Choice(ParameterEntity("topicmeta"), OPTIONAL),
        Choice([
            ParameterEntity("anchor"),
            ParameterEntity("data.elements.incl"),
            ParameterEntity("enumerationdef"),
            ParameterEntity("hasInstance"),
            ParameterEntity("hasKind"),
            ParameterEntity("hasNarrower"),
            ParameterEntity("hasPart"),
            ParameterEntity("hasRelated"),
            ParameterEntity("navref"),
            ParameterEntity("relatedSubjects"),
            ParameterEntity("reltable"),
            ParameterEntity("schemeref"),
            ParameterEntity("subjectdef"),
            ParameterEntity("subjectHead"),
            ParameterEntity("subjectRelTable"),
            ParameterEntity("topicref")
            ], ZERO_OR_MORE)
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        Attribute("anchorref", "CDATA", "#IMPLIED"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ParameterEntity("localization-atts"),
        ParameterEntity("topicref-atts"),
        ParameterEntity("select-atts")
        ]
class MapElement(ditagen.dita.DitaElement):
    """Map element.

    Root element of the base DITA map type.
    """
    name = u"map"
    cls = u"- map/map "
    model = Seq([
        Choice(ParameterEntity("title"), OPTIONAL),
        Choice(ParameterEntity("topicmeta"), OPTIONAL),
        Choice([
            ParameterEntity("anchor"),
            ParameterEntity("data.elements.incl"),
            ParameterEntity("navref"),
            ParameterEntity("reltable"),
            ParameterEntity("topicref")
            ], ZERO_OR_MORE)
        ])
    attrs = [
        # Deprecated title attribute retained alongside the title element.
        Attribute("title", "CDATA", "#IMPLIED"),
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        Attribute("anchorref", "CDATA", "#IMPLIED"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ParameterEntity("localization-atts"),
        ParameterEntity("topicref-atts"),
        ParameterEntity("select-atts")
        ]
class BookMapElement(ditagen.dita.DitaElement):
    """BookMap element.

    Root element of the bookmap map specialization.
    """
    name = u"bookmap"
    cls = u"- map/map bookmap/bookmap "
    # Either a plain title or a booktitle, then book metadata, front matter,
    # chapters/parts, appendices, back matter and relationship tables.
    model = Seq([
        Choice([Choice(ParameterEntity("title")), Choice(ParameterEntity("booktitle"))], OPTIONAL),
        Choice(ParameterEntity("bookmeta"), OPTIONAL),
        Choice(ParameterEntity("frontmatter"), OPTIONAL),
        Choice(ParameterEntity("chapter"), ZERO_OR_MORE),
        Choice(ParameterEntity("part"), ZERO_OR_MORE),
        Choice([Choice(ParameterEntity("appendices"), OPTIONAL), Choice(ParameterEntity("appendix"), ZERO_OR_MORE)]),
        Choice(ParameterEntity("backmatter"), OPTIONAL),
        Choice(ParameterEntity("reltable"), ZERO_OR_MORE)
        ])
    attrs = [
        Attribute("id", "ID", "#REQUIRED"),
        ParameterEntity("conref-atts"),
        Attribute("anchorref", "CDATA", "#IMPLIED"),
        Attribute("outputclass", "CDATA", "#IMPLIED"),
        ParameterEntity("localization-atts"),
        ParameterEntity("topicref-atts"),
        ParameterEntity("select-atts")
        ]
# Topic types
#####################################################################
class TopicType(ditagen.dita.Type):
    """Topic topic type.

    Base type for all topic specializations; has no parent type.
    """
    id = u"topic"
    file = u"base/dtd/topic" # the .dtd file is at technicalContent
    # NOTE(review): no pi_entity here, unlike the other types -- confirm the
    # base topic deliberately has no ENTITIES public identifier.
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Topic//EN"
    title = u"Topic"
    parent = None
    root = TopicElement()
class ConceptType(TopicType):
    """Concept topic type."""
    id = u"concept"
    file = u"technicalContent/dtd/concept"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Concept//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Concept//EN"
    title = u"Concept"
    parent = TopicType()
    root = ConceptElement()
class TaskType(TopicType):
    """Task topic type."""
    id = u"task"
    file = u"technicalContent/dtd/task"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task//EN"
    title = u"Task"
    parent = TopicType()
    root = TaskElement()
    def __init__(self):
        """Initialize the task type; required domains intentionally disabled."""
        super(TaskType, self).__init__()
        #self.required_domains = [StrictTaskbodyConstraints]
class GeneralTaskType(ditagen.dita.ShellType):
    """General Task topic type: a shell over TaskType."""
    def __init__(self):
        super(GeneralTaskType, self).__init__(u"generalTask", u"General Task", TaskType())
        #self.parent.required_domains = []
class ReferenceType(TopicType):
    """Reference topic type."""
    id = u"reference"
    file = u"technicalContent/dtd/reference"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Reference//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Reference//EN"
    title = u"Reference"
    parent = TopicType()
    root = ReferenceElement()
class MapType(ditagen.dita.Type):
    """Map topic type.

    Base type for all map specializations; has no parent type.
    """
    id = u"map"
    file = u"base/dtd/map" # the .dtd file is at technicalContent
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map//EN"
    title = u"Map"
    parent = None
    root = MapElement()
class BookMapType(MapType):
    """BookMap topic type."""
    id = u"bookmap"
    file = u"bookmap/dtd/bookmap"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 BookMap//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 BookMap//EN"
    title = u"BookMap"
    parent = MapType()
    root = BookMapElement()
class GlossentryType(ConceptType):
    """Glossary entry topic type."""
    id = u"glossentry"
    file = u"technicalContent/dtd/glossentry"
    # NOTE(review): these public identifiers carry no "1.2" version segment,
    # unlike the sibling types -- confirm this matches the OASIS catalog.
    pi_entity = u"-//OASIS//ENTITIES DITA Glossary Entry//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA Glossary Entry//EN"
    title = u"Glossary Entry"
    parent = ConceptType()
    root = GlossentryElement()
class GlossgroupType(ConceptType):
    """Glossary group topic type."""
    id = u"glossgroup"
    file = u"technicalContent/dtd/glossgroup"
    pi_entity = u"-//OASIS//ENTITIES DITA Glossary Group//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA Glossary Group//EN"
    title = u"Glossary Group"
    parent = ConceptType()
    root = GlossgroupElement()
class MachineryTaskType(ditagen.dita.ShellType):
    """Machinery Task topic type: a shell over TaskType."""
    def __init__(self):
        super(MachineryTaskType, self).__init__(u"machineryTask", u"Machinery Task", TaskType(), file=u"machineryIndustry/dtd/machineryTask")
        #self.parent.required_domains = [MachineryTaskbodyConstraints]
class LearningBaseType(TopicType):
    """Learning Base topic type.

    Common ancestor of all learning topic specializations.
    """
    id = u"learningBase"
    file = u"learning/dtd/learningBase"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Base//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Base//EN"
    title = u"Learning Base"
    parent = TopicType()
    root = LearningBaseElement()
class LearningAssessmentType(LearningBaseType):
    """Learning Assessment topic type."""
    id = u"learningAssessment"
    file = u"learning/dtd/learningAssessment"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Assessment//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Assessment//EN"
    title = u"Learning Assessment"
    parent = LearningBaseType()
    root = LearningAssessmentElement()
class LearningOverviewType(LearningBaseType):
    """Learning Overview topic type."""
    id = u"learningOverview"
    file = u"learning/dtd/learningOverview"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Overview//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Overview//EN"
    title = u"Learning Overview"
    parent = LearningBaseType()
    root = LearningOverviewElement()
class LearningPlanType(LearningBaseType):
    """Learning Plan topic type."""
    id = u"learningPlan"
    file = u"learning/dtd/learningPlan"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Plan//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Plan//EN"
    title = u"Learning Plan"
    parent = LearningBaseType()
    root = LearningPlanElement()
class LearningSummaryType(LearningBaseType):
    """Learning Summary topic type."""
    id = u"learningSummary"
    file = u"learning/dtd/learningSummary"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Summary//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Summary//EN"
    title = u"Learning Summary"
    parent = LearningBaseType()
    root = LearningSummaryElement()
class LearningContentType(LearningBaseType):
    """Learning Content topic type."""
    id = u"learningContent"
    file = u"learning/dtd/learningContent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Content//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Content//EN"
    title = u"Learning Content"
    parent = LearningBaseType()
    root = LearningContentElement()
    def __init__(self):
        """Initialize; learning content requires several companion types."""
        super(LearningContentType, self).__init__()
        self.required_types = [TaskType, ConceptType, ReferenceType, LearningSummaryType, LearningAssessmentType]
class LearningMapType(ditagen.dita.ShellType):
    """Learning Map topic type: a shell over MapType."""
    def __init__(self):
        super(LearningMapType, self).__init__(u"learningMap", u"Learning Map", MapType(), file=u"learning/dtd/learningMap")
        #self.parent.required_domains = []
class LearningBookMapType(ditagen.dita.ShellType):
    """Learning BookMap topic type: a shell over BookMapType."""
    def __init__(self):
        super(LearningBookMapType, self).__init__(u"learningBookmap", u"Learning BookMap", BookMapType(), file=u"learning/dtd/learningBookmap")
        #self.parent.required_domains = []
class ClassificationMapType(ditagen.dita.ShellType):
    """Classification Map topic type: a shell over MapType."""
    def __init__(self):
        super(ClassificationMapType, self).__init__(u"classifyMap", u"Classification Map", MapType(), file=u"subjectScheme/dtd/classifyMap")
        #self.parent.required_domains = []
class SubjectSchemeType(MapType):
    """Subject Scheme Map topic type."""
    id = u"subjectScheme"
    file = u"subjectScheme/dtd/subjectScheme"
    # NOTE(review): no pi_entity/pi_module here -- confirm the subjectScheme
    # shell is resolved by system identifier only.
    title = u"Subject Scheme Map"
    parent = MapType()
    root = SubjectSchemeElement()
# Domains
#####################################################################
class Constraints(ditagen.dita.DomainBase):
    """Base class for constraints.

    Constraint modules restrict the content model of an element (identified
    by ``att_id``) for the topic types listed in ``parent``.
    """
    # file_suffix = u""
    pi_suffix = u" Constraint"
    elements = []
    # att_id names the element whose model the constraint restricts.
    att_id = None
    def get_file_name(self, extension):
        """Return the constraint module file name with the given extension."""
        return self.file + self.file_suffix + "." + extension
class AttributeDomain(ditagen.dita.DomainBase):
    """Base class for attribute domains."""
    # file_suffix = u"Att"
    pi_suffix = u" Attribute Domain"
    #elements = []
    # attributes lists the attribute names the domain contributes.
    attributes = []
    def get_file_name(self, extension):
        """Return the attribute-domain module file name with the given extension."""
        return self.file + self.file_suffix + "." + extension
# Domains
# Concrete domain modules.  Each lists the DTD module/entity system
# identifiers, the OASIS public identifiers, the base elements it extends,
# and the topic/map types it can be plugged into.
class UiDomain(ditagen.dita.Domain):
    """User interface domain."""
    id = u"ui-d"
    si_module = u"technicalContent/dtd/uiDomain.mod"
    si_entity = u"technicalContent/dtd/uiDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 User Interface Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 User Interface Domain//EN"
    title = u"User Interface"
    elements = [u"pre", u"keyword", u"ph"]
    parent = [TopicType]
class HiDomain(ditagen.dita.Domain):
    """Highlight domain."""
    id = u"hi-d"
    si_module = u"base/dtd/highlightDomain.mod"
    si_entity = u"base/dtd/highlightDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Highlight Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Highlight Domain//EN"
    title = u"Highlight"
    elements = [u"ph"]
    parent = [TopicType]
class PrDomain(ditagen.dita.Domain):
    """Programming domain."""
    id = u"pr-d"
    si_module = u"technicalContent/dtd/programmingDomain.mod"
    si_entity = u"technicalContent/dtd/programmingDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Programming Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Programming Domain//EN"
    title = u"Programming"
    elements = [u"pre", u"keyword", u"ph", u"fig", u"dl"]
    parent = [TopicType]
class SwDomain(ditagen.dita.Domain):
    """Software development domain."""
    id = u"sw-d"
    si_module = u"technicalContent/dtd/softwareDomain.mod"
    si_entity = u"technicalContent/dtd/softwareDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Software Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Software Domain//EN"
    title = u"Software"
    elements = [u"pre", u"keyword", u"ph"]
    parent = [TopicType]
class UtDomain(ditagen.dita.Domain):
    """Utilities domain."""
    id = u"ut-d"
    si_module = u"base/dtd/utilitiesDomain.mod"
    si_entity = u"base/dtd/utilitiesDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Utilities Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Utilities Domain//EN"
    title = u"Utilities"
    elements = [u"fig"]
    parent = [TopicType]
class IndexingDomain(ditagen.dita.Domain):
    """Indexing domain."""
    id = u"indexing-d"
    si_module = u"base/dtd/indexingDomain.mod"
    si_entity = u"base/dtd/indexingDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Indexing Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Indexing Domain//EN"
    title = u"Indexing"
    elements = [u"index-base"]
    parent = [TopicType, MapType]
class LearningDomain(ditagen.dita.Domain):
    """Learning domain."""
    id = u"learning-d"
    si_module = u"learning/dtd/learningDomain.mod"
    si_entity = u"learning/dtd/learningDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Domain//EN"
    title = u"Learning"
    elements = [u"note", u"fig"]
    # XXX: This builds on the utilities domain (see required_domains below).
    parent = [TopicType]
    required_domains = [UtDomain]
class LearningMetaDomain(ditagen.dita.Domain):
    """Learning metadata domain."""
    id = u"learningmeta-d"
    si_module = u"learning/dtd/learningMetadataDomain.mod"
    si_entity = u"learning/dtd/learningMetadataDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Metadata Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Metadata Domain//EN"
    title = u"Learning Metadata"
    elements = [u"metadata"]
    parent = [TopicType]
class LearningMapDomain(ditagen.dita.Domain):
    """Learning map domain."""
    id = u"learningmap-d"
    si_module = u"learning/dtd/learningMapDomain.mod"
    si_entity = u"learning/dtd/learningMapDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Map Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Map Domain//EN"
    title = u"Learning Map"
    elements = [u"topicref"]
    parent = [MapType]
class TaskRequirementsDomain(ditagen.dita.Domain):
    """Task requirements domain."""
    id = u"taskreq-d"
    si_module = u"technicalContent/dtd/taskreqDomain.mod"
    si_entity = u"technicalContent/dtd/taskreqDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task Requirements Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task Requirements Domain//EN"
    # NOTE(review): title does not match the class/id ("taskreq") -- confirm
    # "Machine Industry Task" is the intended display title.
    title = u"Machine Industry Task"
    elements = [u"prereq", u"postreq"]
    parent = [TaskType]
class HazardStatementDomain(ditagen.dita.Domain):
    """Hazard statement domain."""
    id = u"hazard-d"
    si_module = u"base/dtd/hazardstatementDomain.mod"
    si_entity = u"base/dtd/hazardstatementDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Hazard Statement Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Hazard Statement Domain//EN"
    title = u"Hazard Statement"
    elements = [u"note"]
    parent = [TopicType]
class MapGroupDomain(ditagen.dita.Domain):
    """Map group domain."""
    id = u"mapgroup-d"
    si_module = u"base/dtd/mapGroup.mod"
    si_entity = u"base/dtd/mapGroup.ent" # This is an exception to DITA's naming scheme
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map Group Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map Group Domain//EN"
    title = u"Map Group"
    elements = [u"topicref"]
    parent = [MapType]
class AbbreviatedFormDomain(ditagen.dita.Domain):
    """Abbreviated form domain."""
    id = u"abbrev-d"
    si_module = u"technicalContent/dtd/abbreviateDomain.mod"
    si_entity = u"technicalContent/dtd/abbreviateDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Abbreviated Form Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Abbreviated Form Domain//EN"
    title = u"Abbreviated Form"
    elements = [u"term"]
    parent = [TopicType]
class XNALDomain(ditagen.dita.Domain):
    """XNAL domain."""
    id = u"xnal-d"
    si_module = u"xnal/dtd/xnalDomain.mod"
    si_entity = u"xnal/dtd/xnalDomain.ent"
    # NOTE(review): no pi_entity/pi_module declared here, unlike the sibling
    # domains -- confirm XNAL is resolved by system identifier only.
    title = u"XNAL"
    elements = [u"author"]
    parent = [MapType]
class UserDelayedResolutionDomain(ditagen.dita.Domain):
    """User delayed resolution domain."""
    id = u"delay-d"
    si_module = u"base/dtd/delayResolutionDomain.mod"
    si_entity = u"base/dtd/delayResolutionDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Delayed Resolution Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Delayed Resolution Domain//EN"
    title = u"Delayed Resolution"
    elements = [u"keywords"]
    parent = [TopicType, MapType]
class ClassifyDomain(ditagen.dita.Domain):
    """Classify domain."""
    id = u"classify-d"
    si_module = u"subjectScheme/dtd/classifyDomain.mod"
    si_entity = u"subjectScheme/dtd/classifyDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Classification Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Classification Domain//EN"
    title = u"Map Subject Classification"
    elements = [u"topicref", u"reltable"]
    parent = [TopicType, MapType]
class GlossaryReferenceDomain(ditagen.dita.Domain):
    """Glossary reference domain."""
    id = u"glossref-d"
    si_module = u"technicalContent/dtd/glossrefDomain.mod"
    si_entity = u"technicalContent/dtd/glossrefDomain.ent"
    pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Glossary Reference Domain//EN"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Glossary Reference Domain//EN"
    title = u"Glossary Reference"
    elements = [u"topicref"]
    parent = [MapType]
# Constraints
class StrictTaskbodyConstraints(Constraints):
    """Strict taskbody constraints.

    Restricts the content model of <taskbody> in Task topics.
    """
    id = u"strictTaskbody-c"
    si_module = u"technicalContent/dtd/strictTaskbodyConstraint.mod"
    si_entity = u"technicalContent/dtd/strictTaskbodyConstraint.ent"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Strict Taskbody Constraint//EN"
    title = u"Strict Taskbody"
    parent = [TaskType]
    att_id = u"taskbody"
class MachineryTaskbodyConstraints(Constraints):
    """Machinery taskbody constraints.

    Restricts the content model of <taskbody> for machinery tasks.
    """
    id = u"machineryTaskbody-c"
    si_module = u"machineryIndustry/dtd/machineryTaskbodyConstraint.mod"
    si_entity = u"machineryIndustry/dtd/machineryTaskbodyConstraint.ent"
    pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Machinery Taskbody Constraint//EN"
    title = u"Machinery Taskbody"
    parent = [TaskType]
    att_id = u"taskbody"
# Defaults
TopicType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ConceptType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
TaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain, StrictTaskbodyConstraints]
GeneralTaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ReferenceType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
MachineryTaskType.default_domains = [TaskRequirementsDomain, HazardStatementDomain, HiDomain, UtDomain, IndexingDomain, PrDomain, SwDomain, UiDomain, MachineryTaskbodyConstraints]
MapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, GlossaryReferenceDomain]
BookMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
ClassificationMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, ClassifyDomain]
SubjectSchemeType.default_domains = [MapGroupDomain]
LearningAssessmentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningBookMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
LearningContentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain]
LearningOverviewType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningPlanType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningSummaryType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
GlossentryType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
GlossgroupType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
| jelovirt/dita-generator | src/ditagen/dita/v1_2.py | Python | apache-2.0 | 32,591 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-30 12:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: removes the `emails` field from the
    # `profile` model (added in the preceding 0016 migration).

    dependencies = [
        ('webcore', '0016_profile_emails'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='emails',
        ),
    ]
| Nikita1710/ANUFifty50-Online-Mentoring-Platform | project/fifty_fifty/webcore/migrations/0017_remove_profile_emails.py | Python | apache-2.0 | 388 |
package catalog
import (
"fmt"
"poule/operations"
"strings"
"time"
"poule/configuration"
"poule/gh"
"poule/operations/settings"
"github.com/google/go-github/github"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
// init registers the prune operation in the global operation catalog so it
// can be instantiated from the command line or a configuration file.
func init() {
	registerOperation(&pruneDescriptor{})
}
// pruneDescriptor describes the prune operation and knows how to build a
// pruneOperation from either CLI flags or a decoded configuration map.
type pruneDescriptor struct{}

// pruneConfig holds the raw (string-valued) configuration for the prune
// operation before parsing.
type pruneConfig struct {
	// Action to take on outdated issues: "close", "force-close", "ping", or "warn".
	Action string `mapstructure:"action"`
	// GracePeriod before closing (extended-duration syntax, e.g. "2w").
	GracePeriod string `mapstructure:"grace-period"`
	// OutdatedThreshold of inactivity after which an issue is outdated (e.g. "6m").
	OutdatedThreshold string `mapstructure:"outdated-threshold"`
}
// CommandLineDescription returns the CLI name, help text, and flags for the
// prune operation.
// NOTE(review): the CLI flag is named "threshold" while the configuration key
// is "outdated-threshold" -- confirm this asymmetry is intentional.
func (d *pruneDescriptor) CommandLineDescription() CommandLineDescription {
	return CommandLineDescription{
		Name:        "prune",
		Description: "Prune outdated issues",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "action",
				Usage: "action to take for outdated issues",
				Value: "ping",
			},
			cli.StringFlag{
				Name:  "grace-period",
				Usage: "grace period before closing",
				Value: "2w",
			},
			cli.StringFlag{
				Name:  "threshold",
				Usage: "threshold in days, weeks, months, or years",
				Value: "6m",
			},
		},
	}
}
// OperationFromCli builds a prune operation from command-line flags.
func (d *pruneDescriptor) OperationFromCli(c *cli.Context) (operations.Operation, error) {
	pruneConfig := &pruneConfig{
		Action:            c.String("action"),
		GracePeriod:       c.String("grace-period"),
		OutdatedThreshold: c.String("threshold"),
	}
	return d.makeOperation(pruneConfig)
}

// OperationFromConfig builds a prune operation from a decoded configuration
// map (keys defined by the mapstructure tags on pruneConfig).
func (d *pruneDescriptor) OperationFromConfig(c operations.Configuration) (operations.Operation, error) {
	pruneConfig := &pruneConfig{}
	if err := mapstructure.Decode(c, &pruneConfig); err != nil {
		return nil, errors.Wrap(err, "decoding configuration")
	}
	return d.makeOperation(pruneConfig)
}
// makeOperation parses the string-valued configuration into a pruneOperation,
// validating the action name and both duration fields.  The first parse
// failure aborts construction.
func (d *pruneDescriptor) makeOperation(config *pruneConfig) (operations.Operation, error) {
	action, err := parseAction(config.Action)
	if err != nil {
		return nil, err
	}
	grace, err := settings.ParseExtDuration(config.GracePeriod)
	if err != nil {
		return nil, err
	}
	threshold, err := settings.ParseExtDuration(config.OutdatedThreshold)
	if err != nil {
		return nil, err
	}
	return &pruneOperation{
		action:            action,
		gracePeriod:       grace,
		outdatedThreshold: threshold,
	}, nil
}
// pruneOperation is the parsed, executable form of the prune configuration.
type pruneOperation struct {
	action            string               // one of "close", "force-close", "ping", "warn"
	gracePeriod       settings.ExtDuration // delay before closing after a warning
	outdatedThreshold settings.ExtDuration // inactivity span after which an issue is outdated
}

// Accepts reports that the prune operation applies to issues only.
func (o *pruneOperation) Accepts() operations.AcceptedType {
	return operations.Issues
}
// Apply executes the configured action on a single outdated issue:
//   - "close":       not implemented yet (see TODO below)
//   - "force-close": closes the issue immediately via the GitHub API
//   - "ping":        posts a comment asking whether the issue is still relevant
//   - "warn":        posts a comment announcing automatic closure after the
//     grace period
// Any other action falls through and returns nil; parseAction prevents such
// values from being configured.
func (o *pruneOperation) Apply(c *operations.Context, item gh.Item, userData interface{}) error {
	issue := item.Issue
	switch o.action {
	case "close":
		// TODO Find the last ping/warn message, and take the grace period into account.
		break
	case "force-close":
		state := "closed"
		_, _, err := c.Client.Issues().Edit(c.Username, c.Repository, *issue.Number, &github.IssueRequest{
			State: &state,
		})
		return err
	case "ping":
		body := formatPingComment(issue, o)
		_, _, err := c.Client.Issues().CreateComment(c.Username, c.Repository, *issue.Number, &github.IssueComment{
			Body: &body,
		})
		return err
	case "warn":
		body := formatWarnComment(issue, o)
		_, _, err := c.Client.Issues().CreateComment(c.Username, c.Repository, *issue.Number, &github.IssueComment{
			Body: &body,
		})
		return err
	}
	return nil
}
// Describe returns a human-readable summary of what Apply would do to the
// item.  userData is the last-commented timestamp produced by Filter; the
// type assertion panics on anything else, so Describe must only be called
// with data returned by Filter on an accepted item.
func (o *pruneOperation) Describe(c *operations.Context, item gh.Item, userData interface{}) string {
	issue := item.Issue
	return fmt.Sprintf("Execute %s action on issue #%d (last commented on %s)",
		o.action, *issue.Number, userData.(time.Time).Format(time.RFC3339))
}
// Filter decides whether an issue is outdated.  It determines the last time
// the issue was commented on by anyone other than poule itself and accepts
// the item (passing that timestamp as userData for Apply/Describe) when the
// silence exceeds the outdated threshold.  Issues are listed in ascending
// update order, so the first non-outdated issue ends the whole listing via
// operations.Terminal.
func (o *pruneOperation) Filter(c *operations.Context, item gh.Item) (operations.FilterResult, interface{}, error) {
	// Retrieve comments for that issue since our threshold plus our grace
	// period plus one day.
	issue := item.Issue
	comments, _, err := c.Client.Issues().ListComments(c.Username, c.Repository, *issue.Number, &github.IssueListCommentsOptions{
		Since: time.Now().Add(-1*o.outdatedThreshold.Duration()).Add(-1*o.gracePeriod.Duration()).AddDate(0, 0, -1),
		ListOptions: github.ListOptions{
			PerPage: 200,
		},
	})
	if err != nil {
		return operations.Reject, nil, errors.Wrapf(err, "failed to retrieve comments for issue #%d", *issue.Number)
	}

	// Figure out the last time the issue was commented on, walking comments
	// from newest to oldest.
	lastCommented := *issue.UpdatedAt
	for size := len(comments); size > 0; size-- {
		// Skip all comments produced by the tool itself (as indicated by the
		// presence of the PouleToken).
		if strings.Contains(*comments[size-1].Body, configuration.PouleToken) {
			comments = comments[0 : size-1]
			continue
		}
		lastCommented = *comments[size-1].UpdatedAt
		break
	}

	// Filter out issues which last commented date is under our threshold. We
	// retrieve the issues in ascending update order: no more issues will be
	// accepted after that.
	if !lastCommented.Add(o.outdatedThreshold.Duration()).Before(time.Now()) {
		return operations.Terminal, nil, nil
	}
	return operations.Accept, lastCommented, nil
}
// IssueListOptions lists open issues in ascending update order; Filter relies
// on this ordering to stop the listing at the first non-outdated issue.
func (o *pruneOperation) IssueListOptions(c *operations.Context) *github.IssueListByRepoOptions {
	return &github.IssueListByRepoOptions{
		State:     "open",
		Sort:      "updated",
		Direction: "asc",
		ListOptions: github.ListOptions{
			PerPage: 200,
		},
	}
}

// PullRequestListOptions returns nil: the prune operation never lists pull
// requests (see Accepts).
func (o *pruneOperation) PullRequestListOptions(c *operations.Context) *github.PullRequestListOptions {
	// pruneOperation doesn't apply to GitHub pull requests.
	return nil
}
// formatPingComment builds the "is this still relevant?" comment posted by
// the ping action.  The leading HTML comment embeds the PouleToken together
// with the action, threshold quantity, and threshold unit so that Filter can
// later recognize and skip comments posted by the tool itself.
func formatPingComment(issue *github.Issue, o *pruneOperation) string {
	comment := `<!-- %s:%s:%d%c -->
@%s It has been detected that this issue has not received any activity in over %s. Can you please let us know if it is still relevant:
- For a bug: do you still experience the issue with the latest version?
- For a feature request: was your request appropriately answered in a later version?
Thank you!`
	return fmt.Sprintf(comment,
		configuration.PouleToken,
		o.action,
		o.outdatedThreshold.Quantity,
		o.outdatedThreshold.Unit,
		*issue.User.Login,
		o.outdatedThreshold.String(),
	)
}
// formatWarnComment builds the comment posted by the warn action: the regular
// ping text followed by an explicit closure warning mentioning the grace
// period.
func formatWarnComment(issue *github.Issue, o *pruneOperation) string {
	const warnSuffix = `%s
This issue will be **automatically closed in %s** unless it is commented on.
`
	return fmt.Sprintf(warnSuffix, formatPingComment(issue, o), o.gracePeriod.String())
}
// parseAction validates a prune action name, returning it unchanged when it
// is one of the supported values ("close", "force-close", "ping", "warn")
// and an error otherwise.
func parseAction(action string) (string, error) {
	switch action {
	case "close", "force-close", "ping", "warn":
		return action, nil
	default:
		// Error strings start lowercase per Go convention, since they are
		// usually wrapped into longer messages.
		return "", fmt.Errorf("invalid action %q", action)
	}
}
| icecrime/poule | src/poule/operations/catalog/prune.go | GO | apache-2.0 | 6,487 |
from distutils.core import setup
# Python packages shipped by this distribution.
PKGLIST = ['gearman_geodis']

# Distutils packaging for the gearman-geodis worker: installs the worker
# scripts plus a sysconfig file and an init.d service script.
setup(name='gearman-geodis',
      version='1.0.0',
      description='Geolocation Gearman worker powered by Geodis',
      author_email='engineering@shazamteam.com',
      license='Apache License, Version 2.0',
      packages=PKGLIST,
      scripts=['gearman_geodis/geodis_worker.py', 'gearman_geodis/gearman_geodisd.py', 'gearman_geodis/stdin_geodis_worker.py'],
      data_files=[('/etc/sysconfig/', ['support/gearman_geodis.sysconfig']),
                  ('/etc/init.d/', ['support/gearman_geodis'])]
      )
| shazamengineering/gearman-geodis | setup.py | Python | apache-2.0 | 570 |
/*
* Copyright 2017 Cisco Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "nfapi_pnf_interface.h"
#include "nfapi.h"
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <assert.h>
#include <arpa/inet.h>
#include <boost/foreach.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/exception_ptr.hpp>
#include <vendor_ext.h>
#include "fapi_stub.h"
#include "pool.h"
#include <mutex>
#include <list>
#include <queue>
#include <map>
#include <vector>
#include <algorithm>
#include <stdlib.h>
#define NUM_P5_PHY 2
uint16_t phy_antenna_capability_values[] = { 1, 2, 4, 8, 16 };
// Returns a pseudo-random value in the inclusive range [min, max].
// NOTE(review): assumes min <= max; otherwise the unsigned span wraps and the
// result is meaningless -- confirm all callers respect this.
static uint32_t rand_range(uint32_t min, uint32_t max)
{
	uint32_t span = max + 1 - min;
	return min + (rand() % span);
}
extern "C" nfapi_pnf_param_response_t g_pnf_param_resp;
extern "C" {
	// Memory hooks exported to the nfapi C library: all of its allocations
	// are routed through the local memory_pool.
	void* pnf_allocate(size_t size)
	{
		return (void*)memory_pool::allocate(size);
	}

	void pnf_deallocate(void* ptr)
	{
		memory_pool::deallocate((uint8_t*)ptr);
	}

	// Implemented elsewhere; parses the PNF/PHY configuration file.
	int read_xml(const char *xml_file);
};
// Plain data holder for the UDP data-path settings of a PHY instance.
class udp_data
{
	public:
		// Initialize all scalar members so a default-constructed instance is
		// in a well-defined "disabled" state.  Previously these were left
		// indeterminate, and phy_info's constructor never initialized its
		// udp member.
		udp_data()
			: enabled(false), rx_port(0), tx_port(0)
		{
		}

		bool enabled;        // whether the UDP data path is in use
		uint32_t rx_port;    // local port to receive on
		uint32_t tx_port;    // remote port to transmit to
		std::string tx_addr; // remote address to transmit to
};
// Per-PHY configuration and runtime state for this PNF.
class phy_info
{
	public:
		// Zero-initialize every scalar member (partly via the initializer
		// list, partly in the body).
		phy_info()
			: first_subframe_ind(0), fapi(0),
			  dl_ues_per_subframe(0), ul_ues_per_subframe(0),
			  timing_window(0), timing_info_mode(0), timing_info_period(0)
		{
			index = 0;
			id = 0;
			local_port = 0;
			remote_addr = 0;
			remote_port = 0;
			duplex_mode = 0;
			dl_channel_bw_support = 0;
			ul_channel_bw_support = 0;
			num_dl_layers_supported = 0;
			num_ul_layers_supported = 0;
			release_supported = 0;
			nmm_modes_supported = 0;
		}

		uint16_t index;
		uint16_t id;
		std::vector<uint8_t> rfs;          // RF units used by this PHY
		std::vector<uint8_t> excluded_rfs; // RF units excluded from use

		udp_data udp;                      // UDP data-path settings

		std::string local_addr;
		int local_port;

		char* remote_addr;
		int remote_port;

		uint8_t duplex_mode;
		uint16_t dl_channel_bw_support;
		uint16_t ul_channel_bw_support;
		uint8_t num_dl_layers_supported;
		uint8_t num_ul_layers_supported;
		uint16_t release_supported;
		uint8_t nmm_modes_supported;

		uint8_t dl_ues_per_subframe;
		uint8_t ul_ues_per_subframe;

		uint8_t first_subframe_ind;

		// timing information received from the vnf
		uint8_t timing_window;
		uint8_t timing_info_mode;
		uint8_t timing_info_period;

		fapi_t* fapi;
};
// Capabilities of a single RF unit, loaded from an <rf> XML element and
// reported back in the PNF PARAM.response.
// Members now carry in-class zero initializers so a default-constructed
// rf_info no longer holds indeterminate values (previously every field
// was uninitialized until the XML parse assigned it).
class rf_info
{
public:
    uint16_t index = 0;                  // rf_config_index
    uint16_t band = 0;                   // band number from the XML
    int16_t max_transmit_power = 0;      // signed, per PARAM.response encoding
    int16_t min_transmit_power = 0;
    uint8_t num_antennas_supported = 0;
    uint32_t min_downlink_frequency = 0;
    uint32_t max_downlink_frequency = 0;
    uint32_t max_uplink_frequency = 0;
    uint32_t min_uplink_frequency = 0;
};
// Top-level PNF configuration: general capabilities plus the PHY and RF
// instances parsed from the configuration XML (read_pnf_xml).
class pnf_info
{
public:
    // The original constructor set release(13) in its init list and then
    // immediately overwrote it with 0 in the body; the effective value has
    // always been 0 and that is what is kept here.
    // NOTE(review): with release == 0 none of the rel10+ capability blocks
    // in pnf_param_request are populated — confirm whether 'release' is
    // meant to be loaded from the XML before use.
    // location_coordinates is now value-initialized (was indeterminate).
    pnf_info()
        : release(0),
          sync_mode(0), location_mode(0),
          location_coordinates(),
          dl_config_timing(0), ul_config_timing(0),
          tx_timing(0), hi_dci0_timing(0),
          max_phys(0), max_total_bw(0),
          max_total_dl_layers(0), max_total_ul_layers(0),
          shared_bands(0), shared_pa(0),
          max_total_power(0), oui(0),
          wireshark_test_mode(0)
    {
    }

    int release;                       // 3GPP release reported to the VNF
    std::vector<phy_info> phys;        // one entry per <phy> XML element
    std::vector<rf_info> rfs;          // one entry per <rf> XML element
    uint8_t sync_mode;
    uint8_t location_mode;
    uint8_t location_coordinates[6];
    uint32_t dl_config_timing;
    uint32_t ul_config_timing;
    uint32_t tx_timing;
    uint32_t hi_dci0_timing;
    uint16_t max_phys;
    uint16_t max_total_bw;
    uint16_t max_total_dl_layers;
    uint16_t max_total_ul_layers;
    uint8_t shared_bands;
    uint8_t shared_pa;
    int16_t max_total_power;
    uint8_t oui;
    // when set, outgoing indications are stuffed with random TLVs so every
    // field shows up in a wireshark capture
    uint8_t wireshark_test_mode;
};
// Per-PHY context installed as fapi->user_data so the FAPI stub callbacks
// can find their way back to the nfapi configs and the phy_info entry.
struct pnf_phy_user_data_t
{
    uint16_t phy_id;                  // nfapi phy id for this PHY
    nfapi_pnf_config_t* config;       // P5 (PNF-level) configuration
    phy_info* phy;                    // matching entry in pnf_info::phys
    nfapi_pnf_p7_config_t* p7_config; // P7 configuration for this PHY
};
// Loads the PNF simulator configuration from xml_file into pnf using
// Boost.PropertyTree: general parameters first, then one phy_info per
// <phy> element and one rf_info per <rf> element.
// Returns 0 on success, -1 on any parse/lookup failure (missing mandatory
// fields throw; the error text is printed to stdout).
int read_pnf_xml(pnf_info& pnf, const char* xml_file)
{
    try
    {
        std::ifstream input(xml_file);
        using boost::property_tree::ptree;
        ptree pt;
        read_xml(input, pt);
        // wireshark_test_mode is optional (defaults to 0); the rest of the
        // general parameters are mandatory and throw when absent.
        pnf.wireshark_test_mode = pt.get<unsigned>("pnf.wireshark_test_mode", 0);
        pnf.sync_mode = pt.get<unsigned>("pnf.sync_mode");
        pnf.location_mode= pt.get<unsigned>("pnf.location_mode");
        //pnf.sync_mode = pt.get<unsigned>("pnf.location_coordinates");
        pnf.dl_config_timing= pt.get<unsigned>("pnf.dl_config_timing");
        pnf.ul_config_timing = pt.get<unsigned>("pnf.ul_config_timing");
        pnf.tx_timing = pt.get<unsigned>("pnf.tx_timing");
        pnf.hi_dci0_timing = pt.get<unsigned>("pnf.hi_dci0_timing");
        pnf.max_phys = pt.get<unsigned>("pnf.max_phys");
        pnf.max_total_bw = pt.get<unsigned>("pnf.max_total_bandwidth");
        pnf.max_total_dl_layers = pt.get<unsigned>("pnf.max_total_num_dl_layers");
        pnf.max_total_ul_layers = pt.get<unsigned>("pnf.max_total_num_ul_layers");
        pnf.shared_bands = pt.get<unsigned>("pnf.shared_bands");
        pnf.shared_pa = pt.get<unsigned>("pnf.shared_pas");
        pnf.max_total_power = pt.get<signed>("pnf.maximum_total_power");
        //"oui");
        // One phy_info per <phy> element under <phys>.
        for(const auto& v : pt.get_child("pnf.phys"))
        {
            if(v.first == "phy")
            {
                phy_info phy;
                phy.index = v.second.get<unsigned>("index");
                phy.local_port = v.second.get<unsigned>("port");
                phy.local_addr = v.second.get<std::string>("address");
                phy.duplex_mode = v.second.get<unsigned>("duplex_mode");
                phy.dl_channel_bw_support = v.second.get<unsigned>("downlink_channel_bandwidth_support");
                phy.ul_channel_bw_support = v.second.get<unsigned>("uplink_channel_bandwidth_support");
                phy.num_dl_layers_supported = v.second.get<unsigned>("number_of_dl_layers");
                phy.num_ul_layers_supported = v.second.get<unsigned>("number_of_ul_layers");
                phy.release_supported = v.second.get<unsigned>("3gpp_release_supported");
                phy.nmm_modes_supported = v.second.get<unsigned>("nmm_modes_supported");
                for(const auto& v2 : v.second.get_child("rfs"))
                {
                    if(v2.first == "index")
                        phy.rfs.push_back(v2.second.get_value<unsigned>());
                }
                for(const auto& v2 : v.second.get_child("excluded_rfs"))
                {
                    if(v2.first == "index")
                        phy.excluded_rfs.push_back(v2.second.get_value<unsigned>());
                }
                // The UDP data path is optional: only enabled when a
                // <data><udp> section is present.
                boost::optional<const boost::property_tree::ptree&> d = v.second.get_child_optional("data.udp");
                if(d.is_initialized())
                {
                    phy.udp.enabled = true;
                    phy.udp.rx_port = d.get().get<unsigned>("rx_port");
                    phy.udp.tx_port = d.get().get<unsigned>("tx_port");
                    phy.udp.tx_addr = d.get().get<std::string>("tx_addr");
                }
                else
                {
                    phy.udp.enabled = false;
                }
                phy.dl_ues_per_subframe = v.second.get<unsigned>("dl_ues_per_subframe");
                phy.ul_ues_per_subframe = v.second.get<unsigned>("ul_ues_per_subframe");
                pnf.phys.push_back(phy);
            }
        }
        // One rf_info per <rf> element under <rfs>.
        for(const auto& v : pt.get_child("pnf.rfs"))
        {
            if(v.first == "rf")
            {
                rf_info rf;
                rf.index = v.second.get<unsigned>("index");
                rf.band = v.second.get<unsigned>("band");
                rf.max_transmit_power = v.second.get<signed>("max_transmit_power");
                rf.min_transmit_power = v.second.get<signed>("min_transmit_power");
                rf.num_antennas_supported = v.second.get<unsigned>("num_antennas_supported");
                rf.min_downlink_frequency = v.second.get<unsigned>("min_downlink_frequency");
                rf.max_downlink_frequency = v.second.get<unsigned>("max_downlink_frequency");
                // Bug fix: the min/max uplink values were read from each
                // other's XML key (copy/paste swap), so min held the max
                // frequency and vice versa.
                rf.min_uplink_frequency = v.second.get<unsigned>("min_uplink_frequency");
                rf.max_uplink_frequency = v.second.get<unsigned>("max_uplink_frequency");
                pnf.rfs.push_back(rf);
            }
        }
    }
    catch(std::exception& e)
    {
        printf("%s", e.what());
        return -1;
    }
    catch(boost::exception& e)
    {
        printf("%s", boost::diagnostic_information(e).c_str());
        return -1;
    }
    return 0;
}
// nfapi trace hook: formats the message printf-style onto stdout.
// The trace level is accepted for interface compatibility but every level
// is printed — no filtering is applied.
void pnf_sim_trace(nfapi_trace_level_t level, const char* message, ...)
{
    (void)level; // unused: all levels are emitted
    va_list ap;
    va_start(ap, message);
    vfprintf(stdout, message, ap);
    va_end(ap);
}
// Attempts to switch the calling process to the SCHED_RR real-time policy
// at 'priority'. Failures are reported on stdout but are non-fatal
// (typically EPERM when not running with real-time privileges).
void set_thread_priority(int priority)
{
    //printf("%s(priority:%d)\n", __FUNCTION__, priority);
    struct sched_param schedParam;
    // Use the portable 'sched_priority' member; the original wrote to the
    // glibc-internal '__sched_priority' name.
    schedParam.sched_priority = priority; //79;
    if(sched_setscheduler(0, SCHED_RR, &schedParam) != 0)
    {
        printf("failed to set SCHED_RR\n");
    }
    // The attribute object below is configured but never used to create a
    // thread; it is kept for compatibility with the original code. It is
    // now initialised before use and destroyed afterwards — previously the
    // pthread_attr_set* calls operated on an uninitialised pthread_attr_t
    // (undefined behaviour) and the attr was leaked.
    pthread_attr_t ptAttr;
    if(pthread_attr_init(&ptAttr) == 0)
    {
        if(pthread_attr_setschedpolicy(&ptAttr, SCHED_RR) != 0)
        {
            printf("failed to set pthread SCHED_RR %d\n", errno);
        }
        pthread_attr_setinheritsched(&ptAttr, PTHREAD_EXPLICIT_SCHED);
        struct sched_param thread_params;
        thread_params.sched_priority = 20;
        if(pthread_attr_setschedparam(&ptAttr, &thread_params) != 0)
        {
            printf("failed to set sched param\n");
        }
        pthread_attr_destroy(&ptAttr);
    }
}
// Entry point for the PNF P7 thread: raises the scheduling priority and
// then runs the (blocking) P7 receive loop. 'ptr' is the
// nfapi_pnf_p7_config_t* passed at thread creation.
void* pnf_p7_thread_start(void* ptr)
{
    set_thread_priority(79);
    nfapi_pnf_p7_start((nfapi_pnf_p7_config_t*)ptr);
    return 0;
}
// P5 PNF_PARAM.request handler: builds a PNF_PARAM.response describing this
// PNF's general capabilities and its PHY/RF instances (taken from the
// pnf_info stored in config->user_data) and sends it back to the VNF.
// Release-dependent capability blocks (rel10..rel13, NB-IoT) are only
// populated when pnf->release is high enough. Always returns 0.
// NOTE(review): the loops index fixed-size arrays in the response
// (resp.pnf_phy.phy[i], resp.pnf_rf.rf[i], rf_config[j], ...) without
// clamping to their capacity — confirm phys/rfs sizes from the XML cannot
// exceed the nfapi array bounds.
int pnf_param_request(nfapi_pnf_config_t* config, nfapi_pnf_param_request_t* req)
{
    printf("[PNF_SIM] pnf param request\n");
    nfapi_pnf_param_response_t resp;
    memset(&resp, 0, sizeof(resp));
    resp.header.message_id = NFAPI_PNF_PARAM_RESPONSE;
    resp.error_code = NFAPI_MSG_OK;
    pnf_info* pnf = (pnf_info*)(config->user_data);
    // General PNF capability TLV.
    resp.pnf_param_general.tl.tag = NFAPI_PNF_PARAM_GENERAL_TAG;
    resp.pnf_param_general.nfapi_sync_mode = pnf->sync_mode;
    resp.pnf_param_general.location_mode = pnf->location_mode;
    //uint8_t location_coordinates[NFAPI_PNF_PARAM_GENERAL_LOCATION_LENGTH];
    resp.pnf_param_general.dl_config_timing = pnf->dl_config_timing;
    resp.pnf_param_general.tx_timing = pnf->tx_timing;
    resp.pnf_param_general.ul_config_timing = pnf->ul_config_timing;
    resp.pnf_param_general.hi_dci0_timing = pnf->hi_dci0_timing;
    resp.pnf_param_general.maximum_number_phys = pnf->max_phys;
    resp.pnf_param_general.maximum_total_bandwidth = pnf->max_total_bw;
    resp.pnf_param_general.maximum_total_number_dl_layers = pnf->max_total_dl_layers;
    resp.pnf_param_general.maximum_total_number_ul_layers = pnf->max_total_ul_layers;
    resp.pnf_param_general.shared_bands = pnf->shared_bands;
    resp.pnf_param_general.shared_pa = pnf->shared_pa;
    resp.pnf_param_general.maximum_total_power = pnf->max_total_power;
    //uint8_t oui[NFAPI_PNF_PARAM_GENERAL_OUI_LENGTH];
    // PHY capability TLV: one entry per configured PHY, including its RF
    // associations and exclusions.
    resp.pnf_phy.tl.tag = NFAPI_PNF_PHY_TAG;
    resp.pnf_phy.number_of_phys = pnf->phys.size();
    for(int i = 0; i < pnf->phys.size(); ++i)
    {
        resp.pnf_phy.phy[i].phy_config_index = pnf->phys[i].index;
        resp.pnf_phy.phy[i].downlink_channel_bandwidth_supported = pnf->phys[i].dl_channel_bw_support;
        resp.pnf_phy.phy[i].uplink_channel_bandwidth_supported = pnf->phys[i].ul_channel_bw_support;
        resp.pnf_phy.phy[i].number_of_dl_layers_supported = pnf->phys[i].num_dl_layers_supported;
        resp.pnf_phy.phy[i].number_of_ul_layers_supported = pnf->phys[i].num_ul_layers_supported;
        resp.pnf_phy.phy[i].maximum_3gpp_release_supported = pnf->phys[i].release_supported;
        resp.pnf_phy.phy[i].nmm_modes_supported = pnf->phys[i].nmm_modes_supported;
        resp.pnf_phy.phy[i].number_of_rfs = pnf->phys[i].rfs.size();
        for(int j = 0; j < pnf->phys[i].rfs.size(); ++j)
        {
            resp.pnf_phy.phy[i].rf_config[j].rf_config_index = pnf->phys[i].rfs[j];
        }
        resp.pnf_phy.phy[i].number_of_rf_exclusions = pnf->phys[i].excluded_rfs.size();
        for(int j = 0; j < pnf->phys[i].excluded_rfs.size(); ++j)
        {
            resp.pnf_phy.phy[i].excluded_rf_config[j].rf_config_index = pnf->phys[i].excluded_rfs[j];
        }
    }
    // RF capability TLV: one entry per configured RF unit.
    resp.pnf_rf.tl.tag = NFAPI_PNF_RF_TAG;
    resp.pnf_rf.number_of_rfs = pnf->rfs.size();
    for(int i = 0; i < pnf->rfs.size(); ++i)
    {
        resp.pnf_rf.rf[i].rf_config_index = pnf->rfs[i].index;
        resp.pnf_rf.rf[i].band = pnf->rfs[i].band;
        resp.pnf_rf.rf[i].maximum_transmit_power = pnf->rfs[i].max_transmit_power;
        resp.pnf_rf.rf[i].minimum_transmit_power = pnf->rfs[i].min_transmit_power;
        resp.pnf_rf.rf[i].number_of_antennas_suppported = pnf->rfs[i].num_antennas_supported;
        resp.pnf_rf.rf[i].minimum_downlink_frequency = pnf->rfs[i].min_downlink_frequency;
        resp.pnf_rf.rf[i].maximum_downlink_frequency = pnf->rfs[i].max_downlink_frequency;
        resp.pnf_rf.rf[i].minimum_uplink_frequency = pnf->rfs[i].min_uplink_frequency;
        resp.pnf_rf.rf[i].maximum_uplink_frequency = pnf->rfs[i].max_uplink_frequency;
    }
    // Release 10 PHY capabilities (fixed sample values below).
    if(pnf->release >= 10)
    {
        resp.pnf_phy_rel10.tl.tag = NFAPI_PNF_PHY_REL10_TAG;
        resp.pnf_phy_rel10.number_of_phys = pnf->phys.size();
        for(int i = 0; i < pnf->phys.size(); ++i)
        {
            resp.pnf_phy_rel10.phy[i].phy_config_index = pnf->phys[i].index;
            resp.pnf_phy_rel10.phy[i].transmission_mode_7_supported = 0;
            resp.pnf_phy_rel10.phy[i].transmission_mode_8_supported = 1;
            resp.pnf_phy_rel10.phy[i].two_antenna_ports_for_pucch = 0;
            resp.pnf_phy_rel10.phy[i].transmission_mode_9_supported = 1;
            resp.pnf_phy_rel10.phy[i].simultaneous_pucch_pusch = 0;
            resp.pnf_phy_rel10.phy[i].four_layer_tx_with_tm3_and_tm4 = 1;
        }
    }
    // Release 11 PHY capabilities.
    if(pnf->release >= 11)
    {
        resp.pnf_phy_rel11.tl.tag = NFAPI_PNF_PHY_REL11_TAG;
        resp.pnf_phy_rel11.number_of_phys = pnf->phys.size();
        for(int i = 0; i < pnf->phys.size(); ++i)
        {
            resp.pnf_phy_rel11.phy[i].phy_config_index = pnf->phys[i].index;
            resp.pnf_phy_rel11.phy[i].edpcch_supported = 0;
            resp.pnf_phy_rel11.phy[i].multi_ack_csi_reporting = 1;
            resp.pnf_phy_rel11.phy[i].pucch_tx_diversity = 0;
            resp.pnf_phy_rel11.phy[i].ul_comp_supported = 1;
            resp.pnf_phy_rel11.phy[i].transmission_mode_5_supported = 0;
        }
    }
    // Release 12 PHY capabilities.
    if(pnf->release >= 12)
    {
        resp.pnf_phy_rel12.tl.tag = NFAPI_PNF_PHY_REL12_TAG;
        resp.pnf_phy_rel12.number_of_phys = pnf->phys.size();
        for(int i = 0; i < pnf->phys.size(); ++i)
        {
            resp.pnf_phy_rel12.phy[i].phy_config_index = pnf->phys[i].index;
            resp.pnf_phy_rel12.phy[i].csi_subframe_set = 0;
            resp.pnf_phy_rel12.phy[i].enhanced_4tx_codebook = 2; // yes this is invalid
            resp.pnf_phy_rel12.phy[i].drs_supported = 0;
            resp.pnf_phy_rel12.phy[i].ul_64qam_supported = 1;
            resp.pnf_phy_rel12.phy[i].transmission_mode_10_supported = 0;
            resp.pnf_phy_rel12.phy[i].alternative_bts_indices = 1;
        }
    }
    // Release 13 PHY capabilities plus the NB-IoT block.
    if(pnf->release >= 13)
    {
        resp.pnf_phy_rel13.tl.tag = NFAPI_PNF_PHY_REL13_TAG;
        resp.pnf_phy_rel13.number_of_phys = pnf->phys.size();
        for(int i = 0; i < pnf->phys.size(); ++i)
        {
            resp.pnf_phy_rel13.phy[i].phy_config_index = pnf->phys[i].index;
            resp.pnf_phy_rel13.phy[i].pucch_format4_supported = 0;
            resp.pnf_phy_rel13.phy[i].pucch_format5_supported = 1;
            resp.pnf_phy_rel13.phy[i].more_than_5_ca_support = 0;
            resp.pnf_phy_rel13.phy[i].laa_supported = 1;
            resp.pnf_phy_rel13.phy[i].laa_ending_in_dwpts_supported = 0;
            resp.pnf_phy_rel13.phy[i].laa_starting_in_second_slot_supported = 1;
            resp.pnf_phy_rel13.phy[i].beamforming_supported = 0;
            resp.pnf_phy_rel13.phy[i].csi_rs_enhancement_supported = 1;
            resp.pnf_phy_rel13.phy[i].drms_enhancement_supported = 0;
            resp.pnf_phy_rel13.phy[i].srs_enhancement_supported = 1;
        }
        resp.pnf_phy_rel13_nb_iot.tl.tag = NFAPI_PNF_PHY_REL13_NB_IOT_TAG;
        resp.pnf_phy_rel13_nb_iot.number_of_phys = pnf->phys.size();
        for(int i = 0; i < pnf->phys.size(); ++i)
        {
            resp.pnf_phy_rel13_nb_iot.phy[i].phy_config_index = pnf->phys[i].index;
            resp.pnf_phy_rel13_nb_iot.phy[i].number_of_rfs = pnf->phys[i].rfs.size();
            for(int j = 0; j < pnf->phys[i].rfs.size(); ++j)
            {
                resp.pnf_phy_rel13_nb_iot.phy[i].rf_config[j].rf_config_index = pnf->phys[i].rfs[j];
            }
            resp.pnf_phy_rel13_nb_iot.phy[i].number_of_rf_exclusions = pnf->phys[i].excluded_rfs.size();
            for(int j = 0; j < pnf->phys[i].excluded_rfs.size(); ++j)
            {
                resp.pnf_phy_rel13_nb_iot.phy[i].excluded_rf_config[j].rf_config_index = pnf->phys[i].excluded_rfs[j];
            }
            resp.pnf_phy_rel13_nb_iot.phy[i].number_of_dl_layers_supported = pnf->phys[i].num_dl_layers_supported;
            resp.pnf_phy_rel13_nb_iot.phy[i].number_of_ul_layers_supported = pnf->phys[i].num_ul_layers_supported;
            resp.pnf_phy_rel13_nb_iot.phy[i].maximum_3gpp_release_supported = pnf->phys[i].release_supported;
            resp.pnf_phy_rel13_nb_iot.phy[i].nmm_modes_supported = pnf->phys[i].nmm_modes_supported;
        }
    }
    nfapi_pnf_pnf_param_resp(config, &resp);
    return 0;
}
// P5 PNF_CONFIG.request handler: records the phy_id the VNF assigned to
// each local PHY (matched by phy_config_index) and acknowledges with a
// PNF_CONFIG.response. Always returns 0.
int pnf_config_request(nfapi_pnf_config_t* config, nfapi_pnf_config_request_t* req)
{
    printf("[PNF_SIM] pnf config request\n");
    pnf_info* pnf = (pnf_info*)(config->user_data);
    for(int i = 0; i < req->pnf_phy_rf_config.number_phy_rf_config_info; ++i)
    {
        auto config_index = req->pnf_phy_rf_config.phy_rf_config[i].phy_config_index;
        bool matched = false;
        // Locate the first local PHY whose index matches the request entry.
        for(auto& phy : pnf->phys)
        {
            if(phy.index == config_index)
            {
                phy.id = req->pnf_phy_rf_config.phy_rf_config[i].phy_id;
                printf("[PNF_SIM] pnf config request assigned phy_id %d to phy_config_index %d\n", phy.id, config_index);
                matched = true;
                break;
            }
        }
        if(!matched)
        {
            // did not find the phy
            printf("[PNF_SIM] pnf config request did not find phy_config_index %d\n", config_index);
        }
    }
    nfapi_pnf_config_response_t resp;
    memset(&resp, 0, sizeof(resp));
    resp.header.message_id = NFAPI_PNF_CONFIG_RESPONSE;
    resp.error_code = NFAPI_MSG_OK;
    nfapi_pnf_pnf_config_resp(config, &resp);
    return 0;
}
// FAPI PARAM.response callback: converts the stub's PARAM.response into an
// nFAPI PARAM.response and sends it to the VNF. TLVs actually reported by
// the FAPI stub are copied first; a large set of additional TLVs is then
// filled with random/default values so every field appears in a wireshark
// capture; finally the nFAPI-specific configuration (UEs per subframe, P7
// address/port, NMM bands) is added. Always returns 0.
int fapi_param_response(fapi_t* fapi, fapi_param_resp_t* resp)
{
    printf("[PNF_SIM] fapi param response\n");
    pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
    nfapi_param_response_t nfapi_resp;
    memset(&nfapi_resp, 0, sizeof(nfapi_resp));
    nfapi_resp.header.message_id = NFAPI_PARAM_RESPONSE;
    nfapi_resp.header.phy_id = data->phy_id;
    nfapi_resp.error_code = resp->error_code;
    // Copy across the TLVs the FAPI stub actually reported.
    for(int i = 0; i < resp->number_of_tlvs; ++i)
    {
        switch(resp->tlvs[i].tag)
        {
            case FAPI_PHY_STATE_TAG:
                nfapi_resp.l1_status.phy_state.tl.tag = NFAPI_L1_STATUS_PHY_STATE_TAG;
                nfapi_resp.l1_status.phy_state.value = resp->tlvs[i].value;
                nfapi_resp.num_tlv++;
                break;
            case FAPI_PHY_CAPABILITIES_DL_BANDWIDTH_SUPPORT_TAG:
                nfapi_resp.phy_capabilities.dl_bandwidth_support.tl.tag = NFAPI_PHY_CAPABILITIES_DL_BANDWIDTH_SUPPORT_TAG;
                nfapi_resp.phy_capabilities.dl_bandwidth_support.value = resp->tlvs[i].value;
                nfapi_resp.num_tlv++;
                break;
            case FAPI_PHY_CAPABILITIES_UL_BANDWIDTH_SUPPORT_TAG:
                nfapi_resp.phy_capabilities.ul_bandwidth_support.tl.tag = NFAPI_PHY_CAPABILITIES_UL_BANDWIDTH_SUPPORT_TAG;
                nfapi_resp.phy_capabilities.ul_bandwidth_support.value = resp->tlvs[i].value;
                nfapi_resp.num_tlv++;
                break;
        }
    }
    if(1)
    {
        // just code to populate all the tlv for testing with wireshark
        // todo : these should be move up so that they are populated by fapi
        nfapi_resp.phy_capabilities.dl_modulation_support.tl.tag = NFAPI_PHY_CAPABILITIES_DL_MODULATION_SUPPORT_TAG;
        nfapi_resp.phy_capabilities.dl_modulation_support.value = rand_range(0, 0x0F);
        nfapi_resp.num_tlv++;
        nfapi_resp.phy_capabilities.ul_modulation_support.tl.tag = NFAPI_PHY_CAPABILITIES_UL_MODULATION_SUPPORT_TAG;
        nfapi_resp.phy_capabilities.ul_modulation_support.value = rand_range(0, 0x07);
        nfapi_resp.num_tlv++;
        nfapi_resp.phy_capabilities.phy_antenna_capability.tl.tag = NFAPI_PHY_CAPABILITIES_PHY_ANTENNA_CAPABILITY_TAG;
        nfapi_resp.phy_capabilities.phy_antenna_capability.value = phy_antenna_capability_values[rand_range(0, 4)];
        nfapi_resp.num_tlv++;
        nfapi_resp.phy_capabilities.release_capability.tl.tag = NFAPI_PHY_CAPABILITIES_RELEASE_CAPABILITY_TAG;
        nfapi_resp.phy_capabilities.release_capability.value = rand_range(0, 0x3F);
        nfapi_resp.num_tlv++;
        nfapi_resp.phy_capabilities.mbsfn_capability.tl.tag = NFAPI_PHY_CAPABILITIES_MBSFN_CAPABILITY_TAG;
        nfapi_resp.phy_capabilities.mbsfn_capability.value = rand_range(0, 1);
        nfapi_resp.num_tlv++;
        nfapi_resp.laa_capability.laa_support.tl.tag = NFAPI_LAA_CAPABILITY_LAA_SUPPORT_TAG;
        nfapi_resp.laa_capability.laa_support.value = rand_range(0, 1);
        nfapi_resp.num_tlv++;
        nfapi_resp.laa_capability.pd_sensing_lbt_support.tl.tag = NFAPI_LAA_CAPABILITY_PD_SENSING_LBT_SUPPORT_TAG;
        nfapi_resp.laa_capability.pd_sensing_lbt_support.value = rand_range(0, 1);
        nfapi_resp.num_tlv++;
        nfapi_resp.laa_capability.multi_carrier_lbt_support.tl.tag = NFAPI_LAA_CAPABILITY_MULTI_CARRIER_LBT_SUPPORT_TAG;
        nfapi_resp.laa_capability.multi_carrier_lbt_support.value = rand_range(0, 0x0F);
        nfapi_resp.num_tlv++;
        nfapi_resp.laa_capability.partial_sf_support.tl.tag = NFAPI_LAA_CAPABILITY_PARTIAL_SF_SUPPORT_TAG;
        nfapi_resp.laa_capability.partial_sf_support.value = rand_range(0, 1);
        nfapi_resp.num_tlv++;
        nfapi_resp.nb_iot_capability.nb_iot_support.tl.tag = NFAPI_LAA_CAPABILITY_NB_IOT_SUPPORT_TAG;
        nfapi_resp.nb_iot_capability.nb_iot_support.value = rand_range(0, 2);
        nfapi_resp.num_tlv++;
        nfapi_resp.nb_iot_capability.nb_iot_operating_mode_capability.tl.tag = NFAPI_LAA_CAPABILITY_NB_IOT_OPERATING_MODE_CAPABILITY_TAG;
        nfapi_resp.nb_iot_capability.nb_iot_operating_mode_capability.value = rand_range(0, 1);
        nfapi_resp.num_tlv++;
        // The remaining TLVs are tag-only here: values stay zero from the
        // memset, but the tags make them appear in the encoded message.
        nfapi_resp.subframe_config.duplex_mode.tl.tag = NFAPI_SUBFRAME_CONFIG_DUPLEX_MODE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.subframe_config.pcfich_power_offset.tl.tag = NFAPI_SUBFRAME_CONFIG_PCFICH_POWER_OFFSET_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.subframe_config.pb.tl.tag = NFAPI_SUBFRAME_CONFIG_PB_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.subframe_config.dl_cyclic_prefix_type.tl.tag = NFAPI_SUBFRAME_CONFIG_DL_CYCLIC_PREFIX_TYPE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.subframe_config.ul_cyclic_prefix_type.tl.tag = NFAPI_SUBFRAME_CONFIG_UL_CYCLIC_PREFIX_TYPE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.rf_config.dl_channel_bandwidth.tl.tag = NFAPI_RF_CONFIG_DL_CHANNEL_BANDWIDTH_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.rf_config.ul_channel_bandwidth.tl.tag = NFAPI_RF_CONFIG_UL_CHANNEL_BANDWIDTH_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.rf_config.reference_signal_power.tl.tag = NFAPI_RF_CONFIG_REFERENCE_SIGNAL_POWER_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.rf_config.tx_antenna_ports.tl.tag = NFAPI_RF_CONFIG_TX_ANTENNA_PORTS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.rf_config.rx_antenna_ports.tl.tag = NFAPI_RF_CONFIG_RX_ANTENNA_PORTS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.phich_config.phich_resource.tl.tag = NFAPI_PHICH_CONFIG_PHICH_RESOURCE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.phich_config.phich_duration.tl.tag = NFAPI_PHICH_CONFIG_PHICH_DURATION_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.phich_config.phich_power_offset.tl.tag = NFAPI_PHICH_CONFIG_PHICH_POWER_OFFSET_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.sch_config.primary_synchronization_signal_epre_eprers.tl.tag = NFAPI_SCH_CONFIG_PRIMARY_SYNCHRONIZATION_SIGNAL_EPRE_EPRERS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.sch_config.secondary_synchronization_signal_epre_eprers.tl.tag = NFAPI_SCH_CONFIG_SECONDARY_SYNCHRONIZATION_SIGNAL_EPRE_EPRERS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.sch_config.physical_cell_id.tl.tag = NFAPI_SCH_CONFIG_PHYSICAL_CELL_ID_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.prach_config.configuration_index.tl.tag = NFAPI_PRACH_CONFIG_CONFIGURATION_INDEX_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.prach_config.root_sequence_index.tl.tag = NFAPI_PRACH_CONFIG_ROOT_SEQUENCE_INDEX_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.prach_config.zero_correlation_zone_configuration.tl.tag = NFAPI_PRACH_CONFIG_ZERO_CORRELATION_ZONE_CONFIGURATION_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.prach_config.high_speed_flag.tl.tag = NFAPI_PRACH_CONFIG_HIGH_SPEED_FLAG_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.prach_config.frequency_offset.tl.tag = NFAPI_PRACH_CONFIG_FREQUENCY_OFFSET_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pusch_config.hopping_mode.tl.tag = NFAPI_PUSCH_CONFIG_HOPPING_MODE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pusch_config.hopping_offset.tl.tag = NFAPI_PUSCH_CONFIG_HOPPING_OFFSET_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pusch_config.number_of_subbands.tl.tag = NFAPI_PUSCH_CONFIG_NUMBER_OF_SUBBANDS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pucch_config.delta_pucch_shift.tl.tag = NFAPI_PUCCH_CONFIG_DELTA_PUCCH_SHIFT_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pucch_config.n_cqi_rb.tl.tag = NFAPI_PUCCH_CONFIG_N_CQI_RB_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pucch_config.n_an_cs.tl.tag = NFAPI_PUCCH_CONFIG_N_AN_CS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.pucch_config.n1_pucch_an.tl.tag = NFAPI_PUCCH_CONFIG_N1_PUCCH_AN_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.srs_config.bandwidth_configuration.tl.tag = NFAPI_SRS_CONFIG_BANDWIDTH_CONFIGURATION_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.srs_config.max_up_pts.tl.tag = NFAPI_SRS_CONFIG_MAX_UP_PTS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.srs_config.srs_subframe_configuration.tl.tag = NFAPI_SRS_CONFIG_SRS_SUBFRAME_CONFIGURATION_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.srs_config.srs_acknack_srs_simultaneous_transmission.tl.tag = NFAPI_SRS_CONFIG_SRS_ACKNACK_SRS_SIMULTANEOUS_TRANSMISSION_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.uplink_reference_signal_config.uplink_rs_hopping.tl.tag = NFAPI_UPLINK_REFERENCE_SIGNAL_CONFIG_UPLINK_RS_HOPPING_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.uplink_reference_signal_config.group_assignment.tl.tag = NFAPI_UPLINK_REFERENCE_SIGNAL_CONFIG_GROUP_ASSIGNMENT_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.uplink_reference_signal_config.cyclic_shift_1_for_drms.tl.tag = NFAPI_UPLINK_REFERENCE_SIGNAL_CONFIG_CYCLIC_SHIFT_1_FOR_DRMS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.tdd_frame_structure_config.subframe_assignment.tl.tag = NFAPI_TDD_FRAME_STRUCTURE_SUBFRAME_ASSIGNMENT_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.tdd_frame_structure_config.special_subframe_patterns.tl.tag = NFAPI_TDD_FRAME_STRUCTURE_SPECIAL_SUBFRAME_PATTERNS_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.l23_config.data_report_mode.tl.tag = NFAPI_L23_CONFIG_DATA_REPORT_MODE_TAG;
        nfapi_resp.num_tlv++;
        nfapi_resp.l23_config.sfnsf.tl.tag = NFAPI_L23_CONFIG_SFNSF_TAG;
        nfapi_resp.num_tlv++;
    }
    {
        //if(phy->state == NFAPI_PNF_PHY_IDLE)
        //if(nfapi_resp.l1_status.phy_state.value == NFAPI_PNF_PHY_IDLE)
        {
            // -- NFAPI
            // Downlink UEs per Subframe
            nfapi_resp.nfapi_config.dl_ue_per_sf.tl.tag = NFAPI_NFAPI_DOWNLINK_UES_PER_SUBFRAME_TAG;
            nfapi_resp.nfapi_config.dl_ue_per_sf.value = data->phy->dl_ues_per_subframe;
            nfapi_resp.num_tlv++;
            // Uplink UEs per Subframe
            nfapi_resp.nfapi_config.ul_ue_per_sf.tl.tag = NFAPI_NFAPI_UPLINK_UES_PER_SUBFRAME_TAG;
            nfapi_resp.nfapi_config.ul_ue_per_sf.value = data->phy->ul_ues_per_subframe;
            nfapi_resp.num_tlv++;
            // nFAPI RF Bands
            // NOTE(review): num_tlv is not incremented for the rf_bands TLV
            // below — confirm whether that is intentional.
            nfapi_resp.nfapi_config.rf_bands.tl.tag = NFAPI_PHY_RF_BANDS_TAG;
            nfapi_resp.nfapi_config.rf_bands.number_rf_bands = 2;
            nfapi_resp.nfapi_config.rf_bands.rf_band[0] = 23;
            nfapi_resp.nfapi_config.rf_bands.rf_band[1] = 7;
            // P7 PNF Address IPv4
            nfapi_resp.nfapi_config.p7_pnf_address_ipv4.tl.tag = NFAPI_NFAPI_P7_PNF_ADDRESS_IPV4_TAG;
            struct sockaddr_in pnf_p7_sockaddr;
            pnf_p7_sockaddr.sin_addr.s_addr = inet_addr(data->phy->local_addr.c_str());
            memcpy(&(nfapi_resp.nfapi_config.p7_pnf_address_ipv4.address[0]), &pnf_p7_sockaddr.sin_addr.s_addr, 4);
            nfapi_resp.num_tlv++;
            // P7 PNF Address IPv6
            // P7 PNF Port
            nfapi_resp.nfapi_config.p7_pnf_port.tl.tag = NFAPI_NFAPI_P7_PNF_PORT_TAG;
            nfapi_resp.nfapi_config.p7_pnf_port.value = data->phy->local_port;
            nfapi_resp.num_tlv++;
            // NMM GSM Frequency Bands
            nfapi_resp.nfapi_config.nmm_gsm_frequency_bands.tl.tag = NFAPI_NFAPI_NMM_GSM_FREQUENCY_BANDS_TAG;
            nfapi_resp.nfapi_config.nmm_gsm_frequency_bands.number_of_rf_bands = 1;
            nfapi_resp.nfapi_config.nmm_gsm_frequency_bands.bands[0] = 23;
            nfapi_resp.num_tlv++;
            // NMM UMTS Frequency Bands
            nfapi_resp.nfapi_config.nmm_umts_frequency_bands.tl.tag = NFAPI_NFAPI_NMM_UMTS_FREQUENCY_BANDS_TAG;
            nfapi_resp.nfapi_config.nmm_umts_frequency_bands.number_of_rf_bands = 1;
            nfapi_resp.nfapi_config.nmm_umts_frequency_bands.bands[0] = 23;
            nfapi_resp.num_tlv++;
            // NMM LTE Frequency Bands
            nfapi_resp.nfapi_config.nmm_lte_frequency_bands.tl.tag = NFAPI_NFAPI_NMM_LTE_FREQUENCY_BANDS_TAG;
            nfapi_resp.nfapi_config.nmm_lte_frequency_bands.number_of_rf_bands = 1;
            nfapi_resp.nfapi_config.nmm_lte_frequency_bands.bands[0] = 23;
            nfapi_resp.num_tlv++;
            // NMM Uplink RSSI supported
            nfapi_resp.nfapi_config.nmm_uplink_rssi_supported.tl.tag = NFAPI_NFAPI_NMM_UPLINK_RSSI_SUPPORTED_TAG;
            nfapi_resp.nfapi_config.nmm_uplink_rssi_supported.value = 1;
            nfapi_resp.num_tlv++;
        }
    }
    nfapi_pnf_param_resp(data->config, &nfapi_resp);
    return 0;
}
// FAPI CONFIG.response callback: translates the stub's CONFIG.response into
// an nFAPI CONFIG.response (carrying the same error code) and sends it to
// the VNF over the P5 connection. Always returns 0.
int fapi_config_response(fapi_t* fapi, fapi_config_resp_t* resp)
{
    pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
    nfapi_config_response_t response;
    memset(&response, 0, sizeof(response));
    response.header.phy_id = data->phy_id;
    response.header.message_id = NFAPI_CONFIG_RESPONSE;
    response.error_code = resp->error_code;
    nfapi_pnf_config_resp(data->config, &response);
    return 0;
}
// FAPI subframe indication callback. On the very first indication for a
// PHY the deferred START.response is sent to the VNF and, when configured,
// the UDP data path is started; every indication is then forwarded as a P7
// subframe indication carrying the current sfn_sf. Always returns 0.
int fapi_subframe_ind(fapi_t* fapi, fapi_subframe_ind_t* resp)
{
    pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
    phy_info* phy = data->phy;
    if(phy->first_subframe_ind == 0)
    {
        // First subframe: the PHY is now running, so answer the pending
        // START.request.
        printf("Sending nfapi_pnf_start_resp phy_id:%d\n", data->phy_id);
        nfapi_start_response_t start_resp;
        memset(&start_resp, 0, sizeof(start_resp));
        start_resp.header.message_id = NFAPI_START_RESPONSE;
        start_resp.header.phy_id = data->phy_id;
        start_resp.error_code = NFAPI_MSG_OK;
        nfapi_pnf_start_resp(data->config, &start_resp);
        phy->first_subframe_ind = 1;
        if(phy->udp.enabled)
        {
            fapi_start_data(fapi, phy->udp.rx_port, phy->udp.tx_addr.c_str(), phy->udp.tx_port);
        }
    }
    nfapi_pnf_p7_subframe_ind(data->p7_config, data->phy_id, resp->sfn_sf);
    return 0;
}
// FAPI HARQ indication callback: forwards a HARQ indication to the VNF over
// P7. In wireshark_test_mode a single PDU is generated with randomized
// TDD/FDD rel8/rel9/rel13 fields (so every encoding shows up in a capture);
// otherwise a fixed set of 8 FDD rel8 PDUs is sent. Always returns 0.
// The PDU array is a stack VLA referenced via harq_pdu_list; this relies on
// nfapi_pnf_p7_harq_ind consuming the data before this function returns.
int fapi_harq_ind(fapi_t* fapi, fapi_harq_ind_t* ind)
{
    pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
    nfapi_harq_indication_t harq_ind;
    memset(&harq_ind, 0, sizeof(harq_ind));
    harq_ind.header.message_id = NFAPI_HARQ_INDICATION;
    harq_ind.header.phy_id = data->p7_config->phy_id;
    harq_ind.sfn_sf = ind->sfn_sf;
    if(((pnf_info*)(data->config->user_data))->wireshark_test_mode)
    {
        // Test mode: one PDU with randomized contents.
        harq_ind.harq_indication_body.tl.tag = NFAPI_HARQ_INDICATION_BODY_TAG;
        harq_ind.harq_indication_body.number_of_harqs = 1;
        nfapi_harq_indication_pdu_t pdus[harq_ind.harq_indication_body.number_of_harqs];
        memset(&pdus, 0, sizeof(pdus));
        pdus[0].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
        pdus[0].rx_ue_information.handle = rand_range(0, 9999);
        pdus[0].rx_ue_information.rnti = rand_range(1, 65535);
        // TDD rel8: the union member used depends on the random 'mode'
        // (modes 3/4 carry no data here).
        pdus[0].harq_indication_tdd_rel8.tl.tag = NFAPI_HARQ_INDICATION_TDD_REL8_TAG;
        pdus[0].harq_indication_tdd_rel8.mode = rand_range(0, 4);
        pdus[0].harq_indication_tdd_rel8.number_of_ack_nack = rand_range(1, 4);
        switch(pdus[0].harq_indication_tdd_rel8.mode)
        {
            case 0:
            {
                pdus[0].harq_indication_tdd_rel8.harq_data.bundling.value_0 = rand_range(1, 7);
                pdus[0].harq_indication_tdd_rel8.harq_data.bundling.value_1 = rand_range(1, 7);
            }
            break;
            case 1:
            {
                pdus[0].harq_indication_tdd_rel8.harq_data.multiplex.value_0 = rand_range(1, 7);
                pdus[0].harq_indication_tdd_rel8.harq_data.multiplex.value_1 = rand_range(1, 7);
                pdus[0].harq_indication_tdd_rel8.harq_data.multiplex.value_2 = rand_range(1, 7);
                pdus[0].harq_indication_tdd_rel8.harq_data.multiplex.value_3 = rand_range(1, 7);
            }
            break;
            case 2:
            {
                pdus[0].harq_indication_tdd_rel8.harq_data.special_bundling.value_0 = rand_range(1, 7);
            }
            break;
        };
        // TDD rel9: mode selects the union member of harq_data[0].
        pdus[0].harq_indication_tdd_rel9.tl.tag = NFAPI_HARQ_INDICATION_TDD_REL9_TAG;
        pdus[0].harq_indication_tdd_rel9.mode = rand_range(0, 4);
        pdus[0].harq_indication_tdd_rel9.number_of_ack_nack = 1;
        switch(pdus[0].harq_indication_tdd_rel9.mode)
        {
            case 0:
            {
                pdus[0].harq_indication_tdd_rel9.harq_data[0].bundling.value_0 = rand_range(1, 7);
            }
            break;
            case 1:
            {
                pdus[0].harq_indication_tdd_rel9.harq_data[0].multiplex.value_0 = rand_range(1, 7);
            }
            break;
            case 2:
            {
                pdus[0].harq_indication_tdd_rel9.harq_data[0].special_bundling.value_0 = rand_range(1, 7);
            }
            break;
            case 3:
            {
                pdus[0].harq_indication_tdd_rel9.harq_data[0].channel_selection.value_0 = rand_range(1, 7);
            }
            break;
            case 4:
            {
                pdus[0].harq_indication_tdd_rel9.harq_data[0].format_3.value_0 = rand_range(1, 7);
            }
            break;
        };
        // TDD rel13: adds formats 4 and 5 (modes 5 and 6).
        pdus[0].harq_indication_tdd_rel13.tl.tag = NFAPI_HARQ_INDICATION_TDD_REL13_TAG;
        pdus[0].harq_indication_tdd_rel13.mode = rand_range(0, 6);
        pdus[0].harq_indication_tdd_rel13.number_of_ack_nack = 1;
        switch(pdus[0].harq_indication_tdd_rel13.mode)
        {
            case 0:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].bundling.value_0 = rand_range(1, 7);
            }
            break;
            case 1:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].multiplex.value_0 = rand_range(1, 7);
            }
            break;
            case 2:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].special_bundling.value_0 = rand_range(1, 7);
            }
            break;
            case 3:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].channel_selection.value_0 = rand_range(1, 7);
            }
            break;
            case 4:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].format_3.value_0 = rand_range(1, 7);
            }
            break;
            case 5:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].format_4.value_0 = rand_range(1, 7);
            }
            break;
            case 6:
            {
                pdus[0].harq_indication_tdd_rel13.harq_data[0].format_5.value_0 = rand_range(1, 7);
            }
            break;
        };
        // FDD rel8/rel9/rel13 variants.
        pdus[0].harq_indication_fdd_rel8.tl.tag = NFAPI_HARQ_INDICATION_FDD_REL8_TAG;
        pdus[0].harq_indication_fdd_rel8.harq_tb1 = rand_range(1, 7);
        pdus[0].harq_indication_fdd_rel8.harq_tb2 = rand_range(1, 7);
        pdus[0].harq_indication_fdd_rel9.tl.tag = NFAPI_HARQ_INDICATION_FDD_REL9_TAG;
        pdus[0].harq_indication_fdd_rel9.mode = rand_range(0, 2);
        pdus[0].harq_indication_fdd_rel9.number_of_ack_nack = 2;
        pdus[0].harq_indication_fdd_rel9.harq_tb_n[0] = rand_range(1, 7);
        pdus[0].harq_indication_fdd_rel9.harq_tb_n[1] = rand_range(1, 7);
        pdus[0].harq_indication_fdd_rel13.tl.tag = NFAPI_HARQ_INDICATION_FDD_REL13_TAG;
        pdus[0].harq_indication_fdd_rel13.mode = rand_range(0, 2);
        pdus[0].harq_indication_fdd_rel13.number_of_ack_nack = 2;
        pdus[0].harq_indication_fdd_rel13.harq_tb_n[0] = rand_range(1, 7);
        pdus[0].harq_indication_fdd_rel13.harq_tb_n[1] = rand_range(1, 7);
        pdus[0].ul_cqi_information.tl.tag = NFAPI_UL_CQI_INFORMATION_TAG;
        pdus[0].ul_cqi_information.ul_cqi = rand_range(0,255);
        pdus[0].ul_cqi_information.channel = rand_range(0, 1);
        harq_ind.harq_indication_body.harq_pdu_list = pdus;
        nfapi_pnf_p7_harq_ind(data->p7_config, &harq_ind);
    }
    else
    {
        // Normal mode: a fixed batch of 8 FDD rel8 PDUs (rnti 0..7).
        harq_ind.harq_indication_body.tl.tag = NFAPI_HARQ_INDICATION_BODY_TAG;
        harq_ind.harq_indication_body.number_of_harqs = 8;
        nfapi_harq_indication_pdu_t pdus[8];
        memset(&pdus, 0, sizeof(pdus));
        for(int i = 0; i < 8; ++i)
        {
            pdus[i].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
            pdus[i].rx_ue_information.handle = 0xFF;
            pdus[i].rx_ue_information.rnti = i;
            pdus[i].harq_indication_fdd_rel8.tl.tag = NFAPI_HARQ_INDICATION_FDD_REL8_TAG;
            pdus[i].harq_indication_fdd_rel8.harq_tb1 = 1;
            pdus[i].harq_indication_fdd_rel8.harq_tb2 = 2;
        }
        harq_ind.harq_indication_body.harq_pdu_list = pdus;
        nfapi_pnf_p7_harq_ind(data->p7_config, &harq_ind);
    }
    return 0;
}
// FAPI CRC indication callback: forwards a CRC indication to the VNF over
// P7. In wireshark_test_mode a single randomized PDU is generated;
// otherwise the PDUs from the FAPI indication are translated one-to-one.
// Always returns 0.
int fapi_crc_ind(fapi_t* fapi, fapi_crc_ind_t* ind)
{
    pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
    nfapi_crc_indication_t crc_ind;
    memset(&crc_ind, 0, sizeof(crc_ind));
    crc_ind.header.message_id = NFAPI_CRC_INDICATION;
    crc_ind.header.phy_id = data->p7_config->phy_id;
    crc_ind.sfn_sf = ind->sfn_sf;
    if(((pnf_info*)(data->config->user_data))->wireshark_test_mode)
    {
        // Test mode: one random PDU on the stack; consumed by
        // nfapi_pnf_p7_crc_ind before this frame returns.
        crc_ind.crc_indication_body.tl.tag = NFAPI_CRC_INDICATION_BODY_TAG;
        crc_ind.crc_indication_body.number_of_crcs = 1;
        nfapi_crc_indication_pdu_t pdus[crc_ind.crc_indication_body.number_of_crcs];
        memset(&pdus, 0, sizeof(pdus));
        pdus[0].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
        pdus[0].rx_ue_information.handle = rand_range(0, 9999);
        pdus[0].rx_ue_information.rnti = rand_range(1, 65535);
        pdus[0].crc_indication_rel8.tl.tag = NFAPI_CRC_INDICATION_REL8_TAG;
        pdus[0].crc_indication_rel8.crc_flag = rand_range(0, 1);
        crc_ind.crc_indication_body.crc_pdu_list = pdus;
        nfapi_pnf_p7_crc_ind(data->p7_config, &crc_ind);
    }
    else
    {
        // Copy each FAPI CRC PDU into an nfapi PDU.
        // NOTE(review): the malloc result is not checked for NULL before
        // the loop dereferences it — confirm whether that is acceptable.
        crc_ind.crc_indication_body.tl.tag = NFAPI_CRC_INDICATION_BODY_TAG;
        crc_ind.crc_indication_body.number_of_crcs = ind->body.number_of_crcs;
        crc_ind.crc_indication_body.crc_pdu_list = (nfapi_crc_indication_pdu_t*)malloc(sizeof(nfapi_crc_indication_pdu_t) * ind->body.number_of_crcs);
        for(int i = 0; i < ind->body.number_of_crcs; ++i)
        {
            crc_ind.crc_indication_body.crc_pdu_list[i].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
            crc_ind.crc_indication_body.crc_pdu_list[i].rx_ue_information.handle = ind->body.pdus[i].rx_ue_info.handle;
            crc_ind.crc_indication_body.crc_pdu_list[i].rx_ue_information.rnti = ind->body.pdus[i].rx_ue_info.rnti;
            crc_ind.crc_indication_body.crc_pdu_list[i].crc_indication_rel8.tl.tag = NFAPI_CRC_INDICATION_REL8_TAG;
            crc_ind.crc_indication_body.crc_pdu_list[i].crc_indication_rel8.crc_flag = ind->body.pdus[i].rel8_pdu.crc_flag;
        }
        nfapi_pnf_p7_crc_ind(data->p7_config, &crc_ind);
        free(crc_ind.crc_indication_body.crc_pdu_list);
    }
    return 0;
}
// Convert a FAPI RX_ULSCH.indication into an nFAPI RX.indication and send it
// to the VNF over P7.
//
// Fixes relative to the original:
//  - the test-mode payload buffer is zero-initialized (it was previously sent
//    containing uninitialized stack bytes)
//  - the malloc() of the pdu list is checked before use
//  - the malloc'd pdu list is zero-initialized so fields this function does
//    not copy (e.g. rnti, ul_cqi, timing_advance) are not heap garbage
int fapi_rx_ulsch_ind(fapi_t* fapi, fapi_rx_ulsch_ind_t* ind)
{
	pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_rx_indication_t rx_ind;
	memset(&rx_ind, 0, sizeof(rx_ind));
	rx_ind.header.message_id = NFAPI_RX_ULSCH_INDICATION;
	rx_ind.header.phy_id = data->p7_config->phy_id;
	rx_ind.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(data->config->user_data))->wireshark_test_mode)
	{
		rx_ind.rx_indication_body.tl.tag = NFAPI_RX_INDICATION_BODY_TAG;
		rx_ind.rx_indication_body.number_of_pdus = 8;

		uint8_t rx_data[1024];
		memset(rx_data, 0, sizeof(rx_data)); // don't leak stack contents on the wire

		nfapi_rx_indication_pdu_t pdus[rx_ind.rx_indication_body.number_of_pdus];
		memset(&pdus, 0, sizeof(pdus));
		for(int i = 0; i < rx_ind.rx_indication_body.number_of_pdus; ++i)
		{
			pdus[i].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
			pdus[i].rx_ue_information.handle = rand_range(0, 9999);
			pdus[i].rx_ue_information.rnti = rand_range(1, 65535);
			pdus[i].rx_indication_rel8.tl.tag = NFAPI_RX_INDICATION_REL8_TAG;
			pdus[i].rx_indication_rel8.length = rand_range(0, 1024);
			pdus[i].rx_indication_rel8.offset = 1;
			pdus[i].rx_indication_rel8.ul_cqi = rand_range(0, 255);
			pdus[i].rx_indication_rel8.timing_advance = rand_range(0, 63);
			pdus[i].rx_indication_rel9.tl.tag = NFAPI_RX_INDICATION_REL9_TAG;
			pdus[i].rx_indication_rel9.timing_advance_r9 = rand_range(0, 7690);
			pdus[i].data = &rx_data[0];
		}
		rx_ind.rx_indication_body.rx_pdu_list = pdus;
		nfapi_pnf_p7_rx_ind(data->p7_config, &rx_ind);
	}
	else
	{
		rx_ind.rx_indication_body.tl.tag = NFAPI_RX_INDICATION_BODY_TAG;
		rx_ind.rx_indication_body.number_of_pdus = ind->body.number_of_pdus;
		rx_ind.rx_indication_body.rx_pdu_list = (nfapi_rx_indication_pdu_t*)malloc(sizeof(nfapi_rx_indication_pdu_t) * ind->body.number_of_pdus);
		if(rx_ind.rx_indication_body.rx_pdu_list == NULL)
		{
			// allocation failed; drop the indication rather than dereference NULL
			return -1;
		}
		memset(rx_ind.rx_indication_body.rx_pdu_list, 0, sizeof(nfapi_rx_indication_pdu_t) * ind->body.number_of_pdus);

		for(int i = 0; i < ind->body.number_of_pdus; ++i)
		{
			nfapi_rx_indication_pdu_t* pdu = &rx_ind.rx_indication_body.rx_pdu_list[i];
			pdu->rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
			pdu->rx_ue_information.handle = ind->body.pdus[i].rx_ue_info.handle;
			pdu->rx_indication_rel8.tl.tag = NFAPI_RX_INDICATION_REL8_TAG;
			pdu->rx_indication_rel8.length = ind->body.pdus[i].rel8_pdu.length;
			pdu->rx_indication_rel8.offset = 1;
			pdu->rx_indication_rel9.tl.tag = 0; // rel9 TLV deliberately not included
			pdu->data = (uint8_t*)ind->body.data[i];
		}

		nfapi_pnf_p7_rx_ind(data->p7_config, &rx_ind);
		free(rx_ind.rx_indication_body.rx_pdu_list);
	}
	return 0;
}
// Convert a FAPI CQI indication into an nFAPI RX_CQI.indication and send it
// to the VNF over P7.
//
// In wireshark test mode three randomized CQI PDUs are generated (each
// randomly choosing the rel8 or rel9 report shape). Outside test mode only
// the zeroed header/sfn_sf is sent — no CQI conversion is performed here.
int fapi_rx_cqi_ind(fapi_t* fapi, fapi_rx_cqi_ind_t* ind)
{
pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);
nfapi_cqi_indication_t cqi_ind;
memset(&cqi_ind, 0, sizeof(cqi_ind));
cqi_ind.header.message_id = NFAPI_RX_CQI_INDICATION;
cqi_ind.header.phy_id = data->p7_config->phy_id;
cqi_ind.sfn_sf = ind->sfn_sf;
if(((pnf_info*)(data->config->user_data))->wireshark_test_mode)
{
cqi_ind.cqi_indication_body.tl.tag = NFAPI_CQI_INDICATION_BODY_TAG;
cqi_ind.cqi_indication_body.number_of_cqis = 3;
nfapi_cqi_indication_pdu_t cqi_pdu_list[cqi_ind.cqi_indication_body.number_of_cqis];
memset(&cqi_pdu_list, 0, sizeof(cqi_pdu_list));
// NOTE(review): unlike cqi_pdu_list, the raw pdu list is NOT zeroed (the
// memset below is commented out), so it is sent with uninitialized stack
// contents — presumably acceptable for wireshark test traffic, but confirm.
nfapi_cqi_indication_raw_pdu_t cqi_raw_pdu_list[cqi_ind.cqi_indication_body.number_of_cqis];
//memset(&cqi_raw_pdu_list, 0, sizeof(cqi_raw_pdu_list));
for(int i = 0; i < cqi_ind.cqi_indication_body.number_of_cqis; ++i)
{
cqi_pdu_list[i].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
cqi_pdu_list[i].rx_ue_information.handle = rand_range(0, 9999);
cqi_pdu_list[i].rx_ue_information.rnti = rand_range(1, 65535);
// Randomly exercise the rel8 or rel9 variant of the CQI report.
uint8_t rel8_or_9 = rand_range(0, 1);
if(rel8_or_9)
{
cqi_pdu_list[i].cqi_indication_rel8.tl.tag = NFAPI_CQI_INDICATION_REL8_TAG;
cqi_pdu_list[i].cqi_indication_rel8.length = 8; //rand_range(1, 12);
cqi_pdu_list[i].cqi_indication_rel8.data_offset = 1; //rand_range(0, 1);
cqi_pdu_list[i].cqi_indication_rel8.ul_cqi = 0;
cqi_pdu_list[i].cqi_indication_rel8.ri = rand_range(0, 4);
cqi_pdu_list[i].cqi_indication_rel8.timing_advance = rand_range(0, 63);
}
else
{
cqi_pdu_list[i].cqi_indication_rel9.tl.tag = NFAPI_CQI_INDICATION_REL9_TAG;
cqi_pdu_list[i].cqi_indication_rel9.length = 8; //rand_range(1, 12);
cqi_pdu_list[i].cqi_indication_rel9.data_offset = 1; //rand_range(0, 1);
cqi_pdu_list[i].cqi_indication_rel9.ul_cqi = 0; //rand_range(0, 1);
cqi_pdu_list[i].cqi_indication_rel9.number_of_cc_reported = 1;
cqi_pdu_list[i].cqi_indication_rel9.ri[0] = rand_range(0, 8);
cqi_pdu_list[i].cqi_indication_rel9.timing_advance = rand_range(0, 63);
cqi_pdu_list[i].cqi_indication_rel9.timing_advance_r9 = rand_range(0, 7690);
}
cqi_pdu_list[i].ul_cqi_information.tl.tag = NFAPI_UL_CQI_INFORMATION_TAG;
cqi_pdu_list[i].ul_cqi_information.ul_cqi = rand_range(0,255);
cqi_pdu_list[i].ul_cqi_information.channel = rand_range(0, 1);
}
cqi_ind.cqi_indication_body.cqi_pdu_list = cqi_pdu_list;
cqi_ind.cqi_indication_body.cqi_raw_pdu_list = cqi_raw_pdu_list;
nfapi_pnf_p7_cqi_ind(data->p7_config, &cqi_ind);
}
else
{
// Non-test mode: forward only the header; pdu conversion from `ind` is not
// implemented here.
nfapi_pnf_p7_cqi_ind(data->p7_config, &cqi_ind);
}
return 0;
}
// Forward a FAPI SR indication to the VNF as an nFAPI RX_SR.indication.
// In wireshark test mode one randomized SR PDU is attached so every field
// appears in a packet capture; otherwise the indication is sent with an
// empty body.
int fapi_rx_sr_ind(fapi_t* fapi, fapi_rx_sr_ind_t* ind)
{
	pnf_phy_user_data_t* user = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_sr_indication_t msg;
	memset(&msg, 0, sizeof(msg));
	msg.header.message_id = NFAPI_RX_SR_INDICATION;
	msg.header.phy_id = user->p7_config->phy_id;
	msg.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(user->config->user_data))->wireshark_test_mode)
	{
		// Build one fully populated SR PDU with randomized contents.
		nfapi_sr_indication_pdu_t pdu;
		memset(&pdu, 0, sizeof(pdu));
		pdu.rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
		pdu.rx_ue_information.handle = rand_range(0, 9999);
		pdu.rx_ue_information.rnti = rand_range(1, 65535);
		pdu.ul_cqi_information.tl.tag = NFAPI_UL_CQI_INFORMATION_TAG;
		pdu.ul_cqi_information.ul_cqi = rand_range(0,255);
		pdu.ul_cqi_information.channel = rand_range(0, 1);

		msg.sr_indication_body.tl.tag = NFAPI_SR_INDICATION_BODY_TAG;
		msg.sr_indication_body.number_of_srs = 1;
		msg.sr_indication_body.sr_pdu_list = &pdu;
		nfapi_pnf_p7_sr_ind(user->p7_config, &msg);
	}
	else
	{
		nfapi_pnf_p7_sr_ind(user->p7_config, &msg);
	}
	return 0;
}
// Relay a FAPI RACH indication towards the VNF as an nFAPI RACH.indication.
// In wireshark test mode a single randomized preamble PDU is attached;
// otherwise the indication is forwarded with an empty body.
int fapi_rach_ind(fapi_t* fapi, fapi_rach_ind_t* ind)
{
	pnf_phy_user_data_t* user = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_rach_indication_t msg;
	memset(&msg, 0, sizeof(msg));
	msg.header.message_id = NFAPI_RACH_INDICATION;
	msg.header.phy_id = user->p7_config->phy_id;
	msg.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(user->config->user_data))->wireshark_test_mode)
	{
		// One randomized preamble covering rel8/rel9/rel13 fields.
		nfapi_preamble_pdu_t preamble;
		memset(&preamble, 0, sizeof(preamble));
		preamble.preamble_rel8.tl.tag = NFAPI_PREAMBLE_REL8_TAG;
		preamble.preamble_rel8.rnti = rand_range(1, 65535);
		preamble.preamble_rel8.preamble = rand_range(0, 63);
		preamble.preamble_rel8.timing_advance = rand_range(0, 1282);
		preamble.preamble_rel9.tl.tag = NFAPI_PREAMBLE_REL9_TAG;
		preamble.preamble_rel9.timing_advance_r9 = rand_range(0, 7690);
		preamble.preamble_rel13.tl.tag = NFAPI_PREAMBLE_REL13_TAG;
		preamble.preamble_rel13.rach_resource_type = rand_range(0, 4);

		msg.rach_indication_body.tl.tag = NFAPI_RACH_INDICATION_BODY_TAG;
		msg.rach_indication_body.number_of_preambles = 1;
		msg.rach_indication_body.preamble_list = &preamble;
		nfapi_pnf_p7_rach_ind(user->p7_config, &msg);
	}
	else
	{
		nfapi_pnf_p7_rach_ind(user->p7_config, &msg);
	}
	return 0;
}
// Forward a FAPI SRS indication to the VNF as an nFAPI SRS.indication.
// In wireshark test mode one randomized SRS PDU exercising the FDD rel8/9/11,
// TDD rel10 and TDD channel-measurement TLVs is attached.
//
// Fix relative to the original: the line setting ul_rtoa was a statement with
// no effect ("pdus[0].srs_indication_fdd_rel11.ul_rtoa;"). The value stays 0
// (what the memset produced), but the assignment is now explicit.
int fapi_srs_ind(fapi_t* fapi, fapi_srs_ind_t* ind)
{
	pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_srs_indication_t srs_ind;
	memset(&srs_ind, 0, sizeof(srs_ind));
	srs_ind.header.message_id = NFAPI_SRS_INDICATION;
	srs_ind.header.phy_id = data->p7_config->phy_id;
	srs_ind.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(data->config->user_data))->wireshark_test_mode)
	{
		srs_ind.srs_indication_body.tl.tag = NFAPI_SRS_INDICATION_BODY_TAG;
		srs_ind.srs_indication_body.number_of_ues = 1;

		nfapi_srs_indication_pdu_t pdus[srs_ind.srs_indication_body.number_of_ues];
		memset(&pdus, 0, sizeof(pdus));
		pdus[0].rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
		pdus[0].rx_ue_information.handle = rand_range(0, 9999);
		pdus[0].rx_ue_information.rnti = rand_range(1, 65535);
		pdus[0].srs_indication_fdd_rel8.tl.tag = NFAPI_SRS_INDICATION_FDD_REL8_TAG;
		pdus[0].srs_indication_fdd_rel8.doppler_estimation = rand_range(0, 255);
		pdus[0].srs_indication_fdd_rel8.timing_advance = rand_range(0, 63);
		pdus[0].srs_indication_fdd_rel8.number_of_resource_blocks = 2; //rand_range(0, 255);
		pdus[0].srs_indication_fdd_rel8.rb_start = rand_range(0, 245);
		pdus[0].srs_indication_fdd_rel8.snr[0] = rand_range(0, 255);
		pdus[0].srs_indication_fdd_rel8.snr[1] = rand_range(0, 255);
		pdus[0].srs_indication_fdd_rel9.tl.tag = NFAPI_SRS_INDICATION_FDD_REL9_TAG;
		pdus[0].srs_indication_fdd_rel9.timing_advance_r9 = rand_range(0, 7690);
		pdus[0].srs_indication_tdd_rel10.tl.tag = NFAPI_SRS_INDICATION_TDD_REL10_TAG;
		pdus[0].srs_indication_tdd_rel10.uppts_symbol = rand_range(0, 1);
		pdus[0].srs_indication_fdd_rel11.tl.tag = NFAPI_SRS_INDICATION_FDD_REL11_TAG;
		// BUG FIX: was "pdus[0].srs_indication_fdd_rel11.ul_rtoa;" (no effect).
		pdus[0].srs_indication_fdd_rel11.ul_rtoa = 0;
		pdus[0].tdd_channel_measurement.tl.tag = NFAPI_TDD_CHANNEL_MEASUREMENT_TAG;
		pdus[0].tdd_channel_measurement.num_prb_per_subband = rand_range(0, 255);
		pdus[0].tdd_channel_measurement.number_of_subbands = 1;
		pdus[0].tdd_channel_measurement.num_atennas = 2;
		pdus[0].tdd_channel_measurement.subands[0].subband_index = rand_range(0, 255);
		pdus[0].tdd_channel_measurement.subands[0].channel[0] = rand_range(0, 9999);
		pdus[0].tdd_channel_measurement.subands[0].channel[1] = rand_range(0, 9999);

		srs_ind.srs_indication_body.srs_pdu_list = pdus;
		nfapi_pnf_p7_srs_ind(data->p7_config, &srs_ind);
	}
	else
	{
		nfapi_pnf_p7_srs_ind(data->p7_config, &srs_ind);
	}
	return 0;
}
// Forward a FAPI LBT DL indication to the VNF. In wireshark test mode two
// randomized response PDUs (one LBT_PDSCH_RSP, one LBT_DRS_RSP) are attached;
// otherwise the indication goes out with an empty body.
int fapi_lbt_dl_ind(fapi_t* fapi, fapi_lbt_dl_ind_t* ind)
{
	pnf_phy_user_data_t* user = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_lbt_dl_indication_t msg;
	memset(&msg, 0, sizeof(msg));
	msg.header.message_id = NFAPI_LBT_DL_INDICATION;
	msg.header.phy_id = user->p7_config->phy_id;
	msg.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(user->config->user_data))->wireshark_test_mode)
	{
		nfapi_lbt_dl_indication_pdu_t rsp[2];
		memset(&rsp, 0, sizeof(rsp));

		// Entry 0: LBT_PDSCH_RSP PDU.
		rsp[0].pdu_type = 0;
		rsp[0].pdu_size = 0;
		rsp[0].lbt_pdsch_rsp_pdu.lbt_pdsch_rsp_pdu_rel13.tl.tag = NFAPI_LBT_PDSCH_RSP_PDU_REL13_TAG;
		rsp[0].lbt_pdsch_rsp_pdu.lbt_pdsch_rsp_pdu_rel13.handle = 0xABCD;
		rsp[0].lbt_pdsch_rsp_pdu.lbt_pdsch_rsp_pdu_rel13.result = rand_range(0, 1);
		rsp[0].lbt_pdsch_rsp_pdu.lbt_pdsch_rsp_pdu_rel13.lte_txop_symbols = rand_range(0, 0xFFFF);
		rsp[0].lbt_pdsch_rsp_pdu.lbt_pdsch_rsp_pdu_rel13.initial_partial_sf = rand_range(0, 1);

		// Entry 1: LBT_DRS_RSP PDU.
		rsp[1].pdu_type = 1;
		rsp[1].pdu_size = 0;
		rsp[1].lbt_drs_rsp_pdu.lbt_drs_rsp_pdu_rel13.tl.tag = NFAPI_LBT_DRS_RSP_PDU_REL13_TAG;
		rsp[1].lbt_drs_rsp_pdu.lbt_drs_rsp_pdu_rel13.handle = 0xABCD;
		rsp[1].lbt_drs_rsp_pdu.lbt_drs_rsp_pdu_rel13.result = rand_range(0, 1);

		msg.lbt_dl_indication_body.tl.tag = NFAPI_LBT_DL_INDICATION_BODY_TAG;
		msg.lbt_dl_indication_body.number_of_pdus = 2;
		msg.lbt_dl_indication_body.lbt_indication_pdu_list = rsp;
		nfapi_pnf_p7_lbt_dl_ind(user->p7_config, &msg);
	}
	else
	{
		nfapi_pnf_p7_lbt_dl_ind(user->p7_config, &msg);
	}
	return 0;
}
// Forward a FAPI NB-IoT HARQ indication to the VNF. In wireshark test mode a
// single randomized HARQ PDU is attached; otherwise the indication is sent
// with an empty body.
int fapi_nb_harq_ind(fapi_t* fapi, fapi_nb_harq_ind_t* ind)
{
	pnf_phy_user_data_t* user = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_nb_harq_indication_t msg;
	memset(&msg, 0, sizeof(msg));
	msg.header.message_id = NFAPI_NB_HARQ_INDICATION;
	msg.header.phy_id = user->p7_config->phy_id;
	msg.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(user->config->user_data))->wireshark_test_mode)
	{
		nfapi_nb_harq_indication_pdu_t pdu;
		memset(&pdu, 0, sizeof(pdu));
		pdu.rx_ue_information.tl.tag = NFAPI_RX_UE_INFORMATION_TAG;
		pdu.rx_ue_information.handle = rand_range(0, 0xFFFF);
		pdu.rx_ue_information.rnti = rand_range(0, 65535);
		pdu.nb_harq_indication_fdd_rel13.tl.tag = NFAPI_NB_HARQ_INDICATION_FDD_REL13_TAG;
		pdu.nb_harq_indication_fdd_rel13.harq_tb1 = rand_range(1, 7);
		pdu.ul_cqi_information.tl.tag = NFAPI_UL_CQI_INFORMATION_TAG;
		pdu.ul_cqi_information.ul_cqi = rand_range(0, 255);
		pdu.ul_cqi_information.channel = rand_range(0, 1);

		msg.nb_harq_indication_body.tl.tag = NFAPI_NB_HARQ_INDICATION_BODY_TAG;
		msg.nb_harq_indication_body.number_of_harqs = 1;
		msg.nb_harq_indication_body.nb_harq_pdu_list = &pdu;
		nfapi_pnf_p7_nb_harq_ind(user->p7_config, &msg);
	}
	else
	{
		nfapi_pnf_p7_nb_harq_ind(user->p7_config, &msg);
	}
	return 0;
}
// Forward a FAPI NRACH indication to the VNF. In wireshark test mode one
// randomized NRACH PDU is attached; otherwise the indication is sent with an
// empty body.
int fapi_nrach_ind(fapi_t* fapi, fapi_nrach_ind_t* ind)
{
	pnf_phy_user_data_t* user = (pnf_phy_user_data_t*)(fapi->user_data);

	nfapi_nrach_indication_t msg;
	memset(&msg, 0, sizeof(msg));
	msg.header.message_id = NFAPI_NRACH_INDICATION;
	msg.header.phy_id = user->p7_config->phy_id;
	msg.sfn_sf = ind->sfn_sf;

	if(((pnf_info*)(user->config->user_data))->wireshark_test_mode)
	{
		nfapi_nrach_indication_pdu_t pdu;
		memset(&pdu, 0, sizeof(pdu));
		pdu.nrach_indication_rel13.tl.tag = NFAPI_NRACH_INDICATION_REL13_TAG;
		pdu.nrach_indication_rel13.rnti = rand_range(1, 65535);
		pdu.nrach_indication_rel13.initial_sc = rand_range(0, 47);
		pdu.nrach_indication_rel13.timing_advance = rand_range(0, 3840);
		pdu.nrach_indication_rel13.nrach_ce_level = rand_range(0, 2);

		msg.nrach_indication_body.tl.tag = NFAPI_NRACH_INDICATION_BODY_TAG;
		msg.nrach_indication_body.number_of_initial_scs_detected = 1;
		msg.nrach_indication_body.nrach_pdu_list = &pdu;
		nfapi_pnf_p7_nrach_ind(user->p7_config, &msg);
	}
	else
	{
		nfapi_pnf_p7_nrach_ind(user->p7_config, &msg);
	}
	return 0;
}
// Handle PNF_START.request: create a FAPI instance (with its callback table)
// for every configured phy, then acknowledge with PNF_START.response.
//
// Fixes relative to the original:
//  - cb and c are zero-initialized before partial assignment (fields not set
//    here were previously passed to fapi_create as stack garbage)
//  - the per-phy user_data malloc is checked and zero-initialized
//  - "staring" typo in the log message corrected
int pnf_start_request(nfapi_pnf_config_t* config, nfapi_pnf_start_request_t* req)
{
	pnf_info* pnf = (pnf_info*)(config->user_data);

	// start all phys that have been configured
	for(phy_info& phy : pnf->phys)
	{
		if(phy.id != 0)
		{
			// Wire up the FAPI-to-nFAPI forwarding callbacks.
			fapi_cb_t cb;
			memset(&cb, 0, sizeof(cb));
			cb.fapi_param_response = &fapi_param_response;
			cb.fapi_config_response = &fapi_config_response;
			cb.fapi_subframe_ind = &fapi_subframe_ind;
			cb.fapi_harq_ind = fapi_harq_ind;
			cb.fapi_crc_ind = fapi_crc_ind;
			cb.fapi_rx_ulsch_ind = fapi_rx_ulsch_ind;
			cb.fapi_rx_cqi_ind = fapi_rx_cqi_ind;
			cb.fapi_rx_sr_ind = fapi_rx_sr_ind;
			cb.fapi_rach_ind = fapi_rach_ind;
			cb.fapi_srs_ind = fapi_srs_ind;
			cb.fapi_lbt_dl_ind = fapi_lbt_dl_ind;
			cb.fapi_nb_harq_ind = fapi_nb_harq_ind;
			cb.fapi_nrach_ind = fapi_nrach_ind;

			fapi_config_t c;
			memset(&c, 0, sizeof(c));
			c.duplex_mode = phy.duplex_mode;
			c.dl_channel_bw_support = phy.dl_channel_bw_support;
			c.ul_channel_bw_support = phy.ul_channel_bw_support;

			phy.fapi = fapi_create(&cb, &c);
			printf("[PNF_SIM] starting fapi %p phy_id:%d\n", phy.fapi, phy.id);

			pnf_phy_user_data_t* data = (pnf_phy_user_data_t*)malloc(sizeof(pnf_phy_user_data_t));
			if(data == NULL)
			{
				// out of memory: skip this phy rather than dereference NULL
				continue;
			}
			memset(data, 0, sizeof(*data)); // p7_config is filled in later by start_request
			data->phy_id = phy.id;
			data->config = config;
			data->phy = &phy;
			phy.fapi->user_data = data;
		}
	}

	nfapi_pnf_start_response_t resp;
	memset(&resp, 0, sizeof(resp));
	resp.header.message_id = NFAPI_PNF_START_RESPONSE;
	resp.error_code = NFAPI_MSG_OK;
	nfapi_pnf_pnf_start_resp(config, &resp);
	return 0;
}
// Handle PNF_STOP.request: acknowledge unconditionally with NFAPI_MSG_OK.
int pnf_stop_request(nfapi_pnf_config_t* config, nfapi_pnf_stop_request_t* req)
{
	(void)req; // no fields of the request are needed
	printf("[PNF_SIM] pnf stop request\n");

	nfapi_pnf_stop_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_PNF_STOP_RESPONSE;
	response.error_code = NFAPI_MSG_OK;
	nfapi_pnf_pnf_stop_resp(config, &response);
	return 0;
}
// Handle PARAM.request for a phy: look up the matching phy_info and forward a
// FAPI param request to it.
//
// Fix relative to the original: a request for an unknown phy_id was silently
// ignored (empty else branch); it is now logged so the failure is visible.
int param_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_param_request_t* req)
{
	printf("[PNF_SIM] param request phy_id:%d\n", req->header.phy_id);

	pnf_info* pnf = (pnf_info*)(config->user_data);
	auto found = std::find_if(pnf->phys.begin(), pnf->phys.end(), [&](phy_info& item)
	{ return item.id == req->header.phy_id; });

	if(found != pnf->phys.end())
	{
		phy_info& phy_info = (*found);

		fapi_param_req_t fapi_req;
		fapi_req.header.message_id = req->header.message_id;
		fapi_req.header.length = 0;
		// convert nfapi to fapi
		fapi_param_request(phy_info.fapi, &fapi_req);
	}
	else
	{
		// did not find the phy — make the dropped request visible
		printf("[PNF_SIM] param request for unknown phy_id:%d ignored\n", req->header.phy_id);
	}

	printf("[PNF_SIM] param request .. exit\n");
	return 0;
}
// Handle CONFIG.request for a phy: record the VNF P7 endpoint and timing
// parameters carried in the request, then forward a FAPI config request.
int config_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_config_request_t* req)
{
printf("[PNF_SIM] config request phy_id:%d\n", req->header.phy_id);
pnf_info* pnf = (pnf_info*)(config->user_data);
// Locate the phy_info entry matching the requested phy id.
auto found = std::find_if(pnf->phys.begin(), pnf->phys.end(), [&](phy_info& item)
{ return item.id == req->header.phy_id; });
if(found != pnf->phys.end())
{
phy_info& phy_info = (*found);
// Optional TLVs: a value is only present when its tag is set.
if(req->nfapi_config.timing_window.tl.tag == NFAPI_NFAPI_TIMING_WINDOW_TAG)
{
phy_info.timing_window = req->nfapi_config.timing_window.value;
}
if(req->nfapi_config.timing_info_mode.tl.tag == NFAPI_NFAPI_TIMING_INFO_MODE_TAG)
{
printf("timing info mode provided\n");
phy_info.timing_info_mode = req->nfapi_config.timing_info_mode.value;
}
else
{
// Absent TLV: disable timing info reporting.
phy_info.timing_info_mode = 0;
printf("NO timing info mode provided\n");
}
if(req->nfapi_config.timing_info_period.tl.tag == NFAPI_NFAPI_TIMING_INFO_PERIOD_TAG)
{
printf("timing info period provided\n");
phy_info.timing_info_period = req->nfapi_config.timing_info_period.value;
}
else
{
phy_info.timing_info_period = 0;
}
// Remote (VNF) P7 endpoint: port plus raw IPv4 address from the request.
phy_info.remote_port = req->nfapi_config.p7_vnf_port.value;
struct sockaddr_in vnf_p7_sockaddr;
memcpy(&vnf_p7_sockaddr.sin_addr.s_addr, &(req->nfapi_config.p7_vnf_address_ipv4.address[0]), 4);
// NOTE(review): inet_ntoa() returns a pointer to a static buffer; this is
// only safe if phy_info.remote_addr copies the string — confirm the field
// type before relying on the value after a later inet_ntoa call.
phy_info.remote_addr = inet_ntoa(vnf_p7_sockaddr.sin_addr);
printf("[PNF_SIM] %d vnf p7 %s:%d timing %d %d %d\n", phy_info.id, phy_info.remote_addr, phy_info.remote_port,
phy_info.timing_window, phy_info.timing_info_mode, phy_info.timing_info_period);
// NOTE(review): fapi_req is passed uninitialized — the nFAPI-to-FAPI field
// conversion appears unimplemented here; confirm fapi_config_request does
// not read it.
fapi_config_req_t fapi_req;
fapi_config_request(phy_info.fapi, &fapi_req);
}
return 0;
}
// Allocator for incoming P7 vendor-extension messages. Only
// P7_VENDOR_EXT_REQ is recognized; any other id yields NULL (no allocation).
// On success *msg_size is set to the allocated size.
nfapi_p7_message_header_t* phy_allocate_p7_vendor_ext(uint16_t message_id, uint16_t* msg_size)
{
	if(message_id != P7_VENDOR_EXT_REQ)
		return 0;

	*msg_size = sizeof(vendor_ext_p7_req);
	return (nfapi_p7_message_header_t*)malloc(sizeof(vendor_ext_p7_req));
}
// Releases a P7 vendor-extension message previously returned by
// phy_allocate_p7_vendor_ext. Passing NULL is safe (free(NULL) is a no-op).
void phy_deallocate_p7_vendor_ext(nfapi_p7_message_header_t* header)
{
free(header);
}
// Handle an nFAPI DL_CONFIG.request from the VNF: release the message's
// vendor extension (this handler owns it) and pass a FAPI DL config request
// on to the phy.
int phy_dl_config_req(nfapi_pnf_p7_config_t* pnf_p7, nfapi_dl_config_request_t* req)
{
//printf("[PNF_SIM] dl config request\n");
// The codec allocated the vendor extension for this message; free it here.
if(req->vendor_extension)
free(req->vendor_extension);
phy_info* phy = (phy_info*)(pnf_p7->user_data);
// NOTE(review): fapi_req is passed uninitialized — the nFAPI-to-FAPI
// conversion appears unimplemented; confirm fapi_dl_config_request does not
// read it.
fapi_dl_config_req_t fapi_req;
// convert
fapi_dl_config_request(phy->fapi, &fapi_req);
return 0;
}
// Handle an nFAPI UL_CONFIG.request from the VNF by issuing a FAPI UL config
// request to the phy. NOTE(review): the request payload is not copied across
// and fapi_req is uninitialized — conversion appears unimplemented; confirm
// fapi_ul_config_request does not read it.
int phy_ul_config_req(nfapi_pnf_p7_config_t* pnf_p7, nfapi_ul_config_request_t* req)
{
	(void)req; // payload conversion not implemented
	phy_info* phy_instance = (phy_info*)(pnf_p7->user_data);
	fapi_ul_config_req_t fapi_req;
	// convert
	fapi_ul_config_request(phy_instance->fapi, &fapi_req);
	return 0;
}
// Handle an nFAPI HI_DCI0.request from the VNF by issuing a FAPI HI/DCI0
// request to the phy. NOTE(review): the request payload is not copied across
// and fapi_req is uninitialized — conversion appears unimplemented; confirm
// fapi_hi_dci0_request does not read it.
int phy_hi_dci0_req(nfapi_pnf_p7_config_t* pnf_p7, nfapi_hi_dci0_request_t* req)
{
	(void)req; // payload conversion not implemented
	phy_info* phy_instance = (phy_info*)(pnf_p7->user_data);
	fapi_hi_dci0_req_t fapi_req;
	// convert
	fapi_hi_dci0_request(phy_instance->fapi, &fapi_req);
	return 0;
}
// Convert an nFAPI TX.request into a FAPI TX.request and hand it to the phy.
// The FAPI pdus borrow the segment data pointers from the nFAPI message —
// no copy is made (see the ownership note in the loop).
//
// Fix relative to the original: the local pdu array holds at most 8 entries
// but the loop bound came straight from the request, so a message advertising
// more than 8 pdus overran the stack buffer. The count is now clamped.
int phy_tx_req(nfapi_pnf_p7_config_t* pnf_p7, nfapi_tx_request_t* req)
{
	//printf("[PNF_SIM] tx request\n");
	phy_info* phy = (phy_info*)(pnf_p7->user_data);

	fapi_tx_req_t fapi_req;
	fapi_req.header.message_id = FAPI_TX_REQUEST;
	fapi_req.sfn_sf = req->sfn_sf;

	fapi_tx_req_pdu_t pdus[8];
	// Clamp to the capacity of the local array to prevent a stack overflow on
	// oversized requests.
	uint16_t number_of_pdus = req->tx_request_body.number_of_pdus;
	if(number_of_pdus > 8)
	{
		printf("[PNF_SIM] tx request clamped from %d to 8 pdus\n", number_of_pdus);
		number_of_pdus = 8;
	}
	fapi_req.body.number_of_pdus = number_of_pdus;
	fapi_req.body.pdus = &pdus[0];

	for(int i = 0; i < fapi_req.body.number_of_pdus; ++i)
	{
		fapi_req.body.pdus[i].pdu_length = req->tx_request_body.tx_pdu_list[i].pdu_length;
		fapi_req.body.pdus[i].pdu_index = req->tx_request_body.tx_pdu_list[i].pdu_index;
		fapi_req.body.pdus[i].num_tlv = 1;
		fapi_req.body.pdus[i].tlvs[0].value = (uint32_t*)req->tx_request_body.tx_pdu_list[i].segments[0].segment_data;
		//if the pnf wants to retain the pointer then req->tx_request_body.tx_pdu_list[i].segments[0].segment_data should be set to 0
	}

	fapi_tx_request(phy->fapi, &fapi_req);
	return 0;
}
// Stub handler for nFAPI LBT_DL_CONFIG.request — currently a no-op that
// acknowledges the callback contract and discards the request.
int phy_lbt_dl_config_req(nfapi_pnf_p7_config_t*, nfapi_lbt_dl_config_request_t* req)
{
//printf("[PNF_SIM] lbt dl config request\n");
return 0;
}
// Dispatch an incoming P7 vendor-extension message. P7_VENDOR_EXT_REQ is
// currently accepted but not acted upon; anything else is logged as unknown.
//
// Fix relative to the original: with the trace printf commented out, the
// local `req` was unused and triggered a compiler warning — it is now
// explicitly voided until the trace is re-enabled.
int phy_vendor_ext(nfapi_pnf_p7_config_t* config, nfapi_p7_message_header_t* msg)
{
	(void)config;
	if(msg->message_id == P7_VENDOR_EXT_REQ)
	{
		vendor_ext_p7_req* req = (vendor_ext_p7_req*)msg;
		(void)req; // kept for the (currently disabled) trace below
		//printf("[PNF_SIM] vendor request (1:%d 2:%d)\n", req->dummy1, req->dummy2);
	}
	else
	{
		printf("[PNF_SIM] unknown vendor ext\n");
	}
	return 0;
}
// Pack callback for outgoing P7 vendor-extension messages.
// Returns 1 on success, 0 when the buffer is exhausted, and -1 for message
// ids this codec does not know how to pack.
int phy_pack_p7_vendor_extension(nfapi_p7_message_header_t* header, uint8_t** ppWritePackedMsg, uint8_t *end, nfapi_p7_codec_config_t* codex)
{
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s\n", __FUNCTION__);
	if(header->message_id != P7_VENDOR_EXT_IND)
		return -1;

	vendor_ext_p7_ind* ind = (vendor_ext_p7_ind*)(header);
	return push16(ind->error_code, ppWritePackedMsg, end) ? 1 : 0;
}
// Unpack callback for incoming P7 vendor-extension messages.
// Returns 1 on success, 0 on a truncated message, and -1 for message ids
// this codec does not recognize.
int phy_unpack_p7_vendor_extension(nfapi_p7_message_header_t* header, uint8_t** ppReadPackedMessage, uint8_t *end, nfapi_p7_codec_config_t* codec)
{
	if(header->message_id != P7_VENDOR_EXT_REQ)
		return -1;

	//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s\n", __FUNCTION__);
	vendor_ext_p7_req* req = (vendor_ext_p7_req*)(header);
	if(pull16(ppReadPackedMessage, &req->dummy1, end) &&
	   pull16(ppReadPackedMessage, &req->dummy2, end))
	{
		return 1;
	}
	return 0;
}
// Unpack a P7 vendor-extension TLV. Returns 1 on success (with *ve set to a
// malloc'd vendor_ext_tlv_1 the caller owns), 0 on failure, -1 for unknown
// tags.
//
// Fixes relative to the original:
//  - malloc() is checked before the result is dereferenced
//  - on a truncated message the allocation is freed and *ve is left untouched
//    instead of leaking and handing the caller a half-initialized object
int phy_unpack_vendor_extension_tlv(nfapi_tl_t* tl, uint8_t **ppReadPackedMessage, uint8_t* end, void** ve, nfapi_p7_codec_config_t* config)
{
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "phy_unpack_vendor_extension_tlv\n");
	switch(tl->tag)
	{
		case VENDOR_EXT_TLV_1_TAG:
		{
			vendor_ext_tlv_1* tlv = (vendor_ext_tlv_1*)malloc(sizeof(vendor_ext_tlv_1));
			if(tlv == NULL)
				return 0; // allocation failure is reported as an unpack failure
			if(!pull32(ppReadPackedMessage, &tlv->dummy, end))
			{
				free(tlv); // don't leak on a truncated message
				return 0;
			}
			*ve = tlv;
			return 1;
		}
	}
	return -1;
}
// Stub pack callback for P7 vendor-extension TLVs — no TLVs are packed;
// always returns -1 (not handled).
int phy_pack_vendor_extention_tlv(void* ve, uint8_t **ppWritePackedMsg, uint8_t* end, nfapi_p7_codec_config_t* config)
{
//printf("%s\n", __FUNCTION__);
(void)ve;
(void)ppWritePackedMsg;
return -1;
}
// Unpack a P4/P5 vendor-extension TLV. Returns 1 on success (with *ve set to
// a malloc'd vendor_ext_tlv_2 the caller owns), 0 on failure, -1 for unknown
// tags.
//
// Fixes relative to the original:
//  - malloc() is checked before the result is dereferenced
//  - on a truncated message the allocation is freed and *ve is left untouched
//    instead of leaking and handing the caller a half-initialized object
int pnf_sim_unpack_vendor_extension_tlv(nfapi_tl_t* tl, uint8_t **ppReadPackedMessage, uint8_t *end, void** ve, nfapi_p4_p5_codec_config_t* config)
{
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "pnf_sim_unpack_vendor_extension_tlv\n");
	switch(tl->tag)
	{
		case VENDOR_EXT_TLV_2_TAG:
		{
			vendor_ext_tlv_2* tlv = (vendor_ext_tlv_2*)malloc(sizeof(vendor_ext_tlv_2));
			if(tlv == NULL)
				return 0; // allocation failure is reported as an unpack failure
			if(!pull32(ppReadPackedMessage, &tlv->dummy, end))
			{
				free(tlv); // don't leak on a truncated message
				return 0;
			}
			*ve = tlv;
			return 1;
		}
	}
	return -1;
}
// Stub pack callback for P4/P5 vendor-extension TLVs — no TLVs are packed;
// always returns -1 (not handled).
int pnf_sim_pack_vendor_extention_tlv(void* ve, uint8_t **ppWritePackedMsg, uint8_t *end, nfapi_p4_p5_codec_config_t* config)
{
//printf("%s\n", __FUNCTION__);
(void)ve;
(void)ppWritePackedMsg;
return -1;
}
// Handle START.request for a phy: build the P7 transport configuration from
// the parameters recorded by config_request, spawn the P7 receive thread,
// link the P7 config into the phy's FAPI user data, and kick the phy with a
// FAPI start request.
int start_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_start_request_t* req)
{
printf("[PNF_SIM] start request phy_id:%d\n", req->header.phy_id);
pnf_info* pnf = (pnf_info*)(config->user_data);
// Locate the phy_info entry matching the requested phy id.
auto found = std::find_if(pnf->phys.begin(), pnf->phys.end(), [&](phy_info& item)
{ return item.id == req->header.phy_id; });
if(found != pnf->phys.end())
{
phy_info& phy_info = (*found);
nfapi_pnf_p7_config_t* p7_config = nfapi_pnf_p7_config_create();
p7_config->phy_id = phy->phy_id;
// Endpoints captured earlier by config_request.
p7_config->remote_p7_port = phy_info.remote_port;
p7_config->remote_p7_addr = phy_info.remote_addr;
p7_config->local_p7_port = phy_info.local_port;
p7_config->local_p7_addr = (char*)phy_info.local_addr.c_str();
p7_config->user_data = &phy_info;
// Use the pnf allocators for both transport buffers and codec objects.
p7_config->malloc = &pnf_allocate;
p7_config->free = &pnf_deallocate;
p7_config->codec_config.allocate = &pnf_allocate;
p7_config->codec_config.deallocate = &pnf_deallocate;
p7_config->trace = &pnf_sim_trace;
phy->user_data = p7_config;
// Buffer as many subframes as the configured timing window.
p7_config->subframe_buffer_size = phy_info.timing_window;
// timing_info_mode is a bitmask: bit0 = periodic, bit1 = aperiodic.
if(phy_info.timing_info_mode & 0x1)
{
p7_config->timing_info_mode_periodic = 1;
p7_config->timing_info_period = phy_info.timing_info_period;
}
if(phy_info.timing_info_mode & 0x2)
{
p7_config->timing_info_mode_aperiodic = 1;
}
// Downlink message handlers (VNF -> PNF direction).
p7_config->dl_config_req = &phy_dl_config_req;
p7_config->ul_config_req = &phy_ul_config_req;
p7_config->hi_dci0_req = &phy_hi_dci0_req;
p7_config->tx_req = &phy_tx_req;
p7_config->lbt_dl_config_req = &phy_lbt_dl_config_req;
p7_config->vendor_ext = &phy_vendor_ext;
p7_config->allocate_p7_vendor_ext = &phy_allocate_p7_vendor_ext;
p7_config->deallocate_p7_vendor_ext = &phy_deallocate_p7_vendor_ext;
p7_config->codec_config.unpack_p7_vendor_extension = &phy_unpack_p7_vendor_extension;
p7_config->codec_config.pack_p7_vendor_extension = &phy_pack_p7_vendor_extension;
p7_config->codec_config.unpack_vendor_extension_tlv = &phy_unpack_vendor_extension_tlv;
p7_config->codec_config.pack_vendor_extension_tlv = &phy_pack_vendor_extention_tlv;
// NOTE(review): the thread handle is discarded and the thread is never
// joined or detached; the pthread_create return value is also unchecked.
pthread_t p7_thread;
pthread_create(&p7_thread, NULL, &pnf_p7_thread_start, p7_config);
// Make the P7 config reachable from the FAPI callbacks (set up by
// pnf_start_request) so indications can be sent to the VNF.
((pnf_phy_user_data_t*)(phy_info.fapi->user_data))->p7_config = p7_config;
// NOTE(review): fapi_req is passed uninitialized — confirm
// fapi_start_request does not read it.
fapi_start_req_t fapi_req;
fapi_start_request(phy_info.fapi, &fapi_req);
}
return 0;
}
// Handle MEASUREMENT.request: acknowledge unconditionally with NFAPI_MSG_OK
// (no actual measurement is performed by the simulator).
int measurement_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_measurement_request_t* req)
{
	(void)phy;

	nfapi_measurement_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_MEASUREMENT_RESPONSE;
	response.header.phy_id = req->header.phy_id;
	response.error_code = NFAPI_MSG_OK;
	nfapi_pnf_measurement_resp(config, &response);
	return 0;
}
// Handle RSSI.request: acknowledge it, then immediately emit an
// RSSI.indication containing a single canned reading of -42.
int rssi_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_rssi_request_t* req)
{
	(void)phy;

	// Acknowledge the request.
	nfapi_rssi_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_RSSI_RESPONSE;
	response.header.phy_id = req->header.phy_id;
	response.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_rssi_resp(config, &response);

	// Follow up with a fixed dummy measurement.
	nfapi_rssi_indication_t indication;
	memset(&indication, 0, sizeof(indication));
	indication.header.message_id = NFAPI_RSSI_INDICATION;
	indication.header.phy_id = req->header.phy_id;
	indication.error_code = NFAPI_P4_MSG_OK;
	indication.rssi_indication_body.tl.tag = NFAPI_RSSI_INDICATION_TAG;
	indication.rssi_indication_body.number_of_rssi = 1;
	indication.rssi_indication_body.rssi[0] = -42;
	nfapi_pnf_rssi_ind(config, &indication);
	return 0;
}
// Handle CELL_SEARCH.request: acknowledge it, then emit a canned
// CELL_SEARCH.indication whose payload shape depends on the requested RAT
// (LTE, UTRAN or GERAN).
int cell_search_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_cell_search_request_t* req)
{
	(void)phy;

	// Acknowledge the request.
	nfapi_cell_search_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_CELL_SEARCH_RESPONSE;
	response.header.phy_id = req->header.phy_id;
	response.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_cell_search_resp(config, &response);

	// Build the indication with fixed dummy search results.
	nfapi_cell_search_indication_t indication;
	memset(&indication, 0, sizeof(indication));
	indication.header.message_id = NFAPI_CELL_SEARCH_INDICATION;
	indication.header.phy_id = req->header.phy_id;
	indication.error_code = NFAPI_P4_MSG_OK;

	switch(req->rat_type)
	{
		case NFAPI_RAT_TYPE_LTE:
			indication.lte_cell_search_indication.tl.tag = NFAPI_LTE_CELL_SEARCH_INDICATION_TAG;
			indication.lte_cell_search_indication.number_of_lte_cells_found = 1;
			indication.lte_cell_search_indication.lte_found_cells[0].pci = 123;
			indication.lte_cell_search_indication.lte_found_cells[0].rsrp = 123;
			indication.lte_cell_search_indication.lte_found_cells[0].rsrq = 123;
			indication.lte_cell_search_indication.lte_found_cells[0].frequency_offset = 123;
			break;
		case NFAPI_RAT_TYPE_UTRAN:
			indication.utran_cell_search_indication.tl.tag = NFAPI_UTRAN_CELL_SEARCH_INDICATION_TAG;
			indication.utran_cell_search_indication.number_of_utran_cells_found = 1;
			indication.utran_cell_search_indication.utran_found_cells[0].psc = 89;
			indication.utran_cell_search_indication.utran_found_cells[0].rscp = 89;
			indication.utran_cell_search_indication.utran_found_cells[0].ecno = 89;
			indication.utran_cell_search_indication.utran_found_cells[0].frequency_offset = -89;
			break;
		case NFAPI_RAT_TYPE_GERAN:
			indication.geran_cell_search_indication.tl.tag = NFAPI_GERAN_CELL_SEARCH_INDICATION_TAG;
			indication.geran_cell_search_indication.number_of_gsm_cells_found = 1;
			indication.geran_cell_search_indication.gsm_found_cells[0].bsic = 23;
			indication.geran_cell_search_indication.gsm_found_cells[0].rxlev = 23;
			indication.geran_cell_search_indication.gsm_found_cells[0].rxqual = 23;
			indication.geran_cell_search_indication.gsm_found_cells[0].frequency_offset = -23;
			indication.geran_cell_search_indication.gsm_found_cells[0].sfn_offset = 230;
			break;
	}

	indication.pnf_cell_search_state.tl.tag = NFAPI_PNF_CELL_SEARCH_STATE_TAG;
	indication.pnf_cell_search_state.length = 3;
	nfapi_pnf_cell_search_ind(config, &indication);
	return 0;
}
// Handle BROADCAST_DETECT.request: acknowledge it, then emit a canned
// BROADCAST_DETECT.indication whose payload depends on the requested RAT.
//
// Fix relative to the original: the UTRAN branch contained a statement with
// no effect ("ind.utran_broadcast_detect_indication.sfn_offset;"). The value
// stays 0 (what the memset produced), but the assignment is now explicit.
int broadcast_detect_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_broadcast_detect_request_t* req)
{
	// Acknowledge the request.
	nfapi_broadcast_detect_response_t resp;
	memset(&resp, 0, sizeof(resp));
	resp.header.message_id = NFAPI_BROADCAST_DETECT_RESPONSE;
	resp.header.phy_id = req->header.phy_id;
	resp.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_broadcast_detect_resp(config, &resp);

	// Build the indication with fixed dummy results.
	nfapi_broadcast_detect_indication_t ind;
	memset(&ind, 0, sizeof(ind));
	ind.header.message_id = NFAPI_BROADCAST_DETECT_INDICATION;
	ind.header.phy_id = req->header.phy_id;
	ind.error_code = NFAPI_P4_MSG_OK;

	switch(req->rat_type)
	{
		case NFAPI_RAT_TYPE_LTE:
		{
			ind.lte_broadcast_detect_indication.tl.tag = NFAPI_LTE_BROADCAST_DETECT_INDICATION_TAG;
			ind.lte_broadcast_detect_indication.number_of_tx_antenna = 1;
			ind.lte_broadcast_detect_indication.mib_length = 4;
			//ind.lte_broadcast_detect_indication.mib...
			ind.lte_broadcast_detect_indication.sfn_offset = 77;
		}
		break;
		case NFAPI_RAT_TYPE_UTRAN:
		{
			ind.utran_broadcast_detect_indication.tl.tag = NFAPI_UTRAN_BROADCAST_DETECT_INDICATION_TAG;
			ind.utran_broadcast_detect_indication.mib_length = 4;
			//ind.utran_broadcast_detect_indication.mib...
			// BUG FIX: was "ind.utran_broadcast_detect_indication.sfn_offset;"
			// (no effect); the zero value is now assigned explicitly.
			ind.utran_broadcast_detect_indication.sfn_offset = 0;
		}
		break;
	}

	ind.pnf_cell_broadcast_state.tl.tag = NFAPI_PNF_CELL_BROADCAST_STATE_TAG;
	ind.pnf_cell_broadcast_state.length = 3;
	nfapi_pnf_broadcast_detect_ind(config, &ind);
	return 0;
}
// Handle SYSTEM_INFORMATION_SCHEDULE.request: acknowledge it, then emit a
// canned schedule indication advertising a SIB of type 3, length 5.
int system_information_schedule_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_system_information_schedule_request_t* req)
{
	(void)phy;

	// Acknowledge the request.
	nfapi_system_information_schedule_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_SYSTEM_INFORMATION_SCHEDULE_RESPONSE;
	response.header.phy_id = req->header.phy_id;
	response.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_system_information_schedule_resp(config, &response);

	// Emit the canned indication (the sib payload itself is left zeroed).
	nfapi_system_information_schedule_indication_t indication;
	memset(&indication, 0, sizeof(indication));
	indication.header.message_id = NFAPI_SYSTEM_INFORMATION_SCHEDULE_INDICATION;
	indication.header.phy_id = req->header.phy_id;
	indication.error_code = NFAPI_P4_MSG_OK;
	indication.lte_system_information_indication.tl.tag = NFAPI_LTE_SYSTEM_INFORMATION_INDICATION_TAG;
	indication.lte_system_information_indication.sib_type = 3;
	indication.lte_system_information_indication.sib_length = 5;
	nfapi_pnf_system_information_schedule_ind(config, &indication);
	return 0;
}
// Handle SYSTEM_INFORMATION.request: acknowledge it, then emit a canned
// SYSTEM_INFORMATION.indication whose payload shape depends on the requested
// RAT (the sib/si payload bytes themselves are left zeroed).
int system_information_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_system_information_request_t* req)
{
	(void)phy;

	// Acknowledge the request.
	nfapi_system_information_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.message_id = NFAPI_SYSTEM_INFORMATION_RESPONSE;
	response.header.phy_id = req->header.phy_id;
	response.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_system_information_resp(config, &response);

	// Build the indication with fixed dummy contents.
	nfapi_system_information_indication_t indication;
	memset(&indication, 0, sizeof(indication));
	indication.header.message_id = NFAPI_SYSTEM_INFORMATION_INDICATION;
	indication.header.phy_id = req->header.phy_id;
	indication.error_code = NFAPI_P4_MSG_OK;

	switch(req->rat_type)
	{
		case NFAPI_RAT_TYPE_LTE:
			indication.lte_system_information_indication.tl.tag = NFAPI_LTE_SYSTEM_INFORMATION_INDICATION_TAG;
			indication.lte_system_information_indication.sib_type = 1;
			indication.lte_system_information_indication.sib_length = 3;
			break;
		case NFAPI_RAT_TYPE_UTRAN:
			indication.utran_system_information_indication.tl.tag = NFAPI_UTRAN_SYSTEM_INFORMATION_INDICATION_TAG;
			indication.utran_system_information_indication.sib_length = 3;
			break;
		case NFAPI_RAT_TYPE_GERAN:
			indication.geran_system_information_indication.tl.tag = NFAPI_GERAN_SYSTEM_INFORMATION_INDICATION_TAG;
			indication.geran_system_information_indication.si_length = 3;
			break;
	}

	nfapi_pnf_system_information_ind(config, &indication);
	return 0;
}
// Handle an NMM_STOP.request by acknowledging it with an OK response; the
// simulator performs no actual NMM work.
int nmm_stop_request(nfapi_pnf_config_t* config, nfapi_pnf_phy_config_t* phy, nfapi_nmm_stop_request_t* req)
{
	nfapi_nmm_stop_response_t response;
	memset(&response, 0, sizeof(response));
	response.header.phy_id = req->header.phy_id;
	response.header.message_id = NFAPI_NMM_STOP_RESPONSE;
	response.error_code = NFAPI_P4_MSG_OK;
	nfapi_pnf_nmm_stop_resp(config, &response);
	return 0;
}
// Dispatch a received P5 vendor-extension message. Only P5_VENDOR_EXT_REQ is
// understood; it is answered with a P5_VENDOR_EXT_RSP carrying NFAPI_MSG_OK.
// All other message ids are silently ignored.
int vendor_ext(nfapi_pnf_config_t* config, nfapi_p4_p5_message_header_t* msg)
{
	NFAPI_TRACE(NFAPI_TRACE_INFO, "[PNF_SIM] P5 %s %p\n", __FUNCTION__, msg);

	if(msg->message_id == P5_VENDOR_EXT_REQ)
	{
		vendor_ext_p5_req* req = (vendor_ext_p5_req*)msg;
		NFAPI_TRACE(NFAPI_TRACE_INFO, "[PNF_SIM] P5 Vendor Ext Req (%d %d)\n", req->dummy1, req->dummy2);
		// send back the P5_VENDOR_EXT_RSP
		vendor_ext_p5_rsp rsp;
		memset(&rsp, 0, sizeof(rsp));
		rsp.header.message_id = P5_VENDOR_EXT_RSP;
		rsp.error_code = NFAPI_MSG_OK;
		nfapi_pnf_vendor_extension(config, &rsp.header, sizeof(vendor_ext_p5_rsp));
	}

	return 0;
}
// Allocate storage for an incoming P4/P5 vendor-extension message and report
// its size via msg_size. Returns 0 (NULL) for message ids this simulator
// does not recognise.
nfapi_p4_p5_message_header_t* pnf_sim_allocate_p4_p5_vendor_ext(uint16_t message_id, uint16_t* msg_size)
{
	if(message_id != P5_VENDOR_EXT_REQ)
	{
		return 0;
	}
	*msg_size = sizeof(vendor_ext_p5_req);
	return (nfapi_p4_p5_message_header_t*)malloc(sizeof(vendor_ext_p5_req));
}
// Release a message previously obtained from
// pnf_sim_allocate_p4_p5_vendor_ext. free(NULL) is a no-op, so a NULL
// header is safe to pass.
void pnf_sim_deallocate_p4_p5_vendor_ext(nfapi_p4_p5_message_header_t* header)
{
	free(header);
}
// Serialize this simulator's P5 vendor-extension response into the packed
// message buffer; unknown message ids produce no output and return 0.
// NOTE(review): the value returned is !push16(...), mirroring the inverted
// convention used by the unpack counterpart — confirm against the nfapi
// codec documentation before relying on it.
int pnf_sim_pack_p4_p5_vendor_extension(nfapi_p4_p5_message_header_t* header, uint8_t** ppWritePackedMsg, uint8_t *end, nfapi_p4_p5_codec_config_t* config)
{
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s\n", __FUNCTION__);
	if(header->message_id != P5_VENDOR_EXT_RSP)
	{
		return 0;
	}
	vendor_ext_p5_rsp* rsp = (vendor_ext_p5_rsp*)(header);
	return !push16(rsp->error_code, ppWritePackedMsg, end);
}
// Decode this simulator's P5 vendor-extension request from the packed
// buffer into the pre-allocated header; unknown message ids return 0.
int pnf_sim_unpack_p4_p5_vendor_extension(nfapi_p4_p5_message_header_t* header, uint8_t** ppReadPackedMessage, uint8_t *end, nfapi_p4_p5_codec_config_t* codec)
{
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s\n", __FUNCTION__);
	if(header->message_id != P5_VENDOR_EXT_REQ)
	{
		return 0;
	}
	vendor_ext_p5_req* req = (vendor_ext_p5_req*)(header);
	// Pull both 16-bit fields; the result is inverted to match the codec's
	// return convention (same shape as the original expression).
	int pulled = pull16(ppReadPackedMessage, &req->dummy1, end) &&
	             pull16(ppReadPackedMessage, &req->dummy2, end);
	//NFAPI_TRACE(NFAPI_TRACE_INFO, "%s (%d %d)\n", __FUNCTION__, req->dummy1, req->dummy2);
	return !pulled;
}
int main(int argc, char *argv[])
{
if (argc < 3)
{
printf("Use parameters: <IP Address of VNF P5> <P5 Port> <Config File>\n");
return 0;
}
set_thread_priority(50);
pnf_info pnf;
if(read_pnf_xml(pnf, argv[3]) < 0)
{
printf("Failed to read xml file>\n");
return 0;
}
nfapi_pnf_config_t* config = nfapi_pnf_config_create();
config->vnf_ip_addr = argv[1];
config->vnf_p5_port = atoi(argv[2]);
config->pnf_param_req = &pnf_param_request;
config->pnf_config_req = &pnf_config_request;
config->pnf_start_req = &pnf_start_request;
config->pnf_stop_req = &pnf_stop_request;
config->param_req = ¶m_request;
config->config_req = &config_request;
config->start_req = &start_request;
config->measurement_req = &measurement_request;
config->rssi_req = &rssi_request;
config->cell_search_req = &cell_search_request;
config->broadcast_detect_req = &broadcast_detect_request;
config->system_information_schedule_req = &system_information_schedule_request;
config->system_information_req = &system_information_request;
config->nmm_stop_req = &nmm_stop_request;
config->vendor_ext = &vendor_ext;
config->trace = &pnf_sim_trace;
config->user_data = &pnf;
// To allow custom vendor extentions to be added to nfapi
config->codec_config.unpack_vendor_extension_tlv = &pnf_sim_unpack_vendor_extension_tlv;
config->codec_config.pack_vendor_extension_tlv = &pnf_sim_pack_vendor_extention_tlv;
config->allocate_p4_p5_vendor_ext = &pnf_sim_allocate_p4_p5_vendor_ext;
config->deallocate_p4_p5_vendor_ext = &pnf_sim_deallocate_p4_p5_vendor_ext;
config->codec_config.unpack_p4_p5_vendor_extension = &pnf_sim_unpack_p4_p5_vendor_extension;
config->codec_config.pack_p4_p5_vendor_extension = &pnf_sim_pack_p4_p5_vendor_extension;
return nfapi_pnf_start(config);
}
| cisco/open-nFAPI | pnf_sim/src/main.cpp | C++ | apache-2.0 | 76,384 |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"encoding/json"
"fmt"
"net"
goruntime "runtime"
"sort"
"strconv"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/features"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
taintutil "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
	// maxImageTagsForTest bounds how many tags generateImageTags may
	// produce per image.
	maxImageTagsForTest = 20
)
// generateTestingImageLists builds `count` images with random IDs, tags and
// sizes, together with the v1.ContainerImage list a node status is expected
// to report for them (truncated to maxImages; -1 means unlimited).
func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
	var imageList []kubecontainer.Image
	for i := 0; i < count; i++ {
		imageList = append(imageList, kubecontainer.Image{
			ID:       string(uuid.NewUUID()),
			RepoTags: generateImageTags(),
			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
		})
	}
	return imageList, makeExpectedImageList(imageList, maxImages)
}
// makeExpectedImageList converts imageList into the v1.ContainerImage slice
// the node status should contain: sorted by size, each image limited to
// MaxNamesPerImageInNodeStatus tags, and the whole list capped at maxImages
// entries (-1 disables the cap).
func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
	// Sort in place by image size; node status reports largest first.
	sort.Sort(sliceutils.ByImageSize(imageList))

	var expected []v1.ContainerImage
	for _, img := range imageList {
		expected = append(expected, v1.ContainerImage{
			Names:     img.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
			SizeBytes: img.Size,
		})
	}

	if maxImages == -1 { // -1 means no limit
		return expected
	}
	return expected[0:maxImages]
}
// generateImageTags returns a random number of synthetic image tags,
// always more than MaxNamesPerImageInNodeStatus so callers can verify that
// the kubelet truncates the reported tag list.
func generateImageTags() []string {
	count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	tagList := make([]string, 0, count)
	// Same descending order as the original: vN, vN-1, ..., v1.
	for i := count; i > 0; i-- {
		tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(i))
	}
	return tagList
}
// applyNodeStatusPatch applies a strategic-merge patch (as produced by the
// kubelet's status PATCH request) to originalNode and returns the resulting
// node, leaving originalNode itself untouched.
func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
	originalJSON, err := json.Marshal(originalNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
	}
	patchedJSON, err := strategicpatch.StrategicMergePatch(originalJSON, patch, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
			patch, originalNode, err)
	}
	patchedNode := &v1.Node{}
	if err := json.Unmarshal(patchedJSON, patchedNode); err != nil {
		return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", patchedJSON, err)
	}
	return patchedNode, nil
}
// notImplemented is a fake-clientset reactor that rejects every action it
// handles; registering it surfaces unexpected API calls as test failures.
func notImplemented(action core.Action) (bool, runtime.Object, error) {
	return true, nil, fmt.Errorf("no reaction implemented for %s", action)
}
// addNotImplatedReaction installs notImplemented as a catch-all reactor on
// kubeClient, so any API action without an explicit reaction fails the test.
// A nil client is tolerated. (NOTE(review): the name is a typo for
// "NotImplemented"; kept unchanged because callers elsewhere use it.)
func addNotImplatedReaction(kubeClient *fake.Clientset) {
	if kubeClient == nil {
		return
	}
	kubeClient.AddReactor("*", "*", notImplemented)
}
// localCM embeds a ContainerManager (typically the stub implementation) and
// lets tests override the allocatable reservation and capacity it reports,
// which feed the node status setters.
type localCM struct {
	cm.ContainerManager
	allocatableReservation v1.ResourceList
	capacity               v1.ResourceList
}

// GetNodeAllocatableReservation returns the test-provided reservation.
func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
	return lcm.allocatableReservation
}

// GetCapacity returns the test-provided capacity.
func (lcm *localCM) GetCapacity() v1.ResourceList {
	return lcm.capacity
}
// sortableNodeAddress is a type for sorting []v1.NodeAddress by the
// concatenation of address type and address value.
type sortableNodeAddress []v1.NodeAddress

func (s sortableNodeAddress) Len() int { return len(s) }

func (s sortableNodeAddress) Less(i, j int) bool {
	left := string(s[i].Type) + s[i].Address
	right := string(s[j].Type) + s[j].Address
	return left < right
}

func (s sortableNodeAddress) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// sortNodeAddresses sorts addrs in place.
func sortNodeAddresses(addrs sortableNodeAddress) {
	sort.Sort(addrs)
}
// TestUpdateNewNodeStatus verifies the first status update posted for a
// freshly registered node: the expected conditions, node info, capacity,
// allocatable resources, addresses, and reported image list — both with a
// finite image limit (5) and with no limit (nodeStatusMaxImages == -1).
func TestUpdateNewNodeStatus(t *testing.T) {
	cases := []struct {
		desc                string
		nodeStatusMaxImages int32
	}{
		{
			desc:                "5 image limit",
			nodeStatusMaxImages: 5,
		},
		{
			desc:                "no image limit",
			nodeStatusMaxImages: -1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// generate one more in inputImageList than we configure the Kubelet to report,
			// or 5 images if unlimited
			numTestImages := int(tc.nodeStatusMaxImages) + 1
			if tc.nodeStatusMaxImages == -1 {
				numTestImages = 5
			}
			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
			testKubelet := newTestKubeletWithImageList(
				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			// Override the container manager with fixed reservation/capacity
			// so the expected allocatable values below are deterministic.
			kubelet.containerManager = &localCM{
				ContainerManager: cm.NewStubContainerManager(),
				allocatableReservation: v1.ResourceList{
					v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
					v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
					v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
				},
				capacity: v1.ResourceList{
					v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
					v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
					v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
				},
			}
			// Since this test retroactively overrides the stub container manager,
			// we have to regenerate default status setters.
			kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

			kubeClient := testKubelet.fakeKubeClient
			existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
			machineInfo := &cadvisorapi.MachineInfo{
				MachineID:      "123",
				SystemUUID:     "abc",
				BootID:         "1b3",
				NumCores:       2,
				MemoryCapacity: 10E9, // 10G
			}
			kubelet.machineInfo = machineInfo

			// The full node object the status update is expected to produce
			// (timestamps zeroed; they are normalized before comparison).
			expectedNode := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Spec:       v1.NodeSpec{},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeMemoryPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasSufficientMemory",
							Message:            fmt.Sprintf("kubelet has sufficient memory available"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodeDiskPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasNoDiskPressure",
							Message:            fmt.Sprintf("kubelet has no disk pressure"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodePIDPressure,
							Status:             v1.ConditionFalse,
							Reason:             "KubeletHasSufficientPID",
							Message:            fmt.Sprintf("kubelet has sufficient PID available"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionTrue,
							Reason:             "KubeletReady",
							Message:            fmt.Sprintf("kubelet is posting ready status"),
							LastHeartbeatTime:  metav1.Time{},
							LastTransitionTime: metav1.Time{},
						},
					},
					NodeInfo: v1.NodeSystemInfo{
						MachineID:               "123",
						SystemUUID:              "abc",
						BootID:                  "1b3",
						KernelVersion:           cadvisortest.FakeKernelVersion,
						OSImage:                 cadvisortest.FakeContainerOsVersion,
						OperatingSystem:         goruntime.GOOS,
						Architecture:            goruntime.GOARCH,
						ContainerRuntimeVersion: "test://1.5.0",
						KubeletVersion:          version.Get().String(),
						KubeProxyVersion:        version.Get().String(),
					},
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
					},
					Addresses: []v1.NodeAddress{
						{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
						{Type: v1.NodeHostName, Address: testKubeletHostname},
					},
					Images: expectedImageList,
				},
			}

			kubelet.updateRuntimeUp()
			assert.NoError(t, kubelet.updateNodeStatus())
			actions := kubeClient.Actions()
			require.Len(t, actions, 2)
			require.True(t, actions[1].Matches("patch", "nodes"))
			require.Equal(t, actions[1].GetSubresource(), "status")

			updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
			assert.NoError(t, err)
			// Timestamps are non-deterministic: assert they were set, then
			// zero them so the DeepEqual below compares only stable fields.
			for i, cond := range updatedNode.Status.Conditions {
				assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
				assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
				updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
				updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
			}

			// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
			assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
				"NotReady should be last")
			assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
			assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
		})
	}
}
// TestUpdateExistingNodeStatus verifies a status update against a node that
// already carries conditions and resources: heartbeat times must be
// refreshed while transition times are preserved, and pre-existing condition
// reasons (e.g. KubeletHasSufficientDisk) are kept rather than overwritten.
func TestUpdateExistingNodeStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
	// Fixed reservation/capacity make the expected allocatable values below
	// deterministic.
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	kubeClient := testKubelet.fakeKubeClient
	// A node that already has conditions dated 2012-01-01; the update should
	// refresh heartbeats but leave these transition times alone.
	existingNode := v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
			},
		},
	}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	kubelet.machineInfo = machineInfo

	// Expected node after the update (timestamps zeroed before comparison).
	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientDisk",
					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            fmt.Sprintf("kubelet is posting ready status"),
					LastHeartbeatTime:  metav1.Time{}, // placeholder
					LastTransitionTime: metav1.Time{}, // placeholder
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           cadvisortest.FakeKernelVersion,
				OSImage:                 cadvisortest.FakeContainerOsVersion,
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
					SizeBytes: 123,
				},
				{
					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
					SizeBytes: 456,
				},
			},
		},
	}

	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())

	actions := kubeClient.Actions()
	assert.Len(t, actions, 2)

	assert.IsType(t, core.PatchActionImpl{}, actions[1])
	patchAction := actions[1].(core.PatchActionImpl)

	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
	require.NoError(t, err)
	for i, cond := range updatedNode.Status.Conditions {
		old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
		// Expect LastHearbeat to be updated to Now, while LastTransitionTime to be the same.
		assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
		assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
	}

	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
		"NodeReady should be the last condition")
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
// TestUpdateExistingNodeStatusTimeout verifies that updateNodeStatus does
// not hang when the API server accepts TCP connections but never responds:
// it must time out, retry at least nodeStatusUpdateRetry times, invoke the
// repeated-heartbeat-failure callback between retries, and return an error.
func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
	attempts := int64(0)
	failureCallbacks := int64(0)

	// set up a listener that hangs connections
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)
	defer ln.Close()
	go func() {
		// accept connections and just let them hang
		for {
			_, err := ln.Accept()
			if err != nil {
				t.Log(err)
				return
			}
			t.Log("accepted connection")
			atomic.AddInt64(&attempts, 1)
		}
	}()

	// Point the heartbeat client at the hanging listener with a 1s timeout.
	config := &rest.Config{
		Host:    "http://" + ln.Addr().String(),
		QPS:     -1,
		Timeout: time.Second,
	}
	assert.NoError(t, err)

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.heartbeatClient, err = clientset.NewForConfig(config)
	kubelet.onRepeatedHeartbeatFailure = func() {
		atomic.AddInt64(&failureCallbacks, 1)
	}
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
		},
	}

	// should return an error, but not hang
	assert.Error(t, kubelet.updateNodeStatus())

	// should have attempted multiple times
	if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
		t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
	}
	// should have gotten multiple failure callbacks
	if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
		t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
	}
}
// TestUpdateNodeStatusWithRuntimeStateError walks the node Ready condition
// through a sequence of container-runtime health states (stale health check,
// status error, nil/empty runtime status, RuntimeReady/NetworkReady flags)
// and asserts the Ready condition flips between KubeletReady and
// KubeletNotReady accordingly.
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(100E6, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9,
	}
	kubelet.machineInfo = machineInfo

	// Expected node; the last condition is a placeholder that checkNodeStatus
	// fills in with the Ready condition under test.
	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            fmt.Sprintf("kubelet has no disk pressure"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
					LastHeartbeatTime:  metav1.Time{},
					LastTransitionTime: metav1.Time{},
				},
				{}, //placeholder
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           cadvisortest.FakeKernelVersion,
				OSImage:                 cadvisortest.FakeContainerOsVersion,
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(9900E6, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			Images: []v1.ContainerImage{
				{
					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
					SizeBytes: 123,
				},
				{
					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
					SizeBytes: 456,
				},
			},
		},
	}

	// checkNodeStatus runs one status update and asserts the resulting
	// Ready condition has the given status and reason.
	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
		kubeClient.ClearActions()
		assert.NoError(t, kubelet.updateNodeStatus())
		actions := kubeClient.Actions()
		require.Len(t, actions, 2)
		require.True(t, actions[1].Matches("patch", "nodes"))
		require.Equal(t, actions[1].GetSubresource(), "status")

		updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{})
		require.NoError(t, err, "can't apply node status patch")

		for i, cond := range updatedNode.Status.Conditions {
			assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
			assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
		}

		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
		lastIndex := len(updatedNode.Status.Conditions) - 1
		assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
		assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)

		updatedNode.Status.Conditions[lastIndex].Message = ""
		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
			Type:               v1.NodeReady,
			Status:             status,
			Reason:             reason,
			LastHeartbeatTime:  metav1.Time{},
			LastTransitionTime: metav1.Time{},
		}
		assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
	}

	// TODO(random-liu): Refactor the unit test to be table driven test.
	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet ready if the runtime check is updated
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report kubelet not ready if the runtime check is out of date
	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report kubelet not ready if the runtime check failed
	fakeRuntime := testKubelet.fakeRuntime
	// Inject error into fake runtime status check, node should be NotReady
	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
	clock.SetTime(time.Now())
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	fakeRuntime.StatusErr = nil

	// Should report node not ready if runtime status is nil.
	fakeRuntime.RuntimeStatus = nil
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if runtime status is empty.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node not ready if RuntimeReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: false},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")

	// Should report node ready if RuntimeReady is true.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: true},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionTrue, "KubeletReady")

	// Should report node not ready if NetworkReady is false.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: kubecontainer.RuntimeReady, Status: true},
			{Type: kubecontainer.NetworkReady, Status: false},
		},
	}
	kubelet.updateRuntimeUp()
	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
// TestUpdateNodeStatusError verifies that when no matching Node object
// exists in the API, updateNodeStatus retries nodeStatusUpdateRetry times
// (one recorded action per attempt) and ultimately returns an error.
func TestUpdateNodeStatusError(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	// No matching node for the kubelet
	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
	assert.Error(t, kubelet.updateNodeStatus())
	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
}
// TestUpdateNodeStatusWithLease verifies node status reporting when the
// NodeLease feature gate is enabled. With leases, the full status is only
// patched when the status is first created, when nodeStatusReportFrequency
// has elapsed, or when the status content actually changes; otherwise only a
// GET is issued against the API server.
func TestUpdateNodeStatusWithLease(t *testing.T) {
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	clock := testKubelet.fakeClock
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
	kubelet.nodeStatusReportFrequency = time.Minute
	kubeClient := testKubelet.fakeKubeClient
	existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 20E9,
	}
	kubelet.machineInfo = machineInfo
	now := metav1.NewTime(clock.Now()).Rfc3339Copy()
	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{
					Type:               v1.NodeMemoryPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientMemory",
					Message:            "kubelet has sufficient memory available",
					LastHeartbeatTime:  now,
					LastTransitionTime: now,
				},
				{
					Type:               v1.NodeDiskPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasNoDiskPressure",
					Message:            "kubelet has no disk pressure",
					LastHeartbeatTime:  now,
					LastTransitionTime: now,
				},
				{
					Type:               v1.NodePIDPressure,
					Status:             v1.ConditionFalse,
					Reason:             "KubeletHasSufficientPID",
					Message:            "kubelet has sufficient PID available",
					LastHeartbeatTime:  now,
					LastTransitionTime: now,
				},
				{
					Type:               v1.NodeReady,
					Status:             v1.ConditionTrue,
					Reason:             "KubeletReady",
					Message:            "kubelet is posting ready status",
					LastHeartbeatTime:  now,
					LastTransitionTime: now,
				},
			},
			NodeInfo: v1.NodeSystemInfo{
				MachineID:               "123",
				SystemUUID:              "abc",
				BootID:                  "1b3",
				KernelVersion:           cadvisortest.FakeKernelVersion,
				OSImage:                 cadvisortest.FakeContainerOsVersion,
				OperatingSystem:         goruntime.GOOS,
				Architecture:            goruntime.GOARCH,
				ContainerRuntimeVersion: "test://1.5.0",
				KubeletVersion:          version.Get().String(),
				KubeProxyVersion:        version.Get().String(),
			},
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(20E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(19900E6, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
			},
			Addresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			// images will be sorted from max to min in node status.
			Images: []v1.ContainerImage{
				{
					Names:     []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
					SizeBytes: 123,
				},
				{
					Names:     []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
					SizeBytes: 456,
				},
			},
		},
	}

	// Update node status when node status is created.
	// Report node status.
	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())
	actions := kubeClient.Actions()
	assert.Len(t, actions, 2)
	assert.IsType(t, core.GetActionImpl{}, actions[0])
	assert.IsType(t, core.PatchActionImpl{}, actions[1])
	patchAction := actions[1].(core.PatchActionImpl)
	updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
	require.NoError(t, err)
	for i, cond := range updatedNode.Status.Conditions {
		// Normalize in place; ranging by value would only mutate a copy.
		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
	}
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
		"NodeReady should be the last condition")

	// Update node status again when nothing is changed (except heartbeat time).
	// Report node status if it has exceeded the duration of nodeStatusReportFrequency.
	clock.Step(time.Minute)
	assert.NoError(t, kubelet.updateNodeStatus())
	// 2 more actions (there were 2 actions before).
	actions = kubeClient.Actions()
	assert.Len(t, actions, 4)
	assert.IsType(t, core.GetActionImpl{}, actions[2])
	assert.IsType(t, core.PatchActionImpl{}, actions[3])
	patchAction = actions[3].(core.PatchActionImpl)
	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
	require.NoError(t, err)
	for i, cond := range updatedNode.Status.Conditions {
		// Normalize in place; ranging by value would only mutate a copy.
		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
	}
	// Expect LastHeartbeatTime updated, other things unchanged.
	for i, cond := range expectedNode.Status.Conditions {
		expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
	}
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))

	// Update node status again when nothing is changed (except heartbeat time).
	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
	clock.Step(10 * time.Second)
	assert.NoError(t, kubelet.updateNodeStatus())
	// Only 1 more action (there were 4 actions before).
	actions = kubeClient.Actions()
	assert.Len(t, actions, 5)
	assert.IsType(t, core.GetActionImpl{}, actions[4])

	// Update node status again when something is changed.
	// Report node status even if it is still within the duration of nodeStatusReportFrequency.
	clock.Step(10 * time.Second)
	var newMemoryCapacity int64 = 40E9
	kubelet.machineInfo.MemoryCapacity = uint64(newMemoryCapacity)
	assert.NoError(t, kubelet.updateNodeStatus())
	// 2 more actions (there were 5 actions before).
	actions = kubeClient.Actions()
	assert.Len(t, actions, 7)
	assert.IsType(t, core.GetActionImpl{}, actions[5])
	assert.IsType(t, core.PatchActionImpl{}, actions[6])
	patchAction = actions[6].(core.PatchActionImpl)
	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
	require.NoError(t, err)
	memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
	updatedMemoryCapacity, _ := memCapacity.AsInt64()
	assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
	now = metav1.NewTime(clock.Now()).Rfc3339Copy()
	for _, cond := range updatedNode.Status.Conditions {
		// Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
		assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
			"LastHeartbeatTime for condition %v", cond.Type)
		assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
			"LastTransitionTime for condition %v", cond.Type)
	}

	// Update node status when changing pod CIDR.
	// Report node status if it is still within the duration of nodeStatusReportFrequency.
	clock.Step(10 * time.Second)
	assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
	podCIDR := "10.0.0.0/24"
	updatedNode.Spec.PodCIDR = podCIDR
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
	assert.NoError(t, kubelet.updateNodeStatus())
	assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
	// 2 more actions (there were 7 actions before).
	actions = kubeClient.Actions()
	assert.Len(t, actions, 9)
	assert.IsType(t, core.GetActionImpl{}, actions[7])
	assert.IsType(t, core.PatchActionImpl{}, actions[8])
	patchAction = actions[8].(core.PatchActionImpl)

	// Update node status when keeping the pod CIDR.
	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
	clock.Step(10 * time.Second)
	assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
	assert.NoError(t, kubelet.updateNodeStatus())
	// Only 1 more action (there were 9 actions before).
	actions = kubeClient.Actions()
	assert.Len(t, actions, 10)
	assert.IsType(t, core.GetActionImpl{}, actions[9])
}
// TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease verifies that, with the
// NodeLease feature gate disabled, updateNodeStatus reconciles the node's
// VolumesInUse status field against the volume manager: the node object is
// patched to match the volume manager's view, and the volumes present on the
// node are reported back to the volume manager as in-use.
func TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease(t *testing.T) {
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, false)()
	cases := []struct {
		desc                  string
		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
		existingNode          *v1.Node              // existing node object
		expectedNode          *v1.Node              // new node object after patch
		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
	}{
		{
			desc:         "no volumes and no update",
			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			desc:            "volumes inuse on node and volumeManager",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
		{
			// The volume was removed from the volume manager, so the patch
			// must drop it from the node status.
			desc: "volumes inuse on node but not in volumeManager",
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			// The volume manager knows a new volume, so the patch must add it
			// to the node status.
			desc:            "volumes inuse in volumeManager but not on node",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// Setup
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
			// Pretend a report just happened so only a real status change
			// (not the report frequency) can trigger a patch.
			kubelet.lastStatusReportTime = kubelet.clock.Now()
			kubelet.nodeStatusReportFrequency = time.Hour
			kubelet.machineInfo = &cadvisorapi.MachineInfo{}
			// override test volumeManager
			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
			kubelet.volumeManager = fakeVolumeManager
			// Only test VolumesInUse setter
			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
					kubelet.volumeManager.GetVolumesInUse),
			}
			kubeClient := testKubelet.fakeKubeClient
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
			// Execute
			assert.NoError(t, kubelet.updateNodeStatus())
			// Validate
			actions := kubeClient.Actions()
			if tc.expectedNode != nil {
				// Status changed: expect one GET followed by one PATCH.
				assert.Len(t, actions, 2)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
				assert.IsType(t, core.PatchActionImpl{}, actions[1])
				patchAction := actions[1].(core.PatchActionImpl)
				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
			} else {
				// No change: only the GET is issued.
				assert.Len(t, actions, 1)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
			}
			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
		})
	}
}
// TestUpdateNodeStatusAndVolumesInUseWithNodeLease mirrors the WithoutNodeLease
// variant above but with the NodeLease feature gate enabled: a patch is only
// expected when the VolumesInUse status actually changes (tc.expectedNode set);
// an unchanged status results in a GET with no PATCH.
func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
	cases := []struct {
		desc                  string
		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
		existingNode          *v1.Node              // existing node object
		expectedNode          *v1.Node              // new node object after patch
		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
	}{
		{
			// expectedNode is nil: with leases, an unchanged status is not patched.
			desc:         "no volumes and no update",
			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			// expectedNode is nil: node already agrees with the volume manager.
			desc:            "volumes inuse on node and volumeManager",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
		{
			// The volume disappeared from the volume manager; a patch must
			// drop it from the node status.
			desc: "volumes inuse on node but not in volumeManager",
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			// A new volume appeared in the volume manager; a patch must add
			// it to the node status.
			desc:            "volumes inuse in volumeManager but not on node",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// Setup
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
			// Pretend a report just happened so only a real status change
			// (not the report frequency) can trigger a patch.
			kubelet.lastStatusReportTime = kubelet.clock.Now()
			kubelet.nodeStatusReportFrequency = time.Hour
			kubelet.machineInfo = &cadvisorapi.MachineInfo{}
			// override test volumeManager
			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
			kubelet.volumeManager = fakeVolumeManager
			// Only test VolumesInUse setter
			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
					kubelet.volumeManager.GetVolumesInUse),
			}
			kubeClient := testKubelet.fakeKubeClient
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
			// Execute
			assert.NoError(t, kubelet.updateNodeStatus())
			// Validate
			actions := kubeClient.Actions()
			if tc.expectedNode != nil {
				// Status changed: expect one GET followed by one PATCH.
				assert.Len(t, actions, 2)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
				assert.IsType(t, core.PatchActionImpl{}, actions[1])
				patchAction := actions[1].(core.PatchActionImpl)
				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
			} else {
				// No change: only the GET is issued.
				assert.Len(t, actions, 1)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
			}
			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
		})
	}
}
// TestRegisterWithApiServer verifies that registerWithAPIServer terminates
// when node creation reports AlreadyExists and a matching node is returned on
// the subsequent GET. The registration runs in a goroutine so the test can
// fail with a timeout instead of hanging if it never returns.
func TestRegisterWithApiServer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an error on create.
		return true, &v1.Node{}, &apierrors.StatusError{
			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
		}
	})
	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an existing (matching) node on get.
		return true, &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: testKubeletHostname,
				Labels: map[string]string{
					v1.LabelHostname:   testKubeletHostname,
					v1.LabelOS:         goruntime.GOOS,
					v1.LabelArch:       goruntime.GOARCH,
					v1.LegacyLabelOS:   goruntime.GOOS,
					v1.LegacyLabelArch: goruntime.GOARCH,
				},
			},
		}, nil
	})
	addNotImplatedReaction(kubeClient)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	kubelet.machineInfo = machineInfo

	done := make(chan struct{})
	go func() {
		// Closing (rather than sending on) the channel guarantees this
		// goroutine never leaks: it completes even if the test has already
		// failed on timeout and nobody is receiving.
		defer close(done)
		kubelet.registerWithAPIServer()
	}()
	select {
	case <-time.After(wait.ForeverTestTimeout):
		assert.Fail(t, "timed out waiting for registration")
	case <-done:
	}
}
// TestTryRegisterWithApiServer exercises tryRegisterWithAPIServer across the
// create/get/patch outcomes: fresh creation, reconciling the
// controller-managed attach-detach (CMAD) annotation on an existing node, and
// failure propagation from each API call. Each case checks the boolean result,
// the number of API actions issued, and (optionally) the CMAD annotation on
// the node object that was saved.
func TestTryRegisterWithApiServer(t *testing.T) {
	alreadyExists := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
	}
	conflict := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
	}
	// newNode builds a node with the default kubelet labels; cmad toggles the
	// controller-managed attach-detach annotation.
	newNode := func(cmad bool) *v1.Node {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					v1.LabelHostname:   testKubeletHostname,
					v1.LabelOS:         goruntime.GOOS,
					v1.LabelArch:       goruntime.GOARCH,
					v1.LegacyLabelOS:   goruntime.GOOS,
					v1.LegacyLabelArch: goruntime.GOARCH,
				},
			},
		}
		if cmad {
			node.Annotations = make(map[string]string)
			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
		}
		return node
	}
	cases := []struct {
		name            string
		newNode         *v1.Node
		existingNode    *v1.Node
		createError     error
		getError        error
		patchError      error
		deleteError     error
		expectedResult  bool
		expectedActions int
		testSavedNode   bool
		savedNodeIndex  int
		savedNodeCMAD   bool
	}{
		{
			name:            "success case - new node",
			newNode:         &v1.Node{},
			expectedResult:  true,
			expectedActions: 1,
		},
		{
			name:            "success case - existing node - no change in CMAD",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 2,
		},
		{
			name:            "success case - existing node - CMAD disabled",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   false,
		},
		{
			name:            "success case - existing node - CMAD enabled",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(false),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   true,
		},
		{
			name:            "create failed",
			newNode:         newNode(false),
			createError:     conflict,
			expectedResult:  false,
			expectedActions: 1,
		},
		{
			name:            "get existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			getError:        conflict,
			expectedResult:  false,
			expectedActions: 2,
		},
		{
			name:            "update existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			patchError:      conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
	}
	for _, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		// Run each case as a subtest so testKubelet.Cleanup executes at the
		// end of the case rather than piling up (defer-in-loop) until the
		// whole test function returns.
		t.Run(tc.name, func(t *testing.T) {
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubeClient := testKubelet.fakeKubeClient
			kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
				return true, nil, tc.createError
			})
			kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
				// Return an existing (matching) node on get.
				return true, tc.existingNode, tc.getError
			})
			kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
				if action.GetSubresource() == "status" {
					return true, nil, tc.patchError
				}
				return notImplemented(action)
			})
			kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
				return true, nil, tc.deleteError
			})
			addNotImplatedReaction(kubeClient)

			result := kubelet.tryRegisterWithAPIServer(tc.newNode)
			require.Equal(t, tc.expectedResult, result)
			actions := kubeClient.Actions()
			assert.Len(t, actions, tc.expectedActions)
			if tc.testSavedNode {
				var savedNode *v1.Node
				t.Logf("actions: %v: %+v", len(actions), actions)
				action := actions[tc.savedNodeIndex]
				if action.GetVerb() == "create" {
					createAction := action.(core.CreateAction)
					obj := createAction.GetObject()
					require.IsType(t, &v1.Node{}, obj)
					savedNode = obj.(*v1.Node)
				} else if action.GetVerb() == "patch" {
					patchAction := action.(core.PatchActionImpl)
					var err error
					savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
					require.NoError(t, err)
				}
				actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
				assert.Equal(t, tc.savedNodeCMAD, actualCMAD)
			}
		})
	}
}
// TestUpdateNewNodeStatusTooLargeReservation verifies allocatable computation
// when the configured reservation exceeds capacity: CPU (reservation 40000m >
// capacity 2000m) must clamp to zero rather than go negative, while ephemeral
// storage (3000 - 1000) subtracts normally.
func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
	const nodeStatusMaxImages = 5
	// generate one more in inputImageList than we configure the Kubelet to report
	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		// CPU reservation deliberately exceeds capacity to exercise clamping.
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	kubelet.machineInfo = machineInfo
	// Only Status.Allocatable is compared below; the rest of the node is
	// irrelevant to this test.
	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				// CPU clamps to zero instead of going negative.
				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
			},
		},
	}
	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())
	actions := kubeClient.Actions()
	require.Len(t, actions, 2)
	require.True(t, actions[1].Matches("patch", "nodes"))
	require.Equal(t, actions[1].GetSubresource(), "status")
	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
	assert.NoError(t, err)
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
}
// TestUpdateDefaultLabels verifies updateDefaultLabels: the kubelet's default
// labels on initialNode must be copied onto existingNode (creating or
// overwriting them), non-default labels already on existingNode must survive,
// and the returned boolean reports whether existingNode actually changed.
func TestUpdateDefaultLabels(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	// One fixture serves all cases; clean it up exactly once when the test
	// ends (the old per-iteration defer registered duplicate Cleanups).
	defer testKubelet.Cleanup()
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet := testKubelet.kubelet

	cases := []struct {
		name         string
		initialNode  *v1.Node
		existingNode *v1.Node
		needsUpdate  bool
		finalLabels  map[string]string
	}{
		{
			name: "make sure default labels exist",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
			},
		},
		{
			name: "make sure default labels are up to date",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "old-hostname",
						v1.LabelZoneFailureDomain: "old-zone-failure-domain",
						v1.LabelZoneRegion:        "old-zone-region",
						v1.LabelInstanceType:      "old-instance-type",
						v1.LabelOS:                "old-os",
						v1.LabelArch:              "old-arch",
					},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
			},
		},
		{
			name: "make sure existing labels do not get deleted",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
						"please-persist":          "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
				"please-persist":          "foo",
			},
		},
		{
			name: "make sure existing labels do not get deleted when initial node has no opinion",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
						"please-persist":          "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
				"please-persist":          "foo",
			},
		},
		{
			name: "no update needed",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
			},
		},
		{
			name: "not panic when existing node has nil labels",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						v1.LabelOS:                "new-os",
						v1.LabelArch:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				v1.LabelOS:                "new-os",
				v1.LabelArch:              "new-arch",
			},
		},
	}
	for _, tc := range cases {
		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
	}
}
// TestReconcileExtendedResource verifies that reconcileExtendedResource zeroes
// every extended-resource entry (both Capacity and Allocatable) on the
// existing node object while leaving the standard cpu/memory/ephemeral-storage
// resources untouched, and that it correctly reports whether the node object
// needs an update.
func TestReconcileExtendedResource(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	// A single kubelet is shared by every case below, so clean it up exactly
	// once when the test returns. (Previously Cleanup was deferred inside the
	// case loop, which merely stacked redundant deferred calls — defers in a
	// loop do not run per iteration, only at function exit.)
	defer testKubelet.Cleanup()
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet := testKubelet.kubelet
	extendedResourceName1 := v1.ResourceName("test.com/resource1")
	extendedResourceName2 := v1.ResourceName("test.com/resource2")
	cases := []struct {
		name         string
		existingNode *v1.Node
		expectedNode *v1.Node
		needsUpdate  bool
	}{
		{
			name: "no update needed without extended resource",
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			needsUpdate: false,
		},
		{
			name: "extended resource capacity is zeroed",
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
				},
			},
			needsUpdate: true,
		},
	}
	for _, tc := range cases {
		// The "initial" node carries no extended resources, simulating a
		// kubelet restart before device plugins have re-registered.
		initialNode := &v1.Node{}
		needsUpdate := kubelet.reconcileExtendedResource(initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
	}
}
func TestValidateNodeIPParam(t *testing.T) {
type test struct {
nodeIP string
success bool
testName string
}
tests := []test{
{
nodeIP: "",
success: false,
testName: "IP not set",
},
{
nodeIP: "127.0.0.1",
success: false,
testName: "IPv4 loopback address",
},
{
nodeIP: "::1",
success: false,
testName: "IPv6 loopback address",
},
{
nodeIP: "224.0.0.1",
success: false,
testName: "multicast IPv4 address",
},
{
nodeIP: "ff00::1",
success: false,
testName: "multicast IPv6 address",
},
{
nodeIP: "169.254.0.1",
success: false,
testName: "IPv4 link-local unicast address",
},
{
nodeIP: "fe80::0202:b3ff:fe1e:8329",
success: false,
testName: "IPv6 link-local unicast address",
},
{
nodeIP: "0.0.0.0",
success: false,
testName: "Unspecified IPv4 address",
},
{
nodeIP: "::",
success: false,
testName: "Unspecified IPv6 address",
},
{
nodeIP: "1.2.3.4",
success: false,
testName: "IPv4 address that doesn't belong to host",
},
}
addrs, err := net.InterfaceAddrs()
if err != nil {
assert.Error(t, err, fmt.Sprintf(
"Unable to obtain a list of the node's unicast interface addresses."))
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
break
}
successTest := test{
nodeIP: ip.String(),
success: true,
testName: fmt.Sprintf("Success test case for address %s", ip.String()),
}
tests = append(tests, successTest)
}
for _, test := range tests {
err := validateNodeIP(net.ParseIP(test.nodeIP))
if test.success {
assert.NoError(t, err, "test %s", test.testName)
} else {
assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
}
}
}
// TestRegisterWithApiServerWithTaint registers an unschedulable kubelet
// against a fake API server and checks that the node object it creates
// carries the unschedulable NoSchedule taint exactly when the
// TaintNodesByCondition feature gate is enabled.
func TestRegisterWithApiServerWithTaint(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient

	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	kubelet.machineInfo = machineInfo

	// Capture the node object the kubelet submits on registration.
	var gotNode runtime.Object
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		createAction := action.(core.CreateAction)
		gotNode = createAction.GetObject()
		return true, gotNode, nil
	})
	addNotImplatedReaction(kubeClient)

	// Make node to be unschedulable.
	kubelet.registerSchedulable = false

	forEachFeatureGate(t, []utilfeature.Feature{features.TaintNodesByCondition}, func(t *testing.T) {
		// Reset kubelet status for each test.
		kubelet.registrationCompleted = false

		// Register node to apiserver.
		kubelet.registerWithAPIServer()

		// The unschedulable taint must be present iff the gate is enabled.
		got := gotNode.(*v1.Node)
		unschedulableTaint := &v1.Taint{
			Key:    schedulerapi.TaintNodeUnschedulable,
			Effect: v1.TaintEffectNoSchedule,
		}
		require.Equal(t,
			utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
			taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
			"test unschedulable taint for TaintNodesByCondition")
		// NOTE: the bare `return` the original had here was a no-op at the
		// end of the closure (staticcheck S1023) and has been removed.
	})
}
// TestNodeStatusHasChanged exercises nodeStatusHasChanged, which decides
// whether a node status update must be pushed to the API server. A change in
// LastHeartbeatTime alone, or a mere reordering of conditions, must NOT count
// as a change; differences in condition status, transition time, condition
// count, phase, or nil-ness must. The test also verifies that the comparison
// does not mutate its inputs.
func TestNodeStatusHasChanged(t *testing.T) {
	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
	readyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	// Same as readyCondition except for a newer heartbeat — must be treated
	// as "unchanged" by nodeStatusHasChanged.
	readyConditionAtDiffHearbeatTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeNow,
	}
	// A newer transition time, by contrast, is a real change.
	readyConditionAtDiffTransitionTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeFuture,
	}
	notReadyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	memoryPressureCondition := v1.NodeCondition{
		Type:               v1.NodeMemoryPressure,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	testcases := []struct {
		name           string
		originalStatus *v1.NodeStatus
		status         *v1.NodeStatus
		expectChange   bool
	}{
		{
			name:           "Node status does not change with nil status.",
			originalStatus: nil,
			status:         nil,
			expectChange:   false,
		},
		{
			name:           "Node status does not change with default status.",
			originalStatus: &v1.NodeStatus{},
			status:         &v1.NodeStatus{},
			expectChange:   false,
		},
		{
			name:           "Node status changes with nil and default status.",
			originalStatus: nil,
			status:         &v1.NodeStatus{},
			expectChange:   true,
		},
		{
			name:           "Node status changes with nil and status.",
			originalStatus: nil,
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name:           "Node status does not change with empty conditions.",
			originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			status:         &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			expectChange:   false,
		},
		{
			name: "Node status does not change",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if heartbeat time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffHearbeatTime, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if the orders of conditions are different.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHearbeatTime},
			},
			expectChange: false,
		},
		{
			name: "Node status changes if condition status differs.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes if transition time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different number of conditions.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different phase.",
			originalStatus: &v1.NodeStatus{
				Phase:      v1.NodePending,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Phase:      v1.NodeRunning,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			expectChange: true,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			// Deep-copy the inputs so we can verify afterwards that
			// nodeStatusHasChanged did not mutate either argument.
			originalStatusCopy := tc.originalStatus.DeepCopy()
			statusCopy := tc.status.DeepCopy()
			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
			assert.Equal(t, tc.expectChange, changed, "Expect node status change to be %t, but got %t.", tc.expectChange, changed)
			assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", diff.ObjectDiff(originalStatusCopy, tc.originalStatus))
			assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", diff.ObjectDiff(statusCopy, tc.status))
		})
	}
}
| vmware/kubernetes | pkg/kubelet/kubelet_node_status_test.go | GO | apache-2.0 | 79,261 |
package com.lijuyong.startup.auth;
import com.lijuyong.startup.auth.filter.JwtAuthenticationFilter;
import com.lijuyong.startup.auth.filter.JwtLoginFilter;
import com.lijuyong.startup.auth.security.JwtAuthenticationSuccessHandler;
import com.lijuyong.startup.auth.security.RestAuthenticationEntryPoint;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.builders.WebSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
/**
* Created by john on 2017/3/8.
*/
@Configuration
@EnableWebSecurity
public class WebSecurityConfig extends WebSecurityConfigurerAdapter {

    // Entry point that handles unauthenticated requests for this REST-style
    // API (instead of a browser login-page redirect).
    @Autowired
    private RestAuthenticationEntryPoint restAuthenticationEntryPoint;

    // User lookup service backing the authentication manager.
    @Autowired
    private UserDetailsService userDetailsService;

    /**
     * Wires the user details service and the BCrypt password encoder into
     * Spring Security's authentication manager.
     */
    @Autowired
    public void configureAuthentication(AuthenticationManagerBuilder authenticationManagerBuilder) throws Exception {
        authenticationManagerBuilder
                .userDetailsService(this.userDetailsService)
                .passwordEncoder(passwordEncoder());
    }

    /**
     * Password encoder used to hash and verify stored credentials.
     */
    @Bean
    public PasswordEncoder passwordEncoder() {
        //$2a$10$qsYvMwvld7FMGKp45AQjpun6otC8b.eFN7Be5KAr0vuEQWgT.uvgm
        // The BCrypt hash above corresponds to the plaintext password "111".
        // return new BCryptPasswordEncoder();
        return new BCryptPasswordEncoder();
    }

    /**
     * Filter that validates the JWT carried by each request.
     */
    @Bean
    public JwtAuthenticationFilter authenticationTokenFilterBean() throws Exception {
        return new JwtAuthenticationFilter();
    }

    /**
     * Login filter bound to POST /auth/signin; on success the handler issues
     * a JWT to the client.
     */
    @Bean
    public JwtLoginFilter jwtLoginFilterBean() throws Exception {
        JwtLoginFilter jwtLoginFilter = new JwtLoginFilter("/auth/signin",
                authenticationManager());
        jwtLoginFilter.setAuthenticationSuccessHandler(new JwtAuthenticationSuccessHandler());
        return jwtLoginFilter;
    }

    @Override
    public void configure(WebSecurity webSecurity) {
    }

    /**
     * HTTP security chain: stateless sessions, CSRF disabled (token-based
     * auth), JWT filters inserted before the username/password filter.
     *
     * NOTE(review): antMatchers("/**").permitAll() makes every request public
     * before anyRequest().authenticated() is ever consulted — the
     * commented-out line suggests only /auth/open was meant to be open.
     * Confirm this is intentional and not a debugging leftover.
     */
    @Override
    protected void configure(HttpSecurity httpSecurity) throws Exception {
        httpSecurity
                // we don't need CSRF because our token is invulnerable
                .csrf().disable()

                .exceptionHandling().authenticationEntryPoint(restAuthenticationEntryPoint)

                .and()
                // don't create session
                .sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS)
                .and()

                .authorizeRequests()
                .antMatchers("/**").permitAll()
                //.antMatchers("/auth/open").permitAll()
                .anyRequest().authenticated();

        //custom login for jwt token generation
        httpSecurity.addFilterBefore(jwtLoginFilterBean(),
                UsernamePasswordAuthenticationFilter.class);

        // Custom JWT based auth filter
        httpSecurity
                .addFilterBefore(authenticationTokenFilterBean(),
                        UsernamePasswordAuthenticationFilter.class);

        // disable page caching
        httpSecurity.headers().cacheControl();
    }
} | capesonlee/tangtang-spring-cloud | zuul-server/src/main/java/com/lijuyong/startup/auth/WebSecurityConfig.java | Java | apache-2.0 | 3,843 |
namespace ChinaUnion_Agent.InvoiceForm
{
    /// <summary>
    /// Designer-generated half of the invoice payment modification dialog
    /// ("支付修改"). Declares the form's controls and their layout. Do not edit
    /// InitializeComponent by hand — it is serialized by the WinForms
    /// designer and hand edits will be overwritten.
    /// </summary>
    partial class frmAgentInvoicePaymentModification
    {
        /// <summary>
        /// Required designer variable.
        /// </summary>
        private System.ComponentModel.IContainer components = null;

        /// <summary>
        /// Clean up any resources being used.
        /// </summary>
        /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
        protected override void Dispose(bool disposing)
        {
            if (disposing && (components != null))
            {
                components.Dispose();
            }
            base.Dispose(disposing);
        }

        #region Windows Form Designer generated code

        /// <summary>
        /// Required method for Designer support - do not modify
        /// the contents of this method with the code editor.
        /// </summary>
        private void InitializeComponent()
        {
            this.groupBox3 = new System.Windows.Forms.GroupBox();
            this.txtPayStatus = new System.Windows.Forms.TextBox();
            this.label9 = new System.Windows.Forms.Label();
            this.txtSummary = new System.Windows.Forms.TextBox();
            this.label8 = new System.Windows.Forms.Label();
            this.txtPayFee = new System.Windows.Forms.TextBox();
            this.label7 = new System.Windows.Forms.Label();
            this.txtInvoiceFee = new System.Windows.Forms.TextBox();
            this.label6 = new System.Windows.Forms.Label();
            this.txtAgentNo = new System.Windows.Forms.TextBox();
            this.label5 = new System.Windows.Forms.Label();
            this.txtAgentName = new System.Windows.Forms.TextBox();
            this.label4 = new System.Windows.Forms.Label();
            this.txtProcessTime = new System.Windows.Forms.TextBox();
            this.btnSave = new System.Windows.Forms.Button();
            this.btnClose = new System.Windows.Forms.Button();
            this.label2 = new System.Windows.Forms.Label();
            this.groupBox3.SuspendLayout();
            this.SuspendLayout();
            //
            // groupBox3
            //
            this.groupBox3.Controls.Add(this.txtPayStatus);
            this.groupBox3.Controls.Add(this.label9);
            this.groupBox3.Controls.Add(this.txtSummary);
            this.groupBox3.Controls.Add(this.label8);
            this.groupBox3.Controls.Add(this.txtPayFee);
            this.groupBox3.Controls.Add(this.label7);
            this.groupBox3.Controls.Add(this.txtInvoiceFee);
            this.groupBox3.Controls.Add(this.label6);
            this.groupBox3.Controls.Add(this.txtAgentNo);
            this.groupBox3.Controls.Add(this.label5);
            this.groupBox3.Controls.Add(this.txtAgentName);
            this.groupBox3.Controls.Add(this.label4);
            this.groupBox3.Controls.Add(this.txtProcessTime);
            this.groupBox3.Controls.Add(this.btnSave);
            this.groupBox3.Controls.Add(this.btnClose);
            this.groupBox3.Controls.Add(this.label2);
            this.groupBox3.Dock = System.Windows.Forms.DockStyle.Fill;
            this.groupBox3.Location = new System.Drawing.Point(0, 0);
            this.groupBox3.Name = "groupBox3";
            this.groupBox3.Size = new System.Drawing.Size(497, 353);
            this.groupBox3.TabIndex = 10;
            this.groupBox3.TabStop = false;
            //
            // txtPayStatus
            //
            this.txtPayStatus.Location = new System.Drawing.Point(136, 256);
            this.txtPayStatus.Name = "txtPayStatus";
            this.txtPayStatus.Size = new System.Drawing.Size(297, 21);
            this.txtPayStatus.TabIndex = 36;
            //
            // label9
            //
            this.label9.AutoSize = true;
            this.label9.Location = new System.Drawing.Point(56, 259);
            this.label9.Name = "label9";
            this.label9.Size = new System.Drawing.Size(59, 12);
            this.label9.TabIndex = 35;
            this.label9.Text = "票据状态:";
            //
            // txtSummary
            //
            this.txtSummary.Location = new System.Drawing.Point(136, 219);
            this.txtSummary.Name = "txtSummary";
            this.txtSummary.Size = new System.Drawing.Size(297, 21);
            this.txtSummary.TabIndex = 34;
            //
            // label8
            //
            this.label8.AutoSize = true;
            this.label8.Location = new System.Drawing.Point(56, 222);
            this.label8.Name = "label8";
            this.label8.Size = new System.Drawing.Size(35, 12);
            this.label8.TabIndex = 33;
            this.label8.Text = "摘要:";
            //
            // txtPayFee
            //
            this.txtPayFee.Location = new System.Drawing.Point(136, 182);
            this.txtPayFee.Name = "txtPayFee";
            this.txtPayFee.Size = new System.Drawing.Size(297, 21);
            this.txtPayFee.TabIndex = 32;
            //
            // label7
            //
            this.label7.AutoSize = true;
            this.label7.Location = new System.Drawing.Point(56, 185);
            this.label7.Name = "label7";
            this.label7.Size = new System.Drawing.Size(59, 12);
            this.label7.TabIndex = 31;
            this.label7.Text = "付款金额:";
            //
            // txtInvoiceFee
            //
            this.txtInvoiceFee.Location = new System.Drawing.Point(136, 145);
            this.txtInvoiceFee.Name = "txtInvoiceFee";
            this.txtInvoiceFee.Size = new System.Drawing.Size(297, 21);
            this.txtInvoiceFee.TabIndex = 30;
            //
            // label6
            //
            this.label6.AutoSize = true;
            this.label6.Location = new System.Drawing.Point(56, 148);
            this.label6.Name = "label6";
            this.label6.Size = new System.Drawing.Size(59, 12);
            this.label6.TabIndex = 29;
            this.label6.Text = "发票金额:";
            //
            // txtAgentNo
            //
            this.txtAgentNo.Enabled = false;
            this.txtAgentNo.Location = new System.Drawing.Point(136, 34);
            this.txtAgentNo.Name = "txtAgentNo";
            this.txtAgentNo.Size = new System.Drawing.Size(297, 21);
            this.txtAgentNo.TabIndex = 28;
            //
            // label5
            //
            this.label5.AutoSize = true;
            this.label5.Location = new System.Drawing.Point(56, 37);
            this.label5.Name = "label5";
            this.label5.Size = new System.Drawing.Size(71, 12);
            this.label5.TabIndex = 27;
            this.label5.Text = "代理商编号:";
            //
            // txtAgentName
            //
            this.txtAgentName.Location = new System.Drawing.Point(136, 71);
            this.txtAgentName.Name = "txtAgentName";
            this.txtAgentName.Size = new System.Drawing.Size(297, 21);
            this.txtAgentName.TabIndex = 26;
            //
            // label4
            //
            this.label4.AutoSize = true;
            this.label4.Location = new System.Drawing.Point(56, 74);
            this.label4.Name = "label4";
            this.label4.Size = new System.Drawing.Size(71, 12);
            this.label4.TabIndex = 25;
            this.label4.Text = "代理商全称:";
            //
            // txtProcessTime
            //
            this.txtProcessTime.Enabled = false;
            this.txtProcessTime.Location = new System.Drawing.Point(136, 108);
            this.txtProcessTime.Name = "txtProcessTime";
            this.txtProcessTime.Size = new System.Drawing.Size(297, 21);
            this.txtProcessTime.TabIndex = 22;
            //
            // btnSave
            //
            this.btnSave.DialogResult = System.Windows.Forms.DialogResult.Cancel;
            this.btnSave.Location = new System.Drawing.Point(145, 294);
            this.btnSave.Name = "btnSave";
            this.btnSave.Size = new System.Drawing.Size(75, 31);
            this.btnSave.TabIndex = 20;
            this.btnSave.Text = "保存";
            this.btnSave.UseVisualStyleBackColor = true;
            this.btnSave.Click += new System.EventHandler(this.btnSave_Click);
            //
            // btnClose
            //
            this.btnClose.DialogResult = System.Windows.Forms.DialogResult.Cancel;
            this.btnClose.Location = new System.Drawing.Point(290, 294);
            this.btnClose.Name = "btnClose";
            this.btnClose.Size = new System.Drawing.Size(75, 31);
            this.btnClose.TabIndex = 19;
            this.btnClose.Text = "关闭";
            this.btnClose.UseVisualStyleBackColor = true;
            this.btnClose.Click += new System.EventHandler(this.btnClose_Click);
            //
            // label2
            //
            this.label2.AutoSize = true;
            this.label2.Location = new System.Drawing.Point(56, 111);
            this.label2.Name = "label2";
            this.label2.Size = new System.Drawing.Size(59, 12);
            this.label2.TabIndex = 2;
            this.label2.Text = "处理时间:";
            //
            // frmAgentInvoicePaymentModification
            //
            this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 12F);
            this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
            this.ClientSize = new System.Drawing.Size(497, 353);
            this.Controls.Add(this.groupBox3);
            this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedToolWindow;
            this.Name = "frmAgentInvoicePaymentModification";
            this.ShowIcon = false;
            this.Text = "支付修改";
            this.Load += new System.EventHandler(this.frmAgentInvoicePaymentModification_Load);
            this.groupBox3.ResumeLayout(false);
            this.groupBox3.PerformLayout();
            this.ResumeLayout(false);

        }

        #endregion

        // Control fields assigned in InitializeComponent.
        private System.Windows.Forms.GroupBox groupBox3;
        private System.Windows.Forms.TextBox txtAgentNo;
        private System.Windows.Forms.Label label5;
        private System.Windows.Forms.TextBox txtAgentName;
        private System.Windows.Forms.Label label4;
        private System.Windows.Forms.TextBox txtProcessTime;
        private System.Windows.Forms.Button btnSave;
        private System.Windows.Forms.Button btnClose;
        private System.Windows.Forms.Label label2;
        private System.Windows.Forms.TextBox txtSummary;
        private System.Windows.Forms.Label label8;
        private System.Windows.Forms.TextBox txtPayFee;
        private System.Windows.Forms.Label label7;
        private System.Windows.Forms.TextBox txtInvoiceFee;
        private System.Windows.Forms.Label label6;
        private System.Windows.Forms.TextBox txtPayStatus;
        private System.Windows.Forms.Label label9;
    }
} | ZhouAnPing/Mail | Mail/ChinaUnion_Agent/InvoiceForm/frmAgentInvoicePaymentModification.Designer.cs | C# | apache-2.0 | 10,983 |
-- Drop the old definition first (required if the return type changes).
-- IF EXISTS keeps the script re-runnable on a fresh database, and the
-- terminating semicolon is mandatory: without it the following CREATE OR
-- REPLACE was fused into this statement and the whole script failed to parse.
DROP FUNCTION IF EXISTS ejecutar_rerate();
/*
 * Runs the full rerate for everything staged in balance_temp:
 *   1. For every date in the staged window, flag each balance row whose
 *      (date, destination, carriers) key has no staged counterpart
 *      (statuscero sets status=0 on those rows).
 *   2. Reconcile each staged row into balance via compara_balances.
 *   3. Record a 'Rerate Completado' entry in the log table.
 * Returns the log_action record used for the completion entry.
 * NOTE(review): the INSERT into log fires the `rerate` trigger again; it
 * terminates because condicion() only acts when the latest log action is
 * 'Rerate' — confirm this remains true if trigger conditions change.
 */
CREATE OR REPLACE FUNCTION ejecutar_rerate()
  RETURNS record AS
$BODY$
DECLARE
	b RECORD;
	t RECORD;
	result boolean;
	min date;
	max date;
	idAction RECORD;
BEGIN
	SELECT * INTO idAction FROM log_action WHERE name = 'Rerate Completado';
	-- Date window covered by the staged rows.
	SELECT MIN(date_balance), MAX(date_balance) INTO min, max FROM balance_temp;
	WHILE min <= max LOOP
		FOR b IN SELECT id FROM balance WHERE date_balance=min ORDER BY id ASC LOOP
			SELECT statuscero(b.id) INTO result;
		END LOOP;
		min:=min + '1 days'::interval;
	END LOOP;
	-- Reconcile every staged row into the live balance table.
	FOR t IN SELECT * FROM balance_temp ORDER BY id ASC LOOP
		SELECT compara_balances(t.id) INTO result;
	END LOOP;
	INSERT INTO log(date, hour, id_log_action, id_users, description_date) VALUES (current_date, current_time, idAction.id, 1, current_date);
	RETURN idAction;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION ejecutar_rerate()
  OWNER TO postgres;
/*
 * Marks a balance row as stale when no staged counterpart exists.
 * (The original header said "compares two balances", which was copy-pasted
 * from compara_balances and did not describe this function.)
 * Looks up balance row `ide`, then searches balance_temp for a row with the
 * same (date, destination or destination_int, supplier, customer) key.
 * If none exists the balance row gets status=0 and false is returned;
 * otherwise the row is left untouched and true is returned.
 */
CREATE OR REPLACE FUNCTION statuscero(ide integer)
  RETURNS boolean AS
$BODY$
DECLARE
	b RECORD;
	t RECORD;
BEGIN
	-- Load the balance row under inspection.
	SELECT * INTO b FROM balance WHERE id=ide;
	-- Find its staged counterpart; national rows key on id_destination,
	-- international rows on id_destination_int.
	IF b.id_destination IS NOT NULL THEN
		SELECT * INTO t FROM balance_temp WHERE date_balance=b.date_balance AND id_destination=b.id_destination AND id_carrier_supplier=b.id_carrier_supplier AND id_carrier_customer=b.id_carrier_customer;
	ELSE
		SELECT * INTO t FROM balance_temp WHERE date_balance=b.date_balance AND id_destination_int=b.id_destination_int AND id_carrier_supplier=b.id_carrier_supplier AND id_carrier_customer=b.id_carrier_customer;
	END IF;
	-- No staged counterpart: flag the row and report false.
	IF t.id IS NULL THEN
		UPDATE balance SET status=0 WHERE id=b.id;
		RETURN false;
	ELSE
		RETURN true;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION statuscero(integer)
  OWNER TO postgres;
/*
 * Reconciles one staged row (balance_temp.id = ide) against the live
 * balance table:
 *   - identical match found        -> drop the staged row (nothing to do);
 *   - differing match found        -> archive the live row to rrhistory,
 *                                     overwrite it with the staged values,
 *                                     then drop the staged row;
 *   - no active (status=1) match   -> insert the staged row as a new
 *                                     balance row and drop it from staging.
 * Returns false only when archiving or updating fails (in which case the
 * staged row is kept for a retry).
 */
CREATE OR REPLACE FUNCTION compara_balances(ide integer)
  RETURNS boolean AS
$BODY$
DECLARE
	b RECORD;
	t RECORD;
	rr boolean;
	bb boolean;
	tt boolean;
BEGIN
	-- Load the staged row.
	SELECT * INTO t FROM balance_temp WHERE id=ide;
	-- Find the matching active balance row; national rows key on
	-- id_destination, international rows on id_destination_int.
	IF t.id_destination IS NOT NULL THEN
		SELECT * INTO b FROM balance WHERE date_balance=t.date_balance AND id_carrier_supplier=t.id_carrier_supplier AND id_destination=t.id_destination AND status=1 AND id_carrier_customer=t.id_carrier_customer;
	ELSE
		SELECT * INTO b FROM balance WHERE date_balance=t.date_balance AND id_carrier_supplier=t.id_carrier_supplier AND id_destination_int=t.id_destination_int AND status=1 AND id_carrier_customer=t.id_carrier_customer;
	END IF;
	-- Did the lookup return anything?
	IF b.id IS NOT NULL THEN
		IF b.minutes=t.minutes AND b.revenue=t.revenue AND b.cost=t.cost AND b.margin=t.margin THEN
			-- Key figures are identical: discard the staged row.
			DELETE FROM balance_temp WHERE id=t.id;
			RETURN true;
		ELSE
			-- Figures differ: archive the current live row first...
			SELECT pasar_a_rrhistory(b.id) INTO rr;
			-- ...then overwrite it with the staged values.
			IF rr=true THEN
				SELECT actualizar_balance(t.id, b.id) INTO bb;
				IF bb=true THEN
					DELETE FROM balance_temp WHERE id=t.id;
					RETURN true;
				ELSE
					RETURN false;
				END IF;
			ELSE
				RETURN false;
			END IF;
		END IF;
	ELSE
		-- No active match: promote the staged row straight into balance.
		SELECT pasar_a_balance(t.id) INTO tt;
		IF tt=true THEN
			DELETE FROM balance_temp WHERE id=t.id;
		END IF;
		RETURN true;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION compara_balances(integer)
  OWNER TO postgres;
/*
 * Overwrites balance row `bid` with the metric columns of balance_temp row
 * `tid` (minutes, revenue, cost, margin, quality indicators, date_change).
 * Key columns (date, destination, carriers, status) are NOT touched.
 * Returns true on success, false when the staged row does not exist.
 */
CREATE OR REPLACE FUNCTION actualizar_balance(tid integer, bid integer)
  RETURNS boolean AS
$BODY$
DECLARE
	t RECORD;
BEGIN
	SELECT * INTO t FROM balance_temp WHERE id=tid;
	IF t.id IS NOT NULL THEN
		UPDATE balance SET minutes=t.minutes, acd=t.acd, asr=t.asr, margin_percentage=t.margin_percentage, margin_per_minute=t.margin_per_minute, cost_per_minute=t.cost_per_minute, revenue_per_minute=t.revenue_per_minute, pdd=t.pdd, incomplete_calls=t.incomplete_calls, incomplete_calls_ner=t.incomplete_calls_ner, complete_calls=t.complete_calls, complete_calls_ner=t.complete_calls_ner, calls_attempts=t.calls_attempts, duration_real=t.duration_real, duration_cost=t.duration_cost, ner02_efficient=t.ner02_efficient, ner02_seizure=t.ner02_seizure, pdd_calls=t.pdd_calls, revenue=t.revenue, cost=t.cost, margin=t.margin, date_change=t.date_change WHERE id=bid;
		RETURN true;
	ELSE
		RETURN false;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION actualizar_balance(integer, integer)
  OWNER TO postgres;
/*
 * Copies balance_temp row `ide` into the balance table as a new active row
 * (status hard-coded to 1). The staged row itself is NOT deleted here — the
 * caller (compara_balances) removes it after a successful copy.
 * Returns true on success, false when the staged row does not exist.
 */
CREATE OR REPLACE FUNCTION pasar_a_balance(ide integer)
  RETURNS boolean AS
$BODY$
DECLARE
	ids RECORD;
BEGIN
	SELECT * INTO ids FROM balance_temp WHERE id=ide;
	IF ids.id IS NOT NULL THEN
		INSERT INTO balance(date_balance, minutes, acd, asr, margin_percentage, margin_per_minute, cost_per_minute, revenue_per_minute, pdd, incomplete_calls, incomplete_calls_ner, complete_calls, complete_calls_ner, calls_attempts, duration_real, duration_cost, ner02_efficient, ner02_seizure, pdd_calls, revenue, cost, margin, date_change, id_carrier_supplier, id_destination, id_destination_int, status, id_carrier_customer) VALUES (ids.date_balance, ids.minutes, ids.acd, ids.asr, ids.margin_percentage, ids.margin_per_minute, ids.cost_per_minute, ids.revenue_per_minute, ids.pdd, ids.incomplete_calls, ids.incomplete_calls_ner, ids.complete_calls, ids.complete_calls_ner, ids.calls_attempts, ids.duration_real, ids.duration_cost, ids.ner02_efficient, ids.ner02_seizure, ids.pdd_calls, ids.revenue, ids.cost, ids.margin, ids.date_change, ids.id_carrier_supplier, ids.id_destination, ids.id_destination_int, 1, ids.id_carrier_customer);
		RETURN true;
	ELSE
		RETURN false;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION pasar_a_balance(integer)
  OWNER TO postgres;
/*
 * Archives balance row `ide` into rrhistory before it is overwritten by a
 * rerate, preserving a link back via rrhistory.id_balance.
 * Returns true on success, false when the balance row does not exist.
 */
CREATE OR REPLACE FUNCTION pasar_a_rrhistory(ide integer)
  RETURNS boolean AS
$BODY$
DECLARE
	ids RECORD;
BEGIN
	SELECT * INTO ids FROM balance WHERE id=ide;
	IF ids.id IS NOT NULL THEN
		INSERT INTO rrhistory(date_balance, minutes, acd, asr, margin_percentage, margin_per_minute, cost_per_minute, revenue_per_minute, pdd, incomplete_calls, incomplete_calls_ner, complete_calls, complete_calls_ner, calls_attempts, duration_real, duration_cost, ner02_efficient, ner02_seizure, pdd_calls, revenue, cost, margin, date_change, id_balance, id_destination, id_destination_int, id_carrier_supplier, id_carrier_customer) VALUES(ids.date_balance, ids.minutes, ids.acd, ids.asr, ids.margin_percentage, ids.margin_per_minute, ids.cost_per_minute, ids.revenue_per_minute, ids.pdd, ids.incomplete_calls, ids.incomplete_calls_ner, ids.complete_calls, ids.complete_calls_ner, ids.calls_attempts, ids.duration_real, ids.duration_cost, ids.ner02_efficient, ids.ner02_seizure, ids.pdd_calls, ids.revenue, ids.cost, ids.margin, ids.date_change, ids.id, ids.id_destination, ids.id_destination_int, ids.id_carrier_supplier, ids.id_carrier_customer);
		RETURN true;
	ELSE
		RETURN false;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION pasar_a_rrhistory(integer)
  OWNER TO postgres;
/*
 * Stages every balance row for a given date into balance_temp (e.g. to
 * prepare a rerate of that day). Rows are copied, not moved. Always returns
 * true.
 */
CREATE OR REPLACE FUNCTION pasar_a_balance_temp(fecha date)
  RETURNS boolean AS
$BODY$
DECLARE
	ids RECORD;
BEGIN
	FOR ids IN SELECT * FROM balance WHERE date_balance=fecha LOOP
		INSERT INTO balance_temp(date_balance, minutes, acd, asr, margin_percentage, margin_per_minute, cost_per_minute, revenue_per_minute, pdd, incomplete_calls, incomplete_calls_ner, complete_calls, complete_calls_ner, calls_attempts, duration_real, duration_cost, ner02_efficient, ner02_seizure, pdd_calls, revenue, cost, margin, date_change, id_destination, id_destination_int, id_carrier_supplier, id_carrier_customer) VALUES (ids.date_balance, ids.minutes, ids.acd, ids.asr, ids.margin_percentage, ids.margin_per_minute, ids.cost_per_minute, ids.revenue_per_minute, ids.pdd, ids.incomplete_calls, ids.incomplete_calls_ner, ids.complete_calls, ids.complete_calls_ner, ids.calls_attempts, ids.duration_real, ids.duration_cost, ids.ner02_efficient, ids.ner02_seizure, ids.pdd_calls, ids.revenue, ids.cost, ids.margin, ids.date_change, ids.id_destination, ids.id_destination_int, ids.id_carrier_supplier, ids.id_carrier_customer);
	END LOOP;
	RETURN true;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION pasar_a_balance_temp(date)
  OWNER TO postgres;
/*
 * Stages every archived rrhistory row for a given date into balance_temp
 * (e.g. to replay a previously archived day). Rows are copied, not moved.
 * Always returns true.
 */
CREATE OR REPLACE FUNCTION rrhistory_a_balance_temp(fecha date)
  RETURNS boolean AS
$BODY$
DECLARE
	ids RECORD;
BEGIN
	FOR ids IN SELECT * FROM rrhistory WHERE date_balance=fecha LOOP
		INSERT INTO balance_temp(date_balance, minutes, acd, asr, margin_percentage, margin_per_minute, cost_per_minute, revenue_per_minute, pdd, incomplete_calls, incomplete_calls_ner, complete_calls, complete_calls_ner, calls_attempts, duration_real, duration_cost, ner02_efficient, ner02_seizure, pdd_calls, revenue, cost, margin, date_change, id_destination, id_destination_int, id_carrier_supplier, id_carrier_customer) VALUES (ids.date_balance, ids.minutes, ids.acd, ids.asr, ids.margin_percentage, ids.margin_per_minute, ids.cost_per_minute, ids.revenue_per_minute, ids.pdd, ids.incomplete_calls, ids.incomplete_calls_ner, ids.complete_calls, ids.complete_calls_ner, ids.calls_attempts, ids.duration_real, ids.duration_cost, ids.ner02_efficient, ids.ner02_seizure, ids.pdd_calls, ids.revenue, ids.cost, ids.margin, ids.date_change, ids.id_destination, ids.id_destination_int, ids.id_carrier_supplier, ids.id_carrier_customer);
	END LOOP;
	RETURN true;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION rrhistory_a_balance_temp(date)
  OWNER TO postgres;
/*
 * Trigger function fired AFTER INSERT ON log (statement level): when the most
 * recent log entry is a 'Rerate' action it kicks off ejecutar_rerate().
 * The rerate itself inserts a 'Rerate Completado' log row, which re-fires
 * this trigger; that second invocation falls through to RETURN NULL, so the
 * recursion terminates after one level.
 * NOTE(review): `RETURN result` hands a generic RECORD back from a function
 * declared RETURNS trigger — the return value of an AFTER trigger is ignored,
 * but returning NULL in both branches would be the conventional form; verify
 * this does not raise at runtime on your PostgreSQL version.
 */
CREATE OR REPLACE FUNCTION condicion()
  RETURNS trigger AS
$BODY$
DECLARE
	valor integer;
	result RECORD;
	registro RECORD;
	es RECORD;
BEGIN
	SELECT * INTO es FROM log_action WHERE name='Rerate';
	-- Only the most recent log entry decides whether a rerate is requested.
	SELECT * INTO registro FROM log order by id desc limit 1;
	IF registro.id_log_action=es.id THEN
		SELECT ejecutar_rerate() INTO result;
		RETURN result;
	ELSE
		RETURN NULL;
	END IF;
END;
$BODY$
  LANGUAGE plpgsql VOLATILE
  COST 100;
ALTER FUNCTION condicion()
  OWNER TO postgres;
-- IF EXISTS keeps the script runnable on a database where the trigger has
-- never been created (plain DROP TRIGGER would abort the whole script there).
DROP TRIGGER IF EXISTS rerate ON log;
-- Statement-level trigger: after any INSERT into log, condicion() checks the
-- latest entry and launches the rerate when that entry is a 'Rerate' action.
CREATE TRIGGER rerate
  AFTER INSERT ON log
  FOR EACH STATEMENT
EXECUTE PROCEDURE condicion(); | angelosuarez/PUNTO_VENTA | src/db/declarar_funciones.sql | SQL | apache-2.0 | 11,097 |
/**
* <copyright>
* </copyright>
*
* $Id$
*/
package Leveleditor;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.eclipse.emf.common.util.Enumerator;
/**
* <!-- begin-user-doc -->
* A representation of the literals of the enumeration '<em><b>EElement</b></em>',
* and utility methods for working with them.
* <!-- end-user-doc -->
* @see Leveleditor.LeveleditorPackage#getEElement()
* @model
* @generated
*/
public enum EElement implements Enumerator {
    /**
     * The '<em><b>LIGHTNING</b></em>' literal object.
     * @see #LIGHTNING_VALUE
     */
    LIGHTNING(0, "LIGHTNING", "LIGHTNING"),

    /**
     * The '<em><b>POISON</b></em>' literal object.
     * @see #POISON_VALUE
     */
    POISON(1, "POISON", "POISON"),

    /**
     * The '<em><b>FIRE</b></em>' literal object.
     * @see #FIRE_VALUE
     */
    FIRE(2, "FIRE", "FIRE"),

    /**
     * The '<em><b>ICE</b></em>' literal object.
     * @see #ICE_VALUE
     */
    ICE(3, "ICE", "ICE"),

    /**
     * The '<em><b>NORMAL</b></em>' literal object.
     * @see #NORMAL_VALUE
     */
    NORMAL(4, "NORMAL", "NORMAL");

    /** The integer value of the '<em><b>LIGHTNING</b></em>' literal. @see #LIGHTNING */
    public static final int LIGHTNING_VALUE = 0;

    /** The integer value of the '<em><b>POISON</b></em>' literal. @see #POISON */
    public static final int POISON_VALUE = 1;

    /** The integer value of the '<em><b>FIRE</b></em>' literal. @see #FIRE */
    public static final int FIRE_VALUE = 2;

    /** The integer value of the '<em><b>ICE</b></em>' literal. @see #ICE */
    public static final int ICE_VALUE = 3;

    /** The integer value of the '<em><b>NORMAL</b></em>' literal. @see #NORMAL */
    public static final int NORMAL_VALUE = 4;

    /** Cached array of all enumerators, shared by the lookup helpers. */
    private static final EElement[] ALL = values();

    /** A public read-only list of all the '<em><b>EElement</b></em>' enumerators. */
    public static final List<EElement> VALUES =
            Collections.unmodifiableList(Arrays.asList(ALL));

    /**
     * Returns the '<em><b>EElement</b></em>' literal with the specified literal
     * value, or <code>null</code> if there is no match.
     */
    public static EElement get(String literal) {
        for (EElement element : ALL) {
            if (element.toString().equals(literal)) {
                return element;
            }
        }
        return null;
    }

    /**
     * Returns the '<em><b>EElement</b></em>' literal with the specified name,
     * or <code>null</code> if there is no match.
     */
    public static EElement getByName(String name) {
        for (EElement element : ALL) {
            if (element.getName().equals(name)) {
                return element;
            }
        }
        return null;
    }

    /**
     * Returns the '<em><b>EElement</b></em>' literal with the specified integer
     * value, or <code>null</code> if there is no match.
     */
    public static EElement get(int value) {
        switch (value) {
            case LIGHTNING_VALUE: return LIGHTNING;
            case POISON_VALUE: return POISON;
            case FIRE_VALUE: return FIRE;
            case ICE_VALUE: return ICE;
            case NORMAL_VALUE: return NORMAL;
        }
        return null;
    }

    /** The integer value of this literal. */
    private final int value;

    /** The name of this literal. */
    private final String name;

    /** The string representation of this literal. */
    private final String literal;

    /** Only this class can construct instances. */
    private EElement(int value, String name, String literal) {
        this.value = value;
        this.name = name;
        this.literal = literal;
    }

    /** Returns the integer value of this literal. */
    public int getValue() {
        return value;
    }

    /** Returns the name of this literal. */
    public String getName() {
        return name;
    }

    /** Returns the literal string of this literal. */
    public String getLiteral() {
        return literal;
    }

    /** Returns the literal value of the enumerator, which is its string representation. */
    @Override
    public String toString() {
        return literal;
    }
} //EElement
| sarquah/Gelamen | TowerOfZaldagor.leveleditor/src/Leveleditor/EElement.java | Java | apache-2.0 | 6,502 |
package com.pwnscone.pewpew.actor;
import java.util.ArrayList;
import java.util.HashMap;
import com.pwnscone.pewpew.Game;
import com.pwnscone.pewpew.Particle;
import com.pwnscone.pewpew.Simulation;
import com.pwnscone.pewpew.Spring;
import com.pwnscone.pewpew.util.Pool;
/**
 * The player's central hub: a soft-body mesh that spins in place, pulls
 * itself toward the world origin, and exposes docking particles to which
 * module actors can be attached by umbilical springs.
 */
public class Hub extends Actor {
    /** Strength of the force pulling the hub's center particle toward the origin. */
    public static final float centering = .125f;
    /** Base angular force factor applied to all hub particles each update. */
    public static final float spin = .0025f;

    /** Modules currently docked to this hub. */
    public ArrayList<Actor> modules;
    /** Umbilical spring attaching each docked module. */
    public HashMap<Actor, Spring> umbilicalSpringMap;
    /** The same umbilical springs, in list form for iteration. */
    public ArrayList<Spring> umbilicalSpringList;
    /** Docking particle assigned to each docked module. */
    public HashMap<Actor, Particle> dockingParticleMap;
    /** Docking particles that are still free. */
    public ArrayList<Particle> dockingParticleList;
    public ArrayList<Particle> valenceParticleList;

    public Hub() {
        initFromMesh();
        modules = new ArrayList<Actor>();
        umbilicalSpringMap = new HashMap<Actor, Spring>();
        umbilicalSpringList = new ArrayList<Spring>();
        dockingParticleMap = new HashMap<Actor, Particle>();
        dockingParticleList = new ArrayList<Particle>();
        // Fix: this public field was previously left null, which would NPE
        // on first access.
        valenceParticleList = new ArrayList<Particle>();
    }

    @Override
    public void update() {
        Particle center = particles[42];
        Particle top = particles[11];
        Particle bottom = particles[0];

        // The hub's world position tracks its central particle.
        x = center.x;
        y = center.y;

        // Pull the center particle toward the world origin.
        center.ox += center.x * centering;
        center.oy += center.y * centering;
        center.oz += center.z * centering;

        // Forces scale with the number of docked modules. (Renamed from
        // `spin`, which shadowed the Hub.spin constant.)
        float spinForce = (1.0f + modules.size()) * .5f;

        // Push the top and bottom particles in opposite directions.
        top.ox -= .0011f * spinForce;
        top.oy -= .0011f * spinForce;
        top.oz -= .0058f * spinForce;
        bottom.ox += .0011f * spinForce;
        bottom.oy += .0011f * spinForce;
        bottom.oz += .0058f * spinForce;

        // Apply a tangential force to every particle so the hub rotates
        // about the z axis.
        spinForce *= 2 * 0.0375f * Hub.spin;
        for (int i = 0; i < particles.length; i++) {
            Particle p = particles[i];
            p.ox += p.y * spinForce;
            p.oy -= p.x * spinForce;
        }

        // Gradually reel each umbilical in until it reaches its minimum
        // squared rest length.
        for (int i = 0; i < umbilicalSpringList.size(); i++) {
            Spring s = umbilicalSpringList.get(i);
            if (s.length2 > .00025f) {
                s.length2 *= 0.99f;
            }
        }
    }

    /** (Re)initializes the hub's mesh, docking slots, and health. */
    public void create() {
        loadMesh();
        for (int i = 0; i < particles.length; i++) {
            particles[i].dampening = 0.95f;
        }
        umbilicalSpringMap.clear();
        umbilicalSpringList.clear();
        // Fix: also reset module/docking state so a pooled, reused Hub does
        // not accumulate stale modules or duplicate docking slots. (The
        // original cleared only the umbilical collections.)
        modules.clear();
        dockingParticleMap.clear();
        dockingParticleList.clear();
        dockingParticleList.add(particles[22]);
        dockingParticleList.add(particles[31]);
        dockingParticleList.add(particles[26]);
        dockingParticleList.add(particles[24]);
        dockingParticleList.add(particles[23]);
        dockingParticleList.add(particles[28]);
        dockingParticleList.add(particles[27]);
        dockingParticleList.add(particles[25]);
        dockingParticleList.add(particles[30]);
        dockingParticleList.add(particles[29]);
        health = 5000;
    }

    /**
     * Spawns a module of the given Actor subclass and docks it to the next
     * free docking particle, attached by an umbilical spring. No-op when
     * every docking slot is taken.
     */
    public void spawn(Class clazz) {
        if (dockingParticleList.size() > 0) {
            Simulation sim = Game.get().getSimulation();
            Pool<Actor> pool = sim.mActorMap.get(clazz);
            Actor actor = pool.add();
            actor.create();
            modules.add(actor);
            Particle docPart = dockingParticleList.remove(0);
            dockingParticleMap.put(actor, docPart);

            // Find the module particle farthest from the docking point so
            // the module hangs away from the hub. (The original named this
            // "closest", but the comparison selects the maximum distance.)
            Particle farthest = actor.particles[0];
            float bestDist = -Float.MAX_VALUE;
            for (int i = 0; i < actor.particles.length; i++) {
                Particle p = actor.particles[i];
                float dx = p.x - docPart.x;
                float dy = p.y - docPart.y;
                float dist2 = dx * dx + dy * dy;
                if (dist2 > bestDist) {
                    farthest = p;
                    bestDist = dist2;
                }
            }
            actor.setTransform(docPart.x - farthest.x, docPart.y - farthest.y, 0);
            actor.setVelocity(0.0f, 0.0f);
            Spring spring = sim.addSpring(docPart, farthest);
            umbilicalSpringMap.put(actor, spring);
            umbilicalSpringList.add(spring);
        }
    }

    /**
     * Detaches a docked module: removes its umbilical spring from the
     * simulation and returns its docking particle to the free list.
     */
    @Override
    public void forget(Actor actor) {
        if (modules.contains(actor)) {
            Simulation sim = Game.get().getSimulation();
            modules.remove(actor);
            Spring s = umbilicalSpringMap.remove(actor);
            umbilicalSpringList.remove(s);
            sim.removeSpring(s);
            dockingParticleList.add(dockingParticleMap.remove(actor));
        }
    }
}
| underclocker/pewpew | core/src/com/pwnscone/pewpew/actor/Hub.java | Java | apache-2.0 | 3,699 |
require 'envelope_resource'
module MetadataRegistry
  # Search service: builds an ActiveRecord relation over EnvelopeResource
  # from a hash of query parameters (full-text search, community, type,
  # date range, envelope identifiers, owner/publisher and bnode filters,
  # plus arbitrary resource-field matches).
  class Search
    attr_reader :params

    # Params:
    # - params: [Hash] hash containing the search params
    def initialize(params)
      # Pagination/presentation params are handled elsewhere.
      @params = params
        .with_indifferent_access
        .except(:metadata_only, :page, :per_page)
      # Sorting params are pulled out so they are not treated as filters.
      @sort_by = @params.delete(:sort_by)
      @sort_order = @params.delete(:sort_order)
    end

    # Builds and returns the relation with every applicable filter applied.
    # Each `search_*` filter runs only when its parameter accessor returns
    # a present value; the accessors also delete their params, so whatever
    # remains is matched as a resource field by search_resource_fields.
    def run
      @query = EnvelopeResource.select_scope(include_deleted).joins(:envelope)

      # match by each method if they have valid entries
      query_methods.each { |method| send(:"search_#{method}") if send(method) }
      search_prepared_queries if community
      search_resource_fields
      sort_results

      @query.includes(envelope: %i[organization publishing_organization])
    end

    # filter methods
    def query_methods
      %i[
        fts community type resource_type date_range envelope_ceterms_ctid
        envelope_id envelope_ctdl_type owned_by published_by with_bnodes
      ]
    end

    # Columns accepted as `sort_by` values.
    def sort_columns
      %w[created_at updated_at]
    end

    # Whether soft-deleted resources should be included in the scope.
    def include_deleted
      @include_deleted ||= params.delete(:include_deleted)
    end

    # full-text-search param
    def fts
      @fts ||= params.delete(:fts)
    end

    # Get the community either from the `envelope_community` url param or
    # from the `community` query param
    def community
      @community ||= begin
        comm = params.delete(:envelope_community) || params.delete(:community)
        comm.present? ? comm.underscore : nil
      end
    end

    # Envelope type param (see EnvelopeResource.envelope_types).
    def type
      @type ||= params.delete(:type)
    end

    # get the resource_type from the config.
    def resource_type
      @resource_type ||= begin
        rtype = params.delete(:resource_type)
        rtype.present? ? rtype.singularize : nil
      end
    end

    # get date_range hash. Accepts dates in natural-language description,
    # i.e: from: 'yesterday', until: '3 hours ago', are accepted values.
    def date_range
      @date_range ||= begin
        range = {
          from: Chronic.parse(params.delete(:from)),
          until: Chronic.parse(params.delete(:until))
        }.compact
        range.blank? ? nil : range
      end
    end

    # Comma-separated list of envelope CTIDs to filter by.
    def envelope_ceterms_ctid
      @envelope_ceterms_ctid ||= extract_param(:envelope_ceterms_ctid)
    end

    # Comma-separated list of envelope ids to filter by.
    def envelope_id
      @envelope_id ||= extract_param(:envelope_id)
    end

    # Comma-separated list of CTDL types to filter by.
    def envelope_ctdl_type
      @envelope_ctdl_type ||= extract_param(:envelope_ctdl_type)
    end

    # Comma-separated list of owning-organization CTIDs.
    def owned_by
      @owned_by ||= extract_param(:owned_by)
    end

    # Comma-separated list of publishing-organization CTIDs.
    def published_by
      @published_by ||= extract_param(:published_by)
    end

    # Blank-node handling mode: 'true' (include), 'only', or 'false'
    # (default: exclude).
    def with_bnodes
      @with_bnodes ||= extract_param(:with_bnodes)&.first || 'false'
    end

    # Search using the Searchable#search model method
    def search_fts
      @query = @query.search(fts)
    end

    # Restricts the scope to the resolved envelope community.
    def search_community
      @query = @query.in_community(community)
    end

    # Filters by envelope type (enum value).
    def search_type
      @query = @query.where(envelope_type: EnvelopeResource.envelope_types[type])
    end

    # Filters by the configured resource type.
    def search_resource_type
      @query = @query.where(resource_type: resource_type)
    end

    # Applies the `from`/`until` bounds against the envelope update time.
    def search_date_range
      from = date_range[:from]
      till = date_range[:until]
      @query = @query.where('envelopes.updated_at >= ?', from) if from
      @query = @query.where('envelopes.updated_at <= ?', till) if till
    end

    # Applies community-defined SQL snippets ("prepared queries"), with the
    # user-supplied term substituted for the `$term` placeholder.
    def search_prepared_queries
      prepared_queries = config.try(:[], 'prepared_queries')
      prepared_queries&.each do |key, query_tpl|
        term = params.delete(key)
        next if term.blank?

        @query = @query.where(query_tpl.gsub('$term', '%s'), term)
      end
    end

    # Build a jsonb query for all the remaining params.
    # The keys can be aliased, on this case we lookup the `aliases` config
    # The values can be any json piece to search using the 'contained' query
    def search_resource_fields
      params.each do |key, val|
        prop = config.dig('aliases', key) || key
        json = { prop => parsed_value(val) }.to_json
        q = 'envelope_resources.processed_resource @> ?'
        @query = @query.where(q, json)
      end
    end

    def search_envelope_ceterms_ctid
      @query = @query
               .where(envelopes: { envelope_ceterms_ctid: envelope_ceterms_ctid })
    end

    def search_envelope_id
      @query = @query
               .where(envelopes: { envelope_id: envelope_id })
    end

    def search_envelope_ctdl_type
      @query = @query
               .where(envelopes: { envelope_ctdl_type: envelope_ctdl_type })
    end

    # Filters to envelopes owned by any of the given organization CTIDs.
    def search_owned_by
      envelope_ids = Envelope
                     .joins(:organization)
                     .where(organizations: { _ctid: owned_by })
                     .select(:id)

      @query = @query.where(envelopes: { id: envelope_ids })
    end

    # Filters to envelopes published by any of the given organization CTIDs.
    def search_published_by
      envelope_ids = Envelope
                     .joins(:publishing_organization)
                     .where(organizations: { _ctid: published_by })
                     .select(:id)

      @query = @query.where(envelopes: { id: envelope_ids })
    end

    # Includes, excludes, or restricts to blank-node resources, whose
    # resource ids carry the '_:' prefix.
    def search_with_bnodes
      @query =
        case with_bnodes
        when 'only' then @query.where("resource_id LIKE '_:%'")
        when 'true' then @query
        else @query.where("resource_id NOT LIKE '_:%'")
        end
    end

    # Orders the relation, falling back to updated_at DESC for
    # unrecognized sort params.
    def sort_results
      sort_by = sort_columns.include?(@sort_by) ? @sort_by : 'updated_at'
      sort_order = %w[asc desc].include?(@sort_order) ? @sort_order : 'desc'
      @query.order!(sort_by => sort_order)
    end

    # The community's configuration hash, if the community exists.
    def config
      @config ||= EnvelopeCommunity.find_by(name: community)&.config
    end

    # Parses the value as JSON when possible; otherwise returns it verbatim.
    def parsed_value(val)
      JSON.parse(val)
    rescue JSON::ParserError
      val
    end

    # Splits a comma-separated param into an array, or nil when absent/empty.
    def extract_param(key)
      (params.delete(key) || '').split(',').presence
    end
  end
end
| CredentialEngine/CredentialRegistry | app/services/search.rb | Ruby | apache-2.0 | 5,821 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2016.10.09 at 10:10:23 AM CST
//
package elong;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlEnumValue;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for EnumDrrRuleCode.
*
* <p>The following schema fragment specifies the expected content contained within this class.
* <p>
* <pre>
* <simpleType name="EnumDrrRuleCode">
* <restriction base="{http://www.w3.org/2001/XMLSchema}string">
* <enumeration value="None"/>
* <enumeration value="DRRBookAhead"/>
* <enumeration value="DRRStayPerRoomPerNight"/>
* <enumeration value="DRRStayLastNight"/>
* <enumeration value="DRRStayTheNightAndAfter"/>
* <enumeration value="DRRStayPerLastNight"/>
* <enumeration value="DRRStayWeekDay"/>
* <enumeration value="DRRCheckInWeekDay"/>
* </restriction>
* </simpleType>
* </pre>
*
*/
@XmlType(name = "EnumDrrRuleCode")
@XmlEnum
public enum EnumDrrRuleCode {

    @XmlEnumValue("None")
    None("None"),
    @XmlEnumValue("DRRBookAhead")
    DRRBookAhead("DRRBookAhead"),
    @XmlEnumValue("DRRStayPerRoomPerNight")
    DRRStayPerRoomPerNight("DRRStayPerRoomPerNight"),
    @XmlEnumValue("DRRStayLastNight")
    DRRStayLastNight("DRRStayLastNight"),
    @XmlEnumValue("DRRStayTheNightAndAfter")
    DRRStayTheNightAndAfter("DRRStayTheNightAndAfter"),
    @XmlEnumValue("DRRStayPerLastNight")
    DRRStayPerLastNight("DRRStayPerLastNight"),
    @XmlEnumValue("DRRStayWeekDay")
    DRRStayWeekDay("DRRStayWeekDay"),
    @XmlEnumValue("DRRCheckInWeekDay")
    DRRCheckInWeekDay("DRRCheckInWeekDay");

    /** The XML wire value bound to this constant. */
    private final String value;

    EnumDrrRuleCode(String v) {
        value = v;
    }

    /** Returns the XML wire value for this constant. */
    public String value() {
        return value;
    }

    /**
     * Looks up the constant whose wire value equals {@code v}.
     *
     * @throws IllegalArgumentException if no constant matches
     */
    public static EnumDrrRuleCode fromValue(String v) {
        for (EnumDrrRuleCode candidate : values()) {
            if (candidate.value.equals(v)) {
                return candidate;
            }
        }
        throw new IllegalArgumentException(v);
    }
}
| leonmaybe/eLong-OpenAPI-JAVA-demo | src/elong/EnumDrrRuleCode.java | Java | apache-2.0 | 2,346 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_162) on Sat Feb 02 18:57:42 CET 2019 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>com.communote.server.web.commons.imagecache.controller (Communote 3.5 API)</title>
<meta name="date" content="2019-02-02">
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="com.communote.server.web.commons.imagecache.controller (Communote 3.5 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../../com/communote/server/web/commons/i18n/package-summary.html">Prev Package</a></li>
<li><a href="../../../../../../../com/communote/server/web/commons/resolver/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?com/communote/server/web/commons/imagecache/controller/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 title="Package" class="title">Package com.communote.server.web.commons.imagecache.controller</h1>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<table class="typeSummary" border="0" cellpadding="3" cellspacing="0" summary="Class Summary table, listing classes, and an explanation">
<caption><span>Class Summary</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Class</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="../../../../../../../com/communote/server/web/commons/imagecache/controller/ImageDownloadController.html" title="class in com.communote.server.web.commons.imagecache.controller">ImageDownloadController</a></td>
<td class="colLast">
<div class="block">Controller for supporting image download.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li class="navBarCell1Rev">Package</li>
<li>Class</li>
<li><a href="package-use.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../../../com/communote/server/web/commons/i18n/package-summary.html">Prev Package</a></li>
<li><a href="../../../../../../../com/communote/server/web/commons/resolver/package-summary.html">Next Package</a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?com/communote/server/web/commons/imagecache/controller/package-summary.html" target="_top">Frames</a></li>
<li><a href="package-summary.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2019 <a href="https://communote.github.io/">Communote team</a>. All rights reserved.</small></p>
</body>
</html>
| Communote/communote.github.io | generated/javadoc/com/communote/server/web/commons/imagecache/controller/package-summary.html | HTML | apache-2.0 | 5,637 |
/*
* Copyright 2015 Odnoklassniki Ltd, Mail.Ru Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package one.nio.mem;
import one.nio.util.Hash;
import java.io.IOException;
/**
 * A SharedMemoryMap specialization keyed by String.
 *
 * Key layout: each entry stores its key as raw UTF-16 chars immediately
 * after the fixed entry header. The 64-bit hash packs the key length into
 * the high bits: bits 33..63 hold key.length() and bits 0..31 hold the
 * murmur3 hash of the key (bit 32 is always zero), so equal hashes also
 * imply equal key lengths.
 */
public class SharedMemoryStringMap<V> extends SharedMemoryMap<String, V> {

    public SharedMemoryStringMap(int capacity, String fileName, long fileSize) throws IOException {
        super(capacity, fileName, fileSize);
    }

    public SharedMemoryStringMap(int capacity, String fileName, long fileSize, long expirationTime) throws IOException {
        super(capacity, fileName, fileSize, expirationTime);
    }

    /** Reconstructs the String key stored inline at the given entry address. */
    @Override
    protected String keyAt(long entry) {
        // hash >>> 33 recovers the key length in chars (see hashCode below).
        int keyLength = (int) (unsafe.getLong(entry + HASH_OFFSET) >>> 33);
        long keyOffset = entry + HEADER_SIZE;
        char[] key = new char[keyLength];
        for (int i = 0; i < keyLength; i++, keyOffset += 2) {
            key[i] = unsafe.getChar(keyOffset);
        }
        return new String(key);
    }

    /** Packs the key length (bits 33..63) with the murmur3 hash (bits 0..31). */
    @Override
    protected long hashCode(String key) {
        int stringHashCode = Hash.murmur3(key);
        return (long) key.length() << 33 | (stringHashCode & 0xffffffffL);
    }

    /**
     * Compares the key stored at the entry with the given key, char by char.
     * NOTE(review): lengths are not compared here; this assumes the caller
     * invokes equalsAt only after matching the full 64-bit hash, which
     * encodes the key length -- confirm against the base class.
     */
    @Override
    protected boolean equalsAt(long entry, String key) {
        int keyLength = key.length();
        long keyOffset = entry + HEADER_SIZE;
        for (int i = 0; i < keyLength; i++, keyOffset += 2) {
            if (unsafe.getChar(keyOffset) != key.charAt(i)) {
                return false;
            }
        }
        return true;
    }

    /** Allocates an entry sized for header + UTF-16 key + value, copying the key in. */
    @Override
    protected long allocateEntry(String key, long hashCode, int size) {
        int keyLength = key.length();
        long entry = allocator.segmentFor(hashCode).malloc(HEADER_SIZE + keyLength * 2 + size);
        long keyOffset = entry + HEADER_SIZE;
        for (int i = 0; i < keyLength; i++, keyOffset += 2) {
            unsafe.putChar(keyOffset, key.charAt(i));
        }
        return entry;
    }

    /** Header size including the inline key: hash >>> 32 equals length * 2, i.e. the key size in bytes. */
    @Override
    protected int headerSize(long entry) {
        int keySizeInBytes = (int) (unsafe.getLong(entry + HASH_OFFSET) >>> 32);
        return HEADER_SIZE + keySizeInBytes;
    }
}
| odnoklassniki/one-nio | src/one/nio/mem/SharedMemoryStringMap.java | Java | apache-2.0 | 2,649 |
// CString class.
// Mostly mirrors server/dtypes/cstring/cstring.go.
// By design, satisfies the require('eddie').Model interface.
var _ = require('lodash');
var inherits = require('inherits');
var cvalue = require('./cvalue');
var lib = require('../lib');
////////////////////////////////////////////////////////////
// Events
// Event fired when a span of text is replaced.
// isLocal: whether the mutation originated from this client.
// pos/len: the character range [pos, pos+len) that was removed.
// value: the string inserted in its place.
inherits(ReplaceText, cvalue.Event);
function ReplaceText(isLocal, pos, len, value) {
  cvalue.Event.call(this, isLocal);
  this.pos = pos;
  this.len = len;
  this.value = value;
}

// Event fired when the selection range changes to [start, end).
inherits(SetSelectionRange, cvalue.Event);
function SetSelectionRange(isLocal, start, end) {
  cvalue.Event.call(this, isLocal);
  this.start = start;
  this.end = end;
}
////////////////////////////////////////////////////////////
// CString
// A single position-identifier component: a position value plus the id of
// the agent (client) that generated it.
function Id(pos, agentId) {
  this.pos = pos;
  this.agentId = agentId;
}

// Pid (position identifier) for one atom: a path of Ids plus a sequence
// number used as a final tiebreaker.
function Pid(ids, seq) {
  this.ids = ids;
  this.seq = seq;
}

// Returns true iff this pid orders strictly before `other`.
// Id paths compare lexicographically by (pos, agentId); a proper prefix
// orders before its extension. Identical paths fall back to seq.
Pid.prototype.less = function(other) {
  for (var i = 0; i < this.ids.length; i++) {
    if (i === other.ids.length) {
      // `other` is a proper prefix of this pid, so this pid is greater.
      return false;
    }
    var v = this.ids[i], vo = other.ids[i];
    if (v.pos !== vo.pos) {
      return v.pos < vo.pos;
    } else if (v.agentId !== vo.agentId) {
      return v.agentId < vo.agentId;
    }
  }
  if (this.ids.length === other.ids.length) {
    return this.seq < other.seq;
  }
  // This pid is a proper prefix of `other`, so it orders first.
  return true;
};

// Serializes to the wire format "pos.agentId:pos.agentId~seq".
Pid.prototype.encode = function() {
  return _.map(this.ids, function(id) {
    return [id.pos, id.agentId].join('.');
  }).join(':') + '~' + this.seq;
};

// Parses the wire format produced by Pid.prototype.encode; throws on
// malformed input (including the empty string).
function decodePid(s) {
  var idsAndSeq = s.split('~');
  if (idsAndSeq.length !== 2 ) {
    throw new Error('invalid pid: ' + s);
  }
  var seq = lib.atoi(idsAndSeq[1]);
  var ids = _.map(idsAndSeq[0].split(':'), function(idStr) {
    var parts = idStr.split('.');
    if (parts.length !== 2) {
      throw new Error('invalid id: ' + idStr);
    }
    return new Id(lib.atoi(parts[0]), lib.atoi(parts[1]));
  });
  return new Pid(ids, seq);
}
// Base class for operations carried in a patch.
function Op() {}

// Serializes the op to its wire format; implemented by subclasses.
Op.prototype.encode = function() {
  throw new Error('not implemented');
};

// A client-proposed insertion of `value` between the atoms identified by
// prevPid and nextPid; either pid may be '' at a string boundary (see
// CString.prototype.replaceText).
inherits(ClientInsert, Op);
function ClientInsert(prevPid, nextPid, value) {
  Op.call(this);
  this.prevPid = prevPid;
  this.nextPid = nextPid;
  this.value = value;
}

// Wire format: "ci,<prevPid>,<nextPid>,<value>".
ClientInsert.prototype.encode = function() {
  var prevPid = this.prevPid ? this.prevPid.encode() : '';
  var nextPid = this.nextPid ? this.nextPid.encode() : '';
  return ['ci', prevPid, nextPid, this.value].join(',');
};

// An insertion of `value` at the concrete position `pid`.
inherits(Insert, Op);
function Insert(pid, value) {
  Op.call(this);
  this.pid = pid;
  this.value = value;
}

// Wire format: "i,<pid>,<value>".
Insert.prototype.encode = function() {
  return ['i', this.pid.encode(), this.value].join(',');
};

// A deletion of the atom at position `pid`.
inherits(Delete, Op);
function Delete(pid) {
  Op.call(this);
  this.pid = pid;
}

// Wire format: "d,<pid>".
Delete.prototype.encode = function() {
  return ['d', this.pid.encode()].join(',');
};

function newParseError(s) {
  return new Error('failed to parse op: ' + s);
}

// Parses a single encoded op, dispatching on the tag before the first comma.
// NOTE(review): the 'ci' branch calls decodePid on the prev/next pid fields,
// and decodePid throws on the empty string — so a boundary ClientInsert
// ('' pid) cannot round-trip through decodeOp. Presumably this client only
// encodes (never decodes) 'ci' ops; confirm against the server protocol.
function decodeOp(s) {
  var parts;
  var t = s.split(',', 1)[0];
  switch (t) {
  case 'ci':
    parts = lib.splitN(s, ',', 4);
    if (parts.length < 4) {
      throw newParseError(s);
    }
    return new ClientInsert(decodePid(parts[1]), decodePid(parts[2]), parts[3]);
  case 'i':
    parts = lib.splitN(s, ',', 3);
    if (parts.length < 3) {
      throw newParseError(s);
    }
    return new Insert(decodePid(parts[1]), parts[2]);
  case 'd':
    parts = lib.splitN(s, ',', 2);
    if (parts.length < 2) {
      throw newParseError(s);
    }
    return new Delete(decodePid(parts[1]));
  default:
    throw new Error('unknown op type: ' + t);
  }
}

// A patch is serialized as a JSON array of encoded op strings.
function encodePatch(ops) {
  var strs = new Array(ops.length);
  for (var i = 0; i < ops.length; i++) {
    strs[i] = ops[i].encode();
  }
  return JSON.stringify(strs);
}

// Inverse of encodePatch.
function decodePatch(s) {
  var strs = JSON.parse(s);
  var ops = new Array(strs.length);
  for (var i = 0; i < strs.length; i++) {
    ops[i] = decodeOp(strs[i]);
  }
  return ops;
}
// One element of the sequence: a position identifier plus its value.
// (value appears to hold a single character — TODO confirm.)
function Atom(pid, value) {
  this.pid = pid;
  this.value = value;
}

// CString: a collaboratively-edited string modeled as an ordered list of
// atoms. Mirrors server/dtypes/cstring/cstring.go.
inherits(CString, cvalue.CValue);
function CString(atoms) {
  cvalue.CValue.call(this);
  // Atoms are assumed sorted by pid (search_ binary-searches them —
  // confirm callers supply them sorted); text_ caches their joined values.
  this.atoms_ = atoms;
  this.text_ = _.map(atoms, 'value').join('');
  // Local selection, a half-open interval [selStart_, selEnd_).
  this.selStart_ = 0;
  this.selEnd_ = 0;
}

// Implements CValue.dtype.
CString.prototype.dtype = function() {
  return cvalue.dtypeCString;
};

// Decodes the given string into a CString. Expects a JSON array of
// {Pid, Value} objects.
function decode(s) {
  var atoms = JSON.parse(s);
  return new CString(_.map(atoms, function(atom) {
    return new Atom(decodePid(atom.Pid), atom.Value);
  }));
}
// Implements CValue.applyPatch: decodes the patch and applies each op to
// atoms_, batching the resulting mutations of text_.
CString.prototype.applyPatch = function(isLocal, patch) {
  var that = this;
  if (isLocal) {
    // The echo of our own patch ends the pause started by replaceText.
    this.paused_ = false;
  }
  // Consecutive single-char insertions and deletions are common, and applying
  // lots of point mutations to this.text_ is expensive (e.g. applying 400 point
  // deletions takes hundreds of milliseconds), so we compact such ops when
  // updating this.text_.
  // TODO: Use a rope data structure, e.g. the jumprope npm package.
  // pos === -1 means "no pending compacted mutation".
  var pos = -1, len = 0, value = '';
  function applyReplaceText() {
    if (pos !== -1) {
      that.applyReplaceText_(isLocal, pos, len, value);
    }
  }
  var ops = decodePatch(patch);
  for (var i = 0; i < ops.length; i++) {
    var op = ops[i];
    switch(op.constructor.name) {
    case 'Insert':
      var insertPos = this.search_(op.pid);
      this.atoms_.splice(insertPos, 0, new Atom(op.pid, op.value));
      if (insertPos === pos + value.length) {
        // Extends the pending insertion; keep compacting.
        value += op.value;
      } else {
        applyReplaceText();
        pos = insertPos;
        len = 0;
        value = op.value;
      }
      break;
    case 'Delete':
      var deletePos = this.search_(op.pid);
      this.atoms_.splice(deletePos, 1);
      if (deletePos === pos) {
        // Another deletion at the same position; keep compacting.
        len++;
      } else {
        applyReplaceText();
        pos = deletePos;
        len = 1;
        value = '';
      }
      break;
    default:
      throw new Error(op.constructor.name);
    }
  }
  // Flush the final pending mutation.
  applyReplaceText();
};
// Returns the current text as a string.
CString.prototype.getText = function() {
  return this.text_;
};

// Returns the selection range as a two-element array representing the
// half-closed interval [start, end).
// Fix: dropped the unused `value` parameter from the original signature;
// the body never read it, so existing callers are unaffected.
CString.prototype.getSelectionRange = function() {
  return [this.selStart_, this.selEnd_];
};
// Replaces text.substr(pos, len) with the given value and updates the selection
// range accordingly. Assumes line breaks have been canonicalized to \n.
// Emits a 'patch' event carrying one Delete per removed atom plus a single
// ClientInsert; further local edits are blocked (paused_) until the server
// echoes the patch back through applyPatch.
CString.prototype.replaceText = function(pos, len, value) {
  if (this.paused_) {
    throw new Error('paused');
  }
  if (len === 0 && value.length === 0) {
    // Nothing removed, nothing inserted: no-op.
    return;
  }
  this.paused_ = true;
  // One Delete op per removed atom.
  var ops = new Array(len);
  for (var i = 0; i < len; i++) {
    ops[i] = new Delete(this.atoms_[pos + i].pid);
  }
  if (value) {
    // Anchor the insertion between its neighboring atoms; '' marks a
    // string boundary (start or end).
    var prevPid = pos === 0 ? '' : this.atoms_[pos - 1].pid;
    var nextPid = '';
    if (pos + len < this.atoms_.length) {
      nextPid = this.atoms_[pos + len].pid;
    }
    ops.push(new ClientInsert(prevPid, nextPid, value));
  }
  this.emit('patch', encodePatch(ops));
};

// Updates the selection range to the half-closed interval [start, end).
CString.prototype.setSelectionRange = function(start, end) {
  if (this.paused_) {
    throw new Error('paused');
  }
  if (this.selStart_ === start && this.selEnd_ === end) {
    return;
  }
  // TODO: Set this.paused_ and notify server. For now, we simply update local
  // state and emit a 'setSelectionRange' event.
  this.selStart_ = start;
  this.selEnd_ = end;
  this.emit('setSelectionRange', new SetSelectionRange(true, start, end));
};
// Returns the index in atoms_ for the given pid — the insertion point that
// keeps atoms_ sorted. (Assumes lib.search returns the first index in
// [0, n) for which the predicate holds — confirm against lib.)
CString.prototype.search_ = function(pid) {
  var that = this;
  return lib.search(this.atoms_.length, function(i) {
    return !that.atoms_[i].pid.less(pid);
  });
};

// Applies one compacted text mutation to text_, adjusts the selection
// range, and emits a 'replaceText' event.
// Note: A single call to CString.replaceText can result in multiple calls to
// CString.applyReplaceText_.
CString.prototype.applyReplaceText_ = function(isLocal, pos, len, value) {
  if (len === 0 && value.length === 0) {
    return;
  }
  var t = this.text_;
  if (pos < 0 || pos + len > t.length) {
    throw new Error('out of bounds');
  }
  this.text_ = t.substr(0, pos) + value + t.substr(pos + len);
  // Update selection range.
  if (isLocal) {
    // Local edits collapse the cursor to just after the inserted text.
    this.selStart_ = pos + value.length;
    this.selEnd_ = this.selStart_;
  } else {
    // Remote edits shift each selection endpoint past the replaced span.
    if (this.selStart_ >= pos) {
      this.selStart_ = Math.max(pos, this.selStart_ - len) + value.length;
    }
    if (this.selEnd_ >= pos) {
      this.selEnd_ = Math.max(pos, this.selEnd_ - len) + value.length;
    }
  }
  this.emit('replaceText', new ReplaceText(isLocal, pos, len, value));
};
////////////////////////////////////////////////////////////
// Exports
// Public API of this module: the CString type, the decode() helper (defined
// earlier in this file), and the event payload constructors used by CString's
// 'replaceText' and 'setSelectionRange' events.
module.exports = {
  CString: CString,
  decode: decode,
  ReplaceText: ReplaceText,
  SetSelectionRange: SetSelectionRange
};
| asadovsky/cdb | client/dtypes/cstring.js | JavaScript | apache-2.0 | 8,759 |
---
code: true
type: page
title: incr
---
# incr
Increments the number stored at `key` by 1. If the key does not exist, it is set to 0 before performing the operation.
[[_Redis documentation_]](https://redis.io/commands/incr)
## Arguments
```js
incr(key, [options]);
```
<br/>
| Arguments | Type | Description |
| --------- | ----------------- | ------------------------ |
| `key` | <pre>string</pre> | Key |
| `options` | <pre>object</pre> | Optional query arguments |
### options
The `options` argument can contain the following option properties:
| Property | Type (default) | Description |
| ---------- | ------------------------- | ---------------------------------------------------------------------------- |
| `queuable` | <pre>boolean (true)</pre> | If true, queues the request during downtime, until connected to Kuzzle again |
## Resolve
Resolves to the incremented key value.
## Usage
<<< ./snippets/incr.js
| kuzzleio/sdk-javascript | doc/7/controllers/ms/incr/index.md | Markdown | apache-2.0 | 1,064 |
package com.demo.amazing.activity;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import com.demo.amazing.R;
public class ConstraintActivity extends Activity {

    /** Convenience launcher: starts this activity from the given context. */
    public static void start(Context context) {
        context.startActivity(new Intent(context, ConstraintActivity.class));
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        //fullscreen();
        setContentView(R.layout.activity_constraint);
    }

    /**
     * Hides the status bar. Currently unused: the call in onCreate above is
     * commented out.
     */
    private void fullscreen() {
        // Hide the status bar.
        View decorView = getWindow().getDecorView();
        decorView.setSystemUiVisibility(View.SYSTEM_UI_FLAG_FULLSCREEN);
        // Remember that you should never show the action bar if the
        // status bar is hidden, so hide that too if necessary.
        // ActionBar actionBar = getActionBar();
        // actionBar.hide();
    }
}
| FreeSunny/Amazing | app/src/main/java/com/demo/amazing/activity/ConstraintActivity.java | Java | apache-2.0 | 1,111 |
#define PI_2 (2 * M_PI_F)
// buffer length defaults to the argument to the integrate kernel
// but if it's known at compile time, it can be provided which allows
// compiler to change i%n to i&(n-1) if n is a power of two.
#ifndef NH
#define NH nh
#endif
#ifndef WARP_SIZE
#define WARP_SIZE 32
#endif
#include <stdio.h> // for printf
#include <curand_kernel.h>
#include <curand.h>
// Wraps x into [0, 2*pi). Both the positive- and negative-input results are
// computed and blended via boolean masks (bool promotes to 0.0f/1.0f) so the
// select itself is branch-free; the fmodf calls still diverge, per the note
// below.
__device__ float wrap_2_pi_(float x)/*{{{*/
{
    bool neg_mask = x < 0.0f;
    bool pos_mask = !neg_mask;
    // fmodf diverges 51% of time
    float pos_val = fmodf(x, PI_2);
    float neg_val = PI_2 - fmodf(-x, PI_2);
    return neg_mask * neg_val + pos_mask * pos_val;
}/*}}}*/
// Wraps x back into [0, 2*pi] using pure mask arithmetic (no fmodf, no
// branches). NOTE(review): this only corrects by one period, so it assumes
// x lies within (-2*pi, 4*pi) — confirm callers never drift further per step.
__device__ float wrap_2_pi(float x) // not divergent/*{{{*/
{
    bool lt_0 = x < 0.0f;
    bool gt_2pi = x > PI_2;
    return (x + PI_2)*lt_0 + x*(!lt_0)*(!gt_2pi) + (x - PI_2)*gt_2pi;
}/*}}}*/
// Integrates the Kuramoto network model for n_step time steps, one simulation
// per work item (thread column). State is a circular delay buffer of NH steps.
// Fix 1: the coupling accumulator was declared as `sum` but accumulated and
//        read as `coupling_value` (undeclared) — a compile error.
// Fix 2: __syncthreads() was inside the i_node loop; threads with different
//        threadIdx.y execute different trip counts when n_node is not a
//        multiple of blockDim.y, and a barrier in divergent control flow is
//        undefined behavior (can deadlock). The barrier now sits after the
//        node loop, which matches its stated intent ("before going on to next
//        time step").
__global__ void integrate(/*{{{*/
    // config
    unsigned int i_step, unsigned int n_node, unsigned int nh, unsigned int n_step, unsigned int n_params,
    float dt, float speed,
    float * __restrict__ weights,
    float * __restrict__ lengths,
    float * __restrict__ params_pwi, // pwi: per work item
    // state
    float * __restrict__ state_pwi,
    // outputs
    float * __restrict__ tavg_pwi
    )
{/*}}}*/
    // work id & size/*{{{*/
    const unsigned int id = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    const unsigned int size = blockDim.x * gridDim.x * gridDim.y;/*}}}*/
    // ND array accessors (TODO autogen from py shape info)/*{{{*/
#define params(i_par) (params_pwi[(size * (i_par)) + id])
#define state(time, i_node) (state_pwi[((time) * n_node + (i_node))*size + id])
#define tavg(i_node) (tavg_pwi[((i_node) * size) + id])/*}}}*/
    // unpack params/*{{{*/
    //***// These are the two parameters which are usually explored in fitting in this model
    const float global_coupling = params(1);
    const float global_speed = params(0);/*}}}*/
    // derived/*{{{*/
    const float rec_n = 1.0f / n_node;
    //***// The speed affects the delay and speed_value is a parameter which is usually explored in fitting ***
    const float rec_speed_dt = 1.0f / global_speed / (dt);
    //***// This is a parameter specific to the Kuramoto model
    const float omega = 60.0 * 2.0 * M_PI_F / 1e3;
    //***// This is a parameter for the stochastic integration step, you can leave this constant for the moment
    const float sig = sqrt(dt) * sqrt(2.0 * 1e-5);/*}}}*/ //-->noise sigma value

    curandState s;
    curand_init(id * (blockDim.x * gridDim.x * gridDim.y), 0, 0, &s);

    //***// This is only initialization of the observable
    for (unsigned int i_node = 0; i_node < n_node; i_node++)
        tavg(i_node) = 0.0f;

    //***// This is the loop over time, should stay always the same
    for (unsigned int t = i_step; t < (i_step + n_step); t++)
    {
        //***// This is the loop over nodes, which also should stay the same
        for (unsigned int i_node = threadIdx.y; i_node < n_node; i_node+=blockDim.y)
        {
            //***// We here gather the current state of the node
            float theta_i = state(t % NH, i_node);
            //***// This variable is used to traverse the weights and lengths matrix, which is really just a vector. It is just a displacement.
            unsigned int i_n = i_node * n_node;
            // Accumulator for the coupling term (was incorrectly named `sum`
            // while the code below read `coupling_value`).
            float coupling_value = 0.0f;
            //***// For all nodes that are not the current node (i_node) sum the coupling
            for (unsigned int j_node = 0; j_node < n_node; j_node++)
            {
                //***// Get the weight of the coupling between node i and node j
                float wij = weights[i_n + j_node]; // nb. not coalesced
                if (wij == 0.0)
                    continue;
                //***// Get the delay between node i and node j
                unsigned int dij = lengths[i_n + j_node] * rec_speed_dt;
                //***// Get the state of node j which is delayed by dij
                float theta_j = state((t - dij + NH) % NH, j_node);
                //***// Sum it all together using the coupling function. This is a kuramoto coupling so: a * sin(pre_syn - post_syn)
                coupling_value += wij * sin(theta_j - theta_i);
            } // j_node
            //***// This is actually the integration step and the update in the state of the node
            theta_i += dt * (omega + global_coupling * rec_n * coupling_value);
            //***// We add some noise if noise is selected
            theta_i += sig * curand_normal2(&s).x;
            //***// Wrap it within the limits of the model (0-2pi)
            theta_i = wrap_2_pi_(theta_i);
            //***// Update the state
            state((t + 1) % NH, i_node) = theta_i;
            //***// Update the observable
            // NOTE(review): despite the name, this stores the last computed
            // sin(theta) rather than a temporal average — confirm intended.
            tavg(i_node) = sin(theta_i);
        } // for i_node
        // sync across warps executing nodes for a single sim, before going on
        // to the next time step (moved out of the divergent i_node loop).
        __syncthreads();
    } // for t
    // cleanup macros/*{{{*/
#undef params
#undef state
#undef tavg/*}}}*/
} // kernel integrate | the-virtual-brain/tvb-hpc | dsl/dsl_cuda/CUDAmodels/refs/kuramoto_network.c | C | apache-2.0 | 5,148 |
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.streamsets.pipeline.stage.destination.hdfs.writer;
import com.google.common.annotations.VisibleForTesting;
import com.streamsets.pipeline.api.Record;
import com.streamsets.pipeline.api.StageException;
import com.streamsets.pipeline.api.impl.Utils;
import com.streamsets.pipeline.stage.destination.hdfs.Errors;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
/**
 * Registry of the {@link RecordWriter}s that are currently open, keyed by their
 * target path. Writers are also queued in a {@link DelayQueue} so they can be
 * committed once their cut-off time expires (see {@link #purge()}).
 * <p/>
 * Lock ordering is always: 1. this ActiveRecordWriters, 2. the RecordWriter —
 * see the comment on {@link #release(RecordWriter, boolean)}.
 */
public class ActiveRecordWriters {
  private final static Logger LOG = LoggerFactory.getLogger(ActiveRecordWriters.class);
  // Cached once at class-load time; a log-level change at runtime is not picked up.
  private final static boolean IS_TRACE_ENABLED = LOG.isTraceEnabled();

  /** Adapts a RecordWriter to {@link Delayed} so a DelayQueue can expire it. */
  private static class DelayedRecordWriter implements Delayed {
    private final RecordWriter writer;

    public DelayedRecordWriter(RecordWriter writer) {
      this.writer = writer;
    }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(writer.getExpiresOn() - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    }

    @Override
    public int compareTo(Delayed o) {
      return Long.compare(writer.getExpiresOn(), ((DelayedRecordWriter) o).writer.getExpiresOn());
    }

    /** Kept for source compatibility; note this overload does NOT override Object.equals. */
    public boolean equals(Delayed o) {
      return compareTo(o) == 0;
    }

    // Fix: equals(Delayed) above is a covariant overload, not an override, so
    // Object.equals remained identity-based and inconsistent with compareTo.
    // Provide a proper override plus the matching hashCode required by the
    // equals/hashCode contract.
    @Override
    public boolean equals(Object o) {
      return o instanceof DelayedRecordWriter && compareTo((DelayedRecordWriter) o) == 0;
    }

    @Override
    public int hashCode() {
      return Long.hashCode(writer.getExpiresOn());
    }

    public RecordWriter getWriter() {
      return writer;
    }

    @Override
    public String toString() {
      return Utils.format("DelayedRecordWriter[path='{}' expiresInSecs='{}'", writer.getPath(),
          getDelay(TimeUnit.SECONDS));
    }
  }

  private final RecordWriterManager manager;

  @VisibleForTesting
  Map<String, RecordWriter> writers;
  private DelayQueue<DelayedRecordWriter> cutOffQueue;

  public ActiveRecordWriters(RecordWriterManager manager) {
    writers = new HashMap<>();
    cutOffQueue = new DelayQueue<>();
    this.manager = manager;
  }

  /** Delegates to the manager to commit files left over from a previous run. */
  public void commitOldFiles(FileSystem fs) throws IOException, StageException {
    manager.commitOldFiles(fs);
  }

  /** Commits every open writer whose cut-off time has expired. */
  public void purge() throws IOException, StageException {
    if (IS_TRACE_ENABLED) {
      LOG.trace("Purge");
    }
    DelayedRecordWriter delayedWriter = cutOffQueue.poll();
    while (delayedWriter != null) {
      if (!delayedWriter.getWriter().isClosed()) {
        if (IS_TRACE_ENABLED) {
          LOG.trace("Purging '{}'", delayedWriter.getWriter().getPath());
        }
        //We are fine no lock on writer needed.
        synchronized (this) {
          writers.remove(delayedWriter.getWriter().getPath().toString());
        }
        manager.commitWriter(delayedWriter.getWriter());
      }
      delayedWriter = cutOffQueue.poll();
    }
  }

  /**
   * Returns the writer for the record's target path, creating (and registering)
   * one if necessary. A writer that should roll is committed and replaced.
   * May return null if the manager does not produce a writer.
   */
  public RecordWriter get(Date now, Date recordDate, Record record) throws StageException, IOException {
    String path = manager.getPath(recordDate, record).toString();
    RecordWriter writer = null;
    //We are fine no lock on writer needed.
    synchronized (this) {
      writer = writers.get(path);
    }
    if(writer != null && manager.shouldRoll(writer, record)) {
      release(writer, true);
      writer = null;
    }
    if (writer == null) {
      writer = manager.getWriter(now, recordDate, record);
      if (writer != null) {
        if (IS_TRACE_ENABLED) {
          LOG.trace("Got '{}'", writer.getPath());
        }
        writer.setActiveRecordWriters(this);
        //We are fine no lock on writer needed.
        synchronized(this) {
          writers.put(path, writer);
        }
        cutOffQueue.add(new DelayedRecordWriter(writer));
      }
    }
    return writer;
  }

  public RecordWriterManager getWriterManager() {
    return manager;
  }

  @VisibleForTesting
  public int getActiveWritersCount() {
    return cutOffQueue.size();
  }

  //The whole function is synchronized because
  //the locks always have to taken in the following order
  //1. ActiveRecordWriters and 2. RecordWriter (if we need both of them)
  //or else we will get into a deadlock
  //For Ex: idle close thread calls this method
  //and the hdfsTarget (in the pipeline runnable thread), calls flushAll
  public synchronized void release(RecordWriter writer, boolean roll) throws StageException, IOException {
    writer.closeLock();
    try {
      if (roll || writer.isIdleClosed() || manager.isOverThresholds(writer)) {
        if (IS_TRACE_ENABLED) {
          LOG.trace("Release '{}'", writer.getPath());
        }
        writers.remove(writer.getPath().toString());
        manager.commitWriter(writer);
      }
    } finally {
      writer.closeUnlock();
    }
    purge();
  }

  /** Flushes every open writer; fails fast on the first writer that cannot flush. */
  public synchronized void flushAll() throws StageException {
    if (IS_TRACE_ENABLED) {
      LOG.trace("Flush all '{}'", toString());
    }
    for (RecordWriter writer : writers.values()) {
      if (!writer.isClosed()) {
        try {
          writer.flush();
        } catch (IOException ex) {
          String msg = Utils.format("Flush failed on file : '{}'", writer.getPath().toString());
          LOG.error(msg);
          throw new StageException(Errors.HADOOPFS_58, writer.getPath().toString(), ex);
        }
      }
    }
  }

  /**
   * Commits and closes every open writer (close failures are logged, not thrown)
   * and renders this instance unusable by nulling its collections.
   */
  public synchronized void closeAll() throws StageException{
    if (IS_TRACE_ENABLED) {
      LOG.trace("Close all '{}'", toString());
    }
    if(writers != null) {
      for (RecordWriter writer : writers.values()) {
        writer.closeLock();
        try {
          if (!writer.isClosed()) {
            manager.commitWriter(writer);
          }
        } catch (IOException ex) {
          String msg = Utils.format("Error closing writer {} : {}", writer, ex);
          LOG.warn(msg, ex);
        } finally {
          writer.closeUnlock();
        }
      }
    }
    writers = null;
    cutOffQueue = null;
  }
}
| z123/datacollector | hdfs-protolib/src/main/java/com/streamsets/pipeline/stage/destination/hdfs/writer/ActiveRecordWriters.java | Java | apache-2.0 | 6,511 |
/**
* Copyright 2014 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.linkedin.proxy.pool;
import java.sql.DriverManager;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.log4j.Logger;
import com.linkedin.proxy.conn.MyConnection;
import com.linkedin.proxy.conn.MysqlConnection;
/**
 * A fixed-size, blocking pool of MySQL connections. {@link #getConnection}
 * blocks until a connection is available; closed connections handed back by
 * the pool are transparently re-opened.
 */
public class BlockingMysqlConnectionPool implements ConnectionPool
{
  public static final String FLAG_PROXY_MYSQL_HOST = "mysql.host";
  public static final String FLAG_PROXY_MYSQL_PORT = "mysql.port";
  public static final String FLAG_PROXY_MYSQL_USERNAME = "mysql.userName";
  public static final String FLAG_PROXY_MYSQL_USERPASS = "mysql.userPass";
  public static final String FLAG_PROXY_MYSQL_CONN_POOL = "mysql.connPool";

  private static final Logger m_log = Logger.getLogger(BlockingMysqlConnectionPool.class);

  protected String m_connStr;
  protected BlockingQueue<MyConnection> m_que;

  /**
   * Reads configuration and pre-creates the whole pool.
   *
   * @return false (after logging) if any required property is missing or the
   *         initial connections cannot be established.
   */
  @Override
  public boolean init(Properties prop) throws Exception
  {
    String hostName = requireProperty(prop, FLAG_PROXY_MYSQL_HOST, "Mysql host name", true);
    if (hostName == null)
    {
      return false;
    }
    String portValue = requireProperty(prop, FLAG_PROXY_MYSQL_PORT, "Mysql host port", true);
    if (portValue == null)
    {
      return false;
    }
    int hostPort = Integer.parseInt(portValue);
    String userName = requireProperty(prop, FLAG_PROXY_MYSQL_USERNAME, "Mysql user name", true);
    if (userName == null)
    {
      return false;
    }
    // Security fix: the password value is never logged (logValue=false).
    String userPass = requireProperty(prop, FLAG_PROXY_MYSQL_USERPASS, "Mysql user pass", false);
    if (userPass == null)
    {
      return false;
    }
    String poolValue = requireProperty(prop, FLAG_PROXY_MYSQL_CONN_POOL, "Connection pool size", true);
    if (poolValue == null)
    {
      return false;
    }
    int connPool = Integer.parseInt(poolValue);

    m_connStr = "jdbc:mysql://" + hostName + ":" + hostPort + "/?useUnicode=true&characterEncoding=utf-8" + "&user=" + userName + "&password=" + userPass;
    m_que = new LinkedBlockingQueue<MyConnection>(connPool);
    try
    {
      // Eagerly open every connection so capacity problems surface at startup.
      for (int a = 0; a < connPool; a++)
      {
        MyConnection conn = new MysqlConnection(DriverManager.getConnection(m_connStr), "");
        m_que.add(conn);
      }
    }
    catch (Exception e)
    {
      m_log.fatal("Cannot create connection to Mysql Database", e);
      return false;
    }
    return true;
  }

  /**
   * Fetches a required property, logging an error when it is missing and a
   * debug line when present. The value itself is only logged when
   * {@code logValue} is true (set it to false for secrets).
   *
   * @return the property value, or null if missing.
   */
  private String requireProperty(Properties prop, String key, String desc, boolean logValue)
  {
    String value = prop.getProperty(key);
    if (value == null)
    {
      m_log.error(desc + " is missing");
      return null;
    }
    m_log.debug(desc + ": " + (logValue ? value : "***"));
    return value;
  }

  /**
   * Blocks until a pooled connection is available; a connection found closed
   * is replaced with a freshly opened one. {@code dbName} is currently unused.
   */
  public MyConnection getConnection(String dbName) throws Exception
  {
    MyConnection conn = m_que.take();
    if (conn.isClosed())
    {
      m_log.debug(Thread.currentThread().getName() + ": Connection from pool is closed. Starting a new connection...");
      conn = new MysqlConnection(DriverManager.getConnection(m_connStr), "");
    }
    return conn;
  }

  /** Returns a connection to the pool, blocking if the pool is full. */
  public void releaseConnection(MyConnection conn) throws Exception
  {
    m_que.put(conn);
  }

  /** Drains the pool and closes every pooled connection. */
  public void closeAll() throws Exception
  {
    if (m_que != null)
    {
      while (!m_que.isEmpty())
      {
        MyConnection conn = m_que.take();
        conn.closeConn();
      }
    }
  }
}
| linkedin/MTBT | mt-proxy/src/main/java/com/linkedin/proxy/pool/BlockingMysqlConnectionPool.java | Java | apache-2.0 | 4,253 |
/**
* OLAT - Online Learning and Training<br />
* http://www.olat.org
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br />
* you may not use this file except in compliance with the License.<br />
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,<br />
* software distributed under the License is distributed on an "AS IS" BASIS, <br />
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br />
* See the License for the specific language governing permissions and <br />
* limitations under the License.
* <p>
* Copyright (c) 1999-2006 at Multimedia- & E-Learning Services (MELS),<br />
* University of Zurich, Switzerland.
* <p>
*/
package org.olat.system.exception;
/**
 * Thrown when an unrecoverable error occurs. These exceptions are caught by the
 * servlet layer: the user gets an error screen and a log message is recorded.
 * Carries an optional translatable user message (key, arguments and the package
 * in which the key is defined) alongside the technical log message.
 *
 * @author Felix Jost
 */
public class OLATRuntimeException extends RuntimeException {
  private String logMsg;
  private String usrMsgKey;
  private String usrMsgPackage;
  private String[] usrMsgArgs;
  private Class throwingClazz;

  /**
   * Full constructor; all other constructors delegate here.
   *
   * @param throwingClazz class reporting the error (defaults to OLATRuntimeException when null)
   * @param usrMsgKey i18n key of the user-visible message, may be null
   * @param usrMsgArgs arguments for the user-visible message, may be null
   * @param usrMsgPackage package in which the i18n key is defined, may be null
   * @param logMsg technical message written to the log
   * @param cause originating throwable; when null a synthetic exception is used
   *        so that a stack trace is always available
   */
  public OLATRuntimeException(Class throwingClazz, String usrMsgKey, String[] usrMsgArgs, String usrMsgPackage, String logMsg, Throwable cause) {
    super(logMsg);
    this.throwingClazz = (throwingClazz != null) ? throwingClazz : OLATRuntimeException.class;
    this.usrMsgKey = usrMsgKey;
    this.usrMsgArgs = usrMsgArgs;
    this.usrMsgPackage = usrMsgPackage;
    this.logMsg = logMsg;
    initCause(cause != null ? cause : new Exception("olat_rtexception_stackgenerator"));
  }

  /** Same as the full constructor, with the throwing class defaulted. */
  public OLATRuntimeException(String usrMsgKey, String[] usrMsgArgs, String usrMsgPackage, String logMsg, Throwable cause) {
    this(OLATRuntimeException.class, usrMsgKey, usrMsgArgs, usrMsgPackage, logMsg, cause);
  }

  /** Constructor without user-message information. */
  public OLATRuntimeException(Class throwingClazz, String logMsg, Throwable cause) {
    this(throwingClazz, null, null, null, logMsg, cause);
  }

  /** Minimal constructor: log message and cause only. */
  public OLATRuntimeException(String logMsg, Throwable cause) {
    this(OLATRuntimeException.class, null, null, null, logMsg, cause);
  }

  /**
   * Format a throwable and its cause chain as an HTML fragment.
   *
   * @param th throwable to render, may be null
   * @return HTML fragment.
   */
  public static StringBuilder throwableToHtml(Throwable th) {
    StringBuilder sb = new StringBuilder("<br />");
    if (th == null) {
      return sb.append("n/a");
    }
    sb.append("Throwable: " + th.getClass().getName() + "<br /><br />");
    toHtml(sb, th);
    // walk and render the cause chain
    int i = 1;
    for (Throwable ca = th.getCause(); ca != null; ca = ca.getCause(), i++) {
      sb.append("<hr /><br />" + i + ". cause:<br /><br />");
      toHtml(sb, ca);
    }
    return sb;
  }

  /** Renders one throwable: log message (if any), message/class, and up to 10 stack frames. */
  private static void toHtml(StringBuilder sb, Throwable th) {
    if (th instanceof OLATRuntimeException) {
      sb.append("logmsg:").append(((OLATRuntimeException) th).getLogMsg()).append("<br />");
    }
    sb.append("message:" + th.getMessage() + "," + th.getClass().getName() + "<br /><br />");
    StackTraceElement[] trace = th.getStackTrace();
    int limit = Math.min(trace.length, 10);
    for (int i = 0; i < limit; i++) {
      sb.append("at ").append(trace[i]).append("<br />");
    }
  }

  /** @return the log message */
  public String getLogMsg() {
    return logMsg;
  }

  /** @return the class that reported this error */
  public Class getThrowingClazz() {
    return throwingClazz;
  }

  /** @return String key of user message in the given package */
  public String getUsrMsgKey() {
    return usrMsgKey;
  }

  /** @return String package name where usr msg key is found */
  public String getUsrMsgPackage() {
    return usrMsgPackage;
  }

  /** @return String[] The translator arguments or null if none available */
  public String[] getUsrMsgArgs() {
    return usrMsgArgs;
  }
}
| huihoo/olat | OLAT-LMS/src/main/java/org/olat/system/exception/OLATRuntimeException.java | Java | apache-2.0 | 4,805 |
package com.coolweather.android.db;
import org.litepal.crud.DataSupport;
/**
 * Created by Drug on 2017/6/15.
 * City entity persisted via LitePal: row id, display name (cityName),
 * city code (cityCode) and the id of the owning province (provinceId).
 */
public class City extends DataSupport {

    private int id;          // row id
    private String cityName; // city display name
    private int cityCode;    // city code
    private int provinceId;  // id of the province this city belongs to

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getCityName() {
        return cityName;
    }

    public void setCityName(String cityName) {
        this.cityName = cityName;
    }

    public int getCityCode() {
        return cityCode;
    }

    public void setCityCode(int cityCode) {
        this.cityCode = cityCode;
    }

    public int getProvinceId() {
        return provinceId;
    }

    public void setProvinceId(int provinceId) {
        this.provinceId = provinceId;
    }
}
| fengbin15/coolweather | app/src/main/java/com/coolweather/android/db/City.java | Java | apache-2.0 | 982 |
import {QueryService} from '../../../src/mapboxgl/services/QueryService';
import {QueryByBoundsParameters} from '../../../src/common/iServer/QueryByBoundsParameters';
import {QueryByDistanceParameters} from '../../../src/common/iServer/QueryByDistanceParameters';
import {QueryBySQLParameters} from '../../../src/common/iServer/QueryBySQLParameters';
import {QueryByGeometryParameters} from '../../../src/common/iServer/QueryByGeometryParameters';
import mapboxgl from 'mapbox-gl';
import { FetchRequest } from '../../../src/common/util/FetchRequest';
var url = GlobeParameter.WorldURL;
describe('mapboxgl_QueryService', () => {
var serviceResult;
var originalTimeout;
beforeEach(() => {
originalTimeout = jasmine.DEFAULT_TIMEOUT_INTERVAL;
jasmine.DEFAULT_TIMEOUT_INTERVAL = 50000;
serviceResult = null;
});
afterEach(() => {
jasmine.DEFAULT_TIMEOUT_INTERVAL = originalTimeout;
});
//地图bounds查询
it('queryByBounds', (done) => {
var param = new QueryByBoundsParameters({
queryParams: {name: "Capitals@World"},
bounds: new mapboxgl.LngLatBounds([0, 0], [60, 39])
});
var queryService = new QueryService(url);
spyOn(FetchRequest, 'commit').and.callFake((method, testUrl, params, options) => {
expect(method).toBe("POST");
expect(testUrl).toBe(url + "/queryResults?returnContent=true");
expect(params).not.toBeNull();
expect(params).toContain("'queryMode':'BoundsQuery'");
expect(params).toContain("'bounds': {'rightTop':{'y':39,'x':60},'leftBottom':{'y':0,'x':0}}");
expect(options).not.toBeNull();
return Promise.resolve(new Response(JSON.stringify(queryResultJson)));
});
queryService.queryByBounds(param, (result) => {
serviceResult = result
try {
expect(queryService).not.toBeNull();
expect(serviceResult).not.toBeNull();
expect(serviceResult.type).toEqual("processCompleted");
expect(serviceResult.result.succeed).toBe(true);
expect(serviceResult.result.currentCount).not.toBeNull();
expect(serviceResult.result.totalCount).toEqual(serviceResult.result.currentCount);
var recordSets = serviceResult.result.recordsets[0];
expect(recordSets.datasetName).toEqual("Capitals@World");
expect(recordSets.features.type).toEqual("FeatureCollection");
var features = recordSets.features.features;
expect(features.length).toBeGreaterThan(0);
for (var i = 0; i < features.length; i++) {
expect(features[i].type).toEqual("Feature");
expect(features[i].id).not.toBeNull();
expect(features[i].properties).not.toBeNull();
expect(features[i].geometry.type).toEqual("Point");
expect(features[i].geometry.coordinates.length).toEqual(2);
}
expect(recordSets.fieldCaptions.length).toEqual(2);
expect(recordSets.fieldTypes.length).toEqual(recordSets.fieldCaptions.length);
expect(recordSets.fields.length).toEqual(recordSets.fieldCaptions.length);
done();
} catch (e) {
console.log("'queryByBounds'案例失败" + e.name + ":" + e.message);
expect(false).toBeTruthy();
done();
}
});
});
//地图距离查询服务
it('queryByDistance', (done) => {
var param = new QueryByDistanceParameters({
queryParams: {name: "Capitals@World"},
distance: 10,
geometry: new mapboxgl.LngLat(104, 30)
});
var queryService = new QueryService(url);
spyOn(FetchRequest, 'commit').and.callFake((method, testUrl, params, options) => {
expect(method).toBe("POST");
expect(testUrl).toBe(url + "/queryResults?returnContent=true");
expect(params).not.toBeNull();
expect(params).toContain("'queryMode':'DistanceQuery'");
expect(params).toContain("'distance':10");
expect(options).not.toBeNull();
return Promise.resolve(new Response(JSON.stringify(queryResultJson)));
});
queryService.queryByDistance(param, (result) => {
serviceResult = result
try {
expect(queryService).not.toBeNull();
expect(serviceResult).not.toBeNull();
expect(serviceResult.type).toEqual("processCompleted");
expect(serviceResult.result.succeed).toBe(true);
expect(serviceResult.result.currentCount).not.toBeNull();
expect(serviceResult.result.totalCount).toEqual(serviceResult.result.currentCount);
var recordSets = serviceResult.result.recordsets[0];
expect(recordSets.datasetName).toEqual("Capitals@World");
expect(recordSets.features.type).toEqual("FeatureCollection");
var features = recordSets.features.features;
expect(features.length).toBeGreaterThan(0);
for (var i = 0; i < features.length; i++) {
expect(features[i].type).toEqual("Feature");
expect(features[i].id).not.toBeNull();
expect(features[i].properties).not.toBeNull();
expect(features[i].geometry.type).toEqual("Point");
expect(features[i].geometry.coordinates.length).toEqual(2);
}
expect(recordSets.fieldCaptions.length).toEqual(2);
expect(recordSets.fieldTypes.length).toEqual(recordSets.fieldCaptions.length);
expect(recordSets.fields.length).toEqual(recordSets.fieldCaptions.length);
done();
} catch (e) {
console.log("'queryByDistance'案例失败" + e.name + ":" + e.message);
expect(false).toBeTruthy();
done();
}
});
});
//地图SQL查询服务
it('queryBySQL', (done) => {
var param = new QueryBySQLParameters({
queryParams: {
name: "Capitals@World",
attributeFilter: "SMID < 10"
}
});
var queryService = new QueryService(url);
spyOn(FetchRequest, 'commit').and.callFake((method, testUrl, params, options) => {
expect(method).toBe("POST");
expect(testUrl).toBe(url + "/queryResults?returnContent=true");
expect(params).not.toBeNull();
expect(params).toContain("'queryMode':'SqlQuery'");
expect(params).toContain("'name':\"Capitals@World\"");
expect(options).not.toBeNull();
return Promise.resolve(new Response(JSON.stringify(queryResultJson)));
});
queryService.queryBySQL(param, (result) => {
serviceResult = result
try {
expect(queryService).not.toBeNull();
expect(serviceResult).not.toBeNull();
expect(serviceResult.type).toEqual("processCompleted");
expect(serviceResult.result.succeed).toBe(true);
expect(serviceResult.result.currentCount).not.toBeNull();
expect(serviceResult.result.totalCount).toEqual(serviceResult.result.currentCount);
var recordSets = serviceResult.result.recordsets[0];
expect(recordSets.datasetName).toEqual("Capitals@World");
expect(recordSets.features.type).toEqual("FeatureCollection");
var features = recordSets.features.features;
expect(features.length).toBeGreaterThan(0);
for (var i = 0; i < features.length; i++) {
expect(features[i].type).toEqual("Feature");
expect(features[i].id).not.toBeNull();
expect(features[i].properties).not.toBeNull();
expect(features[i].geometry.type).toEqual("Point");
expect(features[i].geometry.coordinates.length).toEqual(2);
}
expect(recordSets.fieldCaptions.length).toEqual(2);
expect(recordSets.fieldTypes.length).toEqual(recordSets.fieldCaptions.length);
expect(recordSets.fields.length).toEqual(recordSets.fieldCaptions.length);
done();
} catch (e) {
console.log("'queryBySQL'案例失败" + e.name + ":" + e.message);
expect(false).toBeTruthy();
done();
}
});
});
//地图几何查询服务
it('queryByGeometry', (done) => {
var geo = {
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [[[0, 0], [-30, 0], [-10, 30], [0, 0]]],
}
};
var param = new QueryByGeometryParameters({
queryParams: {name: "Capitals@World.1"},
geometry: geo
});
var queryService = new QueryService(url);
spyOn(FetchRequest, 'commit').and.callFake((method, testUrl, params, options) => {
expect(method).toBe("POST");
expect(testUrl).toBe(url + "/queryResults?returnContent=true");
expect(params).not.toBeNull();
expect(params).toContain("'queryMode':'SpatialQuery'");
expect(options).not.toBeNull();
return Promise.resolve(new Response(JSON.stringify(queryResultJson)));
});
queryService.queryByGeometry(param, (serviceResult) => {
try {
expect(queryService).not.toBeNull();
expect(serviceResult).not.toBeNull();
expect(serviceResult.type).toEqual("processCompleted");
expect(serviceResult.result.succeed).toBe(true);
expect(serviceResult.result.currentCount).not.toBeNull();
expect(serviceResult.result.totalCount).toEqual(serviceResult.result.currentCount);
var recordSets = serviceResult.result.recordsets[0];
expect(recordSets.datasetName).toEqual("Capitals@World");
expect(recordSets.features.type).toEqual("FeatureCollection");
var features = recordSets.features.features;
expect(features.length).toBeGreaterThan(0);
for (var i = 0; i < features.length; i++) {
expect(features[i].type).toEqual("Feature");
expect(features[i].id).not.toBeNull();
expect(features[i].properties).not.toBeNull();
expect(features[i].geometry.type).toEqual("Point");
expect(features[i].geometry.coordinates.length).toEqual(2);
}
expect(recordSets.fieldCaptions.length).toEqual(2);
expect(recordSets.fieldTypes.length).toEqual(recordSets.fieldCaptions.length);
expect(recordSets.fields.length).toEqual(recordSets.fieldCaptions.length);
done();
} catch (e) {
console.log("'queryByGeometry'案例失败" + e.name + ":" + e.message);
expect(false).toBeTruthy();
done();
}
});
});
}); | SuperMap/iClient9 | test/mapboxgl/services/QueryServiceSpec.js | JavaScript | apache-2.0 | 11,464 |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Cloud.Spanner.Connection.MockServer;
using Grpc.Core;
using System;
using System.Linq;
namespace Google.Cloud.Spanner.Connection.Tests.MockServer
{
    /// <summary>
    /// Test fixture hosting an in-process gRPC server that exposes mock Spanner and
    /// DatabaseAdmin services on an ephemeral localhost port.
    /// </summary>
    public class SpannerMockServerFixture : IDisposable
    {
        private readonly Random _random = new Random();
        private readonly Server _server;

        /// <summary>The mock Spanner service registered with the server.</summary>
        public MockSpannerService SpannerMock { get; }

        /// <summary>The mock DatabaseAdmin service registered with the server.</summary>
        public MockDatabaseAdminService DatabaseAdminMock { get; }

        /// <summary>Endpoint in "host:port" form using the actually bound port.</summary>
        public string Endpoint => $"{_server.Ports.ElementAt(0).Host}:{_server.Ports.ElementAt(0).BoundPort}";

        /// <summary>Host the server is bound to.</summary>
        public string Host => _server.Ports.ElementAt(0).Host;

        /// <summary>Port the server is bound to (OS-assigned, since port 0 was requested).</summary>
        public int Port => _server.Ports.ElementAt(0).BoundPort;

        public SpannerMockServerFixture()
        {
            SpannerMock = new MockSpannerService();
            DatabaseAdminMock = new MockDatabaseAdminService();
            _server = new Server
            {
                Services = { V1.Spanner.BindService(SpannerMock), Google.Cloud.Spanner.Admin.Database.V1.DatabaseAdmin.BindService(DatabaseAdminMock) },
                // Port 0 asks the OS for any free port; the real port is read back via BoundPort.
                Ports = { new ServerPort("localhost", 0, ServerCredentials.Insecure) }
            };
            _server.Start();
        }

        public void Dispose()
        {
            _server.ShutdownAsync().Wait();
        }

        /// <summary>Returns a pseudo-random long in [0, long.MaxValue).</summary>
        public long RandomLong()
        {
            return RandomLong(0, long.MaxValue);
        }

        /// <summary>
        /// Returns a pseudo-random long in [min, max). The value comes from a modulo
        /// reduction, so the distribution is slightly biased — acceptable for test data.
        /// </summary>
        public long RandomLong(long min, long max)
        {
            // Fix: the original divided by (max - min) unguarded, so min == max threw
            // DivideByZeroException and an inverted range produced nonsense values.
            if (max <= min)
            {
                throw new ArgumentOutOfRangeException(nameof(max), "max must be greater than min");
            }
            byte[] buf = new byte[8];
            _random.NextBytes(buf);
            long longRand = BitConverter.ToInt64(buf, 0);
            // |longRand % (max - min)| < max - min <= long.MaxValue, so Math.Abs cannot overflow here.
            return (Math.Abs(longRand % (max - min)) + min);
        }
    }
}
# EasyML
Simple code samples that accompany my blog posts about learning "Machine Learning - The Easy Way"!
You can find instructions and more information on my weblog: "www.frdn.info".
| fereydouni/EasyML | README.md | Markdown | apache-2.0 | 178 |
/*
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.videointelligence.v1p3beta1;
import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GaxGrpcProperties;
import com.google.api.gax.grpc.testing.LocalChannelProvider;
import com.google.api.gax.grpc.testing.MockGrpcService;
import com.google.api.gax.grpc.testing.MockServiceHelper;
import com.google.api.gax.rpc.ApiClientHeaderProvider;
import com.google.api.gax.rpc.InvalidArgumentException;
import com.google.api.gax.rpc.StatusCode;
import com.google.longrunning.Operation;
import com.google.protobuf.AbstractMessage;
import com.google.protobuf.Any;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import javax.annotation.Generated;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
// NOTE: produced by gapic-generator-java; manual edits may be overwritten on regeneration.
@Generated("by gapic-generator-java")
public class VideoIntelligenceServiceClientTest {
  // Shared in-process gRPC harness, started once for the whole test class.
  private static MockServiceHelper mockServiceHelper;
  private static MockVideoIntelligenceService mockVideoIntelligenceService;
  // Per-test channel provider and client, rebuilt in setUp().
  private LocalChannelProvider channelProvider;
  private VideoIntelligenceServiceClient client;

  /** Starts the mock video-intelligence gRPC service before any test runs. */
  @BeforeClass
  public static void startStaticServer() {
    mockVideoIntelligenceService = new MockVideoIntelligenceService();
    mockServiceHelper =
        new MockServiceHelper(
            UUID.randomUUID().toString(),
            Arrays.<MockGrpcService>asList(mockVideoIntelligenceService));
    mockServiceHelper.start();
  }

  /** Shuts the shared mock service down after the last test. */
  @AfterClass
  public static void stopServer() {
    mockServiceHelper.stop();
  }

  /** Resets recorded requests/responses and creates a fresh client with no credentials. */
  @Before
  public void setUp() throws IOException {
    mockServiceHelper.reset();
    channelProvider = mockServiceHelper.createChannelProvider();
    VideoIntelligenceServiceSettings settings =
        VideoIntelligenceServiceSettings.newBuilder()
            .setTransportChannelProvider(channelProvider)
            .setCredentialsProvider(NoCredentialsProvider.create())
            .build();
    client = VideoIntelligenceServiceClient.create(settings);
  }

  @After
  public void tearDown() throws Exception {
    client.close();
  }

  /** Happy path: annotateVideoAsync unwraps the LRO and sends the expected request. */
  @Test
  public void annotateVideoTest() throws Exception {
    AnnotateVideoResponse expectedResponse =
        AnnotateVideoResponse.newBuilder()
            .addAllAnnotationResults(new ArrayList<VideoAnnotationResults>())
            .build();
    // The mock returns an already-completed long-running operation wrapping the response.
    Operation resultOperation =
        Operation.newBuilder()
            .setName("annotateVideoTest")
            .setDone(true)
            .setResponse(Any.pack(expectedResponse))
            .build();
    mockVideoIntelligenceService.addResponse(resultOperation);

    String inputUri = "inputUri470706498";
    List<Feature> features = new ArrayList<>();

    AnnotateVideoResponse actualResponse = client.annotateVideoAsync(inputUri, features).get();
    Assert.assertEquals(expectedResponse, actualResponse);

    // Verify the request actually sent over the channel matches the inputs.
    List<AbstractMessage> actualRequests = mockVideoIntelligenceService.getRequests();
    Assert.assertEquals(1, actualRequests.size());
    AnnotateVideoRequest actualRequest = ((AnnotateVideoRequest) actualRequests.get(0));

    Assert.assertEquals(inputUri, actualRequest.getInputUri());
    Assert.assertEquals(features, actualRequest.getFeaturesList());
    // Confirm the standard API-client header was attached.
    Assert.assertTrue(
        channelProvider.isHeaderSent(
            ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
            GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
  }

  /** Error path: a server-side INVALID_ARGUMENT surfaces as InvalidArgumentException. */
  @Test
  public void annotateVideoExceptionTest() throws Exception {
    StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
    mockVideoIntelligenceService.addException(exception);

    try {
      String inputUri = "inputUri470706498";
      List<Feature> features = new ArrayList<>();
      client.annotateVideoAsync(inputUri, features).get();
      Assert.fail("No exception raised");
    } catch (ExecutionException e) {
      // Future.get() wraps the gRPC failure; the cause is the typed GAX exception.
      Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass());
      InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause());
      Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
    }
  }
}
| googleapis/java-video-intelligence | google-cloud-video-intelligence/src/test/java/com/google/cloud/videointelligence/v1p3beta1/VideoIntelligenceServiceClientTest.java | Java | apache-2.0 | 4,977 |
/*******************************************************************************
* Copyright 2014 DEIB - Politecnico di Milano
*
* Marco Balduini (marco.balduini@polimi.it)
* Emanuele Della Valle (emanuele.dellavalle@polimi.it)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This work was partially supported by the European project LarKC (FP7-215535) and by the European project MODAClouds (FP7-318484)
******************************************************************************/
package it.polimi.deib.rsp_services_csparql.queries.utilities;
import java.util.Observer;
/**
 * Associates a C-SPARQL entity id with the {@link Observer} attached to it.
 * Plain mutable holder: both fields are readable and writable via accessors.
 */
public class Csparql_Observer_Descriptor {

	private String id;
	private Observer observer;

	/** No-arg constructor; both fields start as {@code null}. */
	public Csparql_Observer_Descriptor() {
	}

	/** Creates a descriptor for the given id/observer pair. */
	public Csparql_Observer_Descriptor(String id, Observer observer) {
		this.id = id;
		this.observer = observer;
	}

	public String getId() {
		return id;
	}

	public Observer getObserver() {
		return observer;
	}

	public void setId(String id) {
		this.id = id;
	}

	public void setObserver(Observer observer) {
		this.observer = observer;
	}
}
| streamreasoning/rsp-services-csparql | src/main/java/it/polimi/deib/rsp_services_csparql/queries/utilities/Csparql_Observer_Descriptor.java | Java | apache-2.0 | 1,618 |
package de.dailab.newsreel.recommender.common.util;
import org.apache.commons.lang.NullArgumentException;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
/**
 * Manual smoke test for the custom log4j {@code EvalLevel}: logs one message at
 * the custom EVAL level (with an attached throwable) and one each at several
 * built-in levels, so the output and level filtering can be inspected by eye.
 *
 * Created by jens on 02.03.16.
 */
public class EvalLevelTest {
    public static void main(String[] args) {
        Logger logger = Logger.getLogger(EvalLevel.class);
        // Set the threshold to EVAL; messages below that level may be suppressed.
        logger.setLevel(EvalLevel.EVAL);
        // Attach a throwable so the stack-trace rendering at EVAL level is exercised too.
        Throwable _throw = new NullArgumentException("some npe");
        logger.log(EvalLevel.EVAL, "Eval log", _throw);
        logger.log(Level.DEBUG, "I am a DEBUG message");
        logger.log(Level.INFO, "I am a INFO message");
        logger.log(Level.FATAL, "I am a FATAL message");
    }
}
/*
*
* Copyright 2015 Stephen Cameron Data Services
*
*
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package au.com.scds.chats.dom.general.names;
import javax.jdo.annotations.Column;
import javax.jdo.annotations.IdentityType;
import javax.jdo.annotations.Inheritance;
import javax.jdo.annotations.InheritanceStrategy;
import javax.jdo.annotations.PersistenceCapable;
import javax.jdo.annotations.PrimaryKey;
import org.apache.isis.applib.util.ObjectContracts;
import au.com.scds.chats.dom.activity.Activity;
//import au.com.scds.chats.dom.AbstractNamedChatsDomainEntity;
/**
* Base class for the 'name' types.
*
* We want to have referential integrity in the database but use strings as
* primary and foreign keys for these simple code types, so have to use
* Application as the Identity Type and identify name as the primary key.
*
* This means we cannot extend the AbstractNamedChatsDomain entity as DN seems
* to want the Identity Type of child to be the same as parent class.
*
* @author stevec
*
*/
// TODO maybe this could be a map to prevent unnecessary database queries
@PersistenceCapable(identityType = IdentityType.APPLICATION)
@Inheritance(strategy = InheritanceStrategy.SUBCLASS_TABLE)
public abstract class ClassificationValue {

	// The name doubles as the primary key, giving referential integrity in the
	// database while keeping plain strings as foreign keys in referring tables.
	private String name;

	public ClassificationValue() {
	}

	public ClassificationValue(String name) {
		this.name = name;
	}

	/** Title shown in the UI; simply the name. */
	public String title() {
		return getName();
	}

	@PrimaryKey
	@Column(allowsNull = "false", length = 50)
	public String getName() {
		return name;
	}

	public void setName(String name) {
		this.name = name;
	}

	@Override
	public boolean equals(final Object obj) {
		if (obj == null) {
			return false;
		}
		if (obj == this) {
			return true;
		}
		if (!this.getClass().isInstance(obj)) {
			return false;
		}
		// Null-safe comparison: a transient instance whose name has not been set
		// yet no longer throws a NullPointerException here.
		String otherName = ((ClassificationValue) obj).getName();
		return otherName == null ? this.getName() == null : otherName.equals(this.getName());
	}

	// Fix: equals() was overridden without hashCode(), violating the Object
	// contract — equal values could land in different HashMap/HashSet buckets.
	@Override
	public int hashCode() {
		return getName() == null ? 0 : getName().hashCode();
	}

	@Override
	public String toString() {
		return this.getClass().getSimpleName() + ":" + this.getName();
	}
}
| Stephen-Cameron-Data-Services/isis-chats | dom/src/main/java/au/com/scds/chats/dom/general/names/ClassificationValue.java | Java | apache-2.0 | 2,524 |
using System;
using System.Linq;
using System.Linq.Expressions;
namespace Checkk.Exceptions
{
    /// <summary>
    /// Thrown when an invariant check requires a type to expose a public constructor
    /// with a specific parameter list and no such constructor exists.
    /// </summary>
    public class InvariantShouldHavePublicConstructorWithParametersException
        : InvariantException
    {
        // Expression yielding the offending type; kept as an expression so the
        // auto-generated message can show the caller's original code text.
        private readonly Expression<Func<Type>> _target;
        // The constructor parameter types that were expected.
        private readonly Type[] _types;

        public InvariantShouldHavePublicConstructorWithParametersException(
            Expression<Func<Type>> target,
            Type[] types,
            string message)
            : base(message)
        {
            _target = target;
            _types = types;
        }

        /// <summary>Message used when the caller supplied no explicit message.</summary>
        protected override string AutoMessage
        {
            get { return string.Format(
                "{0} should have a public constructor accepting ({1})",
                _target.Body,
                string.Join(", ", _types.Select(x => x.Name))); }
        }
    }
}
\section{\'Etude de la vitesse sur le site M}
\begin{center}
\includegraphics[scale=0.5] % FIXME: missing the mandatory {<filename>} argument — specify the image file for site M; the document will not compile as-is
\end{center} | malhys/maths_projects | bac_techno/1ST2S/stats/exam/annexe_eolienne.tex | TeX | apache-2.0 | 103 |
package com.qszxin.asterism.activity;
import android.os.Bundle;
import android.support.v4.app.FragmentPagerAdapter;
import android.view.View;
import android.support.design.widget.NavigationView;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.app.Activity;
import android.content.Intent;
import android.graphics.Bitmap;
import android.os.Environment;
import android.provider.MediaStore;
import android.text.format.DateFormat;
import android.util.Log;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.RelativeLayout;
import android.widget.Toast;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Calendar;;
import java.util.Locale;
import java.util.ArrayList;
import java.util.List;
import android.support.v4.view.ViewPager;
import android.support.v4.app.Fragment;
import com.qszxin.asterism.R;
/**
* Created by wubo on 2016/3/4.
* 主界面
*/
public class MainActivity extends AppCompatActivity implements NavigationView.OnNavigationItemSelectedListener { //继承OnRefreshListener的接口,实现刷新
//实现Tablayout
private ViewPager mViewPager;
private FragmentPagerAdapter mAdapter;
private List<Fragment> mFragments = new ArrayList<Fragment>();
private RelativeLayout[] rl = new RelativeLayout[3];
private ImageView[] iv = new ImageView[3];
private int[] rl_id={
R.id.id_rec_lay, R.id.id_atten_lay,R.id.id_me_lay
};
private int[] iv_id={
R.id.id_rec_laybtn,R.id.id_atten_laybtn,R.id.id_me_laybtn
};
private int[] icon_id = {
R.drawable.icon_rec,R.drawable.icon_atten,R.drawable.icon_user
};
private int[] tabColor;
private String username;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
Bundle bundle = this.getIntent().getExtras();
username = bundle.get("username").toString(); //从登陆界面获取数据
//toolbar设置
Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
toolbar.setTitle(username); //必须要在setSupportActionBar之前调用
setSupportActionBar(toolbar);
DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
ActionBarDrawerToggle toggle = new ActionBarDrawerToggle(
this, drawer, toolbar, R.string.navigation_drawer_open, R.string.navigation_drawer_close);
drawer.setDrawerListener(toggle);
toggle.syncState();
NavigationView navigationView = (NavigationView) findViewById(R.id.nav_view);
navigationView.setNavigationItemSelectedListener(this);
}
@Override
public void onBackPressed() {
DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
if (drawer.isDrawerOpen(GravityCompat.START)) {
drawer.closeDrawer(GravityCompat.START);
} else {
super.onBackPressed();
}
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.main, menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
// Handle action bar item clicks here. The action bar will
// automatically handle clicks on the Home/Up button, so long
// as you specify a parent activity in AndroidManifest.xml.
int id = item.getItemId();
//noinspection SimplifiableIfStatement
if (id == R.id.action_settings) {
return true;
}
return super.onOptionsItemSelected(item);
}
@SuppressWarnings("StatementWithEmptyBody")
@Override
public boolean onNavigationItemSelected(MenuItem item) {
// Handle navigation view item clicks here.
int id = item.getItemId();
if (id == R.id.nav_camera) {
// TODO Auto-generated method stub
Intent intent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
startActivityForResult(intent, 1);
// Handle the camera action
} else if (id == R.id.nav_gallery) {
} else if (id == R.id.nav_slideshow) {
} else if (id == R.id.nav_manage) {
} else if (id == R.id.nav_share) {
} else if (id == R.id.nav_send) {
}
DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
drawer.closeDrawer(GravityCompat.START);
return true;
}
@Override //调用相机后的操作
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
// TODO Auto-generated method stub
super.onActivityResult(requestCode, resultCode, data);
if (resultCode == Activity.RESULT_OK) {
String sdStatus = Environment.getExternalStorageState();
if (!sdStatus.equals(Environment.MEDIA_MOUNTED)) { // 检测sd是否可用
Log.i("TestFile",
"SD card is not avaiable/writeable right now.");
return;
}
String name = new DateFormat().format("yyyyMMdd_hhmmss", Calendar.getInstance(Locale.CHINA)) + ".jpg";
Toast.makeText(this, name, Toast.LENGTH_LONG).show();
Bundle bundle = data.getExtras();
Bitmap bitmap = (Bitmap) bundle.get("data");// 获取相机返回的数据,并转换为Bitmap图片格式
FileOutputStream b = null;
//???????????????????????????????为什么不能直接保存在系统相册位置呢????????????
File file = new File("/sdcard/myImage/");
file.mkdirs();// 创建文件夹
String fileName = "/sdcard/myImage/" + name;
try {
b = new FileOutputStream(fileName);
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, b);// 把数据写入文件
} catch (FileNotFoundException e) {
e.printStackTrace();
} finally {
try {
b.flush();
b.close();
} catch (IOException e) {
e.printStackTrace();
}
}
((ImageView) findViewById(R.id.imageView)).setImageBitmap(bitmap);// 将图片显示在ImageView里
}
}
}
| wubohnu/Asterism | app/src/main/java/com/qszxin/asterism/activity/MainActivity.java | Java | apache-2.0 | 6,927 |
/*
* Copyright (C) 2016 the original author or authors.
*
* This file is part of jGrades Application Project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*/
package org.jgrades.security.utils;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.userdetails.User;
import java.util.Collection;
/**
 * Spring Security {@link User} specialization used as the application's
 * {@code UserDetails} implementation; adds no state of its own, only
 * pass-through constructors.
 */
public class UserDetailsImpl extends User {

    /** Creates an enabled, non-expired, non-locked user with the given authorities. */
    public UserDetailsImpl(String username, String password, Collection<? extends GrantedAuthority> authorities) {
        super(username, password, authorities);
    }

    /** Creates a user with explicit control over every account-status flag. */
    public UserDetailsImpl(String username, String password, boolean enabled, boolean accountNonExpired, boolean credentialsNonExpired, boolean accountNonLocked, Collection<? extends GrantedAuthority> authorities) {
        super(username, password, enabled, accountNonExpired, credentialsNonExpired, accountNonLocked, authorities);
    }
}
| jgrades/jgrades | jg-backend/implementation/base/jg-security/implementation/src/main/java/org/jgrades/security/utils/UserDetailsImpl.java | Java | apache-2.0 | 1,029 |
/*
* Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "modules/webdatabase/SQLTransactionStateMachine.h"
#include "core/platform/Logging.h"
#include "wtf/Assertions.h"
namespace WebCore {
#if !LOG_DISABLED
// Maps a SQLTransactionState to a human-readable name for log output. Only
// compiled when logging is enabled (see the surrounding !LOG_DISABLED guard).
const char* nameForSQLTransactionState(SQLTransactionState state)
{
    switch (state) {
    case SQLTransactionState::End:
        return "end";
    case SQLTransactionState::Idle:
        return "idle";
    case SQLTransactionState::AcquireLock:
        return "acquireLock";
    case SQLTransactionState::OpenTransactionAndPreflight:
        return "openTransactionAndPreflight";
    case SQLTransactionState::RunStatements:
        return "runStatements";
    case SQLTransactionState::PostflightAndCommit:
        return "postflightAndCommit";
    case SQLTransactionState::CleanupAndTerminate:
        return "cleanupAndTerminate";
    case SQLTransactionState::CleanupAfterTransactionErrorCallback:
        return "cleanupAfterTransactionErrorCallback";
    case SQLTransactionState::DeliverTransactionCallback:
        return "deliverTransactionCallback";
    case SQLTransactionState::DeliverTransactionErrorCallback:
        return "deliverTransactionErrorCallback";
    case SQLTransactionState::DeliverStatementCallback:
        return "deliverStatementCallback";
    case SQLTransactionState::DeliverQuotaIncreaseCallback:
        return "deliverQuotaIncreaseCallback";
    case SQLTransactionState::DeliverSuccessCallback:
        return "deliverSuccessCallback";
    default:
        // Defensive: an unrecognized enumerator still yields a printable string.
        return "UNKNOWN";
    }
}
#endif
} // namespace WebCore
| indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/WebKit/Source/modules/webdatabase/SQLTransactionStateMachine.cpp | C++ | apache-2.0 | 2,872 |
# Clients package
The packages contained here provide client libraries for accessing the ecosystem of Stellar services. At present, there is only a simple horizon client library, but in the future there will be clients to interact with stellar-core, federation, the bridge server and more.
See [godoc](https://godoc.org/github.com/stellar/go/clients) for details about each package.
## Adding new client packages
Ideally, each one of our client packages will have commonalities in their API to ease the cost of learning each. It's recommended that we follow a pattern similar to the `net/http` package's client shape:
A type, `Client`, is the central type of any client package, and its methods should provide the bulk of the functionality for the package. A `DefaultClient` var is provided for consumers that don't need client-level customization of behavior. Each method on the `Client` type should have a corresponding func at the package level that proxies a call through to the default client. For example, `http.Get()` is the equivalent of `http.DefaultClient.Get()`. | stellar/gateway-server | vendor/src/github.com/stellar/go/clients/README.md | Markdown | apache-2.0 | 1,095 |
{#-
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-#}
<!DOCTYPE html>
<!-- Server: {{g.server_name}} -->
{# Master layout template: registers shared JS/CSS bundles, renders the theme
   header/footer, sidebar and top navigation, and exposes the block hooks
   (title, header, content, sidebar_menu, extra_js, ...) that page templates override. #}
{% import 'allura:templates/jinja_master/lib.html' as lib with context %}
{% if g.theme.jinja_macros %}
{% import g.theme.jinja_macros as theme_macros with context %}
{% endif %}
{# Register the common JS/CSS assets so the resource manager can emit them below. #}
{% do g.register_forge_js('js/jquery-base.js') %}
{% do g.register_forge_js('js/jquery.notify.js') %}
{% do g.register_forge_js('js/modernizr.js') %}
{% do g.register_forge_js('js/sylvester.js') %}
{% do g.register_forge_js('js/pb.transformie.min.js') %}
{% do g.register_forge_js('js/allura-base.js') %}
{% do g.register_forge_css('css/forge/hilite.css') %}
{% do g.register_css('/nf/tool_icon_css', compress=False) %}
{% do g.theme.require() %}
{% do g.resource_manager.register_widgets(c) %}
{# paulirish.com/2008/conditional-stylesheets-vs-css-hacks-answer-neither/ #}
<!--[if lt IE 7 ]> <html lang="en" class="no-js ie6"> <![endif]-->
<!--[if IE 7 ]>    <html lang="en" class="no-js ie7"> <![endif]-->
<!--[if IE 8 ]>    <html lang="en" class="no-js ie8"> <![endif]-->
<!--[if IE 9 ]>    <html lang="en" class="no-js ie9"> <![endif]-->
<!--[if (gt IE 9)|!(IE)]>--> <html lang="en" class="no-js"> <!--<![endif]-->
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type"/>
<title>{% block title %}Your title goes here{% endblock %}</title>
{{theme_macros.extra_header(g.theme_href(''))}}
<script type="text/javascript">
/*jslint onevar: false, nomen: false, evil: true, css: true, plusplus: false, white: false, forin: true, on: true, immed: false */
/*global confirm, alert, unescape, window, jQuery, $, net, COMSCORE */
</script>
{# Emit the CSS registered above (plus any widget resources). #}
{% for blob in g.resource_manager.emit('head_css') %}
{{ blob }}
{% endfor %}
{% if c.project %}
{% for blob in g.resource_manager.emit('head_js') %}
{{ blob }}
{% endfor %}
{% endif %}
{# Neighborhood-level custom CSS, when configured. #}
{% if c.project and c.project.neighborhood.css %}
<style type="text/css">
{{c.project.neighborhood.get_custom_css()|safe}}
</style>
{% elif neighborhood and neighborhood.css %}
<style type="text/css">
{{neighborhood.get_custom_css()}}
</style>
{% endif %}
{% block extra_css %}{% endblock %}
{# Hide the honeypot anti-spam field from real users. #}
<style>.{{ g.antispam.honey_class }} { display:none }</style>
{% block head %}
{% endblock %}
{% if g.production_mode %}{{g.analytics.display()}}{% endif %}
</head>
<body{% block body_attrs %}{% endblock %} id="forge">
{# Visible only if the stylesheet fails to load; doubles as a CSS smoke test. #}
<h2 class="hidden">
<span style="color:red">Error:</span> CSS did not load.<br>
This may happen on the first request due to CSS mimetype issues.
Try clearing your browser cache and refreshing.
<hr>
</h2>
{% block body_top_js %}
{% for blob in g.resource_manager.emit('body_top_js') %}
{{ blob }}
{% endfor %}
{% endblock %}
{{theme_macros.header(g.login_url, '/auth/logout')}}
{{theme_macros.site_notification()}}
{% set flash = tg.flash_obj.render('flash', use_js=False) %}
<section id="page-body" class="{{g.document_class(neighborhood)}}">
<div id="nav_menu_holder">
{% block nav_menu %}
{% include g.theme.nav_menu %}
{% endblock %}
</div>
<div id="top_nav" class="">
{% block top_nav %}
{% include g.theme.top_nav %}
{% endblock %}
</div>
<div id="content_base">
{% block content_base %}
{% if not hide_left_bar %}
{% block sidebar_menu %}
{% include g.theme.sidebar_menu %}
{% endblock %}
{% set outer_width = 20 %}
{% else %}
{% set outer_width = 24 %}
{% endif %}
{# Grid widths depend on whether the left and right sidebars are shown. #}
{% if show_right_bar %}
{% set inner_width = outer_width - 8 %}
{% else %}
{% set inner_width = outer_width %}
{% endif %}
<div class="grid-{{outer_width}} pad">
<h2 class="dark{% block header_classes %} title{% endblock %}">{% block header %}{% endblock %}
<!-- actions -->
<small>
{% block actions %}{% endblock %}
</small>
<!-- /actions -->
</h2>
{% block edit_box %}{% endblock %}
<div{% if show_right_bar %} class="{% block inner_grid %}grid-{{inner_width}}"{% endblock %}{% endif %}>
{% block before_content %}{% endblock %}
{% block content %}{% endblock %}
</div>
{% if show_right_bar %}
<div id="sidebar-right" class="grid-6 fright">
{% block right_content %}{% endblock %}
</div>
{% endif %}
{% block after_content %}{% endblock %}
</div>
{% endblock %}
</div>
</section>
{{theme_macros.footer(g.year(), g.theme_href(''))}}
{# Flash-style notifications queued for the current user. #}
<div id="messages">
{% for n in h.pop_user_notifications() %}
<section class="message {{ n.subject or 'info' }}">
<header>Notification:</header>
<div class="content">{{ n.text }}</div>
</section>
{% endfor %}
</div>
{% if c.show_login_overlay %}
{{theme_macros.login_overlay()}}
{% endif %}
{# Emit body-level JS registered earlier, then page-specific extras. #}
{% for blob in g.resource_manager.emit('body_js') %}
{{ blob }}
{% endfor %}
{% for blob in g.resource_manager.emit('body_js_tail') %}
{{ blob }}
{% endfor %}
{% block extra_js %}{% endblock %}
{% if neighborhood %}
{{ neighborhood.site_specific_html | safe }}
{% elif c.project.neighborhood %}
{{ c.project.neighborhood.site_specific_html | safe }}
{% endif %}
{{theme_macros.custom_js()}}
{% if flash %}
<script type="text/javascript">{{flash | safe}}</script>
{% endif %}
</body>
</html>
| apache/incubator-allura | Allura/allura/templates/jinja_master/master.html | HTML | apache-2.0 | 6,543 |
package com.ccs.ir;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.InputStreamReader;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.elasticsearch.index.engine.Engine.Get;
public class HitsQuery {
public static void main(String[] args) {
// TODO Auto-generated method stub
// build inLinks and outLinks Map on the base set
Map<String, Set<String>> inLinksMap = new HashMap<String, Set<String>>();
inLinksMap = getLinksMap("baseSetInlinkmap");
Map<String, Set<String>> outLinksMap = new HashMap<String, Set<String>>();
outLinksMap = getLinksMap("baseSetOutlinkmap");
// create auth and hubs
Map<String, Double> authsMap = new HashMap<String, Double>();
Map<String, Double> hubsMap = new HashMap<String, Double>();
// iterate inLinks and create auths and hubs
for (String li : inLinksMap.keySet()) {
authsMap.put(li, 1.0);
hubsMap.put(li, 1.0);
}
// initialize counts
int countHub = 1;
int countAuth = 1;
// initialize temp variables for auth and Hubs
int tempAuth = 10;
int tempHub = 10;
// initialize flags for convergence decision
boolean auth = true;
boolean hub = true;
int iter = 0; // too get the number of iterations
boolean change= true;
Set<String> baseSet = hubsMap.keySet();
while (change) {
// Initialize a normalization value
Double normAuth = 0.0,normHub = 0.0;
Map<String,Double> tempA = new HashMap<String, Double>();
Map<String,Double> tempH = new HashMap<String, Double>();
for(String url : baseSet){
tempA = authsMap;
tempH = hubsMap;
Set<String> inlinks = inLinksMap.get(url);
Set<String> outLinks = outLinksMap.get(url);
Double authWeight = 0.0,hubWeight = 0.0;
for (String in : inlinks) {
if(hubsMap.containsKey(in))
authWeight += hubsMap.get(in);
}
normAuth+= (authWeight*authWeight);
for (String out : outLinks) {
if(authsMap.containsKey(out))
hubWeight += authsMap.get(out);
}
normHub+= (hubWeight*hubWeight);
authsMap.put(url,authWeight);
hubsMap.put(url,hubWeight);
}
for(String url : baseSet){
authsMap.put(url,authsMap.get(url)/normAuth);
hubsMap.put(url,hubsMap.get(url)/normHub);
}
Double tmpA = 0.0;
Double tmpH = 0.0;
Double a=0.0;
Double h=0.0;
/*check for convergence*/
for(String url : baseSet){
tmpA += tempA.get(url);
tmpH += tempH.get(url);
a+=authsMap.get(url);
h+=hubsMap.get(url);
}
System.out.println("tmpA::: "+ tmpA);
System.out.println("a::: "+ a);
System.out.println("tmpH::: "+ tmpH);
System.out.println("h::: "+ h);
if(a==tmpA && h==tmpH)
change=false;
iter++;
System.out.println(iter);
}
// sort the auth and hub Maps to get the top 500
authsMap = sortHITSMap(authsMap);
hubsMap = sortHITSMap(hubsMap);
// Once Sortedd, write output to file
writeFile(authsMap, "AuthTop500");
writeFile(hubsMap, "HubsTop500");
}
private static Map<String, Set<String>> getLinksMap(String fileName) {
Map<String, Set<String>> urlToinLinks = new HashMap<String, Set<String>>();
int count = 0;
File file = new File("C:/Users/Nitin/Assign4/" + fileName + ".txt");
try {
BufferedReader br = new BufferedReader(new InputStreamReader(
new FileInputStream(file)));
String str = "";
while ((str = br.readLine()) != null) {
String[] inUrls = str.split(" ");
String url = inUrls[0];
Set<String> inLinksforUrl = new HashSet<String>();
for (int i = 1; i < inUrls.length; i++) {
inLinksforUrl.add(inUrls[i]);
}
urlToinLinks.put(url, inLinksforUrl);
count++;
}
System.out.println("Total " + fileName + " Files Crawled:: "
+ count);
br.close();
} catch (Exception e) {
System.out.println("In Error..");
e.printStackTrace();
}
return urlToinLinks;
}
/**
 * Returns a new insertion-ordered map containing the entries of {@code hitsMap}
 * sorted by value in descending order.
 *
 * Fix over the original: the comparator used
 * {@code Double.parseDouble(value.toString())}, which throws
 * {@link NumberFormatException} for any non-numeric {@code V} even though the
 * signature promises to work for every {@code Comparable} value type, and it
 * compared doubles with {@code ==}. Using {@code compareTo} directly gives the
 * same descending order for numeric values and honors the generic bound.
 *
 * @param hitsMap map to sort; not modified
 * @return a {@link LinkedHashMap} with entries in descending value order
 */
public static <K, V extends Comparable<? super V>> Map<K, V> sortHITSMap(
        Map<K, V> hitsMap) {
    System.out.println("Started Sorting...@ " + new Date());
    List<Map.Entry<K, V>> list = new LinkedList<Map.Entry<K, V>>(
            hitsMap.entrySet());
    Collections.sort(list, new Comparator<Map.Entry<K, V>>() {
        public int compare(Map.Entry<K, V> o1, Map.Entry<K, V> o2) {
            // o2 vs o1 yields descending order.
            return o2.getValue().compareTo(o1.getValue());
        }
    });
    // LinkedHashMap preserves the sorted insertion order.
    Map<K, V> result = new LinkedHashMap<K, V>();
    for (Map.Entry<K, V> entry : list) {
        result.put(entry.getKey(), entry.getValue());
    }
    System.out.println("Stopped Sorting...@ " + new Date());
    return result;
}
/**
 * Writes up to the first 500 entries of {@code maps} (iteration order) to
 * {@code C:/Users/Nitin/Assign4/HITSOutput/<fileName>.txt}, one
 * {@code key<TAB>value} pair per line.
 *
 * Fix over the original: the catch block silently swallowed every exception and
 * the writer was never closed when a write failed. Errors are now printed and
 * the writer is closed in a {@code finally} block.
 *
 * @param maps     entries to write; expected to be pre-sorted by the caller
 * @param fileName base name (without extension) of the output file
 */
private static void writeFile(Map<String, Double> maps, String fileName) {
    BufferedWriter out = null;
    try {
        File file = new File("C:/Users/Nitin/Assign4/HITSOutput/"
                + fileName + ".txt");
        out = new BufferedWriter(new FileWriter(file));
        int pageCount = 0;
        for (Map.Entry<String, Double> pr : maps.entrySet()) {
            out.write(pr.getKey() + "\t" + pr.getValue());
            out.newLine();
            pageCount++;
            // Only the top 500 entries are wanted.
            if (pageCount == 500) {
                break;
            }
        }
    } catch (Exception e) {
        // Previously swallowed silently; surface the failure for diagnosis.
        e.printStackTrace();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException ignored) {
                // Nothing useful to do if close itself fails.
            }
        }
    }
}
}
| tintin1343/Information-Retrieval | fourth_assignment_ir/code/hw4/src/com/ccs/ir/HitsQuery.java | Java | apache-2.0 | 5,626 |
# Packaging script for the irma.common component.
#
# Fix over the original: ``namespace_packages`` is a setuptools-only keyword;
# ``distutils.core.setup`` does not understand it (it only emits an "Unknown
# distribution option" warning), so the ``irma`` namespace package was never
# registered. Import setup() from setuptools, falling back to distutils only
# for minimal environments where setuptools is unavailable.
try:
    from setuptools import setup
except ImportError:  # minimal environments without setuptools
    from distutils.core import setup

from src import __version__

setup(
    name="irma.common",
    version=__version__,
    author="Quarkslab",
    author_email="irma@quarkslab.com",
    description="The common component of the IRMA software",
    packages=["irma.common",
              "irma.common.base",
              "irma.common.utils",
              "irma.common.configuration",
              "irma.common.ftp",
              "irma.common.plugins"],
    # Map each distributed package onto the on-disk "src" layout. The
    # configuration/ftp entries were previously resolved implicitly via the
    # parent mapping; they are spelled out here for consistency.
    package_dir={"irma.common": "src",
                 "irma.common.utils": "src/utils",
                 "irma.common.base": "src/base",
                 "irma.common.configuration": "src/configuration",
                 "irma.common.ftp": "src/ftp",
                 "irma.common.plugins": "src/plugins"},
    namespace_packages=["irma"]
)
| quarkslab/irma | common/setup.py | Python | apache-2.0 | 683 |
# Agropyron cristatum subsp. tarbagataicum SUBSPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Poales/Poaceae/Agropyron/Agropyron cristatum/ Syn. Agropyron cristatum tarbagataicum/README.md | Markdown | apache-2.0 | 200 |
# Hieracium corniculans Johanss. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Asterales/Asteraceae/Hieracium/Hieracium corniculans/README.md | Markdown | apache-2.0 | 180 |
# Talinum monandrum Ruiz & Pav. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Talinaceae/Talinum/Talinum monandrum/README.md | Markdown | apache-2.0 | 179 |
# Leptospermum eriocalyx Sieber ex Spreng. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Myrtales/Myrtaceae/Leptospermum/Leptospermum parvifolium/ Syn. Leptospermum eriocalyx/README.md | Markdown | apache-2.0 | 197 |
# Acontias platylobus Schott SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Alismatales/Araceae/Xanthosoma/Xanthosoma platylobum/ Syn. Acontias platylobus/README.md | Markdown | apache-2.0 | 183 |
# Phrynium placentarium (Lour.) Merr. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Liliopsida/Zingiberales/Marantaceae/Stachyphrynium/Stachyphrynium placentarium/ Syn. Phrynium placentarium/README.md | Markdown | apache-2.0 | 192 |
# Viola filiformis Ruiz & Pav. ex DC. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Malpighiales/Violaceae/Viola/Viola filiformis/README.md | Markdown | apache-2.0 | 185 |
# Jacksonia hemisericea D.A.Herb. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Fabales/Fabaceae/Jacksonia/Jacksonia hemisericea/README.md | Markdown | apache-2.0 | 181 |
# Polypodium feuillei var. minus G.Kunkel VARIETY
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Pteridophyta/Polypodiopsida/Polypodiales/Polypodiaceae/Polypodium/Polypodium feuillei/Polypodium feuillei minus/README.md | Markdown | apache-2.0 | 189 |
# Dryopteris debilis (Mett.) C.Chr. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Pteridophyta/Polypodiopsida/Polypodiales/Dryopteridaceae/Dryopteris/Dryopteris debilis/README.md | Markdown | apache-2.0 | 183 |
# Achyranthes stenophylla Standl. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Amaranthaceae/Achyranthes/Achyranthes stenophylla/README.md | Markdown | apache-2.0 | 181 |
# Taraxacum hellenicum Dahlst. SPECIES
#### Status
ACCEPTED
#### According to
Euro+Med Plantbase
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Asterales/Asteraceae/Taraxacum/Taraxacum hellenicum/README.md | Markdown | apache-2.0 | 165 |
# Lecidea squamifera Stizenb. SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Lecidea squamifera Stizenb.
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Lecanoromycetes/Lecanorales/Lecideaceae/Lecidea/Lecidea squamifera/README.md | Markdown | apache-2.0 | 183 |
/*
* Copyright 2010-2013 Ning, Inc.
* Copyright 2014-2016 Groupon, Inc
* Copyright 2014-2016 The Billing Project, LLC
*
* The Billing Project licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.killbill.billing.entitlement.engine.core;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import javax.annotation.Nullable;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.killbill.billing.ObjectType;
import org.killbill.billing.account.api.AccountApiException;
import org.killbill.billing.account.api.AccountInternalApi;
import org.killbill.billing.account.api.ImmutableAccountData;
import org.killbill.billing.callcontext.InternalTenantContext;
import org.killbill.billing.catalog.api.BillingPeriod;
import org.killbill.billing.catalog.api.PhaseType;
import org.killbill.billing.catalog.api.PlanPhase;
import org.killbill.billing.catalog.api.PlanPhaseSpecifier;
import org.killbill.billing.catalog.api.Product;
import org.killbill.billing.catalog.api.ProductCategory;
import org.killbill.billing.entitlement.AccountEventsStreams;
import org.killbill.billing.entitlement.EventsStream;
import org.killbill.billing.entitlement.api.BlockingState;
import org.killbill.billing.entitlement.api.BlockingStateType;
import org.killbill.billing.entitlement.api.Entitlement.EntitlementState;
import org.killbill.billing.entitlement.api.EntitlementApiException;
import org.killbill.billing.entitlement.api.svcs.DefaultAccountEventsStreams;
import org.killbill.billing.entitlement.block.BlockingChecker;
import org.killbill.billing.entitlement.dao.DefaultBlockingStateDao;
import org.killbill.billing.entitlement.dao.OptimizedProxyBlockingStateDao;
import org.killbill.billing.entitlement.dao.ProxyBlockingStateDao;
import org.killbill.billing.subscription.api.SubscriptionBase;
import org.killbill.billing.subscription.api.SubscriptionBaseInternalApi;
import org.killbill.billing.subscription.api.user.SubscriptionBaseApiException;
import org.killbill.billing.subscription.api.user.SubscriptionBaseBundle;
import org.killbill.billing.subscription.api.user.SubscriptionBaseTransition;
import org.killbill.billing.util.cache.CacheControllerDispatcher;
import org.killbill.billing.util.callcontext.InternalCallContextFactory;
import org.killbill.billing.util.callcontext.TenantContext;
import org.killbill.billing.util.dao.NonEntityDao;
import org.killbill.bus.api.PersistentBus;
import org.killbill.clock.Clock;
import org.killbill.notificationq.api.NotificationQueueService;
import org.skife.jdbi.v2.IDBI;
import com.google.common.base.MoreObjects;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
@Singleton
public class EventsStreamBuilder {

    private final AccountInternalApi accountInternalApi;
    private final SubscriptionBaseInternalApi subscriptionInternalApi;
    private final BlockingChecker checker;
    // Proxy DAO able to synthesize add-on blocking states that are not on disk;
    // calls back into this builder (see the recursion notes below).
    private final OptimizedProxyBlockingStateDao blockingStateDao;
    // Plain DAO returning only the blocking states persisted on disk.
    private final DefaultBlockingStateDao defaultBlockingStateDao;
    private final Clock clock;
    private final InternalCallContextFactory internalCallContextFactory;

    @Inject
    public EventsStreamBuilder(final AccountInternalApi accountInternalApi, final SubscriptionBaseInternalApi subscriptionInternalApi,
                               final BlockingChecker checker, final IDBI dbi, final Clock clock,
                               final NotificationQueueService notificationQueueService, final PersistentBus eventBus,
                               final CacheControllerDispatcher cacheControllerDispatcher,
                               final NonEntityDao nonEntityDao,
                               final InternalCallContextFactory internalCallContextFactory) {
        this.accountInternalApi = accountInternalApi;
        this.subscriptionInternalApi = subscriptionInternalApi;
        this.checker = checker;
        this.clock = clock;
        this.internalCallContextFactory = internalCallContextFactory;
        // Both DAOs are built here (not injected); the proxy DAO receives a
        // reference to this builder so it can construct EventsStreams itself.
        this.defaultBlockingStateDao = new DefaultBlockingStateDao(dbi, clock, notificationQueueService, eventBus, cacheControllerDispatcher, nonEntityDao, internalCallContextFactory);
        this.blockingStateDao = new OptimizedProxyBlockingStateDao(this, subscriptionInternalApi, dbi, clock, notificationQueueService, eventBus, cacheControllerDispatcher, nonEntityDao, internalCallContextFactory);
    }

    /** Rebuilds the given stream from scratch (re-reads all underlying state). */
    public EventsStream refresh(final EventsStream eventsStream, final TenantContext tenantContext) throws EntitlementApiException {
        return buildForEntitlement(eventsStream.getEntitlementId(), tenantContext);
    }

    /** Builds the events stream for the base subscription of the given bundle. */
    public EventsStream buildForBaseSubscription(final UUID bundleId, final TenantContext tenantContext) throws EntitlementApiException {
        final SubscriptionBase baseSubscription;
        try {
            final InternalTenantContext internalTenantContext = internalCallContextFactory.createInternalTenantContext(bundleId, ObjectType.BUNDLE, tenantContext);
            baseSubscription = subscriptionInternalApi.getBaseSubscription(bundleId, internalTenantContext);
        } catch (SubscriptionBaseApiException e) {
            throw new EntitlementApiException(e);
        }

        return buildForEntitlement(baseSubscription.getId(), tenantContext);
    }

    /** Builds the events stream for a single entitlement from a plain tenant context. */
    public EventsStream buildForEntitlement(final UUID entitlementId, final TenantContext tenantContext) throws EntitlementApiException {
        final InternalTenantContext internalTenantContext = internalCallContextFactory.createInternalTenantContext(entitlementId, ObjectType.SUBSCRIPTION, tenantContext);
        return buildForEntitlement(entitlementId, internalTenantContext);
    }

    /** Builds the events streams for every subscription of the account. */
    public AccountEventsStreams buildForAccount(final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        // Retrieve the subscriptions (map bundle id -> subscriptions)
        final Map<UUID, List<SubscriptionBase>> subscriptions;
        try {
            subscriptions = subscriptionInternalApi.getSubscriptionsForAccount(internalTenantContext);
            return buildForAccount(subscriptions, internalTenantContext);
        } catch (SubscriptionBaseApiException e) {
            throw new EntitlementApiException(e);
        }
    }

    // Special signature for ProxyBlockingStateDao to save a DAO call
    public AccountEventsStreams buildForAccount(final Map<UUID, List<SubscriptionBase>> subscriptions, final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        // Retrieve the account
        final ImmutableAccountData account;
        try {
            account = accountInternalApi.getImmutableAccountDataByRecordId(internalTenantContext.getAccountRecordId(), internalTenantContext);
        } catch (AccountApiException e) {
            throw new EntitlementApiException(e);
        }

        if (subscriptions.isEmpty()) {
            // Bail early
            return new DefaultAccountEventsStreams(account);
        }

        // Retrieve the bundles
        final List<SubscriptionBaseBundle> bundles = subscriptionInternalApi.getBundlesForAccount(account.getId(), internalTenantContext);
        // Map bundle id -> bundles
        final Map<UUID, SubscriptionBaseBundle> bundlesPerId = new HashMap<UUID, SubscriptionBaseBundle>();
        for (final SubscriptionBaseBundle bundle : bundles) {
            bundlesPerId.put(bundle.getId(), bundle);
        }

        // Retrieve the blocking states
        final List<BlockingState> blockingStatesForAccount = defaultBlockingStateDao.getBlockingAllForAccountRecordId(internalTenantContext);

        // Optimization: build lookup tables for blocking states states
        // (partitioned by type so each subscription below avoids re-scanning the full list)
        final Collection<BlockingState> accountBlockingStates = new LinkedList<BlockingState>();
        final Map<UUID, List<BlockingState>> blockingStatesPerSubscription = new HashMap<UUID, List<BlockingState>>();
        final Map<UUID, List<BlockingState>> blockingStatesPerBundle = new HashMap<UUID, List<BlockingState>>();
        for (final BlockingState blockingState : blockingStatesForAccount) {
            if (BlockingStateType.SUBSCRIPTION.equals(blockingState.getType())) {
                if (blockingStatesPerSubscription.get(blockingState.getBlockedId()) == null) {
                    blockingStatesPerSubscription.put(blockingState.getBlockedId(), new LinkedList<BlockingState>());
                }
                blockingStatesPerSubscription.get(blockingState.getBlockedId()).add(blockingState);
            } else if (BlockingStateType.SUBSCRIPTION_BUNDLE.equals(blockingState.getType())) {
                if (blockingStatesPerBundle.get(blockingState.getBlockedId()) == null) {
                    blockingStatesPerBundle.put(blockingState.getBlockedId(), new LinkedList<BlockingState>());
                }
                blockingStatesPerBundle.get(blockingState.getBlockedId()).add(blockingState);
            } else if (BlockingStateType.ACCOUNT.equals(blockingState.getType()) &&
                       account.getId().equals(blockingState.getBlockedId())) {
                accountBlockingStates.add(blockingState);
            }
        }

        // Build the EventsStream objects
        final Map<UUID, Integer> bcdCache = new HashMap<UUID, Integer>();
        final Map<UUID, Collection<EventsStream>> entitlementsPerBundle = new HashMap<UUID, Collection<EventsStream>>();
        for (final UUID bundleId : subscriptions.keySet()) {
            final SubscriptionBaseBundle bundle = bundlesPerId.get(bundleId);
            final List<SubscriptionBase> allSubscriptionsForBundle = subscriptions.get(bundleId);
            final SubscriptionBase baseSubscription = findBaseSubscription(allSubscriptionsForBundle);
            final List<BlockingState> bundleBlockingStates = MoreObjects.firstNonNull(blockingStatesPerBundle.get(bundleId), ImmutableList.<BlockingState>of());

            if (entitlementsPerBundle.get(bundleId) == null) {
                entitlementsPerBundle.put(bundleId, new LinkedList<EventsStream>());
            }

            for (final SubscriptionBase subscription : allSubscriptionsForBundle) {
                final List<BlockingState> subscriptionBlockingStatesOnDisk = MoreObjects.firstNonNull(blockingStatesPerSubscription.get(subscription.getId()), ImmutableList.<BlockingState>of());

                // We cannot always use blockingStatesForAccount here: we need subscriptionBlockingStates to contain the events not on disk when building an EventsStream
                // for an add-on - which means going through the magic of ProxyBlockingStateDao, which will recursively
                // create EventsStream objects. To avoid an infinite recursion, bypass ProxyBlockingStateDao when it's not
                // needed, i.e. if this EventStream is for a standalone or a base subscription
                final List<BlockingState> subscriptionBlockingStates;
                if (baseSubscription == null || subscription.getId().equals(baseSubscription.getId())) {
                    subscriptionBlockingStates = subscriptionBlockingStatesOnDisk;
                } else {
                    subscriptionBlockingStates = blockingStateDao.getBlockingHistory(subscriptionBlockingStatesOnDisk,
                                                                                     blockingStatesForAccount,
                                                                                     account,
                                                                                     bundle,
                                                                                     baseSubscription,
                                                                                     subscription,
                                                                                     allSubscriptionsForBundle,
                                                                                     internalTenantContext);

                }

                // Merge the BlockingStates
                // (LinkedHashSet de-duplicates while keeping insertion order before the final sort)
                final Collection<BlockingState> blockingStateSet = new LinkedHashSet<BlockingState>(accountBlockingStates);
                blockingStateSet.addAll(bundleBlockingStates);
                blockingStateSet.addAll(subscriptionBlockingStates);
                final List<BlockingState> blockingStates = ProxyBlockingStateDao.sortedCopy(blockingStateSet);

                final EventsStream eventStream = buildForEntitlement(account, bundle, baseSubscription, subscription, allSubscriptionsForBundle, blockingStates, bcdCache, internalTenantContext);
                entitlementsPerBundle.get(bundleId).add(eventStream);
            }
        }

        return new DefaultAccountEventsStreams(account, bundles, entitlementsPerBundle);
    }

    /** Builds the events stream for a single entitlement, loading all required state. */
    public EventsStream buildForEntitlement(final UUID entitlementId, final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        final SubscriptionBaseBundle bundle;
        final SubscriptionBase subscription;
        final List<SubscriptionBase> allSubscriptionsForBundle;
        final SubscriptionBase baseSubscription;
        try {
            subscription = subscriptionInternalApi.getSubscriptionFromId(entitlementId, internalTenantContext);
            bundle = subscriptionInternalApi.getBundleFromId(subscription.getBundleId(), internalTenantContext);
            allSubscriptionsForBundle = subscriptionInternalApi.getSubscriptionsForBundle(subscription.getBundleId(), null, internalTenantContext);
            baseSubscription = findBaseSubscription(allSubscriptionsForBundle);
        } catch (SubscriptionBaseApiException e) {
            throw new EntitlementApiException(e);
        }

        final ImmutableAccountData account;
        try {
            account = accountInternalApi.getImmutableAccountDataById(bundle.getAccountId(), internalTenantContext);
        } catch (AccountApiException e) {
            throw new EntitlementApiException(e);
        }

        // Retrieve the blocking states
        final List<BlockingState> blockingStatesForAccount = defaultBlockingStateDao.getBlockingAllForAccountRecordId(internalTenantContext);

        final Map<UUID, Integer> bcdCache = new HashMap<UUID, Integer>();
        return buildForEntitlement(blockingStatesForAccount, account, bundle, baseSubscription, subscription, allSubscriptionsForBundle, bcdCache, internalTenantContext);
    }

    // Special signature for OptimizedProxyBlockingStateDao to save some DAO calls
    // (note: the base subscription is passed as the subscription of interest)
    public EventsStream buildForEntitlement(final List<BlockingState> blockingStatesForAccount,
                                            final ImmutableAccountData account,
                                            final SubscriptionBaseBundle bundle,
                                            final SubscriptionBase baseSubscription,
                                            final List<SubscriptionBase> allSubscriptionsForBundle,
                                            final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        final Map<UUID, Integer> bcdCache = new HashMap<UUID, Integer>();
        return buildForEntitlement(blockingStatesForAccount, account, bundle, baseSubscription, baseSubscription, allSubscriptionsForBundle, bcdCache, internalTenantContext);
    }

    // Core single-subscription path: partitions the account's blocking states,
    // resolves the subscription's effective blocking history and delegates to
    // the final constructor-like overload below.
    private EventsStream buildForEntitlement(final List<BlockingState> blockingStatesForAccount,
                                             final ImmutableAccountData account,
                                             final SubscriptionBaseBundle bundle,
                                             @Nullable final SubscriptionBase baseSubscription,
                                             final SubscriptionBase subscription,
                                             final List<SubscriptionBase> allSubscriptionsForBundle,
                                             final Map<UUID, Integer> bcdCache,
                                             final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        // Optimization: build lookup tables for blocking states states
        final Collection<BlockingState> accountBlockingStates = new LinkedList<BlockingState>();
        final Map<UUID, List<BlockingState>> blockingStatesPerSubscription = new HashMap<UUID, List<BlockingState>>();
        final Map<UUID, List<BlockingState>> blockingStatesPerBundle = new HashMap<UUID, List<BlockingState>>();
        for (final BlockingState blockingState : blockingStatesForAccount) {
            if (BlockingStateType.SUBSCRIPTION.equals(blockingState.getType())) {
                if (blockingStatesPerSubscription.get(blockingState.getBlockedId()) == null) {
                    blockingStatesPerSubscription.put(blockingState.getBlockedId(), new LinkedList<BlockingState>());
                }
                blockingStatesPerSubscription.get(blockingState.getBlockedId()).add(blockingState);
            } else if (BlockingStateType.SUBSCRIPTION_BUNDLE.equals(blockingState.getType())) {
                if (blockingStatesPerBundle.get(blockingState.getBlockedId()) == null) {
                    blockingStatesPerBundle.put(blockingState.getBlockedId(), new LinkedList<BlockingState>());
                }
                blockingStatesPerBundle.get(blockingState.getBlockedId()).add(blockingState);
            } else if (BlockingStateType.ACCOUNT.equals(blockingState.getType()) &&
                       account.getId().equals(blockingState.getBlockedId())) {
                accountBlockingStates.add(blockingState);
            }
        }

        final List<BlockingState> bundleBlockingStates = MoreObjects.firstNonNull(blockingStatesPerBundle.get(subscription.getBundleId()), ImmutableList.<BlockingState>of());
        final List<BlockingState> subscriptionBlockingStatesOnDisk = MoreObjects.firstNonNull(blockingStatesPerSubscription.get(subscription.getId()), ImmutableList.<BlockingState>of());

        // We cannot always use blockingStatesForAccount here: we need subscriptionBlockingStates to contain the events not on disk when building an EventsStream
        // for an add-on - which means going through the magic of ProxyBlockingStateDao, which will recursively
        // create EventsStream objects. To avoid an infinite recursion, bypass ProxyBlockingStateDao when it's not
        // needed, i.e. if this EventStream is for a standalone or a base subscription
        final Collection<BlockingState> subscriptionBlockingStates;
        if (baseSubscription == null || subscription.getId().equals(baseSubscription.getId())) {
            // Note: we come here during the recursion from OptimizedProxyBlockingStateDao#getBlockingHistory
            // (called by blockingStateDao.getBlockingHistory below)
            subscriptionBlockingStates = subscriptionBlockingStatesOnDisk;
        } else {
            subscriptionBlockingStates = blockingStateDao.getBlockingHistory(ImmutableList.<BlockingState>copyOf(subscriptionBlockingStatesOnDisk),
                                                                             blockingStatesForAccount,
                                                                             account,
                                                                             bundle,
                                                                             baseSubscription,
                                                                             subscription,
                                                                             allSubscriptionsForBundle,
                                                                             internalTenantContext);
        }

        // Merge the BlockingStates
        final Collection<BlockingState> blockingStateSet = new LinkedHashSet<BlockingState>(accountBlockingStates);
        blockingStateSet.addAll(bundleBlockingStates);
        blockingStateSet.addAll(subscriptionBlockingStates);
        final List<BlockingState> blockingStates = ProxyBlockingStateDao.sortedCopy(blockingStateSet);

        return buildForEntitlement(account, bundle, baseSubscription, subscription, allSubscriptionsForBundle, blockingStates, bcdCache, internalTenantContext);
    }

    // Final assembly step: resolves the bill cycle day and instantiates the stream.
    private EventsStream buildForEntitlement(final ImmutableAccountData account,
                                             final SubscriptionBaseBundle bundle,
                                             @Nullable final SubscriptionBase baseSubscription,
                                             final SubscriptionBase subscription,
                                             final List<SubscriptionBase> allSubscriptionsForBundle,
                                             final List<BlockingState> blockingStates,
                                             final Map<UUID, Integer> bcdCache,
                                             final InternalTenantContext internalTenantContext) throws EntitlementApiException {
        try {
            int accountBCD = accountInternalApi.getBCD(account.getId(), internalTenantContext);
            // bcdCache is shared across calls (see buildForAccount) to avoid
            // recomputing the bill cycle day for already-seen subscriptions.
            int defaultAlignmentDay = subscriptionInternalApi.getDefaultBillCycleDayLocal(bcdCache, subscription, baseSubscription, createPlanPhaseSpecifier(subscription), account.getTimeZone(), accountBCD, clock.getUTCNow(), internalTenantContext);
            return new DefaultEventsStream(account,
                                           bundle,
                                           blockingStates,
                                           checker,
                                           baseSubscription,
                                           subscription,
                                           allSubscriptionsForBundle,
                                           defaultAlignmentDay,
                                           internalTenantContext,
                                           clock.getUTCNow());
        } catch (final SubscriptionBaseApiException e) {
            throw new EntitlementApiException(e);
        } catch (final AccountApiException e) {
            throw new EntitlementApiException(e);
        }
    }

    // Builds a PlanPhaseSpecifier from the subscription's pending transition
    // (PENDING state) or from its last active plan/phase otherwise.
    private PlanPhaseSpecifier createPlanPhaseSpecifier(final SubscriptionBase subscription) {
        final String lastActiveProductName;
        final BillingPeriod billingPeriod;
        final ProductCategory productCategory;
        final String priceListName;
        final PhaseType phaseType;
        if (subscription.getState() == EntitlementState.PENDING) {
            final SubscriptionBaseTransition transition = subscription.getPendingTransition();
            final Product pendingProduct = transition.getNextPlan().getProduct();
            lastActiveProductName = pendingProduct.getName();
            productCategory = pendingProduct.getCategory();
            final PlanPhase pendingPlanPhase = transition.getNextPhase();
            billingPeriod = pendingPlanPhase.getRecurring() != null ? pendingPlanPhase.getRecurring().getBillingPeriod() : BillingPeriod.NO_BILLING_PERIOD;
            priceListName = transition.getNextPriceList().getName();
            phaseType = transition.getNextPhase().getPhaseType();
        } else {
            final Product lastActiveProduct = subscription.getLastActiveProduct();
            lastActiveProductName = lastActiveProduct.getName();
            productCategory = lastActiveProduct.getCategory();
            final PlanPhase lastActivePlanPhase = subscription.getLastActivePhase();
            billingPeriod = lastActivePlanPhase.getRecurring() != null ? lastActivePlanPhase.getRecurring().getBillingPeriod() : BillingPeriod.NO_BILLING_PERIOD;
            priceListName = subscription.getLastActivePlan().getPriceListName();
            phaseType = subscription.getLastActivePhase().getPhaseType();
        }
        return new PlanPhaseSpecifier(lastActiveProductName,
                                      billingPeriod,
                                      priceListName,
                                      phaseType);
    }

    // Returns the subscription whose first transition targets a BASE product,
    // or null (e.g. for standalone subscriptions).
    private SubscriptionBase findBaseSubscription(final Iterable<SubscriptionBase> subscriptions) {
        return Iterables.<SubscriptionBase>tryFind(subscriptions,
                                                   new Predicate<SubscriptionBase>() {
                                                       @Override
                                                       public boolean apply(final SubscriptionBase input) {
                                                           final List<SubscriptionBaseTransition> allTransitions = input.getAllTransitions();
                                                           return !allTransitions.isEmpty() &&
                                                                  allTransitions.get(0).getNextPlan() != null &&
                                                                  allTransitions.get(0).getNextPlan().getProduct() != null &&
                                                                  ProductCategory.BASE.equals(allTransitions.get(0).getNextPlan().getProduct().getCategory());
                                                       }
                                                   }).orNull(); // null for standalone subscriptions
    }
}
| andrenpaes/killbill | entitlement/src/main/java/org/killbill/billing/entitlement/engine/core/EventsStreamBuilder.java | Java | apache-2.0 | 26,014 |
package org.gekko.test.service.shops;
/**
 * The car shop categories used by the test services.
 */
public enum CarShopType {
    sport, vacation, business;
}
| epanikas/gekko | src/test/java/org/gekko/test/service/shops/CarShopType.java | Java | apache-2.0 | 97 |
// Copyright 2004, 2005 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gaderian.test.services;
/**
* Basic service used for testing Gaderian.
*
* @author Howard Lewis Ship
*/
public interface SimpleService
{
    /**
     * Returns the sum of the two operands.
     *
     * @param a first operand
     * @param b second operand
     * @return a + b
     */
    public int add(int a, int b);
}
| Abnaxos/gaderian | core/src/test/java/gaderian/test/services/SimpleService.java | Java | apache-2.0 | 797 |
# Demo setup: creates an IAM role/instance profile for SSM-managed EC2
# instances, then looks up a Directory Service directory for later domain join.
Set-DefaultAWSRegion 'us-east-1'
#$VerbosePreference='Continue'
trap { break } #This stops execution on any exception
$ErrorActionPreference = 'Stop'
cd $PSScriptRoot
# Shared helpers (SSMWait etc.) for the SSM demo scripts.
. .\ssmcommon.ps1

#Define which accounts or AWS services can assume the role.
# Trust policy: only the EC2 service may assume this role.
$assumePolicy = @"
{
  "Version":"2012-10-17",
  "Statement":[
    {
      "Sid":"",
      "Effect":"Allow",
      "Principal":{"Service":"ec2.amazonaws.com"},
      "Action":"sts:AssumeRole"
    }
  ]
}
"@
# Define which API actions and resources the application can use
# after assuming the role
# Permissions policy: SSM association APIs, domain join, CloudWatch Logs/metrics.
$policy = @"
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowAccessToSSM",
            "Effect": "Allow",
            "Action": [
                "ssm:DescribeAssociations",
                "ssm:ListAssociations",
                "ssm:GetDocument",
                "ssm:UpdateAssociationStatus",
                "ds:CreateComputer",
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:DescribeLogGroups",
                "logs:DescribeLogStreams",
                "logs:PutLogEvents",
                "cloudwatch:PutMetricData"
            ],
            "Resource": [
                "*"
            ]
        }
    ]
}
"@

$index = '1'
if ($psISE -ne $null)
{
    #Role name is suffixed with the index corresponding to the ISE tab
    #Ensures to run multiple scripts concurrently without conflict.
    $index = $psISE.CurrentPowerShellTab.DisplayName.Split(' ')[1]
}

#Create the role, write the rolepolicy
$role = 'role' + $index
$null = New-IAMRole -RoleName $role -AssumeRolePolicyDocument $assumePolicy
Write-IAMRolePolicy -RoleName $role -PolicyDocument $policy -PolicyName 'ssm'

#Create instance profile and add the above created role
# (the profile shares the role's name so it can be passed to New-EC2Instance)
$null = New-IAMInstanceProfile -InstanceProfileName $role
Add-IAMRoleToInstanceProfile -InstanceProfileName $role -RoleName $role

# First Directory Service directory; used below to pick a subnet and for dj.ps1.
$d = Get-DSDirectory | select -First 1
function checkSubnet ([string]$cidr, [string]$ip)
{
    # Returns $true when $ip lies inside the IPv4 network given by $cidr
    # (e.g. checkSubnet '10.0.0.0/24' '10.0.0.5' -> $true).

    # Pack a dotted-quad string into a single 32-bit number.
    function ConvertTo-AddrBits ([string]$dotted)
    {
        $octets = [uint32[]]$dotted.Split('.')
        ($octets[0] -shl 24) + ($octets[1] -shl 16) + ($octets[2] -shl 8) + $octets[3]
    }

    $networkPart, [int]$prefixLength = $cidr.Split('/')
    [uint32] $networkBits = ConvertTo-AddrBits $networkPart
    [uint32] $addressBits = ConvertTo-AddrBits $ip

    # Build the netmask from the prefix length and compare network portions.
    $netmask = (-bnot [uint32]0) -shl (32 - $prefixLength)
    $networkBits -eq ($netmask -band $addressBits)
}
# Pick a subnet that contains the directory's DNS server (so the instance can
# reach the directory for domain join).
$subnet = Get-EC2Subnet | ? { checkSubnet $_.CidrBlock $d.DnsIpAddrs[0]} | select -First 1
# NOTE(review): the line below unconditionally overwrites the directory-aware
# selection above, making it (and checkSubnet) dead code — looks like a
# debugging leftover; confirm which subnet selection is intended.
$subnet = Get-EC2Subnet | select -First 1

#create keypair
$keyName = 'ssm-demo-key' + $index
$keypair = New-EC2KeyPair -KeyName $keyName
$dir = pwd
# Persist the private key so Get-EC2PasswordData can decrypt the password later.
$keyfile = "$dir\$keyName.pem"
"$($keypair.KeyMaterial)" | Out-File -encoding ascii -filepath $keyfile

#Create Security Group
#Security group and the instance should be in the same network (VPC)
$securityGroupName = 'ssm-demo-sg' + $index
$securityGroupId = New-EC2SecurityGroup $securityGroupName -Description "SSM Demo" -VpcId $subnet.VpcId

# Restrict inbound access to this machine's public IP only.
$bytes = (Invoke-WebRequest 'http://checkip.amazonaws.com/').Content
$SourceIPRange = @(([System.Text.Encoding]::Ascii.GetString($bytes).Trim() + "/32"))
Write-Verbose "$sourceIPRange retreived from checkip.amazonaws.com"

# RDP (3389), WinRM (5985-5986 and 80, see user data below) and ping.
$fireWallPermissions = @(
    @{IpProtocol = 'tcp'; FromPort = 3389; ToPort = 3389; IpRanges = $SourceIPRange},
    @{IpProtocol = 'tcp'; FromPort = 5985; ToPort = 5986; IpRanges = $SourceIPRange},
    @{IpProtocol = 'tcp'; FromPort = 80; ToPort = 80; IpRanges = $SourceIPRange},
    @{IpProtocol = 'icmp'; FromPort = -1; ToPort = -1; IpRanges = $SourceIPRange}
)

Grant-EC2SecurityGroupIngress -GroupId $securityGroupId `
    -IpPermissions $fireWallPermissions

#Get the latest R2 base image
$image = Get-EC2ImageByName WINDOWS_2012R2_BASE

#User Data to enable PowerShell remoting on port 80
#User data must be passed in as 64bit encoding.
$userdata = @"
<powershell>
Enable-NetFirewallRule FPS-ICMP4-ERQ-In
Set-NetFirewallRule -Name WINRM-HTTP-In-TCP-PUBLIC -RemoteAddress Any
New-NetFirewallRule -Name "WinRM80" -DisplayName "WinRM80" -Protocol TCP -LocalPort 80
Set-Item WSMan:\localhost\Service\EnableCompatibilityHttpListener -Value true
</powershell>
"@
$utf8 = [System.Text.Encoding]::UTF8.GetBytes($userdata)
$userdataBase64Encoded = [System.Convert]::ToBase64String($utf8)

#Launch EC2 Instance with the role, firewall group created
# and on the right subnet
$instance = (New-EC2Instance -ImageId $image.ImageId `
                -InstanceProfile_Id $role `
                -AssociatePublicIp $true `
                -SecurityGroupId $securityGroupId `
                -SubnetId $subnet.SubnetId `
                -KeyName $keyName `
                -UserData $userdataBase64Encoded `
                -InstanceType 'c3.large').Instances[0]

#Wait to retrieve password
# (Windows password generation can take several minutes after launch)
$cmd = {
    $password = Get-EC2PasswordData -InstanceId $instance.InstanceId `
                    -PemFile $keyfile -Decrypt
    $password -ne $null
}
SSMWait $cmd 'Password Generation' 600

$password = Get-EC2PasswordData -InstanceId $instance.InstanceId `
                -PemFile $keyfile -Decrypt
$securepassword = ConvertTo-SecureString $Password -AsPlainText -Force
$creds = New-Object System.Management.Automation.PSCredential ("Administrator", $securepassword)

#update the instance to get the public IP Address
$instance = (Get-EC2Instance $instance.InstanceId).Instances[0]

#Wait for remote PS connection
# (port 80 listener is enabled by the user data script above)
$cmd = {
    icm $instance.PublicIpAddress {dir c:\} -Credential $creds -Port 80
}
SSMWait $cmd 'Remote Connection' 450

New-EC2Tag -ResourceId $instance.InstanceId -Tag @{Key='Name'; Value=$role}

#Cloud Watch
& .\cw.ps1 $instance $creds

#MSI Application to insall 7-zip
& .\7zip.ps1 $instance $creds
# Compresses $SourceFolder into the archive $ZipFileName using optimal
# compression. When $IncludeBaseDirectory is $true (default) the folder itself
# becomes the archive root; when $false only its contents are zipped.
function PSUtilZipFolder(
    $SourceFolder,
    $ZipFileName,
    $IncludeBaseDirectory = $true)
{
    # Remove any stale archive first: CreateFromDirectory fails if the
    # destination file already exists. SilentlyContinue covers the usual
    # "file not found" case on a clean run.
    Remove-Item $ZipFileName -ErrorAction SilentlyContinue
    Add-Type -Assembly System.IO.Compression.FileSystem
    [System.IO.Compression.ZipFile]::CreateFromDirectory($SourceFolder,
        $ZipFileName, [System.IO.Compression.CompressionLevel]::Optimal,
        $IncludeBaseDirectory)
}
# Zip the demo module and publish it to S3 so the instance can download it.
PSUtilZipFolder -SourceFolder "$dir\PSDemo" `
    -ZipFileName "$dir\PSDemo.zip" -IncludeBaseDirectory $false
# NOTE(review): bucket name is hard-coded and the object is made publicly
# readable -- confirm this is acceptable for the demo.
write-S3Object -BucketName 'sivabuckets3' -key 'public/PSDemo.zip' `
    -File .\PSDemo.zip -PublicReadOnly
del .\PSDemo.zip
& .\psmodule.ps1 $instance $creds
#Domain Join
& .\dj.ps1 $instance $creds
#Cleanup
#Terminate the instance
$null = Stop-EC2Instance -Instance $instance.InstanceId -Force -Terminate
#Remove Association and Document Cleanup
$association = Get-SSMAssociationList -AssociationFilterList `
    @{Key='InstanceId'; Value=$instance.instanceid}
if ($association)
{
    Remove-SSMAssociation -InstanceId $association.InstanceId `
        -Name $association.Name -Force
    Remove-SSMDocument -Name $association.Name -Force
}
#Remove the instance role and IAM Role
Remove-IAMRoleFromInstanceProfile -InstanceProfileName $role `
    -RoleName $role -Force
Remove-IAMInstanceProfile $role -Force
Remove-IAMRolePolicy $role ssm -Force
Remove-IAMRole $role -Force
#delete keypair
del $keyfile -ea 0
Remove-EC2KeyPair -KeyName $keyName -Force
#To deal with timing, SSMWait is used.
# Security-group deletion can race with instance teardown (ENIs still
# attached), so retry until the delete succeeds.
SSMWait {(Remove-EC2SecurityGroup $securityGroupId -Force) -eq $null} `
    'Delete Security Group' 150
| padisetty/Samples | AWS/ssm/Old/setup-config.ps1 | PowerShell | apache-2.0 | 7,197 |
# menu
Simple syntactic sugar for building terminal-application menus.
#### For example:
```cpp
#include "menu.hpp"
class App: public Menu
{
public:
App() : Menu(START, "Application menu"){};
private:
virtual void initialize() override;
enum Condition: int
{
START,
ADD,
DEL,
EDIT,
EXIT,
};
void start();
void add();
void del();
void edit();
void exit();
};
```
## main.cpp:
```cpp
int main()
{
App app;
app.run();
}
```
| vbloodv/menu | README.md | Markdown | apache-2.0 | 484 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://blog.linuxeye.com
#
# Notes: OneinStack for CentOS/RadHat 5+ Debian 6+ and Ubuntu 12+
#
# Project home page:
# https://oneinstack.com
# https://github.com/lj2007331/oneinstack
# Upgrade_Redis: interactively upgrades an installed Redis in place.
# Prompts for a target version, downloads and builds the tarball, swaps the
# binaries under $redis_install_dir/bin and restarts the service.
# Relies on globals set by the caller: ${asn_dir}, ${redis_install_dir},
# ${THREAD}, ${OS_BIT}, the color variables (${CMSG}/${CWARNING}/${CFAILURE}/${CEND})
# and the helper get_char.
# Fix: all path/version expansions are now quoted so the function survives
# directories containing spaces or glob characters.
Upgrade_Redis() {
  pushd "${asn_dir}/src" > /dev/null
  [ ! -d "$redis_install_dir" ] && echo "${CWARNING}Redis is not installed on your system! ${CEND}" && exit 1
  OLD_Redis_version=$("$redis_install_dir/bin/redis-cli" --version | awk '{print $2}')
  echo "Current Redis Version: ${CMSG}$OLD_Redis_version${CEND}"
  while :; do echo
    read -p "Please input upgrade Redis Version(example: 3.0.5): " NEW_Redis_version
    if [ "$NEW_Redis_version" != "$OLD_Redis_version" ]; then
      # Download only when the tarball is not already cached locally.
      [ ! -e "redis-$NEW_Redis_version.tar.gz" ] && wget --no-check-certificate -c "http://download.redis.io/releases/redis-$NEW_Redis_version.tar.gz" > /dev/null 2>&1
      if [ -e "redis-$NEW_Redis_version.tar.gz" ]; then
        echo "Download [${CMSG}redis-$NEW_Redis_version.tar.gz${CEND}] successfully! "
        break
      else
        echo "${CWARNING}Redis version does not exist! ${CEND}"
      fi
    else
      echo "${CWARNING}input error! Upgrade Redis version is the same as the old version${CEND}"
    fi
  done
  if [ -e "redis-$NEW_Redis_version.tar.gz" ]; then
    echo "[${CMSG}redis-$NEW_Redis_version.tar.gz${CEND}] found"
    echo "Press Ctrl+c to cancel or Press any key to continue..."
    char=$(get_char)
    tar xzf "redis-$NEW_Redis_version.tar.gz"
    pushd "redis-$NEW_Redis_version" > /dev/null
    make clean
    # 32-bit builds need an explicit -march to compile cleanly.
    if [ "$OS_BIT" == '32' ]; then
      sed -i '1i\CFLAGS= -march=i686' src/Makefile
      sed -i 's@^OPT=.*@OPT=-O2 -march=i686@' src/.make-settings
    fi
    make -j ${THREAD}
    if [ -f "src/redis-server" ]; then
      echo "Restarting Redis..."
      service redis-server stop
      /bin/cp src/{redis-benchmark,redis-check-aof,redis-check-rdb,redis-cli,redis-sentinel,redis-server} "$redis_install_dir/bin/"
      service redis-server start
      popd > /dev/null
      echo "You have ${CMSG}successfully${CEND} upgrade from ${CWARNING}$OLD_Redis_version${CEND} to ${CWARNING}$NEW_Redis_version${CEND}"
      rm -rf "redis-$NEW_Redis_version"
    else
      echo "${CFAILURE}Upgrade Redis failed! ${CEND}"
    fi
  fi
  popd > /dev/null
}
| asntechsolution/lamp-lemp | include/upgrade_redis.sh | Shell | apache-2.0 | 2,307 |
package com.github.fellowship_of_the_bus
package draconia
package game
// import lib.util.rand
// import lib.math.floor
/** Mutable stat block for a character or item.
  * The three resistance fields are meaningful for equipment only, as the
  * original comments below indicate.
  */
case class Attributes (
  //base stats
  var strength: Int,
  var intelligence: Int,
  var speed: Int,
  var health: Int,
  var physicalDefense: Int,
  var magicDefense: Int,

  //Equipment only Values
  var fireResistance: Int,
  var iceResistance: Int,
  var lightningResistance: Int)
// In case we want to change growth rates
// NOTE(review): `physicalDefenese` is misspelled (Attributes uses
// `physicalDefense`); renaming the field would break existing callers, so the
// typo is only flagged here.
case class Growth (var strength: Int, var intelligence: Int, var speed: Int,
  var health: Int, var physicalDefenese: Int, var magicDefense: Int)
| Fellowship-of-the-Bus/Draconia | src/main/scala/game/Attributes.scala | Scala | apache-2.0 | 610 |
package uk.ac.ncl.cs.csc8101.weblogcoursework;
import java.io.File;
import java.io.PrintWriter;
/**
 * Ad-hoc driver that exercises the three coursework implementations in turn.
 * The commented-out lines are the corresponding read-back queries; uncomment
 * one to verify what the preceding write produced.
 */
public class TestImplementation {

    public static void main(String[] args) {
        try {
            // 1) Activities of a client between two timestamps.
            ActivitiesBetweenTimes activities = new ActivitiesBetweenTimes();
            String id = "182";
            String startTime = "[30/Apr/1998:22:17:59 +0000]";
            String endTime = "[30/Apr/1998:23:20:00 +0000]";
            activities.writeTodB();
            // activities.activityBetweenTimes(id, startTime, endTime);

            // 2) Total number of accesses within an hour range.
            String startHour = "[05/Apr/1998:00]";
            String endHour = "[30/May/1998:22]";
            TotalNumberOfAccesses total = new TotalNumberOfAccesses();
            total.writeToDB();
            // total.totalAccessRead(startHour, endHour);

            // 3) User-session handling.
            UserSession session = new UserSession();
            session.writeSession();
            // session.queryUserSessions("9618");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
| b0354345/project | src/main/java/uk/ac/ncl/cs/csc8101/weblogcoursework/TestImplementation.java | Java | apache-2.0 | 1,154 |
import { Component, OnInit } from '@angular/core';
import { PersonService } from '../../service/person.service';
@Component({
  selector: 'app-draw',
  templateUrl: './draw.component.html',
  styleUrls: ['./draw.component.scss'],
  providers: [
    PersonService
  ]
})
export class DrawComponent implements OnInit {
  /** Withdrawal-record payload bound by the template. */
  public mrecord;
  public itemsPerPage: number = 12;
  public totalRecords: number = 10;
  public currentPage: number = 1;
  public offset: number = 0;
  public end: number = 0;

  constructor(
    public getPerson: PersonService
  ) { }

  ngOnInit() {
    // Load the first page as soon as the component is ready.
    this.getCommission(this.currentPage, this.itemsPerPage);
  }

  /** Fetch one page of income details (type 2 = withdrawals) and store it. */
  getCommission(page, pagesize) {
    const request$ = this.getPerson.myIncomeDetail(page, pagesize, 2);
    request$.subscribe((data) => {
      this.mrecord = data.d;
      this.totalRecords = data.d.total;
    });
  }

  /** Pager event handler: the pager is zero-based, the API is one-based. */
  pageChanged(event) {
    const apiPage = event.page + 1;
    this.getCommission(apiPage, this.itemsPerPage);
  }
}
| liaohongdong/CLive | src/app/person/account/draw/draw.component.ts | TypeScript | apache-2.0 | 916 |
namespace ParTech.Modules.UrlRewriter.Events
{
using System;
using Sitecore.Events;
    /// <summary>
    /// EventArgs for ClearCacheEvent.
    /// </summary>
    public class ClearCacheEventArgs : EventArgs, IPassNativeEventArgs
    {
        /// <summary>
        /// Initializes a new instance of the <see cref="ClearCacheEventArgs"/> class.
        /// </summary>
        /// <param name="e">The event.</param>
        /// <remarks>
        /// The event parameter is currently ignored; the constructor exists so
        /// Sitecore's event framework can build these args from a native
        /// <see cref="ClearCacheEvent"/>. NOTE(review): confirm no payload is
        /// ever needed from the event.
        /// </remarks>
        public ClearCacheEventArgs(ClearCacheEvent e)
        {
        }
    }
} | ParTech/Url-Rewriter | Source/ParTech.Modules.UrlRewriter/Events/ClearCacheEventArgs.cs | C# | apache-2.0 | 508 |
/*
* Copyright 2012-2015 JetBrains s.r.o
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrains.jetpad.mapper;
import jetbrains.jetpad.model.collections.CollectionAdapter;
import jetbrains.jetpad.model.collections.CollectionItemEvent;
import jetbrains.jetpad.model.collections.list.ObservableList;
import jetbrains.jetpad.base.Registration;
import java.util.List;
/**
 * Role synchronizer that keeps a target list in sync with an observable source
 * collection: every source element gets a mapper, and that mapper's target is
 * stored at the corresponding index of the target list.
 */
class ObservableCollectionRoleSynchronizer<
    SourceT,
    TargetT> extends BaseCollectionRoleSynchronizer<SourceT, TargetT> {

  // Source elements being observed; structural changes are mirrored into myTarget.
  private ObservableList<SourceT> mySource;
  // Receives one mapper target per source element, kept index-aligned.
  private List<? super TargetT> myTarget;
  // Listener registration; released in onDetach().
  private Registration myCollectionRegistration;

  ObservableCollectionRoleSynchronizer(
      Mapper<?, ?> mapper,
      ObservableList<SourceT> source,
      List<? super TargetT> target,
      MapperFactory<SourceT, TargetT> factory) {
    super(mapper);

    mySource = source;
    myTarget = target;

    addMapperFactory(factory);
  }

  protected void onAttach() {
    super.onAttach();

    // This synchronizer owns the contents of the target list; a non-empty
    // target would break the index alignment maintained below.
    if (!myTarget.isEmpty()) {
      throw new IllegalArgumentException("Target Collection Should Be Empty");
    }

    // Create mappers for the current source contents and populate the target.
    new MapperUpdater().update(mySource);
    List<Mapper<? extends SourceT, ? extends TargetT>> modifiableMappers = getModifiableMappers();
    for (Mapper<? extends SourceT, ? extends TargetT> m : modifiableMappers) {
      myTarget.add(m.getTarget());
    }

    // Mirror subsequent structural changes incrementally, inserting/removing
    // the mapper and its target at the same index as the source element.
    myCollectionRegistration = mySource.addListener(new CollectionAdapter<SourceT>() {
      @Override
      public void onItemAdded(CollectionItemEvent<? extends SourceT> event) {
        Mapper<? extends SourceT, ? extends TargetT> mapper = createMapper(event.getItem());
        getModifiableMappers().add(event.getIndex(), mapper);
        myTarget.add(event.getIndex(), mapper.getTarget());
        processMapper(mapper);
      }

      @Override
      public void onItemRemoved(CollectionItemEvent<? extends SourceT> event) {
        getModifiableMappers().remove(event.getIndex());
        myTarget.remove(event.getIndex());
      }
    });
  }

  protected void onDetach() {
    super.onDetach();
    // Stop observing and drop all generated targets.
    myCollectionRegistration.remove();
    myTarget.clear();
  }
} | anqidong/jetpad-mapper | mapper/src/main/java/jetbrains/jetpad/mapper/ObservableCollectionRoleSynchronizer.java | Java | apache-2.0 | 2,653 |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.WebSockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
namespace EchoApp
{
    /// <summary>
    /// Minimal WebSocket echo application: upgrades incoming WebSocket
    /// requests and echoes every frame back until the connection closes.
    /// </summary>
    public class Startup
    {
        // This method gets called by the runtime. Use this method to add services to the container.
        // For more information on how to configure your application, visit http://go.microsoft.com/fwlink/?LinkID=398940
        public void ConfigureServices(IServiceCollection services)
        {
            // Console logging is the only service this demo needs.
            services.AddLogging(builder => builder.AddConsole());
        }

        // This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
        public void Configure(IApplicationBuilder app, IWebHostEnvironment env, ILoggerFactory loggerFactory)
        {
            if (env.IsDevelopment())
            {
                app.UseDeveloperExceptionPage();
            }

            app.UseWebSockets();
            app.Use(async (context, next) =>
            {
                // WebSocket upgrade requests are handled by Echo; every other
                // request falls through to the static file server below.
                if (context.WebSockets.IsWebSocketRequest)
                {
                    var webSocket = await context.WebSockets.AcceptWebSocketAsync();
                    await Echo(context, webSocket, loggerFactory.CreateLogger("Echo"));
                }
                else
                {
                    await next(context);
                }
            });

            app.UseFileServer();
        }

        /// <summary>
        /// Echoes frames back to the client until a close frame arrives.
        /// A text frame "ServerClose" triggers a server-initiated close;
        /// "ServerAbort" hard-aborts the underlying HTTP connection.
        /// </summary>
        private async Task Echo(HttpContext context, WebSocket webSocket, ILogger logger)
        {
            var buffer = new byte[1024 * 4];
            var result = await webSocket.ReceiveAsync(buffer.AsMemory(), CancellationToken.None);
            LogFrame(logger, webSocket, result, buffer);
            while (result.MessageType != WebSocketMessageType.Close)
            {
                // If the client send "ServerClose", then they want a server-originated close to occur
                string content = "<<binary>>";
                if (result.MessageType == WebSocketMessageType.Text)
                {
                    content = Encoding.UTF8.GetString(buffer, 0, result.Count);
                    if (content.Equals("ServerClose"))
                    {
                        await webSocket.CloseAsync(WebSocketCloseStatus.NormalClosure, "Closing from Server", CancellationToken.None);
                        logger.LogDebug($"Sent Frame Close: {WebSocketCloseStatus.NormalClosure} Closing from Server");
                        return;
                    }
                    else if (content.Equals("ServerAbort"))
                    {
                        // NOTE(review): after Abort() execution falls through to
                        // SendAsync below on the aborted connection -- confirm
                        // this is the intended way to exercise the abort path.
                        context.Abort();
                    }
                }

                await webSocket.SendAsync(new ArraySegment<byte>(buffer, 0, result.Count), result.MessageType, result.EndOfMessage, CancellationToken.None);
                logger.LogDebug($"Sent Frame {result.MessageType}: Len={result.Count}, Fin={result.EndOfMessage}: {content}");

                result = await webSocket.ReceiveAsync(buffer.AsMemory(), CancellationToken.None);
                LogFrame(logger, webSocket, result, buffer);
            }
            // Mirror the client's close status back to complete the handshake.
            await webSocket.CloseAsync(webSocket.CloseStatus.Value, webSocket.CloseStatusDescription, CancellationToken.None);
        }

        /// <summary>
        /// Logs one received frame: close status for close frames, the decoded
        /// payload for text frames, a placeholder for binary frames.
        /// </summary>
        private void LogFrame(ILogger logger, WebSocket webSocket, ValueWebSocketReceiveResult frame, byte[] buffer)
        {
            var close = frame.MessageType == WebSocketMessageType.Close;
            string message;
            if (close)
            {
                message = $"Close: {webSocket.CloseStatus.Value} {webSocket.CloseStatusDescription}";
            }
            else
            {
                string content = "<<binary>>";
                if (frame.MessageType == WebSocketMessageType.Text)
                {
                    content = Encoding.UTF8.GetString(buffer, 0, frame.Count);
                }
                message = $"{frame.MessageType}: Len={frame.Count}, Fin={frame.EndOfMessage}: {content}";
            }
            logger.LogDebug("Received Frame " + message);
        }
    }
}
| aspnet/AspNetCore | src/Middleware/WebSockets/samples/EchoApp/Startup.cs | C# | apache-2.0 | 4,507 |
package com.mrprona.dota2assitant.player.fragment;
import android.content.Intent;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.Spinner;
import com.mrprona.dota2assitant.R;
import com.mrprona.dota2assitant.player.activity.PlayerByHeroStatsActivity;
import com.mrprona.dota2assitant.player.api.Unit;
/**
* User: ABadretdinov
* Date: 19.02.14
* Time: 14:06
*/
/**
 * Fragment that lets the user choose filters (date range, game mode, metric)
 * for per-hero player statistics, then launches
 * {@link PlayerByHeroStatsActivity} with the chosen filters as intent extras.
 * Each spinner position maps 1:1 onto the API value at the same index of the
 * corresponding array below; index 0 is the empty "no filter" choice for
 * DATE and GAME_MODE.
 */
public class PlayerByHeroStatsFilter extends Fragment {
    private static final String[] DATE = new String[]{
            "",
            "month",
            "week",
            "patch_6.84",
            "patch_6.84b",
            "patch_6.84c"
    };
    private static final String[] GAME_MODE = new String[]{
            "",
            "ability_draft",
            "all_pick",
            "all_random",
            "captains_draft",
            "captains_mode",
            "least_player",
            "pool1",
            "random_draft",
            "single_draft",
            "all_random_deathmatch",
            "1v1_solo_mid"
    };
   /* private static final String[] MATCH_TYPE=new String[]{
            "",
            "real",
            "unreal"
    };*/
    private static final String[] METRIC = new String[]{
            "played",
            "winning",
            "impact",
            "economy"
    };

    // Account whose stats are being filtered; set by newInstance().
    private Unit account;
    // Intent under construction; each spinner listener writes its extra here,
    // and the submit button fires it.
    private Intent intent;

    /**
     * Factory method.
     * NOTE(review): the account is stored in a plain field rather than the
     * fragment's arguments Bundle, so it is lost if the system recreates the
     * fragment -- confirm whether that matters for this screen.
     */
    public static PlayerByHeroStatsFilter newInstance(Unit account) {
        PlayerByHeroStatsFilter fragment = new PlayerByHeroStatsFilter();
        fragment.account = account;
        return fragment;
    }

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        return inflater.inflate(R.layout.player_by_hero_stats_filters, container, false);
    }

    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        // Pre-build the intent; the listeners below only add extras to it.
        intent = new Intent(getActivity(), PlayerByHeroStatsActivity.class);
        intent.putExtra("account", account);
        View root = getView();
        Spinner byDateSpinner = (Spinner) root.findViewById(R.id.date);
        Spinner byGameModeSpinner = (Spinner) root.findViewById(R.id.game_mode);
        //Spinner byMatchTypeSpinner= (Spinner)root.findViewById(R.id.match_type);
        Spinner byMetricSpinner = (Spinner) root.findViewById(R.id.metric);
        Button submit = (Button) root.findViewById(R.id.submit);
        submit.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                startActivity(intent);
            }
        });

        // Date filter: display strings come from resources, API values from DATE.
        ArrayAdapter<String> adapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, getResources().getStringArray(R.array.statsByHero_date));
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        byDateSpinner.setAdapter(adapter);
        byDateSpinner.setSelection(0);
        byDateSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
            @Override
            public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
                intent.putExtra("date", DATE[position]);
            }

            @Override
            public void onNothingSelected(AdapterView<?> parent) {

            }
        });

        // Game-mode filter.
        adapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, getResources().getStringArray(R.array.statsByHero_game_mode));
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        byGameModeSpinner.setAdapter(adapter);
        byGameModeSpinner.setSelection(0);
        byGameModeSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
            @Override
            public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
                intent.putExtra("game_mode", GAME_MODE[position]);
            }

            @Override
            public void onNothingSelected(AdapterView<?> parent) {

            }
        });

        /*adapter=new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item,getResources().getStringArray(R.array.statsByHero_match_type));
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        byMatchTypeSpinner.setAdapter(adapter);
        byMatchTypeSpinner.setSelection(0);
        byMatchTypeSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener()
        {
            @Override
            public void onItemSelected(AdapterView<?> parent, View view, int position, long id)
            {
                intent.putExtra("match_type",MATCH_TYPE[position]);
            }

            @Override
            public void onNothingSelected(AdapterView<?> parent)
            {

            }
        });*/

        // Metric filter (no empty choice; "played" is the default).
        adapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_spinner_item, getResources().getStringArray(R.array.statsByHero_metric));
        adapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
        byMetricSpinner.setAdapter(adapter);
        byMetricSpinner.setSelection(0);
        byMetricSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
            @Override
            public void onItemSelected(AdapterView<?> parent, View view, int position, long id) {
                intent.putExtra("metric", METRIC[position]);
            }

            @Override
            public void onNothingSelected(AdapterView<?> parent) {

            }
        });
    }
}
| mrprona92/SecretBrand | app/src/main/java/com/mrprona/dota2assitant/player/fragment/PlayerByHeroStatsFilter.java | Java | apache-2.0 | 5,771 |
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkPromoteType_h
#define itkPromoteType_h
// Simplification of boost::common_type
//
// PromoteType<TA, TB>::Type is the type of the expression `a + b`, i.e. the
// result of the usual C++ arithmetic promotion rules applied to TA and TB.
// Mechanism: each candidate promoted type gets a check() overload returning a
// reference to a char array whose length encodes an index; sizeof reads that
// index back at compile time and SizeToType maps it to the concrete type.
namespace itk {
namespace details {
  // Forward declaration: maps an overload index N back to the concrete type.
  template <int N, typename TA, typename TB> struct SizeToType;
  // Identity<N>::Type is a char array of length N, so sizeof(...) == N.
  template <int N> struct Identity { typedef char Type[N]; };

#define ASSOC(N, Typed)\
  template <typename TA, typename TB> struct SizeToType<N,TA,TB> { typedef Typed Type; };

  // Index-to-type table: 1 and 2 are the operand types themselves; the rest
  // are the standard arithmetic types a '+' expression can promote to.
  ASSOC(1, TA);
  ASSOC(2, TB);
  ASSOC(3, short);
  ASSOC(4, unsigned short);
  ASSOC(5, int);
  ASSOC(6, unsigned int);
  ASSOC(7, long);
  ASSOC(8, unsigned long);
  ASSOC(9, long long);
  ASSOC(10, unsigned long long);
  ASSOC(11, float);
  ASSOC(12, double);
  ASSOC(13, long double);
#undef ASSOC
} // details namespace

template <typename TA, typename TB> struct PromoteType
{
  static TA a;
  static TB b;

  // Aimed at supporting overloads
  template <typename T> static details::Identity<1>::Type& check(typename details::SizeToType<1, TA, TB>::Type, T);
  template <typename T> static details::Identity<2>::Type& check(typename details::SizeToType<2, TA, TB>::Type, T);

  // Common numeric types. The int second parameter makes these non-template
  // overloads preferred over the two templates above on an exact match.
  static details::Identity<3 >::Type& check(typename details::SizeToType<3, TA, TB>::Type, int);
  static details::Identity<4 >::Type& check(typename details::SizeToType<4, TA, TB>::Type, int);
  static details::Identity<5 >::Type& check(typename details::SizeToType<5, TA, TB>::Type, int);
  static details::Identity<6 >::Type& check(typename details::SizeToType<6, TA, TB>::Type, int);
  static details::Identity<7 >::Type& check(typename details::SizeToType<7, TA, TB>::Type, int);
  static details::Identity<8 >::Type& check(typename details::SizeToType<8, TA, TB>::Type, int);
  static details::Identity<9 >::Type& check(typename details::SizeToType<9, TA, TB>::Type, int);
  static details::Identity<10>::Type& check(typename details::SizeToType<10, TA, TB>::Type, int);
  static details::Identity<11>::Type& check(typename details::SizeToType<11, TA, TB>::Type, int);
  static details::Identity<12>::Type& check(typename details::SizeToType<12, TA, TB>::Type, int);
  static details::Identity<13>::Type& check(typename details::SizeToType<13, TA, TB>::Type, int);
public:
  // sizeof check(a+b, 0) selects the overload whose first parameter matches
  // the promoted type of a+b; SizeToType turns that index back into a type.
  typedef typename details::SizeToType<sizeof check(a+b, 0), TA, TB>::Type Type;
};
} // itk namespace
#if 0
#include <boost/mpl/assert.hpp>
#include <boost/type_traits/is_same.hpp>
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<int,int> ::Type, int>));
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<short,int> ::Type, int>));
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<double,int> ::Type, double>));
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<float,int> ::Type, float>));
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<long,int> ::Type, long>));
BOOST_MPL_ASSERT((boost::is_same<itk::PromoteType<long,long double>::Type, long double>));
#endif
#endif // itkPromoteType_h
| LucHermitte/ITK | Modules/Core/Common/include/itkPromoteType.h | C | apache-2.0 | 3,911 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2022 DBeaver Corp and others
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.kognitio;
import org.jkiss.dbeaver.ext.generic.GenericDataSourceProvider;
/**
 * Data source provider for the Kognitio analytical database.
 * Behaves exactly like the generic JDBC provider and advertises no optional
 * driver features.
 */
public class KognitioDataSourceProvider extends GenericDataSourceProvider {

    public KognitioDataSourceProvider() {
        // Nothing beyond the generic provider defaults.
    }

    /** Kognitio exposes no optional provider features. */
    @Override
    public long getFeatures() {
        return FEATURE_NONE;
    }
}
| dbeaver/dbeaver | plugins/org.jkiss.dbeaver.ext.kognitio/src/org/jkiss/dbeaver/ext/kognitio/KognitioDataSourceProvider.java | Java | apache-2.0 | 970 |
package ar_ubin.benotified.data.source.beacons;
import android.support.annotation.NonNull;
import java.util.List;
import ar_ubin.benotified.data.models.Beacon;
/**
 * Data-access abstraction for {@link Beacon} records. Implementations load,
 * look up, save, activate and delete beacons, reporting asynchronous results
 * through the callback interfaces below. Beacons are identified by their
 * (minor) value throughout this API.
 */
public interface BeaconDataSource
{
    /** Callback for loading the full beacon list. */
    interface LoadBeaconCallback
    {
        /** Invoked with the loaded beacons. */
        void onBeaconLoaded( List<Beacon> beacons );

        /** Invoked when the beacon list could not be loaded. */
        void onDataNotAvailable();
    }

    /** Callback for fetching a single beacon by minor value. */
    interface GetBeaconCallback
    {
        void onBeaconLoaded( Beacon beacon );

        void onDataNotAvailable();
    }

    /** Callback for persisting a beacon. */
    interface SaveBeaconCallback
    {
        void onBeaconSaved();

        void onFailed( Exception exception );
    }

    void getBeacons( @NonNull LoadBeaconCallback callback );

    void getBeacon( @NonNull int minor, @NonNull GetBeaconCallback callback );

    void saveBeacon( @NonNull Beacon beacon, @NonNull SaveBeaconCallback callback );

    void activateBeacon( @NonNull Beacon beacon );

    void activateBeacon( @NonNull int minor );

    void deleteAllBeacons();

    void deleteBeacon( @NonNull int minor );
}
| ar-ubin/BeNotified | app/src/main/java/ar_ubin/benotified/data/source/beacons/BeaconDataSource.java | Java | apache-2.0 | 991 |
# -*- coding: utf-8 -*-
import hashlib
import random
from rest_framework import serializers
from sita.users.models import User
from sita.subscriptions.models import Subscription
from sita.utils.refresh_token import create_token
from hashlib import md5
from datetime import datetime, timedelta
import pytz
class LoginSerializer(serializers.Serializer):
    """
    Validates login credentials (email + password) and, optionally, a push
    device registration (device_os + device_token, which must be sent as a
    pair or not at all).
    """
    email = serializers.EmailField(
        required=True
    )
    password = serializers.CharField(
        required=True
    )
    device_os= serializers.ChoiceField(
        required=False,
        choices=['ANDROID', 'IOS']
    )
    device_token= serializers.CharField(
        required=False,
        max_length=254
    )

    def validate(self, data):
        """
        Check that the email exists, the password matches, the device fields
        come as a pair, and the account is active.
        """
        try:
            user = User.objects.get(email__exact=data.get('email'))
        except User.DoesNotExist:
            # Same message as a bad password so registered emails aren't leaked.
            raise serializers.ValidationError({"email":"invalid credentials"})

        if not user.check_password(data.get('password')):
            raise serializers.ValidationError({"email":"invalid credentials"})

        # Device fields are only meaningful as a pair.
        if data.get("device_os") or data.get("device_token"):
            if not data.get("device_os") or not data.get("device_token"):
                raise serializers.ValidationError(
                    {"device_token":"Don`t send device OS or device token"})

        if not user.is_active:
            raise serializers.ValidationError(
                {"email":"The user is not actived"}
            )
        return data

    def get_user(self, data):
        """
        Return the User matching the validated email.
        """
        return User.objects.get(email__exact=data.get('email'))
class SignUpSerializer(serializers.Serializer):
    """
    Validates account sign-up data.

    Rules enforced by validate():
      * device_os / device_token must be sent together or not at all;
      * conekta_card additionally requires phone, name and an existing
        subscription_id;
      * the email must not already belong to a user;
      * time_zone must be a valid pytz time-zone name.
    """
    # NOTE(review): TYPE_OS appears unused -- device_os below declares its own
    # string choices. Confirm before removing.
    TYPE_OS = (
        ('1', 'IOS'),
        ('2', 'ANDROID')
    )
    email = serializers.EmailField(
        max_length=254,
        required=True
    )
    password = serializers.CharField(
        max_length=100,
        required=True
    )
    time_zone = serializers.CharField(
        max_length=100,
        required=True
    )
    name = serializers.CharField(
        required=False,
        max_length = 100
    )
    phone = serializers.CharField(
        required=False,
        max_length=10
    )
    device_os= serializers.ChoiceField(
        required=False,
        choices=['ANDROID', 'IOS']
    )
    device_token= serializers.CharField(
        required=False,
        max_length=254
    )
    conekta_card = serializers.CharField(
        max_length=254,
        required=False
    )
    subscription_id= serializers.IntegerField(
        required=False
    )

    def validate(self, data):
        """
        Cross-field validation; see the class docstring for the rules.
        """
        # Device fields are only meaningful as a pair.
        if data.get("device_os") or data.get("device_token"):
            if not data.get("device_os") or not data.get("device_token"):
                raise serializers.ValidationError(
                    {"device_token":"Don`t send device OS or device token"})
        # A card implies billing data and a subscription to charge against.
        if data.get("conekta_card"):
            if not data.get("phone") or not data.get("name") or not data.get("subscription_id"):
                raise serializers.ValidationError(
                    {"conekta_card":
                    "If send conektaCard you should send phone and name"})
            try:
                subscription = Subscription.objects.get(id=data.get('subscription_id'))
            except Subscription.DoesNotExist:
                raise serializers.ValidationError(
                    {"subscription_id":"That subscription don't exists"}
                )
        # The email must be unused.
        # NOTE(review): the error message ("The user is not actived") fires
        # when the email is already registered -- it looks copy-pasted from the
        # login flow; confirm the intended wording.
        try:
            user = User.objects.get(email__exact=data.get('email'))
            raise serializers.ValidationError(
                {"email":"The user is not actived"}
            )
        except User.DoesNotExist:
            pass
        # The time zone must be resolvable by pytz.
        try:
            datetime.now(pytz.timezone(data.get("time_zone")))
        except pytz.UnknownTimeZoneError:
            raise serializers.ValidationError(
                {"time_zone":"The time zone is not correct"}
            )
        return data
class LoginResponseSerializer(object):
    """
    Serializer used to return the proper token, when the user was successfully
    logged in.
    """
    # Plain object (not a DRF serializer): it only wraps token creation.

    def __init__(self):
        pass

    def get_token(self,obj):
        """
        Create and return an auth token for the given user via create_token().
        """
        return create_token(obj)
class RecoveryPasswordSerializer(serializers.Serializer):
    """
    Validates a password-recovery request for an existing, active user and
    generates a single-use recovery code stored on the user record.
    """
    email = serializers.EmailField(
        required=True
    )

    def validate(self, data):
        """
        Check that the email belongs to an existing, active user.
        """
        try:
            user = User.objects.get(email__exact=data.get('email'))
        except User.DoesNotExist:
            raise serializers.ValidationError("invalid credentials")

        if not user.is_active:
            raise serializers.ValidationError(
                {"email":"The user is not actived"}
            )
        return data

    def generate_recovery_token(self, data):
        """
        Generate and persist an unguessable recovery code for the user.
        Returns True after the code has been saved.
        """
        import os
        user = User.objects.get(email__exact=data.get('email'))
        email = user.email
        if isinstance(email, unicode):
            email = email.encode('utf-8')
        # SECURITY FIX: the salt was previously derived from random.random(),
        # whose Mersenne Twister output is predictable and must never feed a
        # password-reset token. os.urandom() provides CSPRNG bytes instead.
        salt = hashlib.sha1(os.urandom(16)).hexdigest()[:5]
        key = hashlib.sha1(salt + email).hexdigest()
        user.reset_pass_code = key
        user.save()
        return True
class ResetPasswordWithCodeSerializer(serializers.Serializer):
    """
    Resets a user's password given a valid recovery code.

    NOTE(review): `password_confim` (sic) is the field name clients actually
    send; fixing the typo would break the public API, so it is kept as-is.
    """
    password = serializers.CharField(
        required=True
    )
    password_confim = serializers.CharField(
        required=True
    )
    recovery_code = serializers.CharField(
        required=True
    )

    def validate(self, data):
        """
        Check that the recovery code exists and both passwords match.
        """
        try:
            user = User.objects.get(reset_pass_code=data.get('recovery_code'))
        except User.DoesNotExist:
            raise serializers.ValidationError(
                {"recovery_code":"Don't exits code"})

        if not data.get('password') == data.get('password_confim'):
            raise serializers.ValidationError(
                {"password_confim":
                "Password is not equals to Confirm Password"})
        return data

    def update_password(self, data):
        """
        Set the new password and clear the one-time recovery code.
        """
        user = User.objects.get(reset_pass_code=data.get('recovery_code'))
        user.reset_pass_code = None
        user.set_password(data.get('password'))
        user.save()
        return True
| Fabfm4/Sita-BackEnd | src/sita/authentication/serializers.py | Python | apache-2.0 | 6,676 |
package com.icesecret.ex;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* Created by charlie on 17/10/31.
*/
/**
 * In-memory order book holding at most {@link #LIMIT} limit orders, kept
 * sorted by price. Orders evicted from the in-memory window, completed, or
 * cancelled are handed to a {@link PersistOrderBook}.
 *
 * Thread-safety fix: previously only put() was synchronized, so cancel() and
 * update() could mutate and re-sort the shared list concurrently with it.
 * All mutating methods now synchronize on this instance.
 */
public class MemOrderBook extends PrintableOrderBook implements OrderBook {

    // Price-sorted resident orders (sorted ascending by price after each mutation).
    private List<LimitOrder> list = Collections.synchronizedList(new LinkedList<>());
    // Resident orders indexed by order id for O(1) amount updates.
    private Map<String, LimitOrder> map = new ConcurrentHashMap<>();
    private Comparator<LimitOrder> comparable = Comparator.comparing(LimitOrder::getPrice);

    /** Maximum number of orders kept in memory. */
    public static volatile int LIMIT = 5;

    PersistOrderBook persistOrderBook = new PersistOrderBook();

    /**
     * Adds an order, re-sorts by price, and spills the worst-priced order to
     * the persistent book when the in-memory window overflows.
     */
    @Override
    public synchronized void put(LimitOrder limitOrder) {
        add(limitOrder);
        Collections.sort(list, comparable);
        if (list.size() > LIMIT) {
            int index = list.size() - 1;
            LimitOrder over = list.remove(index);
            map.remove(over.getId());
            persistOrderBook.put(over);
        }
    }

    /**
     * Cancels the order with the given id and persists the cancelled order.
     * Synchronized (fix): re-sorting the shared list must not race with put().
     */
    @Override
    public synchronized void cancel(String orderId) {
        LimitOrder limitOrder = new LimitOrder();
        limitOrder.setId(orderId);
        limitOrder.setStatus(OrderStatus.Cancelled);
        remove(limitOrder);
        Collections.sort(list, comparable);
    }

    // Inserts into both the sorted list and the id index.
    private void add(LimitOrder limitOrder) {
        list.add(limitOrder);
        map.put(limitOrder.getId(), limitOrder);
    }

    // Removes from both views and spills the order to the persistent book.
    private void remove(LimitOrder limitOrder) {
        list.remove(limitOrder);
        map.remove(limitOrder.getId());
        persistOrderBook.put(limitOrder);
    }

    /**
     * Applies an amount delta to the identified order; a non-positive result
     * marks the order Completed and removes it.
     * Synchronized (fix): previously raced with put()/cancel().
     */
    @Override
    public synchronized void update(String orderId, Double change) {
        LimitOrder limitOrder = map.get(orderId);
        if (limitOrder != null) {
            limitOrder.setAmount(limitOrder.getAmount() + change);
            if (limitOrder.getAmount() <= 0) {
                limitOrder.setStatus(OrderStatus.Completed);
                // complete order
                remove(limitOrder);
            }
        }
    }

    /** Returns a point-in-time snapshot of the resident orders. */
    @Override
    public synchronized List<LimitOrder> getOrderList() {
        return Arrays.asList(list.toArray(new LimitOrder[0]));
    }

    @Override
    public OrderBook getPersistOrderBook() {
        return persistOrderBook;
    }

    /** Not tracked by this implementation. */
    @Override
    public TradePair getTradePair() {
        return null;
    }

    /** Not tracked by this implementation. */
    @Override
    public TradeSide getTradeSide() {
        return null;
    }
}
| Brucecarl/e-wallet | src/main/java/com/icesecret/ex/MemOrderBook.java | Java | apache-2.0 | 2,589 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.sumobot.plugins.jenkins
import com.typesafe.config.Config
import scala.util.Try
/**
 * Connection settings for a Jenkins server.
 *
 * @param url        base URL of the Jenkins instance
 * @param username   account used to authenticate
 * @param password   password for the account
 * @param buildToken optional token for triggering builds ("build.token" in config)
 */
case class JenkinsConfiguration(url: String,
                                username: String,
                                password: String,
                                buildToken: Option[String])

object JenkinsConfiguration {

  /**
   * Reads a configuration from the given Typesafe `Config`. The three
   * credential keys are required; "build.token" is optional and any failure
   * to read it (missing key, wrong type) yields `None`.
   */
  def load(config: Config): JenkinsConfiguration = {
    JenkinsConfiguration(
      url = config.getString("url"),
      username = config.getString("username"),
      password = config.getString("password"),
      buildToken = Try(config.getString("build.token")).toOption
    )
  }
}
| SumoLogic/sumobot | src/main/scala/com/sumologic/sumobot/plugins/jenkins/JenkinsConfiguration.scala | Scala | apache-2.0 | 1,470 |
package de.unvilgames.dodgingking.graph.effects;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.utils.Pool;
import de.unvilgames.dodgingking.graph.SizeEvaluator;
/**
 * Base class for poolable visual effects managed by an {@link EffectEngine}.
 *
 * Created by timjk on 04.08.2017.
 */
public abstract class Effect implements Pool.Poolable {

    /** True while the effect is active in an engine; cleared when freed. */
    protected boolean isAlive;
    /** Time accumulated from update() deltas since the last init(). */
    protected float timeAlive;

    public Effect() {
        isAlive = false;
        timeAlive = 0;
    }

    /**
     * Pool.Poolable contract: clear per-use state when the instance is freed
     * back to the pool. The original implementation was empty, which left a
     * pooled instance reporting isAlive() == true until the next init().
     * init() re-initializes this state on reuse, so this is backward
     * compatible.
     */
    @Override
    public void reset() {
        isAlive = false;
        timeAlive = 0;
    }

    /** Marks the effect alive, zeroes its clock and registers it with the engine. */
    public void init(EffectEngine parent) {
        isAlive = true;
        timeAlive = 0;
        parent.add(this);
    }

    /** Advances the effect's clock by the frame delta. */
    public void update(float delta) {
        timeAlive += delta;
    }

    public abstract void draw(SpriteBatch batch, SizeEvaluator sizeEvaluator);

    public boolean isAlive() {
        return isAlive;
    }

    public abstract void release();
}
| mit1234123/Dodgingking | core/src/de/unvilgames/dodgingking/graph/effects/Effect.java | Java | apache-2.0 | 860 |
---
type: post110
title: Indexing
categories: XAP110
parent: none
weight: 900
---
For read and take operations, XAP iterates non-null values that match template or SQLQuery criteria, returning matches from the Space. This process can be time-consuming, especially when there are many potential matches. To improve performance, it is possible to index one or more Space class properties. The Space maintains additional data for indexed properties, which shortens the time required to determine a match, thus improving performance.
<br>
{{%fpanel%}}
[Basic concept](./indexing.html){{<wbr>}}
Basic index types
[Indexing Nested properties](./indexing-nested-properties.html){{<wbr>}}
An index can be defined on a nested property to improve performance of nested queries.
[Indexing collections](./indexing-collections.html){{<wbr>}}
An index can be defined on a Collection property (java.util.Collection implementation).
[Compound index](./indexing-compound.html){{<wbr>}}
Compound indexes can be defined using multiple class properties.
[Geospatial index](./indexing-geospatial.html){{<wbr>}}
Indexes can be created for geometry data types.
[Unique index](./indexing-unique.html){{<wbr>}}
Unique constraints can be defined for a property or properties of a space class.
{{%/fpanel%}}
| croffler/documentation | sites/xap-docs/content/xap110/indexing-overview.markdown | Markdown | apache-2.0 | 1,303 |
package com.littlechoc.olddriver.obd.commands;
/**
 * Decorates an {@link ObdCommand} with rate limiting: {@link #canAdd(long)}
 * reports whether the configured interval has elapsed since the timestamp
 * recorded via {@link #setLastTime(long)}.
 *
 * @author Junhao Zhou 2017/4/22
 */
public class ObdCommandInterval extends ObdCommandProxy {

    private static final long DEFAULT_INTERVAL = 1000;

    /** Minimum gap between executions, in the same unit as canAdd's currentTime. */
    private final long interval;
    /** Timestamp of the last execution; starts at 0 so the first canAdd() passes. */
    private long lastTime;

    public ObdCommandInterval(ObdCommand other) {
        this(other, DEFAULT_INTERVAL);
    }

    public ObdCommandInterval(ObdCommand other, long interval) {
        super(other);
        this.lastTime = 0L;
        this.interval = interval;
    }

    /** Returns true when more than {@code interval} has passed since the last execution. */
    public boolean canAdd(long currentTime) {
        return currentTime - lastTime > interval;
    }

    public void setLastTime(long lastTime) {
        this.lastTime = lastTime;
    }
}
| junhaozhou/old-driver | obd/src/main/java/com/littlechoc/olddriver/obd/commands/ObdCommandInterval.java | Java | apache-2.0 | 653 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.asterix.external.library.java.base;
import org.apache.asterix.dataflow.data.nontagged.serde.ADateTimeSerializerDeserializer;
import org.apache.asterix.om.base.AMutableDateTime;
import org.apache.asterix.om.types.ATypeTag;
import org.apache.asterix.om.types.BuiltinType;
import org.apache.asterix.om.types.IAType;
import org.apache.hyracks.api.exceptions.HyracksDataException;
import java.io.DataOutput;
public final class JDateTime extends JObject {
public JDateTime(long chrononTime) {
super(new AMutableDateTime(chrononTime));
}
public void setValue(long chrononTime) {
((AMutableDateTime) value).setValue(chrononTime);
}
public long getValue() {
return ((AMutableDateTime) value).getChrononTime();
}
@Override
public void serialize(DataOutput dataOutput, boolean writeTypeTag) throws HyracksDataException {
serializeTypeTag(writeTypeTag, dataOutput, ATypeTag.DATETIME);
ADateTimeSerializerDeserializer.INSTANCE.serialize((AMutableDateTime) value, dataOutput);
}
@Override
public void reset() {
((AMutableDateTime) value).setValue(0);
}
@Override
public IAType getIAType() {
return BuiltinType.ADATETIME;
}
} | ecarm002/incubator-asterixdb | asterixdb/asterix-external-data/src/main/java/org/apache/asterix/external/library/java/base/JDateTime.java | Java | apache-2.0 | 2,062 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.column
import java.nio.charset.Charset
import io.netty.buffer.ByteBuf
import com.github.mauricio.async.db.general.ColumnData
/**
 * Dispatches decoding of a raw database column to the appropriate decoder.
 */
trait ColumnDecoderRegistry {

  /**
   * Decodes the raw bytes of one column value.
   *
   * @param kind    metadata describing the column being decoded
   * @param value   buffer positioned at the column's raw bytes
   * @param charset charset used to interpret textual columns
   * @return the decoded value (type depends on the column kind)
   */
  def decode(kind: ColumnData, value: ByteBuf, charset: Charset): Any
}
| dripower/postgresql-async | db-async-common/src/main/scala/com/github/mauricio/async/db/column/ColumnDecoderRegistry.scala | Scala | apache-2.0 | 906 |
# AUTOGENERATED FILE
# Balena base image for the Revolution Pi Connect (ARM v7), Debian Buster,
# "build" variant (includes build tooling).
FROM balenalib/revpi-connect-debian:buster-build
# Versions of Node.js and Yarn installed below.
ENV NODE_VERSION 17.6.0
ENV YARN_VERSION 1.22.4
# Import the Node.js release signing key (falling back across keyservers),
# download and checksum-verify the armv7l Node.js tarball into /usr/local,
# then download, GPG-verify and install Yarn under /opt/yarn with symlinks
# on PATH.
RUN for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --batch --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --batch --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --batch --keyserver keyserver.ubuntu.com --recv-keys "$key" ; \
done \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& echo "31786cf6387c85a34f1eb85be5838facaad40f50f61030557e42a4af4bb31294 node-v$NODE_VERSION-linux-armv7l.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-armv7l.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
# No real entrypoint is set for base images; the message points users at docs.
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
# Smoke-test the installed Node/Yarn stack using balena's shared test script.
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
&& echo "Running test-stack@node" \
&& chmod +x test-stack@node.sh \
&& bash test-stack@node.sh \
&& rm -rf test-stack@node.sh
# Record image metadata shown by the `balena-info` command.
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Buster \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v17.6.0, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
# Shim /bin/sh so the first shell invocation prints the image info banner,
# then restores the real shell.
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
// @link http://schemas.wbeme.com/json-schema/eme/solicits/event/solicit-updated/1-0-0.json#
import EmeAccountsAccountRefV1Mixin from '@wbeme/schemas/eme/accounts/mixin/account-ref/AccountRefV1Mixin';
import GdbotsNcrNodeUpdatedV1Mixin from '@gdbots/schemas/gdbots/ncr/mixin/node-updated/NodeUpdatedV1Mixin';
import GdbotsPbjxEventV1Mixin from '@gdbots/schemas/gdbots/pbjx/mixin/event/EventV1Mixin';
import GdbotsPbjxEventV1Trait from '@gdbots/schemas/gdbots/pbjx/mixin/event/EventV1Trait';
import Message from '@gdbots/pbj/Message';
import MessageResolver from '@gdbots/pbj/MessageResolver';
import Schema from '@gdbots/pbj/Schema';
/**
 * Generated pbj message class for the "solicit-updated" event
 * (curie: eme:solicits:event:solicit-updated, v1-0-0).
 */
export default class SolicitUpdatedV1 extends Message {
  /**
   * @private
   *
   * @returns {Schema}
   */
  static defineSchema() {
    return new Schema('pbj:eme:solicits:event:solicit-updated:1-0-0', SolicitUpdatedV1,
      [],
      [
        EmeAccountsAccountRefV1Mixin.create(),
        GdbotsPbjxEventV1Mixin.create(),
        GdbotsNcrNodeUpdatedV1Mixin.create(),
      ],
    );
  }
}

// Mix the pbjx event trait's methods into the class.
GdbotsPbjxEventV1Trait(SolicitUpdatedV1);
// Register the curie so MessageResolver can locate this class when decoding.
MessageResolver.register('eme:solicits:event:solicit-updated', SolicitUpdatedV1);
// Freeze the class and prototype so the generated message type stays immutable.
Object.freeze(SolicitUpdatedV1);
Object.freeze(SolicitUpdatedV1.prototype);
| wb-eme/schemas | build/js/src/eme/solicits/event/SolicitUpdatedV1.js | JavaScript | apache-2.0 | 1,234 |
# Show the location of each SNP.
# Restricts to the BRCA1 region on chr17 and keeps only single-nucleotide
# variants (1-bp reference and alternate alleles). Written in BigQuery
# legacy SQL ([_THE_TABLE_] placeholder, == comparison).
SELECT
  reference_name,
  start,
  reference_bases,
  alternate_bases
FROM
  [_THE_TABLE_]
WHERE
  reference_name = 'chr17'
  AND start BETWEEN 41196311
  AND 41277499
  AND LENGTH(alternate_bases) == 1
  AND LENGTH(reference_bases) == 1
# NOTE(review): GROUP BY lists `end`, which is not in the SELECT list —
# presumably accepted by legacy BigQuery SQL; verify before porting to
# standard SQL.
GROUP BY
  reference_name,
  start,
  end,
  reference_bases,
  alternate_bases
ORDER BY
  start
## 精英主义的灭亡
精英主义是一种精神疾病,是一种心理变态。得了这种病的人,有不同的轻重程度。程度轻的人,也许只是张口闭口的“名校”和“牛人”。而程度重的人,最后可能成为反人类的战争罪犯。希特勒就是一个严重的精英主义者,他认为自己是精英,“劣等民族”的人都应该死。
这些所谓的精英,在智力上,体力上,学识上,人格上,都没有什么值得称道的地方。他们被称为“精英”,往往是通过家族关系,或者舔人屁股,玩弄权术。还有些人,尽其所能包装自己,把自己跟名人或者富豪挂钩,写回忆录,请人给自己写传记,这样别人就以为他是天才,是杰出人物。而他们其实并没有什么特别,也许还不如一般人。
精英们最在行的事情,其实是拉关系,互相吹捧,唱高调,包装自己。他们在心理上是弱小的,他们必须依赖于名校或者大公司的牌子,依赖于校友的认可,依赖于那些身居高位的人的赞许和提拔。他们很多人不过是某些幕后黑手的木偶人而已。有句话说得好:宠为下,得之若惊,失之若惊。如果你需要身居高位的人的认可,那么你其实是一个卑贱的人。
精英主义者集结的地方,被中国人叫做“世界一流大学”:哈佛,耶鲁,[Cornell](http://www.yinwang.org/blog-cn/2014/01/04/authority)…… 进入这些大学的人,一般都是通过关系或者金钱,却吹捧自己是因为才能出众而进入那些学校。等你亲自进去一看,发现里面的教授很少有真知灼见,上课照本宣科,学生忙着抄笔记,学得一知半解,作业互相抄袭,考试焦头烂额,蒙混过关。
这些教授倒是对宣传工作相当在行,屁大点研究,总要让纽约时报之类的搞一下政治宣传,然后在院系主页上贴个告示,看哪我们教授的研究被纽约时报报道了!这些人的实际水平,比起很多州立大学里潜心做研究的不知名学者,差不止一个档次。很多中国人都不知道,纽约时报,华盛顿邮报,CNN 之类,其实都是婊子媒体,出钱或者有关系就给你发文拍片。纽约时报的老板就是个毒贩子,黑帮老大。
实际上,“世界一流大学”这个名词,你只能在中国听到。在美国没有人使用这种词汇,甚至像“著名的”(prestigious)这种词都很少用。如果你进入普通的州立大学,会发现没有人在乎哈佛,耶鲁,Cornell 这些“常春藤联盟”。如果你老是提这些名字,会招致人的反感。因为这些大学的人都知道精英学校是什么东西,根本不屑于提到他们。
精英大学不但以无能著称,它们比你现象的还要可怕许多。这些学校里面一般都存在一些“秘密组织”,比如 Cornell 的 [Quill and Dagger](https://en.wikipedia.org/wiki/Quill_and_Dagger)。这些组织就是精英聚集的场所。为什么要是秘密的呢?因为他们会进行一些见不得人的犯罪活动,导致成员之间互相抓住把柄,从而形成共生死的团伙。甚至有些名校的[整个学院](https://www.youtube.com/watch?v=cKy2c-itZEg&feature=youtu.be&t=1255),都被罪犯花重金包下来,成为培养他们接班人的摇篮。所以美国的名校,其实跟娼妓没什么两样,名气越是大的越是这样。
很多进入白宫的精英,就是从这种名校秘密组织出来的,比如臭名昭著的克林顿国家安全顾问 [Sandy Berger](https://en.wikipedia.org/wiki/Sandy_Berger),就是 Quill and Dagger 的成员。在 [9/11 恐怖袭击](http://www.yinwang.org/blog-cn/2016/12/01/september-eleven)发生之后,Sandy Berger 进入国家档案馆,偷走关于克林顿与 9/11 之间关系的资料原版,并且销毁。这种销毁证据的做法,说明克林顿跟 9/11 肯定有扯不清的关系。
从这个现象,你也许可以理解为什么很多精英大学容易出现学生自杀的事件。比如上次新闻报道说,一周之内有三个学生从 Cornell 校内同一座桥上跳下去[自杀](http://www.nytimes.com/2010/03/17/education/17cornell.html),结果后来派了警察在桥上日夜巡逻。三个学生几乎在同一时间想不通,在同一地点寻短见,这也未免太巧了点吧。如果你研究过历史就会知道,美国很多所谓的自杀案件其实都是谋杀,只是用自杀来掩盖真相。所以这些学生到底是自杀还是谋杀,谁都说不清楚。想要把孩子送去精英大学读书的人,真的应该考虑一下他们的安全了。
在精英大学上过研究生的人,大可不必觉得我是在嘲笑你们。精英主义者心目中的所谓“校友”,一般只包括本科阶段的同僚。如果你跟我一样在“二流学校”本科毕业,进入精英学校读研究生或者博士生,他们不会把你当成校友。相反,他们会歧视你,觉得你是去那里打工的,甚至嘲笑你好像什么都不懂,怎么进来的。这是因为本科是塑造人格和价值观的主要阶段,如果你的本科生活是在其它学校度过的,那么你并不具有这种“精英品质”,就算你之后进去读完博士也不会改变这个观念。这就是为什么有些中国人在国内本科都快毕业了,却退学去美国精英大学再读一个本科,因为这些人很想要成为精英,进入“主流社会”。
然而现在我们已经看清楚了,美国的主流社会和精英们的本质,我们知道了他们在干些什么样的事情。所以如果你不是精英大学官方意义上的校友,反倒没有了这层嫌疑需要洗清。
美国精英们的“宣传部”,叫做好莱坞。好莱坞通过形象包装,形成偶像崇拜,好莱坞电影就是给人们洗脑的工具。好莱坞明星们给人们灌输错误的标准:审美标准,道德标准。说实话,好莱坞这么多女影星走红地毯,就找不出几个好看的。可是由于他们给人洗脑,以至于很多天生丽质的女生从小耳濡目染,居然觉得好莱坞那些丑女明星比自己美,去模仿她们的化妆和衣着样式,甚至想去做整形手术,这样可以变得更像她们。这些美丽的女孩因为明星崇拜,失去了对自己的尊重和自信,真是可惜。
说到道德,你可能已经听说了,好莱坞的明星们几乎每一个都吸毒,很多还进行更可怕的犯罪活动。有一种极其变态的犯罪活动,叫做恋童癖([pedophilia](https://en.wikipedia.org/wiki/Pedophilia)),或者直接一点,叫做性虐儿童(child sex abuse)。我们都听说 [Michael Jackson](https://youtu.be/iZ0B4jjNdjM?t=2963) 被指控强奸男童的事,后来又莫名其妙死了,这表现出团伙成员之间的内斗现象。有趣的是,好莱坞有很多的明星最后都说是自杀或者吸毒过量死亡。他们到底是自杀还是谋杀,也很值得怀疑。
恋童活动在好莱坞非常的[普遍](https://www.youtube.com/watch?v=roW238dfUUk),它还经常跟邪教仪式(satanic ritual)结合在一起,这些人会在仪式上当场宰杀儿童用来“祭祀”。这种丧尽天良的可怕罪行,在很多其他国家是要判死刑的,美国政府却几乎完全不管,因为白宫的官员们在干同样的事情。这不是危言耸听,如果你仔细研究,就会发现这就是全世界的精英团伙里面正在进行的:[美国白宫](https://www.youtube.com/watch?v=ekejhDu-biQ),[英国王室](https://www.youtube.com/watch?v=NUfJXdMIRGw),[好莱坞](https://www.youtube.com/watch?v=roW238dfUUk),[天主教会](https://en.wikipedia.org/wiki/Catholic_Church_sexual_abuse_cases),世界一流大学……
美国这几年每年有超过 40 万儿童失踪,每年车祸死亡的人数才 3 万多。失踪儿童数量比车祸死亡人数大十倍以上,这是不正常的。这些失踪的儿童到哪里去了?另外,美国有些儿童领养中心和孤儿院,被查出在从事贩卖儿童性奴(child sex trafficking)的生意。这些人还在落后和受灾国家办[孤儿院](http://www.usapoliticstoday.com/hillary-clinton-kids-wikileaks-nuke-hillary-see-potential-clinton-foundation-sex-trafficking-ring),说是人道主义援助,结果被当地警察发现他们带走的小孩都是有父母的…… 你不觉得毛骨悚然吗?
反正看到这一切的时候,我的世界观都被颠覆了。我真希望这只是一场还没醒来的噩梦,可是无数的证据和证人都说明这就是现实!
精英们一直以来都依靠媒体来掩盖自己罪恶的真相,给人洗脑,冠冕堂皇的让人崇拜。而这次的美国总统大选,导致了这些主流媒体的轰然倒塌:CNN,华盛顿邮报,纽约时报,时代周刊,BBC,…… 我们现在知道,这些媒体都是被庞大的恶势力网络控制的。
在互联网发达之后,精英们也利用网络媒体来对公众进行洗脑。他们的帮凶包括 [Google](https://twitter.com/wikileaks/status/809305627319291906),Facebook,Twitter,…… 这些社交媒体不但在政治上帮助这些精英搞宣传,而且帮助他们屏蔽对他们不利的真相,把这些真相都叫做“[假新闻](https://www.facebook.com/zuck/posts/10103253901916271)”。而事实是,CNN 之类的主流媒体才是假新闻。如果你仔细研究一下,会发现 Facebook 和华盛顿邮报的幕后支持者,其实是作恶多端的 [CIA](https://www.youtube.com/watch?v=oFH3uLuNL5Y)。
真正独立而自由的“另类媒体”,比如 [InfoWars](http://www.infowars.com/),[Breitbart](http://www.wnd.com/),[Prison Planet](http://www.prisonplanet.com/),再加上异常强大的 [WikiLeaks](https://wikileaks.org/),通过多方面的证据,揭示了精英们的真相。是这些敢于说真话的人,用他们的[生命](https://en.wikipedia.org/wiki/Andrew_Breitbart#Death)和[自由](http://www.yinwang.org/blog-cn/2016/11/22/assange-donation),换来了世界局势的转机,同时导致了精英主义走向灭亡。现在听说某人是“精英”,我都得先考虑一下他跟这些龌龊的事情有没有关系,上网搜索调查一下。
未来的世界属于每一个平凡的人,只是他们还在熟睡,旁边的吸血鬼们正在黑暗中选择他们的猎物…… 当真相的阳光照进来,当人们醒过来的时候,就是精英统治的时代结束的时候。如果你现在还以为有任何人高人一等,你的心里还存在某种偶像,你还以为世界上有[天才](http://www.yinwang.org/blog-cn/2015/10/18/turing)存在,你还很在乎好莱坞明星,或者 [Zuckerberg](https://www.youtube.com/watch?v=oFH3uLuNL5Y)之类小丑说的话或者做的事,你还在梦想有一天把孩子送到哈佛念书,进入“上流社会”,请仔细再看看这篇文章和相关的视频,你会意识到这些想法有多么的愚蠢。
你完全没有意识到自己的价值,你没有意识到身边的普通人的价值,你没有发现幸福就在身边。你只是生活在别人为你设计的梦里,追求那些他们让你追求的东西,最终却发现你出卖了灵魂。醒来的过程是痛苦的,但醒来后的未来是美好的。被精英们用欺骗手段收走的力量,就要回到人们的手里!
(写这样一篇文章挺费神的,说真话是需要一些胆量的,所以如果你觉得开了眼界的话,请考虑付款 $3。谢谢!) | Sum41forever/study-notes | Book/Blog Article/观察/王垠 - 精英主义的灭亡.md | Markdown | apache-2.0 | 11,686 |
// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "http/action/snapshot_action.h"
#include <string>
#include <sstream>
#include "boost/lexical_cast.hpp"
#include "agent/cgroups_mgr.h"
#include "http/http_channel.h"
#include "http/http_headers.h"
#include "http/http_request.h"
#include "http/http_response.h"
#include "http/http_status.h"
namespace palo {
// Names of the URL query parameters this endpoint expects.
const std::string TABLET_ID = "tablet_id";
const std::string SCHEMA_HASH = "schema_hash";
// Constructs the action; owns a heap-allocated CommandExecutor for its lifetime
// (released in the destructor).
SnapshotAction::SnapshotAction(ExecEnv* exec_env)
        : _exec_env(exec_env),
          _command_executor(new CommandExecutor()) {}
void SnapshotAction::handle(HttpRequest *req, HttpChannel *channel) {
LOG(INFO) << "accept one request " << req->debug_string();
// add tid to cgroup in order to limit read bandwidth
CgroupsMgr::apply_system_cgroup();
// Get tablet id
const std::string& tablet_id_str = req->param(TABLET_ID);
if (tablet_id_str.empty()) {
std::string error_msg = std::string(
"parameter " + TABLET_ID + " not specified in url.");
HttpResponse response(HttpStatus::BAD_REQUEST, &error_msg);
channel->send_response(response);
return;
}
// Get schema hash
const std::string& schema_hash_str = req->param(SCHEMA_HASH);
if (schema_hash_str.empty()) {
std::string error_msg = std::string(
"parameter " + SCHEMA_HASH + " not specified in url.");
HttpResponse response(HttpStatus::BAD_REQUEST, &error_msg);
channel->send_response(response);
return;
}
// valid str format
int64_t tablet_id;
int32_t schema_hash;
try {
tablet_id = boost::lexical_cast<int64_t>(tablet_id_str);
schema_hash = boost::lexical_cast<int64_t>(schema_hash_str);
} catch (boost::bad_lexical_cast& e) {
std::string error_msg = std::string("param format is invalid: ") + std::string(e.what());
HttpResponse response(HttpStatus::BAD_REQUEST, &error_msg);
channel->send_response(response);
return;
}
VLOG_ROW << "get make snapshot tablet info: " << tablet_id << "-" << schema_hash;
std::string snapshot_path;
int64_t ret = make_snapshot(tablet_id, schema_hash, &snapshot_path);
if (ret != 0L) {
std::string error_msg = std::string("make snapshot falied");
HttpResponse response(HttpStatus::INTERNAL_SERVER_ERROR, &error_msg);
channel->send_response(response);
return;
} else {
std::stringstream result;
result << snapshot_path;
std::string result_str = result.str();
HttpResponse response(HttpStatus::OK, &result_str);
channel->send_response(response);
}
LOG(INFO) << "deal with snapshot request finished! tablet id: " << tablet_id;
}
// Delegates snapshot creation to the command executor.
// Returns 0 and fills *snapshot_path on success, -1 on failure.
int64_t SnapshotAction::make_snapshot(int64_t tablet_id, int32_t schema_hash,
        std::string* snapshot_path) {
    OLAPStatus res = _command_executor->make_snapshot(tablet_id, schema_hash, snapshot_path);
    if (res != OLAPStatus::OLAP_SUCCESS) {
        LOG(WARNING) << "make snapshot failed. status: " << res
                     << ", signature: " << tablet_id;
        return -1L;
    }
    LOG(INFO) << "make snapshot success. status: " << res
              << ", signature: " << tablet_id << ". path: " << *snapshot_path;
    return 0L;
}
SnapshotAction::~SnapshotAction() {
    // `delete` on a null pointer is a no-op, so no explicit check is needed.
    delete _command_executor;
}
} // end namespace palo
| lingbin/palo | be/src/http/action/snapshot_action.cpp | C++ | apache-2.0 | 4,246 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.